From 7c229742d39b667b5efa886bd92f486c66f94219 Mon Sep 17 00:00:00 2001 From: Petenerd Date: Fri, 4 Mar 2022 12:24:23 -0500 Subject: [PATCH 001/785] Update install.mdx missing a quote escape --- website/content/docs/ecs/manual/install.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/ecs/manual/install.mdx b/website/content/docs/ecs/manual/install.mdx index 88403851e..1f6f5239f 100644 --- a/website/content/docs/ecs/manual/install.mdx +++ b/website/content/docs/ecs/manual/install.mdx @@ -229,7 +229,7 @@ Each task must include a Consul client container in order for the task to join y ], "entryPoint": ["/bin/sh", "-ec"], "command": [ - "cp /bin/consul /bin/consul-inject/consul\n\nECS_IPV4=$(curl -s $ECS_CONTAINER_METADATA_URI_V4 | jq -r '.Networks[0].IPv4Addresses[0]')\n\n\ncat << EOF > /consul/agent-defaults.hcl\naddresses = {\n dns = \"127.0.0.1\"\n grpc = \"127.0.0.1\"\n http = \"127.0.0.1\"\n}\nadvertise_addr = \"$ECS_IPV4\"\nadvertise_reconnect_timeout = \"15m\"\nclient_addr = \"0.0.0.0\"\ndatacenter = \"dc1\"\nenable_central_service_config = true\nleave_on_terminate = true\nports {\n grpc = 8502\n}\nretry_join = [\n \"",\n]\ntelemetry {\n disable_compat_1.9 = true\n}\n\nEOF\n\ncat << EOF > /consul/agent-extra.hcl\naddresses = {\n dns = \"0.0.0.0\"\n}\nlog_level = \"debug\"\n\nEOF\n\nexec consul agent \\\n -data-dir /consul/data \\\n -config-file /consul/agent-defaults.hcl \\\n -config-file /consul/agent-extra.hcl\n" + "cp /bin/consul /bin/consul-inject/consul\n\nECS_IPV4=$(curl -s $ECS_CONTAINER_METADATA_URI_V4 | jq -r '.Networks[0].IPv4Addresses[0]')\n\n\ncat << EOF > /consul/agent-defaults.hcl\naddresses = {\n dns = \"127.0.0.1\"\n grpc = \"127.0.0.1\"\n http = \"127.0.0.1\"\n}\nadvertise_addr = \"$ECS_IPV4\"\nadvertise_reconnect_timeout = \"15m\"\nclient_addr = \"0.0.0.0\"\ndatacenter = \"dc1\"\nenable_central_service_config = true\nleave_on_terminate = true\nports {\n grpc = 8502\n}\nretry_join = [\n \"\",\n]\ntelemetry {\n disable_compat_1.9 = true\n}\n\nEOF\n\ncat << EOF > /consul/agent-extra.hcl\naddresses = {\n dns = \"0.0.0.0\"\n}\nlog_level = \"debug\"\n\nEOF\n\nexec consul agent \\\n -data-dir /consul/data \\\n -config-file /consul/agent-defaults.hcl \\\n -config-file /consul/agent-extra.hcl\n" ] } ] From 5cb24b9bf84c5b68d1d01c6ddf6b5fbf3ecba426 Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Tue, 22 Mar 2022 16:30:00 -0400 Subject: [PATCH 002/785] split `pbcommon` to `pbcommon` and `pbcommongogo` (#12587) * mogify needed pbcommon structs * mogify needed pbconnect structs * fix compilation errors and make config_translate_test pass * add missing file * remove redundant oss func declaration * fix EnterpriseMeta to copy the right data for enterprise * rename pbcommon package to pbcommongogo * regenerate proto and mog files * add missing mog files * add pbcommon package * pbcommon no mog * fix enterprise meta code generation * fix enterprise meta code generation (pbcommongogo) * fix mog generation for gogo * use `protoc-go-inject-tag` to inject tags * rename proto package * pbcommon no mog * use `protoc-go-inject-tag` to inject tags * add non gogo proto to make file * fix proto get --- GNUmakefile | 5 +- agent/auto-config/config_translate_test.go | 2 +- agent/connect/testing_ca.go | 1 + .../services/subscribe/subscribe_test.go | 28 +- agent/http.go | 4 +- agent/rpcclient/health/view_test.go | 10 +- agent/structs/protobuf_compat.go | 62 +- agent/structs/structs.go | 43 + agent/submatview/store_test.go | 12 +- 
agent/submatview/streaming_test.go | 8 +- build-support/scripts/proto-gen-entry.sh | 11 + build-support/scripts/proto-gen-no-gogo.sh | 142 ++ proto/pbcommon/common.gen.go | 56 + proto/pbcommon/common.go | 187 +- proto/pbcommon/common.pb.go | 2017 +++------------- proto/pbcommon/common.proto | 49 +- proto/pbcommon/common_oss.go | 14 +- proto/pbcommongogo/common.gen.go | 70 + proto/pbcommongogo/common.go | 303 +++ proto/pbcommongogo/common.pb.binary.go | 78 + proto/pbcommongogo/common.pb.go | 2036 +++++++++++++++++ proto/pbcommongogo/common.proto | 182 ++ proto/pbcommongogo/common_oss.go | 25 + proto/pbconnect/connect.gen.go | 116 + proto/pbconnect/connect.go | 229 +- proto/pbconnect/connect.pb.go | 165 +- proto/pbconnect/connect.proto | 41 +- proto/pbservice/convert.go | 8 +- proto/pbservice/convert_oss.go | 8 +- proto/pbservice/healthcheck.pb.go | 146 +- proto/pbservice/healthcheck.proto | 6 +- proto/pbservice/ids_test.go | 6 +- proto/pbservice/node.pb.go | 104 +- proto/pbservice/node.proto | 8 +- proto/pbservice/service.pb.go | 158 +- proto/pbservice/service.proto | 4 +- 36 files changed, 3879 insertions(+), 2465 deletions(-) create mode 100644 build-support/scripts/proto-gen-entry.sh create mode 100755 build-support/scripts/proto-gen-no-gogo.sh create mode 100644 proto/pbcommon/common.gen.go create mode 100644 proto/pbcommongogo/common.gen.go create mode 100644 proto/pbcommongogo/common.go create mode 100644 proto/pbcommongogo/common.pb.binary.go create mode 100644 proto/pbcommongogo/common.pb.go create mode 100644 proto/pbcommongogo/common.proto create mode 100644 proto/pbcommongogo/common_oss.go create mode 100644 proto/pbconnect/connect.gen.go diff --git a/GNUmakefile b/GNUmakefile index 7737f978d..578e826c5 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -12,7 +12,8 @@ GOTOOLS = \ github.com/hashicorp/protoc-gen-go-binary@master \ github.com/vektra/mockery/cmd/mockery@master \ github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ - github.com/hashicorp/lint-consul-retry@master + github.com/hashicorp/lint-consul-retry@master \ + github.com/favadi/protoc-go-inject-tag@v1.3.0 GOTAGS ?= GOPATH=$(shell go env GOPATH) @@ -346,7 +347,7 @@ proto: $(PROTOGOFILES) $(PROTOGOBINFILES) %.pb.go %.pb.binary.go: %.proto - @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc --import-replace "$<" + @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen-entry.sh --grpc --import-replace "$<" .PHONY: module-versions # Print a list of modules which can be updated. 
diff --git a/agent/auto-config/config_translate_test.go b/agent/auto-config/config_translate_test.go index c60b84a9c..ed605672d 100644 --- a/agent/auto-config/config_translate_test.go +++ b/agent/auto-config/config_translate_test.go @@ -38,7 +38,7 @@ func mustTranslateCARootsToProtobuf(t *testing.T, in *structs.IndexedCARoots) *p } func mustTranslateIssuedCertToProtobuf(t *testing.T, in *structs.IssuedCert) *pbconnect.IssuedCert { - out, err := pbconnect.NewIssuedCertFromStructs(in) + var out, err = pbconnect.NewIssuedCertFromStructs(in) require.NoError(t, err) return out } diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index 1bbfdc18c..16ffb6536 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -118,6 +118,7 @@ func testCA(t testing.T, xc *structs.CARoot, keyType string, keyBits int, ttl ti result.NotAfter = template.NotAfter.UTC() result.PrivateKeyType = keyType result.PrivateKeyBits = keyBits + result.IntermediateCerts = []string{} // If there is a prior CA to cross-sign with, then we need to create that // and set it as the signing cert. diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index b11438d7e..a084b6f55 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -24,7 +24,7 @@ import ( grpc "github.com/hashicorp/consul/agent/grpc/private" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" @@ -122,7 +122,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { streamHandle, err := streamClient.Subscribe(ctx, &pbsubscribe.SubscribeRequest{ Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis", - Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, }) require.NoError(t, err) @@ -159,7 +159,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Expose: pbservice.ExposeConfig{}, }, RaftIndex: raftIndex(ids, "reg2", "reg2"), - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, }, }, }, @@ -190,7 +190,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Expose: pbservice.ExposeConfig{}, }, RaftIndex: raftIndex(ids, "reg3", "reg3"), - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, }, }, }, @@ -240,7 +240,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Expose: pbservice.ExposeConfig{}, }, RaftIndex: raftIndex(ids, "reg3", "reg3"), - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, }, Checks: []*pbservice.HealthCheck{ { @@ -251,7 +251,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { ServiceID: "redis1", ServiceName: "redis", RaftIndex: raftIndex(ids, "update", "update"), - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, }, }, }, @@ -395,8 +395,8 @@ func newCounter() *counter { return &counter{labels: make(map[string]uint64)} } -func raftIndex(ids *counter, created, modified string) pbcommon.RaftIndex { - return pbcommon.RaftIndex{ +func raftIndex(ids *counter, created, modified 
string) pbcommongogo.RaftIndex { + return pbcommongogo.RaftIndex{ CreateIndex: ids.For(created), ModifyIndex: ids.For(modified), } @@ -475,7 +475,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis", Datacenter: "dc2", - Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, }) require.NoError(t, err) go recvEvents(chEvents, streamHandle) @@ -511,7 +511,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { MeshGateway: pbservice.MeshGatewayConfig{}, Expose: pbservice.ExposeConfig{}, }, - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, RaftIndex: raftIndex(ids, "reg2", "reg2"), }, }, @@ -542,7 +542,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { MeshGateway: pbservice.MeshGatewayConfig{}, Expose: pbservice.ExposeConfig{}, }, - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, RaftIndex: raftIndex(ids, "reg3", "reg3"), }, }, @@ -593,7 +593,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { MeshGateway: pbservice.MeshGatewayConfig{}, Expose: pbservice.ExposeConfig{}, }, - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, }, Checks: []*pbservice.HealthCheck{ { @@ -604,7 +604,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { ServiceID: "redis1", ServiceName: "redis", RaftIndex: raftIndex(ids, "update", "update"), - EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, }, }, }, @@ -720,7 +720,7 @@ node "node1" { Topic: pbsubscribe.Topic_ServiceHealth, Key: "foo", Token: token, - Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, }) require.NoError(t, err) diff --git a/agent/http.go b/agent/http.go index e899d2420..90885bc3f 100644 --- a/agent/http.go +++ b/agent/http.go @@ -31,7 +31,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" ) var HTTPSummaries = []prometheus.SummaryDefinition{ @@ -981,7 +981,7 @@ func (s *HTTPHandlers) parseConsistency(resp http.ResponseWriter, req *http.Requ } // parseConsistencyReadRequest is used to parse the ?consistent query param. 
-func parseConsistencyReadRequest(resp http.ResponseWriter, req *http.Request, b *pbcommon.ReadRequest) { +func parseConsistencyReadRequest(resp http.ResponseWriter, req *http.Request, b *pbcommongogo.ReadRequest) { query := req.URL.Query() if _, ok := query["consistent"]; ok { b.RequireConsistent = true diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index 841669858..bc5795c05 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/submatview" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" @@ -74,7 +74,7 @@ func TestHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T) { t.Skip("too slow for testing.Short") } - namespace := getNamespace(pbcommon.DefaultEnterpriseMeta.Namespace) + namespace := getNamespace(pbcommongogo.DefaultEnterpriseMeta.Namespace) streamClient := newStreamClient(validateNamespace(namespace)) ctx, cancel := context.WithCancel(context.Background()) @@ -572,7 +572,7 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub Node: node, Address: addr, Datacenter: "dc1", - RaftIndex: pbcommon.RaftIndex{ + RaftIndex: pbcommongogo.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -581,7 +581,7 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub ID: svc, Service: svc, Port: 8080, - RaftIndex: pbcommon.RaftIndex{ + RaftIndex: pbcommongogo.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -612,7 +612,7 @@ func newEventServiceHealthDeregister(index uint64, nodeNum int, svc string) *pbs Passing: 1, Warning: 1, }, - RaftIndex: pbcommon.RaftIndex{ + RaftIndex: pbcommongogo.RaftIndex{ // The original insertion index since a delete doesn't update // this. This magic value came from state store tests where we // setup at index 10 and then mutate at index 100. 
It can be diff --git a/agent/structs/protobuf_compat.go b/agent/structs/protobuf_compat.go index 1517b03cf..93358e8e3 100644 --- a/agent/structs/protobuf_compat.go +++ b/agent/structs/protobuf_compat.go @@ -5,7 +5,7 @@ import ( ) // QueryOptionsCompat is the interface that both the structs.QueryOptions -// and the proto/pbcommon.QueryOptions structs need to implement so that they +// and the proto/pbcommongogo.QueryOptions structs need to implement so that they // can be operated on interchangeably type QueryOptionsCompat interface { GetToken() string @@ -33,7 +33,7 @@ type QueryOptionsCompat interface { } // QueryMetaCompat is the interface that both the structs.QueryMeta -// and the proto/pbcommon.QueryMeta structs need to implement so that they +// and the proto/pbcommongogo.QueryMeta structs need to implement so that they // can be operated on interchangeably type QueryMetaCompat interface { GetLastContact() (time.Duration, error) @@ -50,7 +50,7 @@ type QueryMetaCompat interface { } // GetToken helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetToken() string { if m != nil { return m.Token @@ -59,7 +59,7 @@ func (m *QueryOptions) GetToken() string { } // GetMinQueryIndex helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMinQueryIndex() uint64 { if m != nil { return m.MinQueryIndex @@ -68,7 +68,7 @@ func (m *QueryOptions) GetMinQueryIndex() uint64 { } // GetMaxQueryTime helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMaxQueryTime() (time.Duration, error) { if m != nil { return m.MaxQueryTime, nil @@ -77,7 +77,7 @@ func (m *QueryOptions) GetMaxQueryTime() (time.Duration, error) { } // GetAllowStale helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetAllowStale() bool { if m != nil { return m.AllowStale @@ -86,7 +86,7 @@ func (m *QueryOptions) GetAllowStale() bool { } // GetRequireConsistent helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetRequireConsistent() bool { if m != nil { return m.RequireConsistent @@ -95,7 +95,7 @@ func (m *QueryOptions) GetRequireConsistent() bool { } // GetUseCache helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetUseCache() bool { if m != nil { return m.UseCache @@ -104,7 +104,7 @@ func (m *QueryOptions) GetUseCache() bool { } // GetMaxStaleDuration helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { if m != nil { return m.MaxStaleDuration, nil @@ -113,7 +113,7 @@ func (m *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { } // GetMaxAge helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMaxAge() (time.Duration, error) { if m != nil { return m.MaxAge, nil @@ -122,7 +122,7 @@ func (m 
*QueryOptions) GetMaxAge() (time.Duration, error) { } // GetMustRevalidate helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMustRevalidate() bool { if m != nil { return m.MustRevalidate @@ -131,7 +131,7 @@ func (m *QueryOptions) GetMustRevalidate() bool { } // GetStaleIfError helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetStaleIfError() (time.Duration, error) { if m != nil { return m.StaleIfError, nil @@ -140,7 +140,7 @@ func (m *QueryOptions) GetStaleIfError() (time.Duration, error) { } // GetFilter helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetFilter() string { if m != nil { return m.Filter @@ -149,67 +149,67 @@ func (m *QueryOptions) GetFilter() string { } // SetToken is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetToken(token string) { q.Token = token } // SetMinQueryIndex is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMinQueryIndex(minQueryIndex uint64) { q.MinQueryIndex = minQueryIndex } // SetMaxQueryTime is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMaxQueryTime(maxQueryTime time.Duration) { q.MaxQueryTime = maxQueryTime } // SetAllowStale is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetAllowStale(allowStale bool) { q.AllowStale = allowStale } // SetRequireConsistent is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetRequireConsistent(requireConsistent bool) { q.RequireConsistent = requireConsistent } // SetUseCache is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetUseCache(useCache bool) { q.UseCache = useCache } // SetMaxStaleDuration is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMaxStaleDuration(maxStaleDuration time.Duration) { q.MaxStaleDuration = maxStaleDuration } // SetMaxAge is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMaxAge(maxAge time.Duration) { q.MaxAge = maxAge } // SetMustRevalidate is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMustRevalidate(mustRevalidate bool) { q.MustRevalidate = mustRevalidate } // SetStaleIfError is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q 
*QueryOptions) SetStaleIfError(staleIfError time.Duration) { q.StaleIfError = staleIfError } // SetFilter is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetFilter(filter string) { q.Filter = filter } @@ -223,7 +223,7 @@ func (m *QueryMeta) GetIndex() uint64 { } // GetLastContact helps implement the QueryMetaCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryMeta) GetLastContact() (time.Duration, error) { if m != nil { return m.LastContact, nil @@ -232,7 +232,7 @@ func (m *QueryMeta) GetLastContact() (time.Duration, error) { } // GetKnownLeader helps implement the QueryMetaCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryMeta) GetKnownLeader() bool { if m != nil { return m.KnownLeader @@ -241,7 +241,7 @@ func (m *QueryMeta) GetKnownLeader() bool { } // GetConsistencyLevel helps implement the QueryMetaCompat interface -// Copied from proto/pbcommon/common.pb.go +// Copied from proto/pbcommongogo/common.pb.go func (m *QueryMeta) GetConsistencyLevel() string { if m != nil { return m.ConsistencyLevel @@ -250,25 +250,25 @@ func (m *QueryMeta) GetConsistencyLevel() string { } // SetLastContact is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetLastContact(lastContact time.Duration) { q.LastContact = lastContact } // SetKnownLeader is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetKnownLeader(knownLeader bool) { q.KnownLeader = knownLeader } // SetIndex is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetIndex(index uint64) { q.Index = index } // SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommon/common.go +// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { q.ConsistencyLevel = consistencyLevel } diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 9a6d69f47..9efd02c9b 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -6,6 +6,8 @@ import ( "crypto/sha256" "encoding/json" "fmt" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" "math/rand" "reflect" "regexp" @@ -19,6 +21,8 @@ import ( "github.com/hashicorp/serf/coordinate" "github.com/mitchellh/hashstructure" + gtype "github.com/gogo/protobuf/types" + ptypes "github.com/golang/protobuf/ptypes" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/api" @@ -2677,3 +2681,42 @@ func (m MessageType) String() string { return "Unknown(" + strconv.Itoa(int(m)) + ")" } + +func DurationToProtoGogo(d time.Duration) gtype.Duration { + return *gtype.DurationProto(d) +} + +func DurationFromProtoGogo(d gtype.Duration) time.Duration { + duration, _ := gtype.DurationFromProto(&d) + return duration +} + +func TimeFromProtoGogo(s *gtype.Timestamp) time.Time { + time, _ := gtype.TimestampFromProto(s) + return time +} + +func TimeToProtoGogo(s time.Time) *gtype.Timestamp { + proto, _ := 
gtype.TimestampProto(s) + return proto +} + +func DurationToProto(d time.Duration) *duration.Duration { + return ptypes.DurationProto(d) +} + +func DurationFromProto(d *duration.Duration) time.Duration { + ret, _ := ptypes.Duration(d) + return ret + +} + +func TimeFromProto(s *timestamp.Timestamp) time.Time { + ret, _ := ptypes.Timestamp(s) + return ret +} + +func TimeToProto(s time.Time) *timestamp.Timestamp { + ret, _ := ptypes.TimestampProto(s) + return ret +} diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index 93b04d1e8..2055cf911 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/lib/ttlcache" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -25,7 +25,7 @@ func TestStore_Get(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( newEndOfSnapshotEvent(2), @@ -232,7 +232,7 @@ func (r *fakeRequest) NewMaterializer() (*Materializer, error) { Token: "abcd", Datacenter: "dc1", Index: index, - Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, } return req }, @@ -292,7 +292,7 @@ func TestStore_Notify(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( newEndOfSnapshotEvent(2), @@ -361,7 +361,7 @@ func TestStore_Notify_ManyRequests(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) @@ -473,7 +473,7 @@ func TestStore_Run_ExpiresEntries(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) diff --git a/agent/submatview/streaming_test.go b/agent/submatview/streaming_test.go index 80fec094f..be484cac6 100644 --- a/agent/submatview/streaming_test.go +++ b/agent/submatview/streaming_test.go @@ -7,7 +7,7 @@ import ( "google.golang.org/grpc" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" @@ -120,7 +120,7 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub Node: node, Address: addr, Datacenter: "dc1", - RaftIndex: pbcommon.RaftIndex{ + RaftIndex: pbcommongogo.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -129,7 +129,7 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub ID: svc, Service: svc, Port: 8080, - RaftIndex: pbcommon.RaftIndex{ + RaftIndex: pbcommongogo.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -160,7 +160,7 @@ func 
newEventServiceHealthDeregister(index uint64, nodeNum int, svc string) *pbs Passing: 1, Warning: 1, }, - RaftIndex: pbcommon.RaftIndex{ + RaftIndex: pbcommongogo.RaftIndex{ // The original insertion index since a delete doesn't update // this. This magic value came from state store tests where we // setup at index 10 and then mutate at index 100. It can be diff --git a/build-support/scripts/proto-gen-entry.sh b/build-support/scripts/proto-gen-entry.sh new file mode 100644 index 000000000..c02df1b5a --- /dev/null +++ b/build-support/scripts/proto-gen-entry.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +FILENAME=$3 +echo $PWD +if [[ "$FILENAME" =~ .*pbcommon/.* ]]; then + echo "$FILENAME no gogo" + ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 +else + echo "$FILENAME gogo" + ./build-support/scripts/proto-gen.sh $1 $2 $3 +fi \ No newline at end of file diff --git a/build-support/scripts/proto-gen-no-gogo.sh b/build-support/scripts/proto-gen-no-gogo.sh new file mode 100755 index 000000000..c736d49de --- /dev/null +++ b/build-support/scripts/proto-gen-no-gogo.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash + +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. > /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Description: + Generate the Go files from protobuf definitions. In addition to + running the protoc generator it will also fixup build tags in the + generated code. + +Options: + --import-replace Replace imports of google types with those from the protobuf repo. + --grpc Enable the gRPC plugin + -h | --help Print this help text. +EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + local -i grpc=0 + local -i imp_replace=0 + local proto_path= + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + --grpc ) + grpc=1 + shift + ;; + --import-replace ) + imp_replace=1 + shift + ;; + * ) + proto_path="$1" + shift + ;; + esac + done + + if test -z "${proto_path}" + then + err_usage "ERROR: No proto file specified" + return 1 + fi + + local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) + local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") + + + local golang_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp" + golang_proto_imp_replace="${golang_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration" + + local proto_go_path=${proto_path%%.proto}.pb.go + local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go + + local go_proto_out="paths=source_relative" + if is_set "${grpc}" + then + go_proto_out="${go_proto_out},plugins=grpc" + fi + + if is_set "${imp_replace}" + then + go_proto_out="${go_proto_out},${golang_proto_imp_replace}" + fi + + if test -n "${go_proto_out}" + then + go_proto_out="${go_proto_out}:" + fi + + # How we run protoc probably needs some documentation. 
+ # + # This is the path to where + # -I="${golang_proto_path}/protobuf" \ + local -i ret=0 + status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} (NO GOGO)" + debug_run protoc \ + -I="${golang_proto_path}" \ + -I="${golang_proto_mod_path}" \ + -I="${SOURCE_DIR}" \ + --go_out="${go_proto_out}${SOURCE_DIR}" \ + --go-binary_out="${SOURCE_DIR}" \ + "${proto_path}" + debug_run protoc-go-inject-tag \ + -input="${proto_go_path}" + + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" + if test $? -ne 0 + then + err "Failed to generate outputs from ${proto_path}" + return 1 + fi + + BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') + if test -n "${BUILD_TAGS}" + then + echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" + cat "${proto_go_path}" >> "${proto_go_path}.new" + mv "${proto_go_path}.new" "${proto_go_path}" + + echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" + cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" + mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" + fi + + return 0 +} + +main "$@" +exit $? diff --git a/proto/pbcommon/common.gen.go b/proto/pbcommon/common.gen.go new file mode 100644 index 000000000..14aa7128b --- /dev/null +++ b/proto/pbcommon/common.gen.go @@ -0,0 +1,56 @@ +// Code generated by mog. DO NOT EDIT. + +package pbcommon + +import "github.com/hashicorp/consul/agent/structs" + +func QueryMetaToStructs(s *QueryMeta, t *structs.QueryMeta) { + if s == nil { + return + } + t.Index = s.Index + t.LastContact = structs.DurationFromProto(s.LastContact) + t.KnownLeader = s.KnownLeader + t.ConsistencyLevel = s.ConsistencyLevel + t.ResultsFilteredByACLs = s.ResultsFilteredByACLs +} +func QueryMetaFromStructs(t *structs.QueryMeta, s *QueryMeta) { + if s == nil { + return + } + s.Index = t.Index + s.LastContact = structs.DurationToProto(t.LastContact) + s.KnownLeader = t.KnownLeader + s.ConsistencyLevel = t.ConsistencyLevel + s.ResultsFilteredByACLs = t.ResultsFilteredByACLs +} +func QueryOptionsToStructs(s *QueryOptions, t *structs.QueryOptions) { + if s == nil { + return + } + t.Token = s.Token + t.MinQueryIndex = s.MinQueryIndex + t.MaxQueryTime = structs.DurationFromProto(s.MaxQueryTime) + t.AllowStale = s.AllowStale + t.RequireConsistent = s.RequireConsistent + t.UseCache = s.UseCache + t.MaxStaleDuration = structs.DurationFromProto(s.MaxStaleDuration) + t.MaxAge = structs.DurationFromProto(s.MaxAge) + t.MustRevalidate = s.MustRevalidate + t.Filter = s.Filter +} +func QueryOptionsFromStructs(t *structs.QueryOptions, s *QueryOptions) { + if s == nil { + return + } + s.Token = t.Token + s.MinQueryIndex = t.MinQueryIndex + s.MaxQueryTime = structs.DurationToProto(t.MaxQueryTime) + s.AllowStale = t.AllowStale + s.RequireConsistent = t.RequireConsistent + s.UseCache = t.UseCache + s.MaxStaleDuration = structs.DurationToProto(t.MaxStaleDuration) + s.MaxAge = structs.DurationToProto(t.MaxAge) + s.MustRevalidate = t.MustRevalidate + s.Filter = t.Filter +} diff --git a/proto/pbcommon/common.go b/proto/pbcommon/common.go index 1925aedda..fbff7e4ae 100644 --- a/proto/pbcommon/common.go +++ b/proto/pbcommon/common.go @@ -4,7 +4,6 @@ import ( "time" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbutil" ) // IsRead is always true for QueryOption @@ -37,7 +36,7 @@ func (q *QueryOptions) 
SetMinQueryIndex(minQueryIndex uint64) { // SetMaxQueryTime is needed to implement the structs.QueryOptionsCompat interface func (q *QueryOptions) SetMaxQueryTime(maxQueryTime time.Duration) { - q.MaxQueryTime = *pbutil.DurationToProto(maxQueryTime) + q.MaxQueryTime = structs.DurationToProto(maxQueryTime) } // SetAllowStale is needed to implement the structs.QueryOptionsCompat interface @@ -57,12 +56,12 @@ func (q *QueryOptions) SetUseCache(useCache bool) { // SetMaxStaleDuration is needed to implement the structs.QueryOptionsCompat interface func (q *QueryOptions) SetMaxStaleDuration(maxStaleDuration time.Duration) { - q.MaxStaleDuration = *pbutil.DurationToProto(maxStaleDuration) + q.MaxStaleDuration = structs.DurationToProto(maxStaleDuration) } // SetMaxAge is needed to implement the structs.QueryOptionsCompat interface func (q *QueryOptions) SetMaxAge(maxAge time.Duration) { - q.MaxAge = *pbutil.DurationToProto(maxAge) + q.MaxAge = structs.DurationToProto(maxAge) } // SetMustRevalidate is needed to implement the structs.QueryOptionsCompat interface @@ -72,15 +71,11 @@ func (q *QueryOptions) SetMustRevalidate(mustRevalidate bool) { // SetStaleIfError is needed to implement the structs.QueryOptionsCompat interface func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) { - q.StaleIfError = *pbutil.DurationToProto(staleIfError) + q.StaleIfError = structs.DurationToProto(staleIfError) } func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - maxTime, err := pbutil.DurationFromProto(&q.MaxQueryTime) - if err != nil { - return false, err - } - + maxTime := structs.DurationFromProto(q.MaxQueryTime) o := structs.QueryOptions{ MaxQueryTime: maxTime, MinQueryIndex: q.MinQueryIndex, @@ -93,97 +88,9 @@ func (q *QueryOptions) SetFilter(filter string) { q.Filter = filter } -// GetMaxQueryTime is required to implement blockingQueryOptions -func (q *QueryOptions) GetMaxQueryTime() (time.Duration, error) { - if q != nil { - return pbutil.DurationFromProto(&q.MaxQueryTime) - } - return 0, nil -} - -// GetMinQueryIndex is required to implement blockingQueryOptions -func (q *QueryOptions) GetMinQueryIndex() uint64 { - if q != nil { - return q.MinQueryIndex - } - return 0 -} - -// GetRequireConsistent is required to implement blockingQueryOptions -func (q *QueryOptions) GetRequireConsistent() bool { - if q != nil { - return q.RequireConsistent - } - return false -} - -// GetToken is required to implement blockingQueryOptions -func (q *QueryOptions) GetToken() string { - if q != nil { - return q.Token - } - return "" -} - -// GetAllowStale is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetAllowStale() bool { - if q != nil { - return q.AllowStale - } - return false -} - -// GetFilter is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetFilter() string { - if q != nil { - return q.Filter - } - return "" -} - -// GetMaxAge is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetMaxAge() (time.Duration, error) { - if q != nil { - return pbutil.DurationFromProto(&q.MaxAge) - } - return 0, nil -} - -// GetMaxStaleDuration is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { - if q != nil { - return pbutil.DurationFromProto(&q.MaxStaleDuration) - } - return 0, nil -} - -// GetMustRevalidate is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetMustRevalidate() 
bool { - if q != nil { - return q.MustRevalidate - } - return false -} - -// GetStaleIfError is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetStaleIfError() (time.Duration, error) { - if q != nil { - return pbutil.DurationFromProto(&q.StaleIfError) - } - return 0, nil -} - -// GetUseCache is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetUseCache() bool { - if q != nil { - return q.UseCache - } - return false -} - // SetLastContact is needed to implement the structs.QueryMetaCompat interface func (q *QueryMeta) SetLastContact(lastContact time.Duration) { - q.LastContact = *pbutil.DurationToProto(lastContact) + q.LastContact = structs.DurationToProto(lastContact) } // SetKnownLeader is needed to implement the structs.QueryMetaCompat interface @@ -210,46 +117,6 @@ func (q *QueryMeta) SetResultsFilteredByACLs(v bool) { q.ResultsFilteredByACLs = v } -// GetIndex is required to implement blockingQueryResponseMeta -func (q *QueryMeta) GetIndex() uint64 { - if q != nil { - return q.Index - } - return 0 -} - -// GetConsistencyLevel is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetConsistencyLevel() string { - if q != nil { - return q.ConsistencyLevel - } - return "" -} - -// GetKnownLeader is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetKnownLeader() bool { - if q != nil { - return q.KnownLeader - } - return false -} - -// GetLastContact is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetLastContact() (time.Duration, error) { - if q != nil { - return pbutil.DurationFromProto(&q.LastContact) - } - return 0, nil -} - -// GetResultsFilteredByACLs is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetResultsFilteredByACLs() bool { - if q != nil { - return q.ResultsFilteredByACLs - } - return false -} - // WriteRequest only applies to writes, always false // // IsRead implements structs.RPCInfo @@ -309,45 +176,3 @@ func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, func (td TargetDatacenter) RequestDatacenter() string { return td.Datacenter } - -func QueryMetaToStructs(s *QueryMeta) (structs.QueryMeta, error) { - var t structs.QueryMeta - if s == nil { - return t, nil - } - t.Index = s.Index - lastContact, err := pbutil.DurationFromProto(&s.LastContact) - if err != nil { - return t, err - } - t.LastContact = lastContact - t.KnownLeader = s.KnownLeader - t.ConsistencyLevel = s.ConsistencyLevel - return t, nil -} - -func NewQueryMetaFromStructs(s structs.QueryMeta) (*QueryMeta, error) { - var t QueryMeta - t.Index = s.Index - t.LastContact = *pbutil.DurationToProto(s.LastContact) - t.KnownLeader = s.KnownLeader - t.ConsistencyLevel = s.ConsistencyLevel - return &t, nil -} - -func RaftIndexToStructs(s *RaftIndex) structs.RaftIndex { - if s == nil { - return structs.RaftIndex{} - } - return structs.RaftIndex{ - CreateIndex: s.CreateIndex, - ModifyIndex: s.ModifyIndex, - } -} - -func NewRaftIndexFromStructs(s structs.RaftIndex) *RaftIndex { - return &RaftIndex{ - CreateIndex: s.CreateIndex, - ModifyIndex: s.ModifyIndex, - } -} diff --git a/proto/pbcommon/common.pb.go b/proto/pbcommon/common.pb.go index 11441c577..21f3b1dee 100644 --- a/proto/pbcommon/common.pb.go +++ b/proto/pbcommon/common.pb.go @@ -1,16 +1,13 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. 
// source: proto/pbcommon/common.proto package pbcommon import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" - io "io" + duration "github.com/golang/protobuf/ptypes/duration" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -27,8 +24,13 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // RaftIndex is used to track the index used while creating // or modifying a given struct type. type RaftIndex struct { + // @gotags: bexpr:"-" CreateIndex uint64 `protobuf:"varint,1,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"` - ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` + // @gotags: bexpr:"-" + ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *RaftIndex) Reset() { *m = RaftIndex{} } @@ -37,26 +39,18 @@ func (*RaftIndex) ProtoMessage() {} func (*RaftIndex) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{0} } + func (m *RaftIndex) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_RaftIndex.Unmarshal(m, b) } func (m *RaftIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RaftIndex.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_RaftIndex.Marshal(b, m, deterministic) } func (m *RaftIndex) XXX_Merge(src proto.Message) { xxx_messageInfo_RaftIndex.Merge(m, src) } func (m *RaftIndex) XXX_Size() int { - return m.Size() + return xxx_messageInfo_RaftIndex.Size(m) } func (m *RaftIndex) XXX_DiscardUnknown() { xxx_messageInfo_RaftIndex.DiscardUnknown(m) @@ -64,10 +58,27 @@ func (m *RaftIndex) XXX_DiscardUnknown() { var xxx_messageInfo_RaftIndex proto.InternalMessageInfo +func (m *RaftIndex) GetCreateIndex() uint64 { + if m != nil { + return m.CreateIndex + } + return 0 +} + +func (m *RaftIndex) GetModifyIndex() uint64 { + if m != nil { + return m.ModifyIndex + } + return 0 +} + // TargetDatacenter is intended to be used within other messages used for RPC routing // amongst the various Consul datacenters type TargetDatacenter struct { - Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TargetDatacenter) Reset() { *m = TargetDatacenter{} } @@ -76,26 +87,18 @@ func (*TargetDatacenter) ProtoMessage() {} func (*TargetDatacenter) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{1} } + func (m *TargetDatacenter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_TargetDatacenter.Unmarshal(m, b) } func (m *TargetDatacenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TargetDatacenter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return 
xxx_messageInfo_TargetDatacenter.Marshal(b, m, deterministic) } func (m *TargetDatacenter) XXX_Merge(src proto.Message) { xxx_messageInfo_TargetDatacenter.Merge(m, src) } func (m *TargetDatacenter) XXX_Size() int { - return m.Size() + return xxx_messageInfo_TargetDatacenter.Size(m) } func (m *TargetDatacenter) XXX_DiscardUnknown() { xxx_messageInfo_TargetDatacenter.DiscardUnknown(m) @@ -103,10 +106,20 @@ func (m *TargetDatacenter) XXX_DiscardUnknown() { var xxx_messageInfo_TargetDatacenter proto.InternalMessageInfo +func (m *TargetDatacenter) GetDatacenter() string { + if m != nil { + return m.Datacenter + } + return "" +} + type WriteRequest struct { // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } @@ -115,26 +128,18 @@ func (*WriteRequest) ProtoMessage() {} func (*WriteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{2} } + func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_WriteRequest.Unmarshal(m, b) } func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) } func (m *WriteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_WriteRequest.Merge(m, src) } func (m *WriteRequest) XXX_Size() int { - return m.Size() + return xxx_messageInfo_WriteRequest.Size(m) } func (m *WriteRequest) XXX_DiscardUnknown() { xxx_messageInfo_WriteRequest.DiscardUnknown(m) @@ -142,6 +147,13 @@ func (m *WriteRequest) XXX_DiscardUnknown() { var xxx_messageInfo_WriteRequest proto.InternalMessageInfo +func (m *WriteRequest) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + // ReadRequest is a type that may be embedded into any requests for read // operations. // It is a replacement for QueryOptions now that we no longer need any of those @@ -153,7 +165,10 @@ type ReadRequest struct { // token is assumed for backwards compatibility. Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` // RequireConsistent indicates that the request must be sent to the leader. 
- RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` + RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ReadRequest) Reset() { *m = ReadRequest{} } @@ -162,26 +177,18 @@ func (*ReadRequest) ProtoMessage() {} func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{3} } + func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ReadRequest.Unmarshal(m, b) } func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) } func (m *ReadRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadRequest.Merge(m, src) } func (m *ReadRequest) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ReadRequest.Size(m) } func (m *ReadRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReadRequest.DiscardUnknown(m) @@ -189,7 +196,28 @@ func (m *ReadRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ReadRequest proto.InternalMessageInfo +func (m *ReadRequest) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *ReadRequest) GetRequireConsistent() bool { + if m != nil { + return m.RequireConsistent + } + return false +} + // QueryOptions is used to specify various flags for read queries +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryOptions +// output=common.gen.go +// name=Structs +// ignore-fields=StaleIfError,AllowNotModifiedResponse,state,sizeCache,unknownFields type QueryOptions struct { // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. @@ -198,7 +226,8 @@ type QueryOptions struct { // with MaxQueryTime. MinQueryIndex uint64 `protobuf:"varint,2,opt,name=MinQueryIndex,proto3" json:"MinQueryIndex,omitempty"` // Provided with MinQueryIndex to wait for change. - MaxQueryTime types.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + MaxQueryTime *duration.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime,omitempty"` // If set, any follower can service the request. Results // may be arbitrarily stale. AllowStale bool `protobuf:"varint,4,opt,name=AllowStale,proto3" json:"AllowStale,omitempty"` @@ -216,7 +245,8 @@ type QueryOptions struct { // If set and AllowStale is true, will try first a stale // read, and then will perform a consistent read if stale // read is older than value. - MaxStaleDuration types.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + MaxStaleDuration *duration.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration,omitempty"` // MaxAge limits how old a cached value will be returned if UseCache is true. // If there is a cached response that is older than the MaxAge, it is treated // as a cache miss and a new fetch invoked. 
If the fetch fails, the error is @@ -224,7 +254,8 @@ type QueryOptions struct { // StaleIfError to a longer duration to change this behavior. It is ignored // if the endpoint supports background refresh caching. See // https://www.consul.io/api/index.html#agent-caching for more details. - MaxAge types.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + MaxAge *duration.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge,omitempty"` // MustRevalidate forces the agent to fetch a fresh version of a cached // resource or at least validate that the cached version is still fresh. It is // implied by either max-age=0 or must-revalidate Cache-Control headers. It @@ -236,10 +267,13 @@ type QueryOptions struct { // UseCache is true and MaxAge is set to a lower, non-zero value. It is // ignored if the endpoint supports background refresh caching. See // https://www.consul.io/api/index.html#agent-caching for more details. - StaleIfError types.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError"` + StaleIfError *duration.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError,omitempty"` // Filter specifies the go-bexpr filter expression to be used for // filtering the data prior to returning a response - Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` + Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *QueryOptions) Reset() { *m = QueryOptions{} } @@ -248,26 +282,18 @@ func (*QueryOptions) ProtoMessage() {} func (*QueryOptions) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{4} } + func (m *QueryOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_QueryOptions.Unmarshal(m, b) } func (m *QueryOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_QueryOptions.Marshal(b, m, deterministic) } func (m *QueryOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryOptions.Merge(m, src) } func (m *QueryOptions) XXX_Size() int { - return m.Size() + return xxx_messageInfo_QueryOptions.Size(m) } func (m *QueryOptions) XXX_DiscardUnknown() { xxx_messageInfo_QueryOptions.DiscardUnknown(m) @@ -275,15 +301,100 @@ func (m *QueryOptions) XXX_DiscardUnknown() { var xxx_messageInfo_QueryOptions proto.InternalMessageInfo +func (m *QueryOptions) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *QueryOptions) GetMinQueryIndex() uint64 { + if m != nil { + return m.MinQueryIndex + } + return 0 +} + +func (m *QueryOptions) GetMaxQueryTime() *duration.Duration { + if m != nil { + return m.MaxQueryTime + } + return nil +} + +func (m *QueryOptions) GetAllowStale() bool { + if m != nil { + return m.AllowStale + } + return false +} + +func (m *QueryOptions) GetRequireConsistent() bool { + if m != nil { + return m.RequireConsistent + } + return false +} + +func (m *QueryOptions) GetUseCache() bool { + if m != nil { + return m.UseCache + } + return false +} + +func (m *QueryOptions) GetMaxStaleDuration() *duration.Duration { + if m != 
nil { + return m.MaxStaleDuration + } + return nil +} + +func (m *QueryOptions) GetMaxAge() *duration.Duration { + if m != nil { + return m.MaxAge + } + return nil +} + +func (m *QueryOptions) GetMustRevalidate() bool { + if m != nil { + return m.MustRevalidate + } + return false +} + +func (m *QueryOptions) GetStaleIfError() *duration.Duration { + if m != nil { + return m.StaleIfError + } + return nil +} + +func (m *QueryOptions) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + // QueryMeta allows a query response to include potentially // useful metadata about a query +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryMeta +// output=common.gen.go +// name=Structs +// ignore-fields=NotModified,Backend,state,sizeCache,unknownFields type QueryMeta struct { // This is the index associated with the read Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` // If AllowStale is used, this is time elapsed since // last contact between the follower and leader. This // can be used to gauge staleness. - LastContact types.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + LastContact *duration.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact,omitempty"` // Used to indicate if there is a known leader node KnownLeader bool `protobuf:"varint,3,opt,name=KnownLeader,proto3" json:"KnownLeader,omitempty"` // Consistencylevel returns the consistency used to serve the query @@ -293,7 +404,10 @@ type QueryMeta struct { // ResultsFilteredByACLs is true when some of the query's results were // filtered out by enforcing ACLs. It may be false because nothing was // removed, or because the endpoint does not yet support this flag. 
- ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` + ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *QueryMeta) Reset() { *m = QueryMeta{} } @@ -302,26 +416,18 @@ func (*QueryMeta) ProtoMessage() {} func (*QueryMeta) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{5} } + func (m *QueryMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_QueryMeta.Unmarshal(m, b) } func (m *QueryMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryMeta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_QueryMeta.Marshal(b, m, deterministic) } func (m *QueryMeta) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryMeta.Merge(m, src) } func (m *QueryMeta) XXX_Size() int { - return m.Size() + return xxx_messageInfo_QueryMeta.Size(m) } func (m *QueryMeta) XXX_DiscardUnknown() { xxx_messageInfo_QueryMeta.DiscardUnknown(m) @@ -329,13 +435,51 @@ func (m *QueryMeta) XXX_DiscardUnknown() { var xxx_messageInfo_QueryMeta proto.InternalMessageInfo +func (m *QueryMeta) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *QueryMeta) GetLastContact() *duration.Duration { + if m != nil { + return m.LastContact + } + return nil +} + +func (m *QueryMeta) GetKnownLeader() bool { + if m != nil { + return m.KnownLeader + } + return false +} + +func (m *QueryMeta) GetConsistencyLevel() string { + if m != nil { + return m.ConsistencyLevel + } + return "" +} + +func (m *QueryMeta) GetResultsFilteredByACLs() bool { + if m != nil { + return m.ResultsFilteredByACLs + } + return false +} + // EnterpriseMeta contains metadata that is only used by the Enterprise version // of Consul. type EnterpriseMeta struct { // Namespace in which the entity exists. Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` // Partition in which the entity exists. 
- Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *EnterpriseMeta) Reset() { *m = EnterpriseMeta{} } @@ -344,26 +488,18 @@ func (*EnterpriseMeta) ProtoMessage() {} func (*EnterpriseMeta) Descriptor() ([]byte, []int) { return fileDescriptor_a6f5ac44994d718c, []int{6} } + func (m *EnterpriseMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_EnterpriseMeta.Unmarshal(m, b) } func (m *EnterpriseMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EnterpriseMeta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_EnterpriseMeta.Marshal(b, m, deterministic) } func (m *EnterpriseMeta) XXX_Merge(src proto.Message) { xxx_messageInfo_EnterpriseMeta.Merge(m, src) } func (m *EnterpriseMeta) XXX_Size() int { - return m.Size() + return xxx_messageInfo_EnterpriseMeta.Size(m) } func (m *EnterpriseMeta) XXX_DiscardUnknown() { xxx_messageInfo_EnterpriseMeta.DiscardUnknown(m) @@ -371,6 +507,20 @@ func (m *EnterpriseMeta) XXX_DiscardUnknown() { var xxx_messageInfo_EnterpriseMeta proto.InternalMessageInfo +func (m *EnterpriseMeta) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *EnterpriseMeta) GetPartition() string { + if m != nil { + return m.Partition + } + return "" +} + func init() { proto.RegisterType((*RaftIndex)(nil), "common.RaftIndex") proto.RegisterType((*TargetDatacenter)(nil), "common.TargetDatacenter") @@ -381,1632 +531,45 @@ func init() { proto.RegisterType((*EnterpriseMeta)(nil), "common.EnterpriseMeta") } -func init() { proto.RegisterFile("proto/pbcommon/common.proto", fileDescriptor_a6f5ac44994d718c) } +func init() { + proto.RegisterFile("proto/pbcommon/common.proto", fileDescriptor_a6f5ac44994d718c) +} var fileDescriptor_a6f5ac44994d718c = []byte{ - // 639 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, - 0x10, 0x8e, 0xfb, 0xa7, 0xae, 0xbd, 0x69, 0xab, 0xfc, 0xab, 0x82, 0x4c, 0x41, 0x6e, 0x65, 0x55, - 0xa8, 0xaa, 0x20, 0x96, 0x0a, 0x12, 0x12, 0xb7, 0x24, 0x2d, 0x52, 0xdb, 0x18, 0xe8, 0x52, 0x84, - 0xc4, 0x6d, 0x63, 0x4f, 0x1c, 0x0b, 0xc7, 0x6b, 0x76, 0xd7, 0x6d, 0x72, 0xe7, 0x01, 0x38, 0xf2, - 0x48, 0x3d, 0xf6, 0xc8, 0xa9, 0x82, 0xe6, 0x0d, 0x10, 0x0f, 0x80, 0xbc, 0x4e, 0x5b, 0x97, 0xa6, - 0x28, 0xa7, 0xe4, 0xfb, 0xf6, 0x9b, 0xd9, 0x99, 0xf9, 0x66, 0x8d, 0x1e, 0xa6, 0x9c, 0x49, 0xe6, - 0xa6, 0x5d, 0x9f, 0x0d, 0x06, 0x2c, 0x71, 0x8b, 0x9f, 0x86, 0x62, 0xb1, 0x5e, 0xa0, 0x55, 0x3b, - 0x64, 0x2c, 0x8c, 0xc1, 0x55, 0x6c, 0x37, 0xeb, 0xb9, 0x41, 0xc6, 0xa9, 0x8c, 0x2e, 0x75, 0xab, - 0x2b, 0x21, 0x0b, 0x59, 0x91, 0x28, 0xff, 0x57, 0xb0, 0xce, 0x00, 0x99, 0x84, 0xf6, 0xe4, 0x5e, - 0x12, 0xc0, 0x10, 0xbb, 0xa8, 0xd6, 0xe6, 0x40, 0x25, 0x28, 0x68, 0x69, 0xeb, 0xda, 0x66, 0xb5, - 0xb5, 0xf4, 0xeb, 0x7c, 0xcd, 0xec, 0xc2, 0x30, 0xe5, 0x2f, 0x9d, 0xa7, 0x0e, 0x29, 0x2b, 0xf2, - 0x00, 0x8f, 0x05, 0x51, 0x6f, 0x54, 0x04, 0xcc, 0x4d, 0x0d, 0x28, 0x29, 0x9c, 0x6d, 0x54, 0x3f, - 0xa2, 0x3c, 0x04, 0xb9, 0x43, 0x25, 0xf5, 0x21, 0x91, 0xc0, 0xb1, 0x8d, 0xd0, 0x35, 0x52, 0x97, - 0x9a, 0xa4, 0xc4, 0x38, 
0x1b, 0x68, 0xf1, 0x03, 0x8f, 0x24, 0x10, 0xf8, 0x9c, 0x81, 0x90, 0x78, - 0x05, 0xcd, 0x1f, 0xb1, 0x4f, 0x90, 0x4c, 0xa4, 0x05, 0x70, 0x0e, 0x51, 0x8d, 0x00, 0x0d, 0xfe, - 0x29, 0xc2, 0x4f, 0xd0, 0xff, 0xb9, 0x20, 0xe2, 0xd0, 0x66, 0x89, 0x88, 0x84, 0x84, 0x44, 0xaa, - 0xaa, 0x0d, 0x72, 0xfb, 0xc0, 0xf9, 0x52, 0x45, 0x8b, 0x87, 0x19, 0xf0, 0xd1, 0x9b, 0x34, 0x9f, - 0xa3, 0xb8, 0x23, 0xe9, 0x06, 0x5a, 0xf2, 0xa2, 0x44, 0x09, 0x4b, 0x63, 0x20, 0x37, 0x49, 0xdc, - 0x46, 0x8b, 0x1e, 0x1d, 0x2a, 0xe2, 0x28, 0x1a, 0x80, 0xf5, 0xdf, 0xba, 0xb6, 0x59, 0xdb, 0x7e, - 0xd0, 0x28, 0x5c, 0x6b, 0x5c, 0xba, 0xd6, 0xd8, 0x99, 0xb8, 0xd6, 0xaa, 0x9e, 0x9e, 0xaf, 0x55, - 0xc8, 0x8d, 0xa0, 0x7c, 0x54, 0xcd, 0x38, 0x66, 0x27, 0xef, 0x24, 0x8d, 0xc1, 0xaa, 0xaa, 0xc2, - 0x4b, 0xcc, 0xf4, 0xfe, 0xe6, 0xef, 0xe8, 0x0f, 0xaf, 0x22, 0xe3, 0xbd, 0x80, 0x36, 0xf5, 0xfb, - 0x60, 0xe9, 0x4a, 0x74, 0x85, 0xf1, 0x01, 0xaa, 0x7b, 0x74, 0xa8, 0xb2, 0x5e, 0x56, 0x64, 0x2d, - 0xcc, 0x56, 0xf2, 0xad, 0x40, 0xfc, 0x02, 0xe9, 0x1e, 0x1d, 0x36, 0x43, 0xb0, 0x8c, 0xd9, 0x52, - 0x4c, 0xe4, 0xf8, 0x31, 0x5a, 0xf6, 0x32, 0x21, 0x09, 0x1c, 0xd3, 0x38, 0x0a, 0xa8, 0x04, 0xcb, - 0x54, 0x75, 0xfe, 0xc5, 0xe6, 0xc3, 0x55, 0x37, 0xee, 0xf5, 0x76, 0x39, 0x67, 0xdc, 0x42, 0x33, - 0x0e, 0xb7, 0x1c, 0x84, 0xef, 0x23, 0xfd, 0x55, 0x14, 0xe7, 0x3b, 0x58, 0x53, 0xf6, 0x4e, 0x90, - 0xf3, 0x5b, 0x43, 0xa6, 0xb2, 0xc0, 0x03, 0x49, 0xf3, 0x1d, 0x28, 0xbd, 0x0e, 0x52, 0x00, 0xdc, - 0x44, 0xb5, 0x0e, 0x15, 0xb2, 0xcd, 0x12, 0x49, 0xfd, 0x62, 0xa5, 0x66, 0xb8, 0xbf, 0x1c, 0x83, - 0xd7, 0x51, 0xed, 0x20, 0x61, 0x27, 0x49, 0x07, 0x68, 0x00, 0x5c, 0xed, 0x87, 0x41, 0xca, 0x14, - 0xde, 0x42, 0xf5, 0x2b, 0xf7, 0xfc, 0x51, 0x07, 0x8e, 0x21, 0x56, 0x3b, 0x60, 0x92, 0x5b, 0x3c, - 0x7e, 0x8e, 0xee, 0x11, 0x10, 0x59, 0x2c, 0x45, 0xd1, 0x05, 0x04, 0xad, 0x51, 0xb3, 0xdd, 0x11, - 0xca, 0x44, 0x83, 0x4c, 0x3f, 0xdc, 0xaf, 0x1a, 0xf3, 0x75, 0x7d, 0xbf, 0x6a, 0xe8, 0xf5, 0x05, - 0xa7, 0x83, 0x96, 0x77, 0xf3, 0xf7, 0x97, 0xf2, 0x48, 0x80, 0x6a, 0xfd, 0x11, 0x32, 0x5f, 0xd3, - 0x01, 0x88, 0x94, 0xfa, 0x30, 0x79, 0x02, 0xd7, 0x44, 0x7e, 0xfa, 0x96, 0x72, 0x19, 0xa9, 0x55, - 0x99, 0x2b, 0x4e, 0xaf, 0x88, 0x56, 0xe7, 0xf4, 0xa7, 0x5d, 0x39, 0xbd, 0xb0, 0xb5, 0xb3, 0x0b, - 0x5b, 0xfb, 0x71, 0x61, 0x6b, 0x5f, 0xc7, 0x76, 0xe5, 0xdb, 0xd8, 0xae, 0x9c, 0x8d, 0xed, 0xca, - 0xf7, 0xb1, 0x5d, 0xf9, 0xb8, 0x15, 0x46, 0xb2, 0x9f, 0x75, 0x1b, 0x3e, 0x1b, 0xb8, 0x7d, 0x2a, - 0xfa, 0x91, 0xcf, 0x78, 0xea, 0xfa, 0x2c, 0x11, 0x59, 0xec, 0xde, 0xfc, 0x00, 0x76, 0x75, 0x85, - 0x9f, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x69, 0xe7, 0xf4, 0x19, 0x05, 0x00, 0x00, + // 558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x51, 0x6f, 0xd3, 0x30, + 0x10, 0x56, 0xb7, 0x2e, 0x4b, 0xae, 0x65, 0x2a, 0x16, 0xa0, 0x30, 0xd0, 0x54, 0x45, 0x13, 0x9a, + 0xa6, 0xa9, 0x11, 0x83, 0x37, 0xc4, 0x43, 0xd7, 0x15, 0x69, 0xa3, 0x61, 0xcc, 0x14, 0x21, 0xf1, + 0xe6, 0x26, 0xd7, 0xd6, 0x22, 0x8d, 0x83, 0xed, 0x6c, 0xed, 0x7f, 0x46, 0xfc, 0x06, 0x14, 0xa7, + 0xed, 0x52, 0xba, 0xad, 0x4f, 0xd1, 0xf7, 0xdd, 0xe7, 0xf3, 0xdd, 0x7d, 0xe7, 0xc0, 0xab, 0x54, + 0x0a, 0x2d, 0xfc, 0x74, 0x10, 0x8a, 0xc9, 0x44, 0x24, 0x7e, 0xf1, 0x69, 0x19, 0x96, 0x58, 0x05, + 0xda, 0x3f, 0x18, 0x09, 0x31, 0x8a, 0xd1, 0x37, 0xec, 0x20, 0x1b, 0xfa, 0x51, 0x26, 0x99, 0xe6, + 0x0b, 0x9d, 0x77, 0x05, 0x0e, 0x65, 0x43, 0x7d, 0x91, 0x44, 0x38, 0x25, 0x4d, 0xa8, 0x75, 0x24, + 0x32, 0x8d, 0x06, 0xba, 0x95, 0x66, 0xe5, 0xa8, 0x4a, 0xcb, 0x54, 0xae, 0x08, 0x44, 0xc4, 0x87, + 0xb3, 
0x42, 0xb1, 0x55, 0x28, 0x4a, 0x94, 0x77, 0x0a, 0x8d, 0x3e, 0x93, 0x23, 0xd4, 0xe7, 0x4c, + 0xb3, 0x10, 0x13, 0x8d, 0x92, 0x1c, 0x00, 0xdc, 0x21, 0x93, 0xd6, 0xa1, 0x25, 0xc6, 0x3b, 0x84, + 0xfa, 0x0f, 0xc9, 0x35, 0x52, 0xfc, 0x9d, 0xa1, 0xd2, 0xe4, 0x19, 0xec, 0xf4, 0xc5, 0x2f, 0x4c, + 0xe6, 0xd2, 0x02, 0x78, 0xd7, 0x50, 0xa3, 0xc8, 0xa2, 0x47, 0x45, 0xe4, 0x04, 0x9e, 0xe6, 0x02, + 0x2e, 0xb1, 0x23, 0x12, 0xc5, 0x95, 0xc6, 0x44, 0x9b, 0x32, 0x6d, 0xba, 0x1e, 0xf0, 0xfe, 0x6c, + 0x43, 0xfd, 0x3a, 0x43, 0x39, 0xbb, 0x4a, 0xf3, 0x99, 0xa8, 0x07, 0x92, 0x1e, 0xc2, 0x93, 0x80, + 0x27, 0x46, 0x58, 0xee, 0x7b, 0x95, 0x24, 0x1f, 0xa1, 0x1e, 0xb0, 0xa9, 0x21, 0xfa, 0x7c, 0x82, + 0xee, 0x76, 0xb3, 0x72, 0x54, 0x3b, 0x7d, 0xd9, 0x2a, 0x1c, 0x68, 0x2d, 0x1c, 0x68, 0x9d, 0xcf, + 0x1d, 0xa0, 0x2b, 0xf2, 0x7c, 0x48, 0xed, 0x38, 0x16, 0xb7, 0xdf, 0x34, 0x8b, 0xd1, 0xad, 0x9a, + 0x92, 0x4b, 0xcc, 0xfd, 0x9d, 0xed, 0x3c, 0xd0, 0x19, 0xd9, 0x07, 0xfb, 0xbb, 0xc2, 0x0e, 0x0b, + 0xc7, 0xe8, 0x5a, 0x46, 0xb4, 0xc4, 0xa4, 0x0b, 0x8d, 0x80, 0x4d, 0x4d, 0xd6, 0x45, 0x2d, 0xee, + 0xee, 0xa6, 0x62, 0xd7, 0x8e, 0x90, 0xb7, 0x60, 0x05, 0x6c, 0xda, 0x1e, 0xa1, 0x6b, 0x6f, 0x3a, + 0x3c, 0x17, 0x92, 0x37, 0xb0, 0x17, 0x64, 0x4a, 0x53, 0xbc, 0x61, 0x31, 0x8f, 0x98, 0x46, 0xd7, + 0x31, 0xb5, 0xfd, 0xc7, 0xe6, 0xa3, 0x34, 0x77, 0x5d, 0x0c, 0xbb, 0x52, 0x0a, 0xe9, 0xc2, 0xc6, + 0x51, 0x96, 0xe5, 0xe4, 0x05, 0x58, 0x9f, 0x78, 0x9c, 0xef, 0x5a, 0xcd, 0xd8, 0x38, 0x47, 0xde, + 0xdf, 0x0a, 0x38, 0x66, 0xe0, 0x01, 0x6a, 0x96, 0x7b, 0x5d, 0xde, 0xf3, 0x02, 0x90, 0x0f, 0x50, + 0xeb, 0x31, 0xa5, 0x3b, 0x22, 0xd1, 0x2c, 0x2c, 0x56, 0xe7, 0xd1, 0x9b, 0xcb, 0xea, 0xfc, 0x79, + 0x7c, 0x4e, 0xc4, 0x6d, 0xd2, 0x43, 0x16, 0xa1, 0x34, 0x1b, 0x60, 0xd3, 0x32, 0x45, 0x8e, 0xa1, + 0xb1, 0x74, 0x29, 0x9c, 0xf5, 0xf0, 0x06, 0x63, 0xe3, 0xb5, 0x43, 0xd7, 0x78, 0xf2, 0x1e, 0x9e, + 0x53, 0x54, 0x59, 0xac, 0x55, 0x51, 0x3f, 0x46, 0x67, 0xb3, 0x76, 0xa7, 0xa7, 0x8c, 0x59, 0x36, + 0xbd, 0x3f, 0x78, 0x59, 0xb5, 0x77, 0x1a, 0xd6, 0x65, 0xd5, 0xb6, 0x1a, 0xbb, 0x5e, 0x0f, 0xf6, + 0xba, 0xf9, 0x0b, 0x4b, 0x25, 0x57, 0x68, 0x9a, 0x7e, 0x0d, 0xce, 0x17, 0x36, 0x41, 0x95, 0xb2, + 0x10, 0xe7, 0x4b, 0x7e, 0x47, 0xe4, 0xd1, 0xaf, 0x4c, 0x6a, 0x6e, 0x56, 0x62, 0xab, 0x88, 0x2e, + 0x89, 0xb3, 0x93, 0x9f, 0xc7, 0x23, 0xae, 0xc7, 0xd9, 0xa0, 0x15, 0x8a, 0x89, 0x3f, 0x66, 0x6a, + 0xcc, 0x43, 0x21, 0x53, 0x3f, 0x14, 0x89, 0xca, 0x62, 0x7f, 0xf5, 0x77, 0x34, 0xb0, 0x0c, 0x7e, + 0xf7, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x35, 0xe5, 0x62, 0x05, 0xa7, 0x04, 0x00, 0x00, } - -func (m *RaftIndex) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RaftIndex) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RaftIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ModifyIndex != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.ModifyIndex)) - i-- - dAtA[i] = 0x10 - } - if m.CreateIndex != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.CreateIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TargetDatacenter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TargetDatacenter) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TargetDatacenter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RequireConsistent { - i-- - if m.RequireConsistent { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0x5a - } - { - size, err := m.StaleIfError.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - if m.MustRevalidate { - i-- - if m.MustRevalidate { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - { - size, err := m.MaxAge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - { - size, err := m.MaxStaleDuration.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if m.UseCache { - i-- - if m.UseCache { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.RequireConsistent { - i-- - if m.RequireConsistent { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.AllowStale { - i-- - if m.AllowStale { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - { - size, err := 
m.MaxQueryTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.MinQueryIndex != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.MinQueryIndex)) - i-- - dAtA[i] = 0x10 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResultsFilteredByACLs { - i-- - if m.ResultsFilteredByACLs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.ConsistencyLevel) > 0 { - i -= len(m.ConsistencyLevel) - copy(dAtA[i:], m.ConsistencyLevel) - i = encodeVarintCommon(dAtA, i, uint64(len(m.ConsistencyLevel))) - i-- - dAtA[i] = 0x22 - } - if m.KnownLeader { - i-- - if m.KnownLeader { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - { - size, err := m.LastContact.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if m.Index != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *EnterpriseMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EnterpriseMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EnterpriseMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Partition) > 0 { - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Partition))) - i-- - dAtA[i] = 0x12 - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { - offset -= sovCommon(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RaftIndex) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CreateIndex != 0 { - n += 1 + sovCommon(uint64(m.CreateIndex)) - } - if m.ModifyIndex != 0 { - n += 1 + sovCommon(uint64(m.ModifyIndex)) - } - return n -} - -func (m *TargetDatacenter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *WriteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *ReadRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + 
sovCommon(uint64(l)) - } - if m.RequireConsistent { - n += 2 - } - return n -} - -func (m *QueryOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if m.MinQueryIndex != 0 { - n += 1 + sovCommon(uint64(m.MinQueryIndex)) - } - l = m.MaxQueryTime.Size() - n += 1 + l + sovCommon(uint64(l)) - if m.AllowStale { - n += 2 - } - if m.RequireConsistent { - n += 2 - } - if m.UseCache { - n += 2 - } - l = m.MaxStaleDuration.Size() - n += 1 + l + sovCommon(uint64(l)) - l = m.MaxAge.Size() - n += 1 + l + sovCommon(uint64(l)) - if m.MustRevalidate { - n += 2 - } - l = m.StaleIfError.Size() - n += 1 + l + sovCommon(uint64(l)) - l = len(m.Filter) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *QueryMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Index != 0 { - n += 1 + sovCommon(uint64(m.Index)) - } - l = m.LastContact.Size() - n += 1 + l + sovCommon(uint64(l)) - if m.KnownLeader { - n += 2 - } - l = len(m.ConsistencyLevel) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if m.ResultsFilteredByACLs { - n += 2 - } - return n -} - -func (m *EnterpriseMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = len(m.Partition) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func sovCommon(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCommon(x uint64) (n int) { - return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RaftIndex) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RaftIndex: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RaftIndex: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateIndex", wireType) - } - m.CreateIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreateIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModifyIndex", wireType) - } - m.ModifyIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModifyIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TargetDatacenter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TargetDatacenter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TargetDatacenter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequireConsistent", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RequireConsistent = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinQueryIndex", wireType) - } - m.MinQueryIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinQueryIndex |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxQueryTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MaxQueryTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowStale", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowStale = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequireConsistent", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RequireConsistent = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseCache", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UseCache = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxStaleDuration", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MaxStaleDuration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MaxAge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MustRevalidate", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MustRevalidate = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StaleIfError", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.StaleIfError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastContact", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastContact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field KnownLeader", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.KnownLeader = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsistencyLevel", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsistencyLevel = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsFilteredByACLs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ResultsFilteredByACLs = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnterpriseMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnterpriseMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnterpriseMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Partition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCommon(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCommon - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCommon - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCommon - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbcommon/common.proto b/proto/pbcommon/common.proto index a3ec852b6..b38e77fda 100644 --- a/proto/pbcommon/common.proto +++ b/proto/pbcommon/common.proto @@ -5,23 +5,15 @@ package common; option go_package = "github.com/hashicorp/consul/proto/pbcommon"; import "google/protobuf/duration.proto"; -// Go Modules now includes the version in the filepath for packages within GOPATH/pkg/mode -// Therefore unless we want to hardcode a version here like -// github.com/gogo/protobuf@v1.3.0/gogoproto/gogo.proto then the only other choice is to -// have a more relative import and pass the right import path to protoc. I don't like it -// but its necessary. -import "gogoproto/gogo.proto"; -option (gogoproto.goproto_unkeyed_all) = false; -option (gogoproto.goproto_unrecognized_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_sizecache_all) = false; // RaftIndex is used to track the index used while creating // or modifying a given struct type. 
message RaftIndex { - uint64 CreateIndex = 1 [(gogoproto.moretags) = "bexpr:\"-\""]; - uint64 ModifyIndex = 2 [(gogoproto.moretags) = "bexpr:\"-\""]; + // @gotags: bexpr:"-" + uint64 CreateIndex = 1; + // @gotags: bexpr:"-" + uint64 ModifyIndex = 2; } // TargetDatacenter is intended to be used within other messages used for RPC routing @@ -53,6 +45,13 @@ message ReadRequest { // QueryOptions is used to specify various flags for read queries +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryOptions +// output=common.gen.go +// name=Structs +// ignore-fields=StaleIfError,AllowNotModifiedResponse,state,sizeCache,unknownFields message QueryOptions { // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. @@ -63,8 +62,8 @@ message QueryOptions { uint64 MinQueryIndex = 2; // Provided with MinQueryIndex to wait for change. - google.protobuf.Duration MaxQueryTime = 3 - [(gogoproto.nullable) = false]; + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + google.protobuf.Duration MaxQueryTime = 3; // If set, any follower can service the request. Results // may be arbitrarily stale. @@ -86,8 +85,8 @@ message QueryOptions { // If set and AllowStale is true, will try first a stale // read, and then will perform a consistent read if stale // read is older than value. - google.protobuf.Duration MaxStaleDuration = 7 - [(gogoproto.nullable) = false]; + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + google.protobuf.Duration MaxStaleDuration = 7; // MaxAge limits how old a cached value will be returned if UseCache is true. // If there is a cached response that is older than the MaxAge, it is treated @@ -96,8 +95,8 @@ message QueryOptions { // StaleIfError to a longer duration to change this behavior. It is ignored // if the endpoint supports background refresh caching. See // https://www.consul.io/api/index.html#agent-caching for more details. - google.protobuf.Duration MaxAge = 8 - [(gogoproto.nullable) = false]; + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + google.protobuf.Duration MaxAge = 8; // MustRevalidate forces the agent to fetch a fresh version of a cached // resource or at least validate that the cached version is still fresh. It is @@ -111,8 +110,7 @@ message QueryOptions { // UseCache is true and MaxAge is set to a lower, non-zero value. It is // ignored if the endpoint supports background refresh caching. See // https://www.consul.io/api/index.html#agent-caching for more details. - google.protobuf.Duration StaleIfError = 10 - [(gogoproto.nullable) = false]; + google.protobuf.Duration StaleIfError = 10; // Filter specifies the go-bexpr filter expression to be used for // filtering the data prior to returning a response @@ -121,6 +119,13 @@ message QueryOptions { // QueryMeta allows a query response to include potentially // useful metadata about a query +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryMeta +// output=common.gen.go +// name=Structs +// ignore-fields=NotModified,Backend,state,sizeCache,unknownFields message QueryMeta { // This is the index associated with the read uint64 Index = 1; @@ -128,8 +133,8 @@ message QueryMeta { // If AllowStale is used, this is time elapsed since // last contact between the follower and leader. This // can be used to gauge staleness. 
- google.protobuf.Duration LastContact = 2 - [(gogoproto.nullable) = false]; + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + google.protobuf.Duration LastContact = 2; // Used to indicate if there is a known leader node bool KnownLeader = 3; diff --git a/proto/pbcommon/common_oss.go b/proto/pbcommon/common_oss.go index 06c9dbdd3..e96c1a4b3 100644 --- a/proto/pbcommon/common_oss.go +++ b/proto/pbcommon/common_oss.go @@ -9,10 +9,16 @@ import ( var DefaultEnterpriseMeta = EnterpriseMeta{} -func EnterpriseMetaToStructs(_ *EnterpriseMeta) structs.EnterpriseMeta { - return *structs.DefaultEnterpriseMetaInDefaultPartition() -} - func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *EnterpriseMeta { return &EnterpriseMeta{} } +func EnterpriseMetaToStructs(s *EnterpriseMeta, t *structs.EnterpriseMeta) { + if s == nil { + return + } +} +func EnterpriseMetaFromStructs(t *structs.EnterpriseMeta, s *EnterpriseMeta) { + if s == nil { + return + } +} diff --git a/proto/pbcommongogo/common.gen.go b/proto/pbcommongogo/common.gen.go new file mode 100644 index 000000000..925ebb70d --- /dev/null +++ b/proto/pbcommongogo/common.gen.go @@ -0,0 +1,70 @@ +// Code generated by mog. DO NOT EDIT. + +package pbcommongogo + +import "github.com/hashicorp/consul/agent/structs" + +func QueryMetaToStructs(s *QueryMeta, t *structs.QueryMeta) { + if s == nil { + return + } + t.Index = s.Index + t.LastContact = structs.DurationFromProtoGogo(s.LastContact) + t.KnownLeader = s.KnownLeader + t.ConsistencyLevel = s.ConsistencyLevel + t.ResultsFilteredByACLs = s.ResultsFilteredByACLs +} +func QueryMetaFromStructs(t *structs.QueryMeta, s *QueryMeta) { + if s == nil { + return + } + s.Index = t.Index + s.LastContact = structs.DurationToProtoGogo(t.LastContact) + s.KnownLeader = t.KnownLeader + s.ConsistencyLevel = t.ConsistencyLevel + s.ResultsFilteredByACLs = t.ResultsFilteredByACLs +} +func QueryOptionsToStructs(s *QueryOptions, t *structs.QueryOptions) { + if s == nil { + return + } + t.Token = s.Token + t.MinQueryIndex = s.MinQueryIndex + t.MaxQueryTime = structs.DurationFromProtoGogo(s.MaxQueryTime) + t.AllowStale = s.AllowStale + t.RequireConsistent = s.RequireConsistent + t.UseCache = s.UseCache + t.MaxStaleDuration = structs.DurationFromProtoGogo(s.MaxStaleDuration) + t.MaxAge = structs.DurationFromProtoGogo(s.MaxAge) + t.MustRevalidate = s.MustRevalidate + t.Filter = s.Filter +} +func QueryOptionsFromStructs(t *structs.QueryOptions, s *QueryOptions) { + if s == nil { + return + } + s.Token = t.Token + s.MinQueryIndex = t.MinQueryIndex + s.MaxQueryTime = structs.DurationToProtoGogo(t.MaxQueryTime) + s.AllowStale = t.AllowStale + s.RequireConsistent = t.RequireConsistent + s.UseCache = t.UseCache + s.MaxStaleDuration = structs.DurationToProtoGogo(t.MaxStaleDuration) + s.MaxAge = structs.DurationToProtoGogo(t.MaxAge) + s.MustRevalidate = t.MustRevalidate + s.Filter = t.Filter +} +func RaftIndexToStructs(s *RaftIndex, t *structs.RaftIndex) { + if s == nil { + return + } + t.CreateIndex = s.CreateIndex + t.ModifyIndex = s.ModifyIndex +} +func RaftIndexFromStructs(t *structs.RaftIndex, s *RaftIndex) { + if s == nil { + return + } + s.CreateIndex = t.CreateIndex + s.ModifyIndex = t.ModifyIndex +} diff --git a/proto/pbcommongogo/common.go b/proto/pbcommongogo/common.go new file mode 100644 index 000000000..5cd6d4d64 --- /dev/null +++ b/proto/pbcommongogo/common.go @@ -0,0 +1,303 @@ +package pbcommongogo + +import ( + "time" + + "github.com/hashicorp/consul/agent/structs" +) + +// IsRead is 
always true for QueryOption +func (q *QueryOptions) IsRead() bool { + return true +} + +// AllowStaleRead returns whether a stale read should be allowed +func (q *QueryOptions) AllowStaleRead() bool { + return q.AllowStale +} + +func (q *QueryOptions) TokenSecret() string { + return q.Token +} + +func (q *QueryOptions) SetTokenSecret(s string) { + q.Token = s +} + +// SetToken is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetToken(token string) { + q.Token = token +} + +// SetMinQueryIndex is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetMinQueryIndex(minQueryIndex uint64) { + q.MinQueryIndex = minQueryIndex +} + +// SetMaxQueryTime is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetMaxQueryTime(maxQueryTime time.Duration) { + q.MaxQueryTime = structs.DurationToProtoGogo(maxQueryTime) +} + +// SetAllowStale is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetAllowStale(allowStale bool) { + q.AllowStale = allowStale +} + +// SetRequireConsistent is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetRequireConsistent(requireConsistent bool) { + q.RequireConsistent = requireConsistent +} + +// SetUseCache is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetUseCache(useCache bool) { + q.UseCache = useCache +} + +// SetMaxStaleDuration is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetMaxStaleDuration(maxStaleDuration time.Duration) { + q.MaxStaleDuration = structs.DurationToProtoGogo(maxStaleDuration) +} + +// SetMaxAge is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetMaxAge(maxAge time.Duration) { + q.MaxAge = structs.DurationToProtoGogo(maxAge) +} + +// SetMustRevalidate is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetMustRevalidate(mustRevalidate bool) { + q.MustRevalidate = mustRevalidate +} + +// SetStaleIfError is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) { + q.StaleIfError = structs.DurationToProtoGogo(staleIfError) +} + +func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + maxTime := structs.DurationFromProtoGogo(q.MaxQueryTime) + o := structs.QueryOptions{ + MaxQueryTime: maxTime, + MinQueryIndex: q.MinQueryIndex, + } + return o.HasTimedOut(start, rpcHoldTimeout, maxQueryTime, defaultQueryTime) +} + +// SetFilter is needed to implement the structs.QueryOptionsCompat interface +func (q *QueryOptions) SetFilter(filter string) { + q.Filter = filter +} + +// GetMaxQueryTime is required to implement blockingQueryOptions +func (q *QueryOptions) GetMaxQueryTime() (time.Duration, error) { + return structs.DurationFromProtoGogo(q.MaxQueryTime), nil +} + +// GetMinQueryIndex is required to implement blockingQueryOptions +func (q *QueryOptions) GetMinQueryIndex() uint64 { + if q != nil { + return q.MinQueryIndex + } + return 0 +} + +// GetRequireConsistent is required to implement blockingQueryOptions +func (q *QueryOptions) GetRequireConsistent() bool { + if q != nil { + return q.RequireConsistent + } + return false +} + +// GetToken is required to implement blockingQueryOptions +func (q *QueryOptions) GetToken() string { + if q != nil { + return q.Token + } + return "" +} + 
+// GetAllowStale is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetAllowStale() bool { + if q != nil { + return q.AllowStale + } + return false +} + +// GetFilter is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetFilter() string { + if q != nil { + return q.Filter + } + return "" +} + +// GetMaxAge is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetMaxAge() (time.Duration, error) { + if q != nil { + return structs.DurationFromProtoGogo(q.MaxAge), nil + } + return 0, nil +} + +// GetMaxStaleDuration is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { + if q != nil { + return structs.DurationFromProtoGogo(q.MaxStaleDuration), nil + } + return 0, nil +} + +// GetMustRevalidate is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetMustRevalidate() bool { + if q != nil { + return q.MustRevalidate + } + return false +} + +// GetStaleIfError is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetStaleIfError() (time.Duration, error) { + if q != nil { + return structs.DurationFromProtoGogo(q.StaleIfError), nil + } + return 0, nil +} + +// GetUseCache is required to implement structs.QueryOptionsCompat +func (q *QueryOptions) GetUseCache() bool { + if q != nil { + return q.UseCache + } + return false +} + +// SetLastContact is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetLastContact(lastContact time.Duration) { + q.LastContact = structs.DurationToProtoGogo(lastContact) +} + +// SetKnownLeader is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetKnownLeader(knownLeader bool) { + q.KnownLeader = knownLeader +} + +// SetIndex is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetIndex(index uint64) { + q.Index = index +} + +// SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { + q.ConsistencyLevel = consistencyLevel +} + +func (q *QueryMeta) GetBackend() structs.QueryBackend { + return structs.QueryBackend(0) +} + +// SetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetResultsFilteredByACLs(v bool) { + q.ResultsFilteredByACLs = v +} + +// GetIndex is required to implement blockingQueryResponseMeta +func (q *QueryMeta) GetIndex() uint64 { + if q != nil { + return q.Index + } + return 0 +} + +// GetConsistencyLevel is required to implement structs.QueryMetaCompat +func (q *QueryMeta) GetConsistencyLevel() string { + if q != nil { + return q.ConsistencyLevel + } + return "" +} + +// GetKnownLeader is required to implement structs.QueryMetaCompat +func (q *QueryMeta) GetKnownLeader() bool { + if q != nil { + return q.KnownLeader + } + return false +} + +// GetLastContact is required to implement structs.QueryMetaCompat +func (q *QueryMeta) GetLastContact() (time.Duration, error) { + if q != nil { + return structs.DurationFromProtoGogo(q.LastContact), nil + } + return 0, nil +} + +// GetResultsFilteredByACLs is required to implement structs.QueryMetaCompat +func (q *QueryMeta) GetResultsFilteredByACLs() bool { + if q != nil { + return q.ResultsFilteredByACLs + } + return false +} + +// WriteRequest only applies to writes, always false +// +// IsRead implements structs.RPCInfo +func (w WriteRequest) IsRead() bool { + return false +} + +// SetTokenSecret 
implements structs.RPCInfo +func (w WriteRequest) TokenSecret() string { + return w.Token +} + +// SetTokenSecret implements structs.RPCInfo +func (w *WriteRequest) SetTokenSecret(s string) { + w.Token = s +} + +// AllowStaleRead returns whether a stale read should be allowed +// +// AllowStaleRead implements structs.RPCInfo +func (w WriteRequest) AllowStaleRead() bool { + return false +} + +// HasTimedOut implements structs.RPCInfo +func (w WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// IsRead implements structs.RPCInfo +func (r *ReadRequest) IsRead() bool { + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (r *ReadRequest) AllowStaleRead() bool { + // TODO(partitions): plumb this? + return false +} + +// TokenSecret implements structs.RPCInfo +func (r *ReadRequest) TokenSecret() string { + return r.Token +} + +// SetTokenSecret implements structs.RPCInfo +func (r *ReadRequest) SetTokenSecret(token string) { + r.Token = token +} + +// HasTimedOut implements structs.RPCInfo +func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// RequestDatacenter implements structs.RPCInfo +func (td TargetDatacenter) RequestDatacenter() string { + return td.Datacenter +} diff --git a/proto/pbcommongogo/common.pb.binary.go b/proto/pbcommongogo/common.pb.binary.go new file mode 100644 index 000000000..2e6b57496 --- /dev/null +++ b/proto/pbcommongogo/common.pb.binary.go @@ -0,0 +1,78 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: proto/pbcommongogo/common.proto + +package pbcommongogo + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *RaftIndex) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *RaftIndex) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *TargetDatacenter) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *TargetDatacenter) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *WriteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *WriteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReadRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReadRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *QueryOptions) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *QueryOptions) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *QueryMeta) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func 
(msg *QueryMeta) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *EnterpriseMeta) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *EnterpriseMeta) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbcommongogo/common.pb.go b/proto/pbcommongogo/common.pb.go new file mode 100644 index 000000000..07db0feaa --- /dev/null +++ b/proto/pbcommongogo/common.pb.go @@ -0,0 +1,2036 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: proto/pbcommongogo/common.proto + +package pbcommongogo + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + types "github.com/gogo/protobuf/types" + proto "github.com/golang/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// RaftIndex is used to track the index used while creating +// or modifying a given struct type. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.RaftIndex +// output=common.gen.go +// name=Structs +type RaftIndex struct { + CreateIndex uint64 `protobuf:"varint,1,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"` + ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` +} + +func (m *RaftIndex) Reset() { *m = RaftIndex{} } +func (m *RaftIndex) String() string { return proto.CompactTextString(m) } +func (*RaftIndex) ProtoMessage() {} +func (*RaftIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{0} +} +func (m *RaftIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RaftIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftIndex.Merge(m, src) +} +func (m *RaftIndex) XXX_Size() int { + return m.Size() +} +func (m *RaftIndex) XXX_DiscardUnknown() { + xxx_messageInfo_RaftIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftIndex proto.InternalMessageInfo + +// TargetDatacenter is intended to be used within other messages used for RPC routing +// amongst the various Consul datacenters +type TargetDatacenter struct { + Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (m *TargetDatacenter) Reset() { *m = TargetDatacenter{} } +func (m *TargetDatacenter) String() string { return proto.CompactTextString(m) } +func (*TargetDatacenter) ProtoMessage() {} +func (*TargetDatacenter) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{1} +} +func (m *TargetDatacenter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TargetDatacenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
if deterministic { + return xxx_messageInfo_TargetDatacenter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TargetDatacenter) XXX_Merge(src proto.Message) { + xxx_messageInfo_TargetDatacenter.Merge(m, src) +} +func (m *TargetDatacenter) XXX_Size() int { + return m.Size() +} +func (m *TargetDatacenter) XXX_DiscardUnknown() { + xxx_messageInfo_TargetDatacenter.DiscardUnknown(m) +} + +var xxx_messageInfo_TargetDatacenter proto.InternalMessageInfo + +type WriteRequest struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{2} +} +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +// ReadRequest is a type that may be embedded into any requests for read +// operations. +// It is a replacement for QueryOptions now that we no longer need any of those +// fields because we are moving away from using blocking queries. +// It is also similar to WriteRequest. It is a separate type so that in the +// future we can introduce fields that may only be relevant for reads. +type ReadRequest struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + // RequireConsistent indicates that the request must be sent to the leader. 
+ RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{3} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +// QueryOptions is used to specify various flags for read queries +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryOptions +// output=common.gen.go +// name=Structs +// ignore-fields=StaleIfError,AllowNotModifiedResponse +type QueryOptions struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + // If set, wait until query exceeds given index. Must be provided + // with MaxQueryTime. + MinQueryIndex uint64 `protobuf:"varint,2,opt,name=MinQueryIndex,proto3" json:"MinQueryIndex,omitempty"` + // Provided with MinQueryIndex to wait for change. + // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo + MaxQueryTime types.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime"` + // If set, any follower can service the request. Results + // may be arbitrarily stale. + AllowStale bool `protobuf:"varint,4,opt,name=AllowStale,proto3" json:"AllowStale,omitempty"` + // If set, the leader must verify leadership prior to + // servicing the request. Prevents a stale read. + RequireConsistent bool `protobuf:"varint,5,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` + // If set, the local agent may respond with an arbitrarily stale locally + // cached response. The semantics differ from AllowStale since the agent may + // be entirely partitioned from the servers and still considered "healthy" by + // operators. Stale responses from Servers are also arbitrarily stale, but can + // provide additional bounds on the last contact time from the leader. It's + // expected that servers that are partitioned are noticed and replaced in a + // timely way by operators while the same may not be true for client agents. + UseCache bool `protobuf:"varint,6,opt,name=UseCache,proto3" json:"UseCache,omitempty"` + // If set and AllowStale is true, will try first a stale + // read, and then will perform a consistent read if stale + // read is older than value. + // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo + MaxStaleDuration types.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration"` + // MaxAge limits how old a cached value will be returned if UseCache is true. 
+ // If there is a cached response that is older than the MaxAge, it is treated + // as a cache miss and a new fetch invoked. If the fetch fails, the error is + // returned. Clients that wish to allow for stale results on error can set + // StaleIfError to a longer duration to change this behavior. It is ignored + // if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo + MaxAge types.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge"` + // MustRevalidate forces the agent to fetch a fresh version of a cached + // resource or at least validate that the cached version is still fresh. It is + // implied by either max-age=0 or must-revalidate Cache-Control headers. It + // only makes sense when UseCache is true. We store it since MaxAge = 0 is the + // default unset value. + MustRevalidate bool `protobuf:"varint,9,opt,name=MustRevalidate,proto3" json:"MustRevalidate,omitempty"` + // StaleIfError specifies how stale the client will accept a cached response + // if the servers are unavailable to fetch a fresh one. Only makes sense when + // UseCache is true and MaxAge is set to a lower, non-zero value. It is + // ignored if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + StaleIfError types.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError"` + // Filter specifies the go-bexpr filter expression to be used for + // filtering the data prior to returning a response + Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` +} + +func (m *QueryOptions) Reset() { *m = QueryOptions{} } +func (m *QueryOptions) String() string { return proto.CompactTextString(m) } +func (*QueryOptions) ProtoMessage() {} +func (*QueryOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{4} +} +func (m *QueryOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOptions.Merge(m, src) +} +func (m *QueryOptions) XXX_Size() int { + return m.Size() +} +func (m *QueryOptions) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOptions proto.InternalMessageInfo + +// QueryMeta allows a query response to include potentially +// useful metadata about a query +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryMeta +// output=common.gen.go +// name=Structs +// ignore-fields=NotModified,Backend +type QueryMeta struct { + // This is the index associated with the read + Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` + // If AllowStale is used, this is time elapsed since + // last contact between the follower and leader. This + // can be used to gauge staleness. 
+ // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo + LastContact types.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact"` + // Used to indicate if there is a known leader node + KnownLeader bool `protobuf:"varint,3,opt,name=KnownLeader,proto3" json:"KnownLeader,omitempty"` + // Consistencylevel returns the consistency used to serve the query + // Having `discovery_max_stale` on the agent can affect whether + // the request was served by a leader. + ConsistencyLevel string `protobuf:"bytes,4,opt,name=ConsistencyLevel,proto3" json:"ConsistencyLevel,omitempty"` + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. + ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` +} + +func (m *QueryMeta) Reset() { *m = QueryMeta{} } +func (m *QueryMeta) String() string { return proto.CompactTextString(m) } +func (*QueryMeta) ProtoMessage() {} +func (*QueryMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{5} +} +func (m *QueryMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryMeta.Merge(m, src) +} +func (m *QueryMeta) XXX_Size() int { + return m.Size() +} +func (m *QueryMeta) XXX_DiscardUnknown() { + xxx_messageInfo_QueryMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryMeta proto.InternalMessageInfo + +// EnterpriseMeta contains metadata that is only used by the Enterprise version +// of Consul. +type EnterpriseMeta struct { + // Namespace in which the entity exists. + Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + // Partition in which the entity exists. 
+ Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` +} + +func (m *EnterpriseMeta) Reset() { *m = EnterpriseMeta{} } +func (m *EnterpriseMeta) String() string { return proto.CompactTextString(m) } +func (*EnterpriseMeta) ProtoMessage() {} +func (*EnterpriseMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_a834024536145257, []int{6} +} +func (m *EnterpriseMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnterpriseMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnterpriseMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnterpriseMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnterpriseMeta.Merge(m, src) +} +func (m *EnterpriseMeta) XXX_Size() int { + return m.Size() +} +func (m *EnterpriseMeta) XXX_DiscardUnknown() { + xxx_messageInfo_EnterpriseMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_EnterpriseMeta proto.InternalMessageInfo + +func init() { + proto.RegisterType((*RaftIndex)(nil), "commongogo.RaftIndex") + proto.RegisterType((*TargetDatacenter)(nil), "commongogo.TargetDatacenter") + proto.RegisterType((*WriteRequest)(nil), "commongogo.WriteRequest") + proto.RegisterType((*ReadRequest)(nil), "commongogo.ReadRequest") + proto.RegisterType((*QueryOptions)(nil), "commongogo.QueryOptions") + proto.RegisterType((*QueryMeta)(nil), "commongogo.QueryMeta") + proto.RegisterType((*EnterpriseMeta)(nil), "commongogo.EnterpriseMeta") +} + +func init() { proto.RegisterFile("proto/pbcommongogo/common.proto", fileDescriptor_a834024536145257) } + +var fileDescriptor_a834024536145257 = []byte{ + // 639 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x4d, 0x4a, 0x9a, 0x26, 0x93, 0xb6, 0x0a, 0xab, 0x82, 0x4c, 0x85, 0xdc, 0xca, 0xaa, 0x50, + 0x85, 0x20, 0x96, 0x0a, 0x12, 0x12, 0xb7, 0x24, 0x2d, 0x52, 0xdb, 0x18, 0xda, 0xa5, 0x08, 0x89, + 0xdb, 0xc6, 0x9e, 0x38, 0x16, 0x8e, 0xd7, 0xec, 0xae, 0xdb, 0xe4, 0xce, 0x07, 0x70, 0xe4, 0x93, + 0x7a, 0xec, 0x91, 0x53, 0x05, 0xcd, 0x1f, 0x20, 0x3e, 0x00, 0x79, 0x9d, 0xb6, 0x2e, 0x69, 0x51, + 0x6e, 0x9e, 0x37, 0xef, 0xed, 0xce, 0xcc, 0x9b, 0x35, 0xac, 0xc5, 0x82, 0x2b, 0x6e, 0xc7, 0x5d, + 0x97, 0x0f, 0x06, 0x3c, 0xf2, 0xb9, 0xcf, 0xed, 0xec, 0xb3, 0xa1, 0x33, 0x04, 0xae, 0x13, 0xab, + 0xa6, 0xcf, 0xb9, 0x1f, 0xa2, 0xad, 0x33, 0xdd, 0xa4, 0x67, 0x7b, 0x89, 0x60, 0x2a, 0xb8, 0xe4, + 0xae, 0xae, 0xa4, 0xac, 0xec, 0xc0, 0xf4, 0x2b, 0x43, 0xad, 0x01, 0x54, 0x29, 0xeb, 0xa9, 0xdd, + 0xc8, 0xc3, 0x21, 0xb1, 0xa1, 0xd6, 0x16, 0xc8, 0x14, 0xea, 0xd0, 0x28, 0xae, 0x17, 0x37, 0x4b, + 0xad, 0xa5, 0xdf, 0xe7, 0x6b, 0xd5, 0x2e, 0x0e, 0x63, 0xf1, 0xda, 0x7a, 0x6e, 0xd1, 0x3c, 0x23, + 0x15, 0x38, 0xdc, 0x0b, 0x7a, 0xa3, 0x4c, 0x30, 0x77, 0xab, 0x20, 0xc7, 0xb0, 0xb6, 0xa0, 0x7e, + 0xc4, 0x84, 0x8f, 0x6a, 0x9b, 0x29, 0xe6, 0x62, 0xa4, 0x50, 0x10, 0x13, 0xe0, 0x3a, 0xd2, 0x97, + 0x56, 0x69, 0x0e, 0xb1, 0x36, 0x60, 0xf1, 0xa3, 0x08, 0x14, 0x52, 0xfc, 0x92, 0xa0, 0x54, 0x64, + 0x05, 0xe6, 0x8f, 0xf8, 0x67, 0x8c, 0x26, 0xd4, 0x2c, 0xb0, 0x0e, 0xa1, 0x46, 0x91, 0x79, 0xff, + 0x25, 0x91, 0x67, 0x70, 0x3f, 0x25, 0x04, 0x02, 0xdb, 0x3c, 0x92, 0x81, 0x54, 0x18, 0x29, 0x5d, + 0x75, 0x85, 0x4e, 0x27, 0xac, 0xaf, 0x25, 0x58, 0x3c, 0x4c, 0x50, 0x8c, 0xde, 0xc5, 0xe9, 0x1c, + 0xe5, 0x1d, 0x87, 0x6e, 0xc0, 0x92, 0x13, 0x44, 0x9a, 
0x98, 0x1b, 0x03, 0xbd, 0x09, 0x92, 0x36, + 0x2c, 0x3a, 0x6c, 0xa8, 0x81, 0xa3, 0x60, 0x80, 0xc6, 0xbd, 0xf5, 0xe2, 0x66, 0x6d, 0xeb, 0x51, + 0x23, 0x73, 0xad, 0x71, 0xe9, 0x5a, 0x63, 0x7b, 0xe2, 0x5a, 0xab, 0x74, 0x7a, 0xbe, 0x56, 0xa0, + 0x37, 0x44, 0xe9, 0xa8, 0x9a, 0x61, 0xc8, 0x4f, 0xde, 0x2b, 0x16, 0xa2, 0x51, 0xd2, 0x85, 0xe7, + 0x90, 0xdb, 0xfb, 0x9b, 0xbf, 0xa3, 0x3f, 0xb2, 0x0a, 0x95, 0x0f, 0x12, 0xdb, 0xcc, 0xed, 0xa3, + 0x51, 0xd6, 0xa4, 0xab, 0x98, 0xec, 0x43, 0xdd, 0x61, 0x43, 0x7d, 0xea, 0x65, 0x45, 0xc6, 0xc2, + 0x6c, 0x25, 0x4f, 0x09, 0xc9, 0x2b, 0x28, 0x3b, 0x6c, 0xd8, 0xf4, 0xd1, 0xa8, 0xcc, 0x76, 0xc4, + 0x84, 0x4e, 0x9e, 0xc0, 0xb2, 0x93, 0x48, 0x45, 0xf1, 0x98, 0x85, 0x81, 0xc7, 0x14, 0x1a, 0x55, + 0x5d, 0xe7, 0x3f, 0x68, 0x3a, 0x5c, 0x7d, 0xe3, 0x6e, 0x6f, 0x47, 0x08, 0x2e, 0x0c, 0x98, 0x71, + 0xb8, 0x79, 0x11, 0x79, 0x08, 0xe5, 0x37, 0x41, 0x98, 0xee, 0x60, 0x4d, 0xdb, 0x3b, 0x89, 0xac, + 0x3f, 0x45, 0xa8, 0x6a, 0x0b, 0x1c, 0x54, 0x2c, 0xdd, 0x81, 0xdc, 0xeb, 0xa0, 0x59, 0x40, 0x9a, + 0x50, 0xeb, 0x30, 0xa9, 0xda, 0x3c, 0x52, 0xcc, 0xcd, 0x56, 0x6a, 0x86, 0xfb, 0xf3, 0x1a, 0xb2, + 0x0e, 0xb5, 0xfd, 0x88, 0x9f, 0x44, 0x1d, 0x64, 0x1e, 0x0a, 0xbd, 0x1f, 0x15, 0x9a, 0x87, 0xc8, + 0x53, 0xa8, 0x5f, 0xb9, 0xe7, 0x8e, 0x3a, 0x78, 0x8c, 0xa1, 0xde, 0x81, 0x2a, 0x9d, 0xc2, 0xc9, + 0x4b, 0x78, 0x40, 0x51, 0x26, 0xa1, 0x92, 0x59, 0x17, 0xe8, 0xb5, 0x46, 0xcd, 0x76, 0x47, 0x6a, + 0x13, 0x2b, 0xf4, 0xf6, 0xe4, 0x5e, 0xa9, 0x32, 0x5f, 0x2f, 0xef, 0x95, 0x2a, 0xe5, 0xfa, 0x82, + 0xd5, 0x81, 0xe5, 0x9d, 0xf4, 0xfd, 0xc5, 0x22, 0x90, 0xa8, 0x5b, 0x7f, 0x0c, 0xd5, 0xb7, 0x6c, + 0x80, 0x32, 0x66, 0x2e, 0x4e, 0x9e, 0xc0, 0x35, 0x90, 0x66, 0x0f, 0x98, 0x50, 0x81, 0x5e, 0x95, + 0xb9, 0x2c, 0x7b, 0x05, 0xb4, 0x0e, 0x4e, 0x7f, 0x99, 0x85, 0xd3, 0x0b, 0xb3, 0x78, 0x76, 0x61, + 0x16, 0x7f, 0x5e, 0x98, 0xc5, 0x6f, 0x63, 0xb3, 0xf0, 0x7d, 0x6c, 0x16, 0xce, 0xc6, 0x66, 0xe1, + 0xc7, 0xd8, 0x2c, 0x7c, 0x6a, 0xf8, 0x81, 0xea, 0x27, 0xdd, 0x86, 0xcb, 0x07, 0x76, 0x9f, 0xc9, + 0x7e, 0xe0, 0x72, 0x11, 0xdb, 0x2e, 0x8f, 0x64, 0x12, 0xda, 0xd3, 0x3f, 0xc2, 0x6e, 0x59, 0x63, + 0x2f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x17, 0x8b, 0xc5, 0x82, 0x25, 0x05, 0x00, 0x00, +} + +func (m *RaftIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RaftIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ModifyIndex != 0 { + i = encodeVarintCommon(dAtA, i, uint64(m.ModifyIndex)) + i-- + dAtA[i] = 0x10 + } + if m.CreateIndex != 0 { + i = encodeVarintCommon(dAtA, i, uint64(m.CreateIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TargetDatacenter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TargetDatacenter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TargetDatacenter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Datacenter) > 0 { + i -= len(m.Datacenter) + copy(dAtA[i:], m.Datacenter) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Datacenter))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Token) > 0 { + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RequireConsistent { + i-- + if m.RequireConsistent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Token) > 0 { + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + i -= len(m.Filter) + copy(dAtA[i:], m.Filter) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Filter))) + i-- + dAtA[i] = 0x5a + } + { + size, err := m.StaleIfError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.MustRevalidate { + i-- + if m.MustRevalidate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + { + size, err := m.MaxAge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + { + size, err := m.MaxStaleDuration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if m.UseCache { + i-- + if m.UseCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.RequireConsistent { + i-- + if m.RequireConsistent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.AllowStale { + i-- + if m.AllowStale { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + { + size, err := m.MaxQueryTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.MinQueryIndex != 0 { + i = encodeVarintCommon(dAtA, i, uint64(m.MinQueryIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.Token) > 0 { + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = 
encodeVarintCommon(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResultsFilteredByACLs { + i-- + if m.ResultsFilteredByACLs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.ConsistencyLevel) > 0 { + i -= len(m.ConsistencyLevel) + copy(dAtA[i:], m.ConsistencyLevel) + i = encodeVarintCommon(dAtA, i, uint64(len(m.ConsistencyLevel))) + i-- + dAtA[i] = 0x22 + } + if m.KnownLeader { + i-- + if m.KnownLeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + { + size, err := m.LastContact.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Index != 0 { + i = encodeVarintCommon(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnterpriseMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnterpriseMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnterpriseMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Partition) > 0 { + i -= len(m.Partition) + copy(dAtA[i:], m.Partition) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Partition))) + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { + offset -= sovCommon(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RaftIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CreateIndex != 0 { + n += 1 + sovCommon(uint64(m.CreateIndex)) + } + if m.ModifyIndex != 0 { + n += 1 + sovCommon(uint64(m.ModifyIndex)) + } + return n +} + +func (m *TargetDatacenter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Datacenter) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + +func (m *ReadRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + if m.RequireConsistent { + n += 2 + } + return n +} + +func (m *QueryOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + if m.MinQueryIndex != 0 { + n += 1 + sovCommon(uint64(m.MinQueryIndex)) + } + l = m.MaxQueryTime.Size() + n += 1 + l + 
sovCommon(uint64(l)) + if m.AllowStale { + n += 2 + } + if m.RequireConsistent { + n += 2 + } + if m.UseCache { + n += 2 + } + l = m.MaxStaleDuration.Size() + n += 1 + l + sovCommon(uint64(l)) + l = m.MaxAge.Size() + n += 1 + l + sovCommon(uint64(l)) + if m.MustRevalidate { + n += 2 + } + l = m.StaleIfError.Size() + n += 1 + l + sovCommon(uint64(l)) + l = len(m.Filter) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + +func (m *QueryMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovCommon(uint64(m.Index)) + } + l = m.LastContact.Size() + n += 1 + l + sovCommon(uint64(l)) + if m.KnownLeader { + n += 2 + } + l = len(m.ConsistencyLevel) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + if m.ResultsFilteredByACLs { + n += 2 + } + return n +} + +func (m *EnterpriseMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + l = len(m.Partition) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + +func sovCommon(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCommon(x uint64) (n int) { + return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RaftIndex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateIndex", wireType) + } + m.CreateIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModifyIndex", wireType) + } + m.ModifyIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModifyIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TargetDatacenter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
TargetDatacenter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TargetDatacenter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Datacenter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequireConsistent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequireConsistent = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinQueryIndex", wireType) + } + m.MinQueryIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinQueryIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxQueryTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxQueryTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowStale", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowStale = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequireConsistent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequireConsistent = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseCache = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxStaleDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxStaleDuration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxAge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MustRevalidate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MustRevalidate = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleIfError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StaleIfError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastContact", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastContact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KnownLeader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KnownLeader = bool(v != 0) + case 4: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ConsistencyLevel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsistencyLevel = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsFilteredByACLs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResultsFilteredByACLs = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnterpriseMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnterpriseMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnterpriseMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Partition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCommon(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCommon + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCommon + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCommon + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/pbcommongogo/common.proto b/proto/pbcommongogo/common.proto new file mode 100644 index 000000000..30cd847f3 --- /dev/null +++ b/proto/pbcommongogo/common.proto @@ -0,0 +1,182 @@ +syntax = "proto3"; + +package commongogo; + +option go_package = "github.com/hashicorp/consul/proto/pbcommongogo"; + +import "google/protobuf/duration.proto"; +// Go Modules now includes the version in the filepath for packages within GOPATH/pkg/mode +// Therefore unless we want to hardcode a version here like +// github.com/gogo/protobuf@v1.3.0/gogoproto/gogo.proto then the only other choice is to +// have a more relative import and pass the right import path to protoc. I don't like it +// but its necessary. +import "gogoproto/gogo.proto"; + +option (gogoproto.goproto_unkeyed_all) = false; +option (gogoproto.goproto_unrecognized_all) = false; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_sizecache_all) = false; + +// RaftIndex is used to track the index used while creating +// or modifying a given struct type. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.RaftIndex +// output=common.gen.go +// name=Structs +message RaftIndex { + uint64 CreateIndex = 1 [(gogoproto.moretags) = "bexpr:\"-\""]; + uint64 ModifyIndex = 2 [(gogoproto.moretags) = "bexpr:\"-\""]; +} + +// TargetDatacenter is intended to be used within other messages used for RPC routing +// amongst the various Consul datacenters +message TargetDatacenter { + string Datacenter = 1; +} + +message WriteRequest { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. 
+ string Token = 1;
+}
+
+// ReadRequest is a type that may be embedded into any requests for read
+// operations.
+// It is a replacement for QueryOptions now that we no longer need any of those
+// fields because we are moving away from using blocking queries.
+// It is also similar to WriteRequest. It is a separate type so that in the
+// future we can introduce fields that may only be relevant for reads.
+message ReadRequest {
+ // Token is the ACL token ID. If not provided, the 'anonymous'
+ // token is assumed for backwards compatibility.
+ string Token = 1;
+
+ // RequireConsistent indicates that the request must be sent to the leader.
+ bool RequireConsistent = 2;
+}
+
+
+// QueryOptions is used to specify various flags for read queries
+//
+// mog annotation:
+//
+// target=github.com/hashicorp/consul/agent/structs.QueryOptions
+// output=common.gen.go
+// name=Structs
+// ignore-fields=StaleIfError,AllowNotModifiedResponse
+message QueryOptions {
+ // Token is the ACL token ID. If not provided, the 'anonymous'
+ // token is assumed for backwards compatibility.
+ string Token = 1;
+
+ // If set, wait until query exceeds given index. Must be provided
+ // with MaxQueryTime.
+ uint64 MinQueryIndex = 2;
+
+ // Provided with MinQueryIndex to wait for change.
+ // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo
+ google.protobuf.Duration MaxQueryTime = 3
+ [(gogoproto.nullable) = false];
+
+ // If set, any follower can service the request. Results
+ // may be arbitrarily stale.
+ bool AllowStale = 4;
+
+ // If set, the leader must verify leadership prior to
+ // servicing the request. Prevents a stale read.
+ bool RequireConsistent = 5;
+
+ // If set, the local agent may respond with an arbitrarily stale locally
+ // cached response. The semantics differ from AllowStale since the agent may
+ // be entirely partitioned from the servers and still considered "healthy" by
+ // operators. Stale responses from Servers are also arbitrarily stale, but can
+ // provide additional bounds on the last contact time from the leader. It's
+ // expected that servers that are partitioned are noticed and replaced in a
+ // timely way by operators while the same may not be true for client agents.
+ bool UseCache = 6;
+
+ // If set and AllowStale is true, will try first a stale
+ // read, and then will perform a consistent read if stale
+ // read is older than value.
+ // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo
+ google.protobuf.Duration MaxStaleDuration = 7
+ [(gogoproto.nullable) = false];
+
+ // MaxAge limits how old a cached value will be returned if UseCache is true.
+ // If there is a cached response that is older than the MaxAge, it is treated
+ // as a cache miss and a new fetch invoked. If the fetch fails, the error is
+ // returned. Clients that wish to allow for stale results on error can set
+ // StaleIfError to a longer duration to change this behavior. It is ignored
+ // if the endpoint supports background refresh caching. See
+ // https://www.consul.io/api/index.html#agent-caching for more details.
+ // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo
+ google.protobuf.Duration MaxAge = 8
+ [(gogoproto.nullable) = false];
+
+ // MustRevalidate forces the agent to fetch a fresh version of a cached
+ // resource or at least validate that the cached version is still fresh. It is
+ // implied by either max-age=0 or must-revalidate Cache-Control headers. It
+ // only makes sense when UseCache is true. We store it since MaxAge = 0 is the
+ // default unset value.
+ bool MustRevalidate = 9;
+
+ // StaleIfError specifies how stale the client will accept a cached response
+ // if the servers are unavailable to fetch a fresh one. Only makes sense when
+ // UseCache is true and MaxAge is set to a lower, non-zero value. It is
+ // ignored if the endpoint supports background refresh caching. See
+ // https://www.consul.io/api/index.html#agent-caching for more details.
+ google.protobuf.Duration StaleIfError = 10
+ [(gogoproto.nullable) = false];
+
+ // Filter specifies the go-bexpr filter expression to be used for
+ // filtering the data prior to returning a response
+ string Filter = 11;
+}
+
+// QueryMeta allows a query response to include potentially
+// useful metadata about a query
+//
+// mog annotation:
+//
+// target=github.com/hashicorp/consul/agent/structs.QueryMeta
+// output=common.gen.go
+// name=Structs
+// ignore-fields=NotModified,Backend
+message QueryMeta {
+ // This is the index associated with the read
+ uint64 Index = 1;
+
+ // If AllowStale is used, this is time elapsed since
+ // last contact between the follower and leader. This
+ // can be used to gauge staleness.
+ // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo
+ google.protobuf.Duration LastContact = 2
+ [(gogoproto.nullable) = false];
+
+ // Used to indicate if there is a known leader node
+ bool KnownLeader = 3;
+
+ // Consistencylevel returns the consistency used to serve the query
+ // Having `discovery_max_stale` on the agent can affect whether
+ // the request was served by a leader.
+ string ConsistencyLevel = 4;
+
+ // Reserved for NotModified and Backend.
+ reserved 5, 6;
+
+ // ResultsFilteredByACLs is true when some of the query's results were
+ // filtered out by enforcing ACLs. It may be false because nothing was
+ // removed, or because the endpoint does not yet support this flag.
+ bool ResultsFilteredByACLs = 7;
+}
+
+// EnterpriseMeta contains metadata that is only used by the Enterprise version
+// of Consul.
+message EnterpriseMeta {
+ // Namespace in which the entity exists.
+ string Namespace = 1;
+ // Partition in which the entity exists.
+ string Partition = 2;
+}
diff --git a/proto/pbcommongogo/common_oss.go b/proto/pbcommongogo/common_oss.go
new file mode 100644
index 000000000..d24b27b69
--- /dev/null
+++ b/proto/pbcommongogo/common_oss.go
@@ -0,0 +1,25 @@
+//go:build !consulent
+// +build !consulent
+
+package pbcommongogo
+
+import (
+ "github.com/hashicorp/consul/agent/structs"
+)
+
+var DefaultEnterpriseMeta = EnterpriseMeta{}
+
+func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *EnterpriseMeta {
+ return &EnterpriseMeta{}
+}
+
+func EnterpriseMetaToStructs(s *EnterpriseMeta, t *structs.EnterpriseMeta) {
+ if s == nil {
+ return
+ }
+}
+func EnterpriseMetaFromStructs(t *structs.EnterpriseMeta, s *EnterpriseMeta) {
+ if s == nil {
+ return
+ }
+}
diff --git a/proto/pbconnect/connect.gen.go b/proto/pbconnect/connect.gen.go
new file mode 100644
index 000000000..433a065b7
--- /dev/null
+++ b/proto/pbconnect/connect.gen.go
@@ -0,0 +1,116 @@
+// Code generated by mog. DO NOT EDIT.
+ +package pbconnect + +import "github.com/hashicorp/consul/agent/structs" + +func CARootToStructsCARoot(s *CARoot, t *structs.CARoot) { + if s == nil { + return + } + t.ID = s.ID + t.Name = s.Name + t.SerialNumber = s.SerialNumber + t.SigningKeyID = s.SigningKeyID + t.ExternalTrustDomain = s.ExternalTrustDomain + t.NotBefore = structs.TimeFromProtoGogo(s.NotBefore) + t.NotAfter = structs.TimeFromProtoGogo(s.NotAfter) + t.RootCert = s.RootCert + t.IntermediateCerts = s.IntermediateCerts + t.SigningCert = s.SigningCert + t.SigningKey = s.SigningKey + t.Active = s.Active + t.RotatedOutAt = structs.TimeFromProtoGogo(s.RotatedOutAt) + t.PrivateKeyType = s.PrivateKeyType + t.PrivateKeyBits = int(s.PrivateKeyBits) + t.RaftIndex = RaftIndexTo(s.RaftIndex) +} +func CARootFromStructsCARoot(t *structs.CARoot, s *CARoot) { + if s == nil { + return + } + s.ID = t.ID + s.Name = t.Name + s.SerialNumber = t.SerialNumber + s.SigningKeyID = t.SigningKeyID + s.ExternalTrustDomain = t.ExternalTrustDomain + s.NotBefore = structs.TimeToProtoGogo(t.NotBefore) + s.NotAfter = structs.TimeToProtoGogo(t.NotAfter) + s.RootCert = t.RootCert + s.IntermediateCerts = t.IntermediateCerts + s.SigningCert = t.SigningCert + s.SigningKey = t.SigningKey + s.Active = t.Active + s.RotatedOutAt = structs.TimeToProtoGogo(t.RotatedOutAt) + s.PrivateKeyType = t.PrivateKeyType + s.PrivateKeyBits = int32(t.PrivateKeyBits) + s.RaftIndex = RaftIndexFrom(t.RaftIndex) +} +func CARootsToStructsIndexedCARoots(s *CARoots, t *structs.IndexedCARoots) { + if s == nil { + return + } + t.ActiveRootID = s.ActiveRootID + t.TrustDomain = s.TrustDomain + { + t.Roots = make([]*structs.CARoot, len(s.Roots)) + for i := range s.Roots { + if s.Roots[i] != nil { + var x structs.CARoot + CARootToStructsCARoot(s.Roots[i], &x) + t.Roots[i] = &x + } + } + } + t.QueryMeta = QueryMetaTo(s.QueryMeta) +} +func CARootsFromStructsIndexedCARoots(t *structs.IndexedCARoots, s *CARoots) { + if s == nil { + return + } + s.ActiveRootID = t.ActiveRootID + s.TrustDomain = t.TrustDomain + { + s.Roots = make([]*CARoot, len(t.Roots)) + for i := range t.Roots { + if t.Roots[i] != nil { + var x CARoot + CARootFromStructsCARoot(t.Roots[i], &x) + s.Roots[i] = &x + } + } + } + s.QueryMeta = QueryMetaFrom(t.QueryMeta) +} +func IssuedCertToStructsIssuedCert(s *IssuedCert, t *structs.IssuedCert) { + if s == nil { + return + } + t.SerialNumber = s.SerialNumber + t.CertPEM = s.CertPEM + t.PrivateKeyPEM = s.PrivateKeyPEM + t.Service = s.Service + t.ServiceURI = s.ServiceURI + t.Agent = s.Agent + t.AgentURI = s.AgentURI + t.ValidAfter = structs.TimeFromProtoGogo(s.ValidAfter) + t.ValidBefore = structs.TimeFromProtoGogo(s.ValidBefore) + t.EnterpriseMeta = EnterpriseMetaTo(s.EnterpriseMeta) + t.RaftIndex = RaftIndexTo(s.RaftIndex) +} +func IssuedCertFromStructsIssuedCert(t *structs.IssuedCert, s *IssuedCert) { + if s == nil { + return + } + s.SerialNumber = t.SerialNumber + s.CertPEM = t.CertPEM + s.PrivateKeyPEM = t.PrivateKeyPEM + s.Service = t.Service + s.ServiceURI = t.ServiceURI + s.Agent = t.Agent + s.AgentURI = t.AgentURI + s.ValidAfter = structs.TimeToProtoGogo(t.ValidAfter) + s.ValidBefore = structs.TimeToProtoGogo(t.ValidBefore) + s.EnterpriseMeta = EnterpriseMetaFrom(t.EnterpriseMeta) + s.RaftIndex = RaftIndexFrom(t.RaftIndex) +} diff --git a/proto/pbconnect/connect.go b/proto/pbconnect/connect.go index f499ecc31..c61ca7c8a 100644 --- a/proto/pbconnect/connect.go +++ b/proto/pbconnect/connect.go @@ -2,184 +2,77 @@ package pbconnect import ( 
"github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbcommon" - "github.com/hashicorp/consul/proto/pbutil" + "github.com/hashicorp/consul/proto/pbcommongogo" ) -func CARootsToStructs(s *CARoots) (*structs.IndexedCARoots, error) { - if s == nil { - return nil, nil - } - var t structs.IndexedCARoots - t.ActiveRootID = s.ActiveRootID - t.TrustDomain = s.TrustDomain - t.Roots = make([]*structs.CARoot, len(s.Roots)) - for i := range s.Roots { - root, err := CARootToStructs(s.Roots[i]) - if err != nil { - return &t, err - } - t.Roots[i] = root - } - queryMeta, err := pbcommon.QueryMetaToStructs(s.QueryMeta) - if err != nil { - return &t, nil - } - t.QueryMeta = queryMeta - return &t, nil +func QueryMetaFrom(f structs.QueryMeta) *pbcommongogo.QueryMeta { + t := new(pbcommongogo.QueryMeta) + pbcommongogo.QueryMetaFromStructs(&f, t) + return t } -func NewCARootsFromStructs(s *structs.IndexedCARoots) (*CARoots, error) { - if s == nil { - return nil, nil - } - var t CARoots - t.ActiveRootID = s.ActiveRootID - t.TrustDomain = s.TrustDomain - t.Roots = make([]*CARoot, len(s.Roots)) - for i := range s.Roots { - root, err := NewCARootFromStructs(s.Roots[i]) - if err != nil { - return &t, err - } - t.Roots[i] = root - } - queryMeta, err := pbcommon.NewQueryMetaFromStructs(s.QueryMeta) - if err != nil { - return &t, nil - } - t.QueryMeta = queryMeta - return &t, nil +func QueryMetaTo(f *pbcommongogo.QueryMeta) structs.QueryMeta { + t := new(structs.QueryMeta) + pbcommongogo.QueryMetaToStructs(f, t) + return *t } -func CARootToStructs(s *CARoot) (*structs.CARoot, error) { - if s == nil { - return nil, nil - } - var t structs.CARoot - t.ID = s.ID - t.Name = s.Name - t.SerialNumber = s.SerialNumber - t.SigningKeyID = s.SigningKeyID - t.ExternalTrustDomain = s.ExternalTrustDomain - notBefore, err := pbutil.TimeFromProto(s.NotBefore) - if err != nil { - return &t, nil - } - t.NotBefore = notBefore - notAfter, err := pbutil.TimeFromProto(s.NotAfter) - if err != nil { - return &t, nil - } - t.NotAfter = notAfter - t.RootCert = s.RootCert - if len(s.IntermediateCerts) > 0 { - t.IntermediateCerts = make([]string, len(s.IntermediateCerts)) - copy(t.IntermediateCerts, s.IntermediateCerts) - } - t.SigningCert = s.SigningCert - t.SigningKey = s.SigningKey - t.Active = s.Active - rotatedOutAt, err := pbutil.TimeFromProto(s.RotatedOutAt) - if err != nil { - return &t, nil - } - t.RotatedOutAt = rotatedOutAt - t.PrivateKeyType = s.PrivateKeyType - t.PrivateKeyBits = int(s.PrivateKeyBits) - t.RaftIndex = pbcommon.RaftIndexToStructs(s.RaftIndex) - return &t, nil +func RaftIndexFrom(f structs.RaftIndex) *pbcommongogo.RaftIndex { + t := new(pbcommongogo.RaftIndex) + pbcommongogo.RaftIndexFromStructs(&f, t) + return t } -func NewCARootFromStructs(s *structs.CARoot) (*CARoot, error) { - if s == nil { - return nil, nil - } - var t CARoot - t.ID = s.ID - t.Name = s.Name - t.SerialNumber = s.SerialNumber - t.SigningKeyID = s.SigningKeyID - t.ExternalTrustDomain = s.ExternalTrustDomain - notBefore, err := pbutil.TimeToProto(s.NotBefore) - if err != nil { - return &t, err - } - t.NotBefore = notBefore - notAfter, err := pbutil.TimeToProto(s.NotAfter) - if err != nil { - return &t, err - } - t.NotAfter = notAfter - t.RootCert = s.RootCert - if len(s.IntermediateCerts) > 0 { - t.IntermediateCerts = make([]string, len(s.IntermediateCerts)) - copy(t.IntermediateCerts, s.IntermediateCerts) - } - t.SigningCert = s.SigningCert - t.SigningKey = s.SigningKey - t.Active = s.Active - rotatedOutAt, err := 
pbutil.TimeToProto(s.RotatedOutAt) - if err != nil { - return &t, err - } - t.RotatedOutAt = rotatedOutAt - t.PrivateKeyType = s.PrivateKeyType - t.PrivateKeyBits = int32(s.PrivateKeyBits) - t.RaftIndex = pbcommon.NewRaftIndexFromStructs(s.RaftIndex) - return &t, nil +func RaftIndexTo(f *pbcommongogo.RaftIndex) structs.RaftIndex { + t := new(structs.RaftIndex) + pbcommongogo.RaftIndexToStructs(f, t) + return *t } -func IssuedCertToStructs(s *IssuedCert) (*structs.IssuedCert, error) { - if s == nil { - return nil, nil - } - var t structs.IssuedCert - t.SerialNumber = s.SerialNumber - t.CertPEM = s.CertPEM - t.PrivateKeyPEM = s.PrivateKeyPEM - t.Service = s.Service - t.ServiceURI = s.ServiceURI - t.Agent = s.Agent - t.AgentURI = s.AgentURI - validAfter, err := pbutil.TimeFromProto(s.ValidAfter) - if err != nil { - return &t, err - } - t.ValidAfter = validAfter - validBefore, err := pbutil.TimeFromProto(s.ValidBefore) - if err != nil { - return &t, err - } - t.ValidBefore = validBefore - t.EnterpriseMeta = pbcommon.EnterpriseMetaToStructs(s.EnterpriseMeta) - t.RaftIndex = pbcommon.RaftIndexToStructs(s.RaftIndex) - return &t, nil +func EnterpriseMetaFrom(f structs.EnterpriseMeta) *pbcommongogo.EnterpriseMeta { + t := new(pbcommongogo.EnterpriseMeta) + pbcommongogo.EnterpriseMetaFromStructs(&f, t) + return t } -func NewIssuedCertFromStructs(s *structs.IssuedCert) (*IssuedCert, error) { - if s == nil { - return nil, nil - } - var t IssuedCert - t.SerialNumber = s.SerialNumber - t.CertPEM = s.CertPEM - t.PrivateKeyPEM = s.PrivateKeyPEM - t.Service = s.Service - t.ServiceURI = s.ServiceURI - t.Agent = s.Agent - t.AgentURI = s.AgentURI - validAfter, err := pbutil.TimeToProto(s.ValidAfter) - if err != nil { - return &t, err - } - t.ValidAfter = validAfter - validBefore, err := pbutil.TimeToProto(s.ValidBefore) - if err != nil { - return &t, err - } - t.ValidBefore = validBefore - t.EnterpriseMeta = pbcommon.NewEnterpriseMetaFromStructs(s.EnterpriseMeta) - t.RaftIndex = pbcommon.NewRaftIndexFromStructs(s.RaftIndex) - return &t, nil +func EnterpriseMetaTo(f *pbcommongogo.EnterpriseMeta) structs.EnterpriseMeta { + t := new(structs.EnterpriseMeta) + pbcommongogo.EnterpriseMetaToStructs(f, t) + return *t +} + +func NewIssuedCertFromStructs(in *structs.IssuedCert) (*IssuedCert, error) { + t := new(IssuedCert) + IssuedCertFromStructsIssuedCert(in, t) + return t, nil +} + +func NewCARootsFromStructs(in *structs.IndexedCARoots) (*CARoots, error) { + t := new(CARoots) + CARootsFromStructsIndexedCARoots(in, t) + return t, nil +} + +func CARootsToStructs(in *CARoots) (*structs.IndexedCARoots, error) { + t := new(structs.IndexedCARoots) + CARootsToStructsIndexedCARoots(in, t) + return t, nil +} + +func NewCARootFromStructs(in *structs.CARoot) (*CARoot, error) { + t := new(CARoot) + CARootFromStructsCARoot(in, t) + return t, nil +} + +func CARootToStructs(in *CARoot) (*structs.CARoot, error) { + t := new(structs.CARoot) + CARootToStructsCARoot(in, t) + return t, nil +} + +func IssuedCertToStructs(in *IssuedCert) (*structs.IssuedCert, error) { + t := new(structs.IssuedCert) + IssuedCertToStructsIssuedCert(in, t) + return t, nil } diff --git a/proto/pbconnect/connect.pb.go b/proto/pbconnect/connect.pb.go index 73081a696..64a6738f9 100644 --- a/proto/pbconnect/connect.pb.go +++ b/proto/pbconnect/connect.pb.go @@ -7,7 +7,7 @@ import ( fmt "fmt" types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" - pbcommon "github.com/hashicorp/consul/proto/pbcommon" + pbcommongogo 
"github.com/hashicorp/consul/proto/pbcommongogo" io "io" math "math" math_bits "math/bits" @@ -25,6 +25,12 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // CARoots is the list of all currently trusted CA Roots. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.IndexedCARoots +// output=connect.gen.go +// name=StructsIndexedCARoots type CARoots struct { // ActiveRootID is the ID of a root in Roots that is the active CA root. // Other roots are still valid if they're in the Roots list but are in @@ -57,10 +63,11 @@ type CARoots struct { Roots []*CARoot `protobuf:"bytes,3,rep,name=Roots,proto3" json:"Roots,omitempty"` // QueryMeta here is mainly used to contain the latest Raft Index that could // be used to perform a blocking query. - QueryMeta *pbcommon.QueryMeta `protobuf:"bytes,4,opt,name=QueryMeta,proto3" json:"QueryMeta,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // mog: func-to=QueryMetaTo func-from=QueryMetaFrom + QueryMeta *pbcommongogo.QueryMeta `protobuf:"bytes,4,opt,name=QueryMeta,proto3" json:"QueryMeta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CARoots) Reset() { *m = CARoots{} } @@ -117,13 +124,20 @@ func (m *CARoots) GetRoots() []*CARoot { return nil } -func (m *CARoots) GetQueryMeta() *pbcommon.QueryMeta { +func (m *CARoots) GetQueryMeta() *pbcommongogo.QueryMeta { if m != nil { return m.QueryMeta } return nil } +// CARoot is the trusted CA Root. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.CARoot +// output=connect.gen.go +// name=StructsCARoot type CARoot struct { // ID is a globally unique ID (UUID) representing this CA root. ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -146,8 +160,10 @@ type CARoot struct { // future flexibility. ExternalTrustDomain string `protobuf:"bytes,5,opt,name=ExternalTrustDomain,proto3" json:"ExternalTrustDomain,omitempty"` // Time validity bounds. + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo NotBefore *types.Timestamp `protobuf:"bytes,6,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"` - NotAfter *types.Timestamp `protobuf:"bytes,7,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"` + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + NotAfter *types.Timestamp `protobuf:"bytes,7,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"` // RootCert is the PEM-encoded public certificate. RootCert string `protobuf:"bytes,8,opt,name=RootCert,proto3" json:"RootCert,omitempty"` // IntermediateCerts is a list of PEM-encoded intermediate certs to @@ -166,6 +182,7 @@ type CARoot struct { // RotatedOutAt is the time at which this CA was removed from the state. // This will only be set on roots that have been rotated out from being the // active root. + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo RotatedOutAt *types.Timestamp `protobuf:"bytes,13,opt,name=RotatedOutAt,proto3" json:"RotatedOutAt,omitempty"` // PrivateKeyType is the type of the private key used to sign certificates. It // may be "rsa" or "ec". This is provided as a convenience to avoid parsing @@ -174,11 +191,13 @@ type CARoot struct { // PrivateKeyBits is the length of the private key used to sign certificates. 
// This is provided as a convenience to avoid parsing the public key from the // certificate to infer the type. - PrivateKeyBits int32 `protobuf:"varint,15,opt,name=PrivateKeyBits,proto3" json:"PrivateKeyBits,omitempty"` - RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,16,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // mog: func-to=int func-from=int32 + PrivateKeyBits int32 `protobuf:"varint,15,opt,name=PrivateKeyBits,proto3" json:"PrivateKeyBits,omitempty"` + // mog: func-to=RaftIndexTo func-from=RaftIndexFrom + RaftIndex *pbcommongogo.RaftIndex `protobuf:"bytes,16,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CARoot) Reset() { *m = CARoot{} } @@ -319,13 +338,19 @@ func (m *CARoot) GetPrivateKeyBits() int32 { return 0 } -func (m *CARoot) GetRaftIndex() *pbcommon.RaftIndex { +func (m *CARoot) GetRaftIndex() *pbcommongogo.RaftIndex { if m != nil { return m.RaftIndex } return nil } +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.IssuedCert +// output=connect.gen.go +// name=StructsIssuedCert type IssuedCert struct { // SerialNumber is the unique serial number for this certificate. // This is encoded in standard hex separated by :. @@ -345,14 +370,18 @@ type IssuedCert struct { AgentURI string `protobuf:"bytes,7,opt,name=AgentURI,proto3" json:"AgentURI,omitempty"` // ValidAfter and ValidBefore are the validity periods for the // certificate. - ValidAfter *types.Timestamp `protobuf:"bytes,8,opt,name=ValidAfter,proto3" json:"ValidAfter,omitempty"` + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + ValidAfter *types.Timestamp `protobuf:"bytes,8,opt,name=ValidAfter,proto3" json:"ValidAfter,omitempty"` + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo ValidBefore *types.Timestamp `protobuf:"bytes,9,opt,name=ValidBefore,proto3" json:"ValidBefore,omitempty"` // EnterpriseMeta is the Consul Enterprise specific metadata - EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,10,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` - RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // mog: func-to=EnterpriseMetaTo func-from=EnterpriseMetaFrom + EnterpriseMeta *pbcommongogo.EnterpriseMeta `protobuf:"bytes,10,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` + // mog: func-to=RaftIndexTo func-from=RaftIndexFrom + RaftIndex *pbcommongogo.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *IssuedCert) Reset() { *m = IssuedCert{} } @@ -451,14 +480,14 @@ func (m *IssuedCert) GetValidBefore() *types.Timestamp { return nil } -func (m *IssuedCert) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { +func (m *IssuedCert) GetEnterpriseMeta() *pbcommongogo.EnterpriseMeta { if m != nil { return m.EnterpriseMeta } return nil } -func (m *IssuedCert) GetRaftIndex() *pbcommon.RaftIndex { +func (m *IssuedCert) GetRaftIndex() *pbcommongogo.RaftIndex { if m != nil { return m.RaftIndex } @@ -474,49 +503,49 @@ func init() { func init() { 
proto.RegisterFile("proto/pbconnect/connect.proto", fileDescriptor_80627e709958eb04) } var fileDescriptor_80627e709958eb04 = []byte{ - // 659 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xdd, 0x6a, 0xdb, 0x4a, - 0x10, 0x3e, 0x8a, 0x7f, 0x35, 0x4e, 0x9c, 0x93, 0x3d, 0x87, 0xb0, 0xf8, 0x70, 0x5c, 0x61, 0xda, - 0x62, 0x68, 0xb1, 0x4a, 0x0a, 0xa5, 0x94, 0x36, 0xe0, 0xc4, 0xb9, 0x10, 0x21, 0x6e, 0xba, 0x49, - 0x7b, 0xd1, 0x3b, 0xd9, 0x1e, 0x3b, 0x0b, 0x96, 0xd6, 0xac, 0x56, 0x21, 0x7e, 0x93, 0xbe, 0x41, - 0x1f, 0xa5, 0xbd, 0xec, 0x23, 0x94, 0xf4, 0x39, 0x0a, 0x65, 0x57, 0x92, 0x2d, 0xb9, 0x05, 0x5f, - 0x79, 0xe7, 0x9b, 0x6f, 0x46, 0x33, 0xfb, 0x7d, 0x5e, 0xf8, 0x7f, 0x21, 0x85, 0x12, 0xee, 0x62, - 0x34, 0x16, 0x61, 0x88, 0x63, 0xe5, 0xa6, 0xbf, 0x3d, 0x83, 0x93, 0x5a, 0x1a, 0xb6, 0x1e, 0xcc, - 0x84, 0x98, 0xcd, 0xd1, 0x35, 0xf0, 0x28, 0x9e, 0xba, 0x8a, 0x07, 0x18, 0x29, 0x3f, 0x58, 0x24, - 0xcc, 0xd6, 0x7f, 0xeb, 0x46, 0x41, 0x20, 0x42, 0x37, 0xf9, 0x49, 0x92, 0x9d, 0xcf, 0x16, 0xd4, - 0x4e, 0xfb, 0x4c, 0x08, 0x15, 0x91, 0x0e, 0xec, 0xf6, 0xc7, 0x8a, 0xdf, 0xa2, 0x0e, 0xbd, 0x01, - 0xb5, 0x1c, 0xab, 0x6b, 0xb3, 0x02, 0x46, 0x1c, 0x68, 0x5c, 0xcb, 0x38, 0x52, 0x03, 0x11, 0xf8, - 0x3c, 0xa4, 0x3b, 0x86, 0x92, 0x87, 0xc8, 0x23, 0xa8, 0x98, 0x76, 0xb4, 0xe4, 0x94, 0xba, 0x8d, - 0xa3, 0xfd, 0x5e, 0x36, 0x77, 0xf2, 0x19, 0x96, 0x64, 0x89, 0x0b, 0xf6, 0xbb, 0x18, 0xe5, 0xf2, - 0x02, 0x95, 0x4f, 0xcb, 0x8e, 0xd5, 0x6d, 0x1c, 0x1d, 0xf4, 0xd2, 0xd1, 0x56, 0x09, 0xb6, 0xe6, - 0x74, 0x7e, 0x96, 0xa1, 0x9a, 0xb4, 0x20, 0x4d, 0xd8, 0x59, 0x8d, 0xb7, 0xe3, 0x0d, 0x08, 0x81, - 0xf2, 0xd0, 0x0f, 0x30, 0x9d, 0xc6, 0x9c, 0xf5, 0x32, 0x57, 0x28, 0xb9, 0x3f, 0x1f, 0xc6, 0xc1, - 0x08, 0x25, 0x2d, 0x39, 0x56, 0xb7, 0xcc, 0x0a, 0x98, 0xe1, 0xf0, 0x59, 0xc8, 0xc3, 0xd9, 0x39, - 0x2e, 0xbd, 0x81, 0x19, 0xc3, 0x66, 0x05, 0x8c, 0x3c, 0x83, 0x7f, 0xce, 0xee, 0x14, 0xca, 0xd0, - 0x9f, 0xe7, 0x17, 0xaf, 0x18, 0xea, 0x9f, 0x52, 0xe4, 0x25, 0xd8, 0x43, 0xa1, 0x4e, 0x70, 0x2a, - 0x24, 0xd2, 0xaa, 0xd9, 0xac, 0xd5, 0x4b, 0x44, 0xea, 0x65, 0x22, 0xf5, 0xae, 0x33, 0x91, 0xd8, - 0x9a, 0x4c, 0x5e, 0x40, 0x7d, 0x28, 0x54, 0x7f, 0xaa, 0x50, 0xd2, 0xda, 0xd6, 0xc2, 0x15, 0x97, - 0xb4, 0xa0, 0xae, 0xef, 0xe5, 0x14, 0xa5, 0xa2, 0x75, 0x33, 0xd8, 0x2a, 0x26, 0x4f, 0xe1, 0xc0, - 0x0b, 0x15, 0xca, 0x00, 0x27, 0xdc, 0x57, 0xa8, 0xb1, 0x88, 0xda, 0x4e, 0xa9, 0x6b, 0xb3, 0xdf, - 0x13, 0x5a, 0xde, 0x74, 0x7b, 0xd3, 0x0c, 0x12, 0x79, 0x73, 0x10, 0x69, 0x03, 0xac, 0xef, 0x87, - 0x36, 0x0c, 0x21, 0x87, 0x90, 0x43, 0xa8, 0x26, 0x86, 0xa1, 0xbb, 0x8e, 0xd5, 0xad, 0xb3, 0x34, - 0x22, 0xc7, 0xb0, 0xcb, 0x84, 0xf2, 0x15, 0x4e, 0xde, 0xc6, 0xaa, 0xaf, 0xe8, 0xde, 0xd6, 0xfd, - 0x0a, 0x7c, 0xf2, 0x18, 0x9a, 0x97, 0x92, 0xdf, 0xfa, 0x0a, 0xcf, 0x71, 0x79, 0xbd, 0x5c, 0x20, - 0x6d, 0x9a, 0x6f, 0x6f, 0xa0, 0x45, 0xde, 0x09, 0x57, 0x11, 0xdd, 0x77, 0xac, 0x6e, 0x85, 0x6d, - 0xa0, 0xda, 0x7f, 0xcc, 0x9f, 0x2a, 0x2f, 0x9c, 0xe0, 0x1d, 0xfd, 0xbb, 0xe8, 0xbf, 0x55, 0x82, - 0xad, 0x39, 0x9d, 0x2f, 0x25, 0x00, 0x2f, 0x8a, 0x62, 0x9c, 0x98, 0x7b, 0xd8, 0xf4, 0x57, 0xfa, - 0x67, 0x29, 0xf8, 0x8b, 0x42, 0x4d, 0x73, 0x2f, 0xcf, 0x2e, 0x52, 0x6b, 0x66, 0x21, 0x79, 0x08, - 0x7b, 0xeb, 0x79, 0x74, 0xbe, 0x64, 0xf2, 0x45, 0x50, 0xd7, 0x5f, 0xa1, 0xbc, 0xe5, 0x63, 0x4c, - 0xad, 0x99, 0x85, 0x46, 0x85, 0xe4, 0xf8, 0x9e, 0x79, 0xa9, 0x19, 0x73, 0x08, 0xf9, 0x17, 0x2a, - 0xfd, 0x19, 0x86, 0xca, 0xf8, 0xcf, 0x66, 0x49, 0xa0, 0x7d, 0x62, 0x0e, 0xba, 0xa6, 0x96, 0xf8, - 0x24, 0x8b, 0xc9, 0x2b, 0x80, 0x0f, 
0xfe, 0x9c, 0x4f, 0x12, 0xf7, 0xd5, 0xb7, 0xaa, 0x93, 0x63, - 0x93, 0xd7, 0xd0, 0x30, 0x51, 0xea, 0x79, 0x7b, 0x6b, 0x71, 0x9e, 0x4e, 0x8e, 0xa1, 0x79, 0xa6, - 0x8d, 0xb8, 0x90, 0x3c, 0x42, 0xf3, 0x1c, 0x80, 0x69, 0x70, 0x98, 0xc9, 0x51, 0xcc, 0xb2, 0x0d, - 0x76, 0x51, 0xc9, 0xc6, 0x76, 0x25, 0x4f, 0xde, 0x7c, 0xbd, 0x6f, 0x5b, 0xdf, 0xee, 0xdb, 0xd6, - 0xf7, 0xfb, 0xb6, 0xf5, 0xe9, 0x47, 0xfb, 0xaf, 0x8f, 0x4f, 0x66, 0x5c, 0xdd, 0xc4, 0x23, 0x5d, - 0xe5, 0xde, 0xf8, 0xd1, 0x0d, 0x1f, 0x0b, 0xb9, 0xd0, 0x0f, 0x6e, 0x14, 0xcf, 0xdd, 0x8d, 0x77, - 0x78, 0x54, 0x35, 0xc0, 0xf3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x98, 0x92, 0x5c, 0xa1, - 0x05, 0x00, 0x00, + // 661 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0x4d, 0xf3, 0xe3, 0x49, 0x9b, 0xc2, 0xf2, 0xa3, 0x55, 0x24, 0x52, 0x2b, 0x02, 0x14, + 0x09, 0x94, 0xa0, 0x56, 0x42, 0x08, 0x01, 0x52, 0xd2, 0xf4, 0x10, 0x55, 0x0d, 0x65, 0x5b, 0x38, + 0x70, 0x73, 0x92, 0x89, 0xbb, 0x52, 0xec, 0x8d, 0xd6, 0xeb, 0xaa, 0x39, 0xf2, 0x16, 0xbc, 0x03, + 0x2f, 0xc2, 0x0d, 0x1e, 0x01, 0x95, 0x17, 0x41, 0xbb, 0x76, 0x62, 0x3b, 0x45, 0x0a, 0xa7, 0xec, + 0x7c, 0xf3, 0xcd, 0x78, 0x66, 0xbf, 0x2f, 0x0b, 0x8f, 0xe7, 0x52, 0x28, 0xd1, 0x99, 0x8f, 0xc6, + 0x22, 0x08, 0x70, 0xac, 0x3a, 0xc9, 0x6f, 0xdb, 0xe0, 0xa4, 0x9c, 0x84, 0xf5, 0x7d, 0x4f, 0x08, + 0x6f, 0x86, 0x1d, 0x03, 0x8f, 0xa2, 0x69, 0x47, 0x71, 0x1f, 0x43, 0xe5, 0xfa, 0xf3, 0x98, 0x59, + 0xdf, 0x4f, 0x1b, 0xf9, 0xbe, 0x08, 0x3c, 0xe1, 0x89, 0x4e, 0x7c, 0x8c, 0x09, 0xcd, 0xef, 0x16, + 0x94, 0x8f, 0xba, 0x4c, 0x08, 0x15, 0x92, 0x26, 0xec, 0x74, 0xc7, 0x8a, 0x5f, 0xa1, 0x0e, 0x07, + 0x7d, 0x6a, 0x39, 0x56, 0xcb, 0x66, 0x39, 0x8c, 0x38, 0x50, 0xbd, 0x90, 0x51, 0xa8, 0xfa, 0xc2, + 0x77, 0x79, 0x40, 0xb7, 0x0c, 0x25, 0x0b, 0x91, 0xa7, 0x50, 0x34, 0xed, 0x68, 0xc1, 0x29, 0xb4, + 0xaa, 0x07, 0x7b, 0xed, 0xe5, 0xec, 0xf1, 0x67, 0x58, 0x9c, 0x25, 0x87, 0x60, 0x7f, 0x8c, 0x50, + 0x2e, 0x4e, 0x51, 0xb9, 0x74, 0xdb, 0xb1, 0x5a, 0xd5, 0x83, 0x87, 0xed, 0x74, 0xca, 0xf6, 0x2a, + 0xc9, 0x52, 0x5e, 0xf3, 0x6b, 0x11, 0x4a, 0x71, 0x1b, 0x52, 0x83, 0xad, 0xd5, 0x88, 0x5b, 0x83, + 0x3e, 0x21, 0xb0, 0x3d, 0x74, 0x7d, 0x4c, 0x26, 0x32, 0x67, 0xbd, 0xd0, 0x39, 0x4a, 0xee, 0xce, + 0x86, 0x91, 0x3f, 0x42, 0x49, 0x0b, 0x8e, 0xd5, 0xda, 0x66, 0x39, 0xcc, 0x70, 0xb8, 0x17, 0xf0, + 0xc0, 0x3b, 0xc1, 0xc5, 0xa0, 0x6f, 0x46, 0xb1, 0x59, 0x0e, 0x23, 0x2f, 0xe1, 0xfe, 0xf1, 0xb5, + 0x42, 0x19, 0xb8, 0xb3, 0xec, 0xf2, 0x45, 0x43, 0xfd, 0x57, 0x8a, 0xbc, 0x06, 0x7b, 0x28, 0x54, + 0x0f, 0xa7, 0x42, 0x22, 0x2d, 0x99, 0xed, 0xea, 0xed, 0x58, 0xac, 0xf6, 0x52, 0xac, 0xf6, 0xc5, + 0x52, 0x2c, 0x96, 0x92, 0xc9, 0x2b, 0xa8, 0x0c, 0x85, 0xea, 0x4e, 0x15, 0x4a, 0x5a, 0xde, 0x58, + 0xb8, 0xe2, 0x92, 0x3a, 0x54, 0xf4, 0xbd, 0x1c, 0xa1, 0x54, 0xb4, 0x62, 0x06, 0x5b, 0xc5, 0xe4, + 0x05, 0xdc, 0x1b, 0x04, 0x0a, 0xa5, 0x8f, 0x13, 0xee, 0x2a, 0xd4, 0x58, 0x48, 0x6d, 0xa7, 0xd0, + 0xb2, 0xd9, 0xed, 0x84, 0x96, 0x38, 0xd9, 0xde, 0x34, 0x83, 0x58, 0xe2, 0x0c, 0x44, 0x1a, 0x00, + 0xe9, 0xfd, 0xd0, 0xaa, 0x21, 0x64, 0x10, 0xf2, 0x08, 0x4a, 0xb1, 0x69, 0xe8, 0x8e, 0x63, 0xb5, + 0x2a, 0x2c, 0x89, 0xc8, 0x7b, 0xd8, 0x61, 0x42, 0xb9, 0x0a, 0x27, 0x1f, 0x22, 0xd5, 0x55, 0x74, + 0x77, 0xe3, 0x7e, 0x39, 0x3e, 0x79, 0x06, 0xb5, 0x33, 0xc9, 0xaf, 0x5c, 0x85, 0x27, 0xb8, 0xb8, + 0x58, 0xcc, 0x91, 0xd6, 0xcc, 0xb7, 0xd7, 0xd0, 0x3c, 0xaf, 0xc7, 0x55, 0x48, 0xf7, 0x1c, 0xab, + 0x55, 0x64, 0x6b, 0xa8, 0xf6, 0x20, 0x73, 0xa7, 0x6a, 0x10, 0x4c, 0xf0, 0x9a, 0xde, 0xbd, 
0xed, + 0xc1, 0x55, 0x92, 0xa5, 0xbc, 0xe6, 0xcf, 0x02, 0xc0, 0x20, 0x0c, 0x23, 0x9c, 0x98, 0xbb, 0x58, + 0xf7, 0x58, 0xf2, 0xa7, 0xc9, 0x79, 0x8c, 0x42, 0x59, 0x73, 0xcf, 0x8e, 0x4f, 0x13, 0x7b, 0x2e, + 0x43, 0xf2, 0x04, 0x76, 0xd3, 0x99, 0x74, 0xbe, 0x60, 0xf2, 0x79, 0x50, 0xd7, 0x9f, 0xa3, 0xbc, + 0xe2, 0x63, 0x4c, 0xec, 0xb9, 0x0c, 0x8d, 0x12, 0xf1, 0xf1, 0x13, 0x1b, 0x24, 0x86, 0xcc, 0x20, + 0xe4, 0x01, 0x14, 0xbb, 0x1e, 0x06, 0xca, 0x78, 0xd0, 0x66, 0x71, 0xa0, 0xbd, 0x62, 0x0e, 0xba, + 0xa6, 0x1c, 0x7b, 0x65, 0x19, 0x93, 0x37, 0x00, 0x9f, 0xdd, 0x19, 0x9f, 0xc4, 0x0e, 0xac, 0x6c, + 0x54, 0x28, 0xc3, 0x26, 0x6f, 0xa1, 0x6a, 0xa2, 0xc4, 0xf7, 0xf6, 0xc6, 0xe2, 0x2c, 0x9d, 0xf4, + 0xa0, 0x76, 0xac, 0xcd, 0x38, 0x97, 0x3c, 0x44, 0xf3, 0x2c, 0x40, 0xd2, 0x20, 0x23, 0x49, 0x9e, + 0xc1, 0xd6, 0x2a, 0xf2, 0x8a, 0x56, 0xff, 0x4f, 0xd1, 0xde, 0xbb, 0x1f, 0x37, 0x0d, 0xeb, 0xd7, + 0x4d, 0xc3, 0xfa, 0x7d, 0xd3, 0xb0, 0xbe, 0xfd, 0x69, 0xdc, 0xf9, 0xf2, 0xdc, 0xe3, 0xea, 0x32, + 0x1a, 0xe9, 0xca, 0xce, 0xa5, 0x1b, 0x5e, 0xf2, 0xb1, 0x90, 0x73, 0xfd, 0x08, 0x87, 0xd1, 0xac, + 0xb3, 0xf6, 0x36, 0x8f, 0x4a, 0x06, 0x38, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xda, 0x8a, 0x91, + 0x6e, 0xb5, 0x05, 0x00, 0x00, } func (m *CARoots) Marshal() (dAtA []byte, err error) { @@ -1207,7 +1236,7 @@ func (m *CARoots) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.QueryMeta == nil { - m.QueryMeta = &pbcommon.QueryMeta{} + m.QueryMeta = &pbcommongogo.QueryMeta{} } if err := m.QueryMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1748,7 +1777,7 @@ func (m *CARoot) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.RaftIndex == nil { - m.RaftIndex = &pbcommon.RaftIndex{} + m.RaftIndex = &pbcommongogo.RaftIndex{} } if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2131,7 +2160,7 @@ func (m *IssuedCert) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.EnterpriseMeta == nil { - m.EnterpriseMeta = &pbcommon.EnterpriseMeta{} + m.EnterpriseMeta = &pbcommongogo.EnterpriseMeta{} } if err := m.EnterpriseMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2167,7 +2196,7 @@ func (m *IssuedCert) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.RaftIndex == nil { - m.RaftIndex = &pbcommon.RaftIndex{} + m.RaftIndex = &pbcommongogo.RaftIndex{} } if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/proto/pbconnect/connect.proto b/proto/pbconnect/connect.proto index 0b2d10b46..129fa5385 100644 --- a/proto/pbconnect/connect.proto +++ b/proto/pbconnect/connect.proto @@ -5,9 +5,15 @@ package connect; option go_package = "github.com/hashicorp/consul/proto/pbconnect"; import "google/protobuf/timestamp.proto"; -import "proto/pbcommon/common.proto"; +import "proto/pbcommongogo/common.proto"; // CARoots is the list of all currently trusted CA Roots. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.IndexedCARoots +// output=connect.gen.go +// name=StructsIndexedCARoots message CARoots { // ActiveRootID is the ID of a root in Roots that is the active CA root. // Other roots are still valid if they're in the Roots list but are in @@ -43,9 +49,17 @@ message CARoots { // QueryMeta here is mainly used to contain the latest Raft Index that could // be used to perform a blocking query. - common.QueryMeta QueryMeta = 4; + // mog: func-to=QueryMetaTo func-from=QueryMetaFrom + commongogo.QueryMeta QueryMeta = 4; } +// CARoot is the trusted CA Root. 
+// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.CARoot +// output=connect.gen.go +// name=StructsCARoot message CARoot { // ID is a globally unique ID (UUID) representing this CA root. string ID = 1; @@ -73,7 +87,9 @@ message CARoot { string ExternalTrustDomain = 5; // Time validity bounds. + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo google.protobuf.Timestamp NotBefore = 6; + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo google.protobuf.Timestamp NotAfter = 7; // RootCert is the PEM-encoded public certificate. @@ -98,6 +114,7 @@ message CARoot { // RotatedOutAt is the time at which this CA was removed from the state. // This will only be set on roots that have been rotated out from being the // active root. + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo google.protobuf.Timestamp RotatedOutAt = 13; // PrivateKeyType is the type of the private key used to sign certificates. It @@ -108,11 +125,19 @@ message CARoot { // PrivateKeyBits is the length of the private key used to sign certificates. // This is provided as a convenience to avoid parsing the public key from the // certificate to infer the type. + // mog: func-to=int func-from=int32 int32 PrivateKeyBits = 15; - - common.RaftIndex RaftIndex = 16; + + // mog: func-to=RaftIndexTo func-from=RaftIndexFrom + commongogo.RaftIndex RaftIndex = 16; } +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.IssuedCert +// output=connect.gen.go +// name=StructsIssuedCert message IssuedCert { // SerialNumber is the unique serial number for this certificate. // This is encoded in standard hex separated by :. @@ -136,11 +161,15 @@ message IssuedCert { // ValidAfter and ValidBefore are the validity periods for the // certificate. 
+ // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo google.protobuf.Timestamp ValidAfter = 8; + // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo google.protobuf.Timestamp ValidBefore = 9; // EnterpriseMeta is the Consul Enterprise specific metadata - common.EnterpriseMeta EnterpriseMeta = 10; + // mog: func-to=EnterpriseMetaTo func-from=EnterpriseMetaFrom + commongogo.EnterpriseMeta EnterpriseMeta = 10; - common.RaftIndex RaftIndex = 11; + // mog: func-to=RaftIndexTo func-from=RaftIndexFrom + commongogo.RaftIndex RaftIndex = 11; } \ No newline at end of file diff --git a/proto/pbservice/convert.go b/proto/pbservice/convert.go index 6f679d237..a68c22b8f 100644 --- a/proto/pbservice/convert.go +++ b/proto/pbservice/convert.go @@ -2,18 +2,18 @@ package pbservice import ( "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" ) -func RaftIndexToStructs(s pbcommon.RaftIndex) structs.RaftIndex { +func RaftIndexToStructs(s pbcommongogo.RaftIndex) structs.RaftIndex { return structs.RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, } } -func NewRaftIndexFromStructs(s structs.RaftIndex) pbcommon.RaftIndex { - return pbcommon.RaftIndex{ +func NewRaftIndexFromStructs(s structs.RaftIndex) pbcommongogo.RaftIndex { + return pbcommongogo.RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, } diff --git a/proto/pbservice/convert_oss.go b/proto/pbservice/convert_oss.go index 215a2dc5f..214cf69ad 100644 --- a/proto/pbservice/convert_oss.go +++ b/proto/pbservice/convert_oss.go @@ -5,13 +5,13 @@ package pbservice import ( "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" ) -func EnterpriseMetaToStructs(_ pbcommon.EnterpriseMeta) structs.EnterpriseMeta { +func EnterpriseMetaToStructs(_ pbcommongogo.EnterpriseMeta) structs.EnterpriseMeta { return structs.EnterpriseMeta{} } -func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) pbcommon.EnterpriseMeta { - return pbcommon.EnterpriseMeta{} +func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) pbcommongogo.EnterpriseMeta { + return pbcommongogo.EnterpriseMeta{} } diff --git a/proto/pbservice/healthcheck.pb.go b/proto/pbservice/healthcheck.pb.go index cd06be937..cb9eed6f8 100644 --- a/proto/pbservice/healthcheck.pb.go +++ b/proto/pbservice/healthcheck.pb.go @@ -8,7 +8,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" - pbcommon "github.com/hashicorp/consul/proto/pbcommon" + pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo" github_com_hashicorp_consul_types "github.com/hashicorp/consul/types" io "io" math "math" @@ -46,9 +46,9 @@ type HealthCheck struct { Type string `protobuf:"bytes,12,opt,name=Type,proto3" json:"Type,omitempty"` Definition HealthCheckDefinition `protobuf:"bytes,10,opt,name=Definition,proto3" json:"Definition"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` + pbcommongogo.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - EnterpriseMeta pbcommon.EnterpriseMeta `protobuf:"bytes,13,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` + 
EnterpriseMeta pbcommongogo.EnterpriseMeta `protobuf:"bytes,13,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` // mog: func-to=int func-from=int32 ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` @@ -290,76 +290,76 @@ func init() { func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) } var fileDescriptor_8a6f7448747c9fbe = []byte{ - // 1092 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x4f, 0xe3, 0x46, - 0x14, 0x8e, 0x09, 0x49, 0xf0, 0x04, 0x58, 0x98, 0x05, 0x3a, 0xcb, 0x6e, 0x4d, 0x4a, 0xf7, 0x40, - 0x55, 0x9a, 0xa8, 0xb4, 0xaa, 0xba, 0xad, 0x54, 0x89, 0x10, 0x16, 0x52, 0x01, 0x4d, 0x9d, 0x74, - 0x2b, 0xf5, 0x66, 0x9c, 0x89, 0x63, 0x91, 0x78, 0xa2, 0xf1, 0x18, 0x91, 0xfe, 0x8a, 0x1e, 0xfb, - 0x03, 0xfa, 0x63, 0x38, 0x72, 0xac, 0x54, 0x09, 0xb5, 0xf0, 0x1b, 0x7a, 0xe9, 0xa9, 0x9a, 0x37, - 0x76, 0x62, 0x6f, 0xbc, 0x24, 0x2b, 0xed, 0x9e, 0x32, 0xef, 0x7d, 0xef, 0xcd, 0x78, 0xde, 0xfb, - 0xbe, 0x37, 0x41, 0x1f, 0x0d, 0x38, 0x13, 0xac, 0x32, 0x38, 0xf7, 0x29, 0xbf, 0x74, 0x6d, 0x5a, - 0xe9, 0x52, 0xab, 0x27, 0xba, 0x76, 0x97, 0xda, 0x17, 0x65, 0xc0, 0xb0, 0x3e, 0x02, 0x37, 0x0d, - 0x87, 0x31, 0xa7, 0x47, 0x2b, 0x00, 0x9c, 0x07, 0x9d, 0x4a, 0x3b, 0xe0, 0x96, 0x70, 0x99, 0xa7, - 0x42, 0x37, 0x9f, 0x46, 0xbb, 0xd9, 0xac, 0xdf, 0x67, 0x5e, 0x45, 0xfd, 0x84, 0xe0, 0x9a, 0xc3, - 0x1c, 0xa6, 0x02, 0xe4, 0x4a, 0x79, 0xb7, 0xff, 0x9a, 0x47, 0xc5, 0x63, 0x38, 0xf3, 0x40, 0x9e, - 0x89, 0x31, 0x9a, 0x3f, 0x63, 0x6d, 0x4a, 0xb4, 0x92, 0xb6, 0xa3, 0x9b, 0xb0, 0xc6, 0x47, 0xa8, - 0x00, 0x60, 0xbd, 0x46, 0xe6, 0xa4, 0xbb, 0xfa, 0xd9, 0x7f, 0xb7, 0x5b, 0x9f, 0x38, 0xae, 0xe8, - 0x06, 0xe7, 0x65, 0x9b, 0xf5, 0x2b, 0x5d, 0xcb, 0xef, 0xba, 0x36, 0xe3, 0x83, 0x8a, 0xcd, 0x3c, - 0x3f, 0xe8, 0x55, 0xc4, 0x70, 0x40, 0xfd, 0x72, 0x98, 0x64, 0x46, 0xd9, 0xb0, 0xb9, 0xd5, 0xa7, - 0x24, 0x1b, 0x6e, 0x6e, 0xf5, 0x29, 0xde, 0x40, 0xf9, 0xa6, 0xb0, 0x44, 0xe0, 0x93, 0x79, 0xf0, - 0x86, 0x16, 0x5e, 0x43, 0xb9, 0x33, 0x26, 0xa8, 0x4f, 0x72, 0xe0, 0x56, 0x86, 0x8c, 0xfe, 0x21, - 0x10, 0x83, 0x40, 0x90, 0xbc, 0x8a, 0x56, 0x16, 0x7e, 0x86, 0xf4, 0xa6, 0x2a, 0x52, 0xbd, 0x46, - 0x0a, 0x00, 0x8d, 0x1d, 0xb8, 0x84, 0x8a, 0xa1, 0x01, 0xc7, 0x2f, 0x00, 0x1e, 0x77, 0xc5, 0x22, - 0x5a, 0x96, 0xe3, 0x13, 0xbd, 0x94, 0x8d, 0x45, 0x48, 0x97, 0xfc, 0xf6, 0xd6, 0x70, 0x40, 0xc9, - 0xa2, 0xfa, 0x76, 0xb9, 0xc6, 0x2f, 0x11, 0xaa, 0xd1, 0x8e, 0xeb, 0xb9, 0xb2, 0x07, 0x04, 0x95, - 0xb4, 0x9d, 0xe2, 0x5e, 0xa9, 0x3c, 0xea, 0x57, 0x39, 0x56, 0xd8, 0x71, 0x5c, 0x75, 0xfe, 0xfa, - 0x76, 0x2b, 0x63, 0xc6, 0x32, 0xf1, 0x0b, 0xa4, 0x9b, 0x56, 0x47, 0xd4, 0xbd, 0x36, 0xbd, 0x22, - 0x45, 0xd8, 0x66, 0xb5, 0x1c, 0x36, 0x6f, 0x04, 0x54, 0x17, 0x64, 0xde, 0xcd, 0xed, 0x96, 0x66, - 0x8e, 0xa3, 0x71, 0x0d, 0x2d, 0x1f, 0x7a, 0x82, 0xf2, 0x01, 0x77, 0x7d, 0x7a, 0x4a, 0x85, 0x45, - 0x96, 0x20, 0x7f, 0x23, 0xca, 0x4f, 0xa2, 0xe1, 0xe1, 0xaf, 0xe5, 0xc8, 0xeb, 0x1f, 0x5e, 0x0d, - 0x98, 0x4f, 0xdb, 0x0d, 0xc6, 0x05, 0x59, 0x2e, 0x69, 0x3b, 0x39, 0x33, 0xee, 0xc2, 0x9b, 0x68, - 0xa1, 0x2e, 0x73, 0x2e, 0xad, 0x1e, 0x79, 0x04, 0x25, 0x18, 0xd9, 0x98, 0xa0, 0x42, 0xcb, 0xed, - 0x53, 0x16, 0x08, 0xb2, 0x02, 0x50, 0x64, 0x6e, 0x7f, 0x0c, 0xe4, 0x6a, 0x53, 0xfe, 0xca, 0xea, - 0x05, 0x54, 0xf6, 0x14, 0x16, 0x44, 0x83, 0xfa, 0x2a, 0x63, 0xfb, 0x8f, 0x02, 0x5a, 0x4f, 0xad, - 0x94, 0xac, 0xf9, 0x71, 0xab, 0xd5, 
0x88, 0xc8, 0x28, 0xd7, 0xf8, 0x39, 0x5a, 0x6a, 0x9d, 0x34, - 0x65, 0x67, 0x28, 0x87, 0x6e, 0x3e, 0x06, 0x30, 0xe9, 0x8c, 0xa2, 0x2e, 0xdc, 0xc1, 0x2b, 0xca, - 0xdd, 0xce, 0x10, 0x88, 0xbb, 0x60, 0x26, 0x9d, 0xf8, 0x7b, 0x94, 0x57, 0x9f, 0x47, 0xb2, 0xa5, - 0xec, 0x4e, 0x71, 0x6f, 0x77, 0x5a, 0xef, 0xca, 0x2a, 0xfc, 0xd0, 0x13, 0x7c, 0x18, 0x96, 0x32, - 0xdc, 0x41, 0x32, 0xf3, 0x94, 0x8a, 0x2e, 0x6b, 0x47, 0x3c, 0x56, 0x96, 0xbc, 0x43, 0x95, 0xb5, - 0x87, 0x04, 0xab, 0x3b, 0xc8, 0x35, 0x5e, 0x41, 0xd9, 0xd6, 0x41, 0x23, 0x64, 0xb6, 0x5c, 0xe2, - 0x6f, 0x63, 0xe5, 0xcd, 0x43, 0x03, 0x9f, 0x94, 0x95, 0xd8, 0xcb, 0x91, 0xd8, 0xcb, 0xb5, 0x50, - 0xec, 0xe1, 0xc1, 0xe3, 0xfa, 0x3f, 0x47, 0x4b, 0x4a, 0x06, 0xa7, 0xd6, 0x55, 0xd3, 0xfd, 0x95, - 0x12, 0xbd, 0xa4, 0xed, 0x2c, 0x99, 0x49, 0x27, 0x7e, 0x31, 0xee, 0x52, 0x61, 0xb6, 0x13, 0xa2, - 0x78, 0xec, 0x20, 0xa3, 0x46, 0x39, 0x75, 0x5c, 0x5f, 0x50, 0x7e, 0xc0, 0x5d, 0xe1, 0xda, 0x56, - 0x2f, 0x14, 0xc7, 0x7e, 0x47, 0x50, 0x0e, 0x92, 0x9a, 0x61, 0xc7, 0x29, 0xdb, 0x60, 0x03, 0xa1, - 0xa6, 0xcd, 0xdd, 0x81, 0xd8, 0xe7, 0x8e, 0x4f, 0x10, 0xb0, 0x24, 0xe6, 0xc1, 0xbb, 0x68, 0xb5, - 0xc6, 0xec, 0x0b, 0xca, 0x0f, 0x98, 0x27, 0x2c, 0xd7, 0xa3, 0xbc, 0x5e, 0x03, 0xc1, 0xe8, 0xe6, - 0x24, 0x20, 0xe9, 0xd6, 0xec, 0xd2, 0x5e, 0x2f, 0xd4, 0xac, 0x32, 0x64, 0xa3, 0x8e, 0xf7, 0x1a, - 0xf5, 0xb3, 0x23, 0xb2, 0xa6, 0x1a, 0xa5, 0x2c, 0xbc, 0x8d, 0x16, 0x8f, 0xf7, 0x1a, 0xae, 0xe7, - 0xfc, 0xe4, 0xd3, 0xd6, 0x49, 0x93, 0xac, 0x03, 0x63, 0x12, 0x3e, 0xd9, 0xcc, 0x23, 0xb3, 0x71, - 0x00, 0x1a, 0xd3, 0x4d, 0x58, 0xcb, 0x6f, 0x96, 0xbf, 0x61, 0xd6, 0x32, 0x64, 0xc5, 0x3c, 0x72, - 0x34, 0xed, 0xf7, 0x5c, 0xcb, 0x87, 0xb1, 0xaa, 0xa4, 0x33, 0x76, 0xc8, 0x53, 0xc1, 0x08, 0xcb, - 0x10, 0x0a, 0x28, 0xe1, 0xc3, 0x9f, 0xa3, 0x6c, 0xab, 0x75, 0x42, 0x56, 0x67, 0xab, 0xb1, 0x8c, - 0xdd, 0xfc, 0x31, 0x12, 0x1e, 0x50, 0x55, 0x12, 0xee, 0x82, 0x0e, 0x43, 0x1d, 0xc9, 0x25, 0xde, - 0x45, 0xb9, 0x4b, 0x90, 0xe2, 0x5c, 0x38, 0x2e, 0x12, 0xcc, 0x8f, 0x14, 0x6b, 0xaa, 0xa0, 0x6f, - 0xe6, 0xbe, 0xd6, 0xb6, 0xff, 0xd5, 0x91, 0x0e, 0x72, 0x80, 0xd1, 0x17, 0x7b, 0x13, 0xb4, 0x77, - 0xf2, 0x26, 0xcc, 0xa5, 0xbe, 0x09, 0xd9, 0xf4, 0x37, 0x61, 0x3e, 0xfe, 0x26, 0x24, 0x49, 0x93, - 0x9b, 0x20, 0x4d, 0x34, 0x45, 0xf2, 0xb1, 0x29, 0xf2, 0xdd, 0x48, 0xf9, 0x6b, 0xa0, 0xfc, 0xf8, - 0xd4, 0x1e, 0x5d, 0x72, 0x26, 0xb5, 0x17, 0x52, 0xd5, 0xbe, 0x39, 0xa9, 0xf6, 0x85, 0x74, 0xb5, - 0xeb, 0x6f, 0xab, 0xf6, 0x04, 0x9f, 0xd0, 0x34, 0x3e, 0x15, 0x53, 0xf8, 0x94, 0xaa, 0xa2, 0xc5, - 0xa9, 0x2a, 0x5a, 0x4a, 0x57, 0xd1, 0xb3, 0x07, 0x55, 0x64, 0x3c, 0xa0, 0xa2, 0xe5, 0x37, 0xaa, - 0xe8, 0xd1, 0x84, 0x8a, 0x26, 0xc6, 0xfe, 0xd3, 0x99, 0xc6, 0xfe, 0x4a, 0xda, 0xd8, 0x8f, 0x4d, - 0xc2, 0xd5, 0xb7, 0x9c, 0x84, 0xa1, 0x14, 0xf1, 0xec, 0x52, 0xc4, 0x7b, 0x68, 0xad, 0x19, 0xd8, - 0x36, 0xf5, 0xfd, 0x2a, 0xed, 0x30, 0x4e, 0x1b, 0x96, 0xef, 0xbb, 0x9e, 0x03, 0xf3, 0x25, 0x67, - 0xa6, 0x62, 0xf8, 0x4b, 0xb4, 0xfe, 0xd2, 0x72, 0x7b, 0x01, 0xa7, 0x21, 0xf0, 0xb3, 0xc5, 0x3d, - 0x99, 0xf4, 0x21, 0x24, 0xa5, 0x83, 0xf8, 0x2b, 0xb4, 0x91, 0x04, 0xa2, 0x19, 0x4b, 0x36, 0x20, - 0xed, 0x0d, 0xa8, 0x64, 0x54, 0x83, 0xb3, 0xab, 0x21, 0xa8, 0xe4, 0x03, 0xc5, 0xa8, 0x91, 0x63, - 0x84, 0x42, 0xcb, 0x48, 0x0c, 0x85, 0xbe, 0x4d, 0x7f, 0x1a, 0x1e, 0xbf, 0x9b, 0xa7, 0x61, 0xe2, - 0x91, 0x7b, 0x02, 0x77, 0x4a, 0x3a, 0xdf, 0xc3, 0xdc, 0xab, 0x9e, 0x5e, 0xff, 0x63, 0x64, 0xae, - 0xef, 0x0c, 0xed, 0xe6, 0xce, 0xd0, 0xfe, 0xbe, 0x33, 0xb4, 0xdf, 0xee, 0x8d, 0xcc, 0xef, 0xf7, - 0x46, 0xe6, 0xe6, 0xde, 0xc8, 0xfc, 0x79, 0x6f, 0x64, 0x7e, 
0xf9, 0xf4, 0xa1, 0xb1, 0xf7, 0xda, - 0x1f, 0xfc, 0xf3, 0x3c, 0x38, 0xbe, 0xf8, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x43, 0x4c, 0xaf, - 0xfa, 0x0b, 0x00, 0x00, + // 1096 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x4e, 0xe3, 0xc6, + 0x17, 0x8f, 0x09, 0x49, 0xf0, 0x64, 0x61, 0x97, 0x59, 0xe0, 0x3f, 0x9b, 0xff, 0xd6, 0xa4, 0x74, + 0x2f, 0xa8, 0x4a, 0x1d, 0x95, 0x56, 0x55, 0x3f, 0xd4, 0x4a, 0x84, 0xb0, 0x90, 0x0a, 0x68, 0xea, + 0xa4, 0x5b, 0xa9, 0x77, 0xc6, 0x99, 0x38, 0x16, 0x89, 0x27, 0x1a, 0x8f, 0x11, 0xe9, 0x53, 0xf4, + 0xb2, 0x0f, 0xd0, 0x87, 0xe1, 0x92, 0xcb, 0x5e, 0xa1, 0x16, 0x9e, 0xa1, 0x37, 0xbd, 0xaa, 0xe6, + 0x8c, 0x9d, 0xd8, 0x1b, 0x2f, 0x64, 0xa5, 0xed, 0x55, 0xce, 0xf9, 0x9d, 0x8f, 0x19, 0x9f, 0x73, + 0x7e, 0x67, 0x82, 0xde, 0x1f, 0x71, 0x26, 0x58, 0x6d, 0x74, 0x16, 0x50, 0x7e, 0xe1, 0x39, 0xb4, + 0xd6, 0xa7, 0xf6, 0x40, 0xf4, 0x9d, 0x3e, 0x75, 0xce, 0x4d, 0xb0, 0x61, 0x7d, 0x62, 0xac, 0x18, + 0x2e, 0x63, 0xee, 0x80, 0xd6, 0xc0, 0x70, 0x16, 0xf6, 0x6a, 0xdd, 0x90, 0xdb, 0xc2, 0x63, 0xbe, + 0x72, 0xad, 0x6c, 0xc6, 0xd9, 0x1c, 0x36, 0x1c, 0x32, 0xdf, 0x65, 0x2e, 0xab, 0x29, 0x31, 0x72, + 0x58, 0x93, 0x90, 0x72, 0x92, 0x92, 0x42, 0xb7, 0xee, 0x16, 0x51, 0xf9, 0x08, 0xce, 0xdd, 0x97, + 0xe7, 0x62, 0x8c, 0x16, 0x4f, 0x59, 0x97, 0x12, 0xad, 0xaa, 0x6d, 0xeb, 0x16, 0xc8, 0xf8, 0x10, + 0x95, 0xc0, 0xd8, 0x6c, 0x90, 0x05, 0x09, 0xd7, 0x3f, 0xfe, 0xe7, 0x66, 0xf3, 0x43, 0xd7, 0x13, + 0xfd, 0xf0, 0xcc, 0x74, 0xd8, 0xb0, 0xd6, 0xb7, 0x83, 0xbe, 0xe7, 0x30, 0x3e, 0xaa, 0x39, 0xcc, + 0x0f, 0xc2, 0x41, 0x4d, 0x8c, 0x47, 0x34, 0x30, 0xa3, 0x20, 0x2b, 0x8e, 0x86, 0xe4, 0xf6, 0x90, + 0x92, 0x7c, 0x94, 0xdc, 0x1e, 0x52, 0xbc, 0x81, 0x8a, 0x6d, 0x61, 0x8b, 0x30, 0x20, 0x8b, 0x80, + 0x46, 0x1a, 0x5e, 0x43, 0x85, 0x53, 0x26, 0x68, 0x40, 0x0a, 0x00, 0x2b, 0x45, 0x7a, 0x7f, 0x1f, + 0x8a, 0x51, 0x28, 0x48, 0x51, 0x79, 0x2b, 0x0d, 0x3f, 0x47, 0x7a, 0x5b, 0x15, 0xaa, 0xd9, 0x20, + 0x25, 0x30, 0x4d, 0x01, 0x5c, 0x45, 0xe5, 0x48, 0x81, 0xe3, 0x97, 0xc0, 0x9e, 0x84, 0x12, 0x1e, + 0x1d, 0xdb, 0x0d, 0x88, 0x5e, 0xcd, 0x27, 0x3c, 0x24, 0x24, 0xef, 0xde, 0x19, 0x8f, 0x28, 0x79, + 0xa4, 0xee, 0x2e, 0x65, 0xfc, 0x12, 0xa1, 0x06, 0xed, 0x79, 0xbe, 0x27, 0xfb, 0x40, 0x50, 0x55, + 0xdb, 0x2e, 0xef, 0x56, 0xcd, 0x49, 0xcf, 0xcc, 0x44, 0x61, 0xa7, 0x7e, 0xf5, 0xc5, 0xab, 0x9b, + 0xcd, 0x9c, 0x95, 0x88, 0xc4, 0xdf, 0x20, 0xdd, 0xb2, 0x7b, 0xa2, 0xe9, 0x77, 0xe9, 0x25, 0x29, + 0x43, 0x9a, 0x75, 0x73, 0xda, 0x47, 0x73, 0x62, 0xac, 0x2f, 0xc9, 0xd8, 0xeb, 0x9b, 0x4d, 0xcd, + 0x9a, 0x46, 0xe0, 0x23, 0xb4, 0x72, 0xe0, 0x0b, 0xca, 0x47, 0xdc, 0x0b, 0xe8, 0x09, 0x15, 0x36, + 0x59, 0x86, 0x1c, 0x95, 0x64, 0x8e, 0xb4, 0x47, 0x74, 0x89, 0xd7, 0xe2, 0x64, 0x19, 0x0e, 0x2e, + 0x47, 0x2c, 0xa0, 0xdd, 0x16, 0xe3, 0x82, 0xac, 0x54, 0xb5, 0xed, 0x82, 0x95, 0x84, 0x70, 0x05, + 0x2d, 0x35, 0x65, 0xcc, 0x85, 0x3d, 0x20, 0x8f, 0xa1, 0x14, 0x13, 0x1d, 0x13, 0x54, 0xea, 0x78, + 0x43, 0xca, 0x42, 0x41, 0x9e, 0x80, 0x29, 0x56, 0xb7, 0x3e, 0x80, 0x21, 0xeb, 0x52, 0xfe, 0xca, + 0x1e, 0x84, 0x54, 0xf6, 0x16, 0x04, 0xa2, 0x41, 0x9d, 0x95, 0xb2, 0xf5, 0x7b, 0x09, 0xad, 0x67, + 0x56, 0x4c, 0xd6, 0xfe, 0xa8, 0xd3, 0x69, 0xc5, 0x43, 0x29, 0x65, 0xfc, 0x02, 0x2d, 0x77, 0x8e, + 0xdb, 0xb2, 0x43, 0x94, 0x43, 0x57, 0x9f, 0x82, 0x31, 0x0d, 0xc6, 0x5e, 0xe7, 0xde, 0xe8, 0x15, + 0xe5, 0x5e, 0x6f, 0x0c, 0x03, 0xbc, 0x64, 0xa5, 0x41, 0xfc, 0x1d, 0x2a, 0xaa, 0xeb, 0x91, 0x7c, + 0x35, 0xbf, 0x5d, 0xde, 0xdd, 0x79, 0xa8, 0x87, 0xa6, 0x72, 0x3f, 0xf0, 0x05, 0x1f, 0x47, 0xa5, + 0x8c, 0x32, 
0xc8, 0x09, 0x3d, 0xa1, 0xa2, 0xcf, 0xba, 0xf1, 0x3c, 0x2b, 0x4d, 0x7e, 0x43, 0x9d, + 0x75, 0xc7, 0x04, 0xab, 0x6f, 0x90, 0x32, 0x7e, 0x82, 0xf2, 0x9d, 0xfd, 0x56, 0x34, 0xe1, 0x52, + 0xc4, 0x5f, 0x27, 0xca, 0x5b, 0x84, 0x26, 0x3e, 0x33, 0x15, 0xf1, 0xcd, 0x98, 0xf8, 0x66, 0x23, + 0x22, 0x7e, 0x74, 0xf0, 0xb4, 0xfe, 0x2f, 0xd0, 0xb2, 0xa2, 0xc3, 0x89, 0x7d, 0xd9, 0xf6, 0x7e, + 0xa1, 0x44, 0xaf, 0x6a, 0xdb, 0xcb, 0x56, 0x1a, 0xc4, 0x5f, 0x4e, 0xbb, 0x54, 0x9a, 0xef, 0x84, + 0xd8, 0x1f, 0xbb, 0xc8, 0x68, 0x50, 0x4e, 0x5d, 0x2f, 0x10, 0x94, 0xef, 0x73, 0x4f, 0x78, 0x8e, + 0x3d, 0x88, 0x48, 0xb2, 0xd7, 0x13, 0x94, 0x03, 0xb5, 0xe6, 0xc8, 0xf8, 0x40, 0x1a, 0x6c, 0x20, + 0xd4, 0x76, 0xb8, 0x37, 0x12, 0x7b, 0xdc, 0x0d, 0x08, 0x82, 0x29, 0x49, 0x20, 0x78, 0x07, 0xad, + 0x36, 0x98, 0x73, 0x4e, 0xf9, 0x3e, 0xf3, 0x85, 0xed, 0xf9, 0x94, 0x37, 0x1b, 0x40, 0x1c, 0xdd, + 0x9a, 0x35, 0xc8, 0x71, 0x6b, 0xf7, 0xe9, 0x60, 0x10, 0x71, 0x57, 0x29, 0xb2, 0x51, 0x47, 0xbb, + 0xad, 0xe6, 0xe9, 0x21, 0x59, 0x53, 0x8d, 0x52, 0x1a, 0xde, 0x42, 0x8f, 0x8e, 0x76, 0x5b, 0x9e, + 0xef, 0xfe, 0x18, 0xd0, 0xce, 0x71, 0x9b, 0xac, 0xc3, 0xc4, 0xa4, 0x30, 0xd9, 0xcc, 0x43, 0xab, + 0xb5, 0x0f, 0x3c, 0xd3, 0x2d, 0x90, 0xe5, 0x9d, 0xe5, 0x6f, 0x14, 0xb5, 0x02, 0x51, 0x09, 0x44, + 0xae, 0xa8, 0xbd, 0x81, 0x67, 0x07, 0xb0, 0x5e, 0x15, 0x75, 0xa6, 0x80, 0x3c, 0x15, 0x94, 0xa8, + 0x0c, 0x11, 0x81, 0x52, 0x18, 0xfe, 0x04, 0xe5, 0x3b, 0x9d, 0x63, 0xb2, 0x3a, 0x5f, 0x8d, 0xa5, + 0x6f, 0xe5, 0x87, 0x98, 0x78, 0x30, 0xaa, 0x72, 0xe0, 0xce, 0xe9, 0x38, 0xe2, 0x91, 0x14, 0xf1, + 0x0e, 0x2a, 0x5c, 0x00, 0x15, 0x17, 0x20, 0xeb, 0x46, 0x7a, 0xf2, 0x63, 0xc6, 0x5a, 0xca, 0xe9, + 0xab, 0x85, 0x2f, 0xb4, 0xad, 0xbf, 0x75, 0xa4, 0x03, 0x1d, 0x60, 0x05, 0x26, 0xde, 0x06, 0xed, + 0x9d, 0xbc, 0x0d, 0x0b, 0x99, 0x6f, 0x43, 0x3e, 0xfb, 0x6d, 0x58, 0x4c, 0xbe, 0x0d, 0xe9, 0xa1, + 0x29, 0xcc, 0x0c, 0x4d, 0xbc, 0x45, 0x8a, 0x89, 0x2d, 0xf2, 0xed, 0x84, 0xf9, 0x6b, 0xc0, 0xfc, + 0xe4, 0xf6, 0x9e, 0x7c, 0xe4, 0x5c, 0x6c, 0x2f, 0x65, 0xb2, 0xbd, 0x32, 0xcb, 0xf6, 0xa5, 0x6c, + 0xb6, 0xeb, 0x6f, 0xcb, 0xf6, 0xd4, 0x3c, 0xa1, 0x87, 0xe6, 0xa9, 0x9c, 0x31, 0x4f, 0x99, 0x2c, + 0x7a, 0xf4, 0x20, 0x8b, 0x96, 0xb3, 0x59, 0xf4, 0xfc, 0x5e, 0x16, 0x19, 0xf7, 0xb0, 0x68, 0xe5, + 0x8d, 0x2c, 0x7a, 0x3c, 0xc3, 0xa2, 0x99, 0xb5, 0xff, 0xff, 0xb9, 0xd6, 0xfe, 0x93, 0xac, 0xb5, + 0x9f, 0xd8, 0x84, 0xab, 0x6f, 0xb9, 0x09, 0x23, 0x2a, 0xe2, 0xf9, 0xa9, 0x88, 0x77, 0xd1, 0x5a, + 0x3b, 0x74, 0x1c, 0x1a, 0x04, 0x75, 0xda, 0x63, 0x9c, 0xb6, 0xec, 0x20, 0xf0, 0x7c, 0x17, 0xf6, + 0x4b, 0xc1, 0xca, 0xb4, 0xe1, 0xcf, 0xd0, 0xfa, 0x4b, 0xdb, 0x1b, 0x84, 0x9c, 0x46, 0x86, 0x9f, + 0x6c, 0xee, 0xcb, 0xa0, 0xf7, 0x20, 0x28, 0xdb, 0x88, 0x3f, 0x47, 0x1b, 0x69, 0x43, 0xbc, 0x63, + 0xc9, 0x06, 0x84, 0xbd, 0xc1, 0x2a, 0x27, 0xaa, 0xc5, 0xd9, 0xe5, 0x18, 0x58, 0xf2, 0x3f, 0x35, + 0x51, 0x13, 0x60, 0x62, 0x85, 0x96, 0x91, 0x84, 0x15, 0xfa, 0xf6, 0xf0, 0xd3, 0xf0, 0xf4, 0xdd, + 0x3c, 0x0d, 0x33, 0x8f, 0xdc, 0x33, 0xf8, 0xa6, 0x34, 0xf8, 0x1f, 0xec, 0xbd, 0xfa, 0xc9, 0xd5, + 0x5f, 0x46, 0xee, 0xea, 0xd6, 0xd0, 0xae, 0x6f, 0x0d, 0xed, 0xcf, 0x5b, 0x43, 0xfb, 0xf5, 0xce, + 0xc8, 0xfd, 0x76, 0x67, 0xe4, 0xae, 0xef, 0x8c, 0xdc, 0x1f, 0x77, 0x46, 0xee, 0xe7, 0x8f, 0xee, + 0x5b, 0x7b, 0xaf, 0xfd, 0xd9, 0x3f, 0x2b, 0x02, 0xf0, 0xe9, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x87, 0x18, 0xc2, 0xd7, 0x06, 0x0c, 0x00, 0x00, } func (m *HealthCheck) Marshal() (dAtA []byte, err error) { diff --git a/proto/pbservice/healthcheck.proto b/proto/pbservice/healthcheck.proto index 6a8fb5906..15a5d3dce 100644 --- 
a/proto/pbservice/healthcheck.proto +++ b/proto/pbservice/healthcheck.proto @@ -5,7 +5,7 @@ package pbservice; option go_package = "github.com/hashicorp/consul/proto/pbservice"; import "google/protobuf/duration.proto"; -import "proto/pbcommon/common.proto"; +import "proto/pbcommongogo/common.proto"; // This fake import path is replaced by the build script with a versioned path import "gogoproto/gogo.proto"; @@ -37,10 +37,10 @@ message HealthCheck { HealthCheckDefinition Definition = 10 [(gogoproto.nullable) = false]; // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - common.RaftIndex RaftIndex = 11 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; + commongogo.RaftIndex RaftIndex = 11 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - common.EnterpriseMeta EnterpriseMeta = 13 [(gogoproto.nullable) = false]; + commongogo.EnterpriseMeta EnterpriseMeta = 13 [(gogoproto.nullable) = false]; // mog: func-to=int func-from=int32 int32 ExposedPort = 14; diff --git a/proto/pbservice/ids_test.go b/proto/pbservice/ids_test.go index ae425e05c..2856aa70a 100644 --- a/proto/pbservice/ids_test.go +++ b/proto/pbservice/ids_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbcommongogo" ) func TestCheckServiceNode_UniqueID(t *testing.T) { @@ -25,7 +25,7 @@ func TestCheckServiceNode_UniqueID(t *testing.T) { Node: &Node{Node: "the-node-name"}, Service: &NodeService{ ID: "the-service-id", - EnterpriseMeta: pbcommon.EnterpriseMeta{Namespace: "the-namespace"}, + EnterpriseMeta: pbcommongogo.EnterpriseMeta{Namespace: "the-namespace"}, }, }, expected: "/the-node-name/the-namespace/the-service-id", @@ -35,7 +35,7 @@ func TestCheckServiceNode_UniqueID(t *testing.T) { csn: CheckServiceNode{ Service: &NodeService{ ID: "the-service-id", - EnterpriseMeta: pbcommon.EnterpriseMeta{Namespace: "the-namespace"}, + EnterpriseMeta: pbcommongogo.EnterpriseMeta{Namespace: "the-namespace"}, }, }, expected: "/the-namespace/the-service-id", diff --git a/proto/pbservice/node.pb.go b/proto/pbservice/node.pb.go index 98f1e4732..a93dc6da1 100644 --- a/proto/pbservice/node.pb.go +++ b/proto/pbservice/node.pb.go @@ -8,7 +8,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" github_com_hashicorp_consul_agent_structs "github.com/hashicorp/consul/agent/structs" - pbcommon "github.com/hashicorp/consul/proto/pbcommon" + pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo" github_com_hashicorp_consul_types "github.com/hashicorp/consul/types" io "io" math "math" @@ -83,7 +83,7 @@ type Node struct { TaggedAddresses map[string]string `protobuf:"bytes,5,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - pbcommon.RaftIndex `protobuf:"bytes,7,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` + pbcommongogo.RaftIndex `protobuf:"bytes,7,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` } func (m *Node) Reset() { *m = Node{} } @@ -177,9 +177,9 @@ type NodeService struct { // somewhere this is used in API output. 
LocallyRegisteredAsSidecar bool `protobuf:"varint,13,opt,name=LocallyRegisteredAsSidecar,proto3" json:"LocallyRegisteredAsSidecar,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - EnterpriseMeta pbcommon.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` + EnterpriseMeta pbcommongogo.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - pbcommon.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` + pbcommongogo.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` } func (m *NodeService) Reset() { *m = NodeService{} } @@ -228,56 +228,56 @@ func init() { func init() { proto.RegisterFile("proto/pbservice/node.proto", fileDescriptor_bbc215b78fa95fe5) } var fileDescriptor_bbc215b78fa95fe5 = []byte{ - // 773 bytes of a gzipped FileDescriptorProto + // 777 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x13, 0xa7, 0x69, 0x26, 0xf7, 0xf6, 0x67, 0x54, 0x5d, 0xcd, 0xcd, 0xbd, 0x75, 0x42, - 0x61, 0x51, 0xa9, 0xc5, 0x46, 0x05, 0x04, 0x41, 0x08, 0xa9, 0x69, 0x2a, 0x11, 0x41, 0x4b, 0xe4, + 0x14, 0x8e, 0x13, 0xa7, 0x69, 0x26, 0xf7, 0xf6, 0x67, 0xd4, 0x7b, 0x35, 0x37, 0xba, 0x75, 0x42, + 0x61, 0x51, 0xa9, 0xc5, 0x46, 0x05, 0x04, 0x45, 0x80, 0xd4, 0x34, 0x95, 0x1a, 0x41, 0x4b, 0xe4, 0x56, 0x42, 0x02, 0xb1, 0x98, 0xd8, 0x53, 0xdb, 0x6a, 0xea, 0x89, 0xc6, 0x93, 0xaa, 0x79, 0x0b, - 0x96, 0xf0, 0x02, 0x3c, 0x4b, 0x97, 0x5d, 0xb2, 0x8a, 0xa0, 0x59, 0xf0, 0x0e, 0x5d, 0xa1, 0x19, - 0x4f, 0x12, 0xc7, 0x0d, 0x15, 0x95, 0x58, 0xcd, 0xf8, 0x7c, 0xdf, 0x39, 0x73, 0x66, 0xbe, 0xef, - 0x24, 0xa0, 0xdc, 0x65, 0x94, 0x53, 0xab, 0xdb, 0x8e, 0x08, 0x3b, 0x0d, 0x1c, 0x62, 0x85, 0xd4, - 0x25, 0xa6, 0x0c, 0xc2, 0xe2, 0x38, 0x5a, 0xfe, 0x6f, 0x44, 0x73, 0xe8, 0xc9, 0x09, 0x0d, 0xad, - 0x78, 0x89, 0x79, 0xe5, 0x3b, 0xe9, 0x1a, 0x3e, 0xc1, 0x1d, 0xee, 0x3b, 0x3e, 0x71, 0x8e, 0x15, - 0x65, 0x35, 0x4d, 0x51, 0xab, 0x82, 0x57, 0x3c, 0xea, 0xd1, 0x98, 0x22, 0x76, 0x71, 0x74, 0xed, - 0xb3, 0x06, 0x96, 0x76, 0x44, 0x91, 0x83, 0x98, 0xbc, 0x4f, 0x5d, 0x02, 0xef, 0x02, 0x5d, 0xac, - 0x48, 0xab, 0x6a, 0xeb, 0xa5, 0xad, 0x45, 0x73, 0x5c, 0xd2, 0x14, 0x61, 0x5b, 0x82, 0xf0, 0x01, - 0x28, 0xa8, 0x1c, 0x94, 0x95, 0xbc, 0x7f, 0x52, 0x3c, 0x85, 0xda, 0x23, 0x1a, 0x34, 0xc1, 0x9c, - 0x3c, 0x2a, 0x42, 0xb9, 0x6a, 0x2e, 0x95, 0xf0, 0x52, 0x5e, 0x47, 0xc2, 0xb6, 0x62, 0xad, 0xfd, - 0xc8, 0xc5, 0x7d, 0xc0, 0xe7, 0x20, 0xdb, 0x6c, 0xc8, 0x6e, 0x8a, 0xf5, 0xcd, 0xab, 0x41, 0x65, - 0xdd, 0x0b, 0xb8, 0xdf, 0x6b, 0x9b, 0x0e, 0x3d, 0xb1, 0x7c, 0x1c, 0xf9, 0x81, 0x43, 0x59, 0xd7, - 0x72, 0x68, 0x18, 0xf5, 0x3a, 0x16, 0xef, 0x77, 0x49, 0x24, 0x1b, 0x68, 0x36, 0xec, 0x6c, 0xb3, - 0x01, 0xa1, 0xba, 0x8d, 0xe8, 0xb2, 0xa8, 0x9a, 0xff, 0x1f, 0x14, 0x5b, 0x98, 0xf1, 0x80, 0x07, - 0x34, 0x44, 0xf3, 0x12, 0x98, 0x04, 0x20, 0x02, 0x85, 0x6d, 0xd7, 0x65, 0x24, 0x12, 0x9d, 0x0a, - 0x6c, 0xf4, 0x09, 0x0d, 0x00, 0x1a, 0x98, 0x63, 0x87, 0x84, 0x9c, 0x30, 0xa4, 0x4b, 0x30, 0x11, - 0x81, 0xfb, 0x60, 0xf1, 0x10, 0x7b, 0x1e, 0x71, 0x55, 0x02, 0x89, 0x50, 0x5e, 0xde, 0xf5, 0x5e, - 0xea, 0x71, 0xcc, 0x14, 0x6d, 0x37, 0xe4, 0xac, 0x6f, 0xa7, 0x93, 0xe1, 0x7d, 0xa0, 0xef, 0x11, - 0x8e, 0xd1, 0x9c, 0x2c, 0xf2, 0x6f, 0xba, 0x88, 0xc0, 0xe2, 0x4c, 0x49, 0x83, 0x35, 0x50, 0xb4, - 0xf1, 0x11, 0x6f, 0x86, 0x2e, 0x39, 
0x43, 0x05, 0xa9, 0xca, 0xb2, 0xa9, 0x7c, 0x34, 0x06, 0xea, - 0xf3, 0xe7, 0x83, 0x4a, 0xe6, 0x62, 0x50, 0xd1, 0xec, 0x09, 0xbb, 0x5c, 0x07, 0x2b, 0xb3, 0x5a, - 0x82, 0x4b, 0x20, 0x77, 0x4c, 0xfa, 0xf1, 0xe3, 0xdb, 0x62, 0x0b, 0x57, 0x40, 0xfe, 0x14, 0x77, - 0x7a, 0xa3, 0x07, 0x8d, 0x3f, 0x9e, 0x65, 0x9f, 0x6a, 0xe5, 0x27, 0xa0, 0x38, 0xee, 0xe8, 0x36, - 0x89, 0x6b, 0x5f, 0x0a, 0xa0, 0x94, 0xb0, 0x0c, 0xdc, 0x03, 0xfa, 0xab, 0x20, 0x74, 0x95, 0xe4, - 0xb5, 0xab, 0x41, 0xe5, 0xf1, 0x4d, 0x92, 0x63, 0x8f, 0x84, 0xdc, 0x8a, 0x38, 0xeb, 0x39, 0x3c, - 0x32, 0x55, 0x11, 0x51, 0xc0, 0x96, 0x65, 0xe0, 0x82, 0xf4, 0x4f, 0x7c, 0xaa, 0x70, 0x04, 0x9a, - 0x58, 0x57, 0xe9, 0x3b, 0x3a, 0x18, 0x02, 0xfd, 0x10, 0x7b, 0x11, 0xd2, 0xab, 0x39, 0xe1, 0x15, - 0xb1, 0x4f, 0xba, 0x21, 0x3f, 0xed, 0x86, 0xf7, 0xd7, 0xd5, 0x5e, 0x94, 0x42, 0x6d, 0xcc, 0x1e, - 0x85, 0x99, 0xa2, 0xd7, 0x75, 0x21, 0xc7, 0x75, 0xe9, 0x1f, 0x4d, 0x49, 0x5f, 0xfd, 0x45, 0xc5, - 0xb4, 0x03, 0x20, 0xd0, 0x5b, 0x94, 0x71, 0x29, 0x7e, 0xde, 0x96, 0x7b, 0x61, 0xda, 0x03, 0xea, - 0x1c, 0x13, 0xde, 0xc2, 0xdc, 0x47, 0xcb, 0xb1, 0x69, 0x27, 0x11, 0xb8, 0x09, 0x0a, 0x6f, 0x49, - 0xe0, 0xf9, 0x3c, 0x92, 0xa3, 0x50, 0xda, 0x82, 0x89, 0xc3, 0x14, 0x62, 0x8f, 0x28, 0x70, 0x13, - 0x2c, 0xef, 0x86, 0xb8, 0xdd, 0x21, 0x87, 0xd8, 0x7b, 0x73, 0x4a, 0x18, 0x0b, 0x5c, 0x82, 0x8a, - 0x55, 0x6d, 0x7d, 0xde, 0xbe, 0x0e, 0xc0, 0x1a, 0xc8, 0xb7, 0x18, 0x3d, 0xeb, 0xa3, 0x92, 0xac, - 0xbc, 0x9a, 0xa8, 0xbc, 0x43, 0xc3, 0x90, 0x38, 0x5c, 0xc2, 0x3b, 0x34, 0x3c, 0x0a, 0x3c, 0xf5, - 0x14, 0x71, 0x06, 0xac, 0x81, 0x82, 0xa2, 0xa0, 0xbf, 0x64, 0x72, 0xd2, 0xfe, 0xea, 0xfe, 0x8a, - 0xa0, 0x12, 0x47, 0x7c, 0xf8, 0x02, 0x94, 0x5f, 0x53, 0x07, 0x77, 0x3a, 0x7d, 0x9b, 0x78, 0x41, - 0xc4, 0x09, 0x23, 0xee, 0x76, 0x74, 0x10, 0xb8, 0xc4, 0xc1, 0x0c, 0xfd, 0x2d, 0x9b, 0xbd, 0x81, - 0x01, 0x1b, 0x60, 0x61, 0x57, 0xcc, 0x73, 0x97, 0x05, 0x11, 0x91, 0x2a, 0x2c, 0xa9, 0x9f, 0x38, - 0x35, 0x4c, 0xd3, 0xa8, 0x3a, 0x3e, 0x95, 0x33, 0x3d, 0x8d, 0x0b, 0xb7, 0x9a, 0xc6, 0x0f, 0xbf, - 0x3d, 0x8d, 0x56, 0x72, 0xa8, 0x66, 0xbe, 0x91, 0x2a, 0xf1, 0x27, 0x06, 0xb5, 0xbe, 0x77, 0xfe, - 0xdd, 0xc8, 0x9c, 0x5f, 0x1a, 0xda, 0xc5, 0xa5, 0xa1, 0x7d, 0xbb, 0x34, 0xb4, 0x8f, 0x43, 0x23, - 0xf3, 0x69, 0x68, 0x64, 0x2e, 0x86, 0x46, 0xe6, 0xeb, 0xd0, 0xc8, 0xbc, 0xdb, 0xb8, 0x69, 0x50, - 0x53, 0xff, 0x50, 0xed, 0x39, 0x19, 0x78, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x3e, 0x64, - 0x61, 0x22, 0x07, 0x00, 0x00, + 0x96, 0xf0, 0x12, 0x3c, 0x47, 0x97, 0x5d, 0xb2, 0x8a, 0xa0, 0x59, 0xf2, 0x06, 0x5d, 0xa1, 0x19, + 0x4f, 0x12, 0xc7, 0x0d, 0x15, 0x95, 0x58, 0xe5, 0xf8, 0x9c, 0xef, 0x7c, 0x73, 0x66, 0xbe, 0xef, + 0x28, 0xa0, 0xdc, 0x61, 0x94, 0x53, 0xab, 0xd3, 0x8a, 0x08, 0x3b, 0x0d, 0x1c, 0x62, 0x85, 0xd4, + 0x25, 0xa6, 0x4c, 0xc2, 0xe2, 0x28, 0x5b, 0xae, 0x0c, 0x61, 0x0e, 0x3d, 0x39, 0xa1, 0xa1, 0x47, + 0x3d, 0x6a, 0xc5, 0x61, 0x8c, 0x2d, 0xdf, 0x49, 0xf3, 0xf8, 0x04, 0xb7, 0xb9, 0xef, 0xf8, 0xc4, + 0x39, 0x56, 0x90, 0xe5, 0x34, 0x44, 0xfd, 0xaa, 0xf2, 0x92, 0x20, 0x8d, 0x21, 0x22, 0x8a, 0xb3, + 0x2b, 0x9f, 0x35, 0xb0, 0xb0, 0x2d, 0x48, 0x0e, 0x62, 0xf0, 0x3e, 0x75, 0x09, 0xbc, 0x0b, 0x74, + 0xf1, 0x8b, 0xb4, 0xaa, 0xb6, 0x5a, 0xda, 0x98, 0x37, 0x47, 0x94, 0xa6, 0x48, 0xdb, 0xb2, 0x08, + 0x1f, 0x80, 0x82, 0xea, 0x41, 0x59, 0x89, 0xfb, 0x37, 0x85, 0x53, 0x55, 0x7b, 0x08, 0x83, 0x26, + 0x98, 0x91, 0x47, 0x45, 0x28, 0x57, 0xcd, 0xa5, 0x1a, 0x76, 0xe5, 0x75, 0x64, 0xd9, 0x56, 0xa8, + 0x95, 0x1f, 0xb9, 0x78, 0x0e, 0xf8, 0x1c, 0x64, 0x1b, 0x75, 0x39, 0x4d, 0xb1, 0xb6, 0x7e, 0xd5, + 0xaf, 0xac, 0x7a, 0x01, 0xf7, 
0xbb, 0x2d, 0xd3, 0xa1, 0x27, 0x96, 0x8f, 0x23, 0x3f, 0x70, 0x28, + 0xeb, 0x58, 0x0e, 0x0d, 0xa3, 0x6e, 0xdb, 0xe2, 0xbd, 0x0e, 0x89, 0xe4, 0x00, 0x8d, 0xba, 0x9d, + 0x6d, 0xd4, 0x21, 0x54, 0xb7, 0x11, 0x53, 0x16, 0xd5, 0xf0, 0xff, 0x83, 0x62, 0x13, 0x33, 0x1e, + 0xf0, 0x80, 0x86, 0x68, 0x56, 0x16, 0xc6, 0x09, 0x88, 0x40, 0x61, 0xcb, 0x75, 0x19, 0x89, 0xc4, + 0xa4, 0xa2, 0x36, 0xfc, 0x84, 0x06, 0x00, 0x75, 0xcc, 0xb1, 0x43, 0x42, 0x4e, 0x18, 0xd2, 0x65, + 0x31, 0x91, 0x81, 0xfb, 0x60, 0xfe, 0x10, 0x7b, 0x1e, 0x71, 0x55, 0x03, 0x89, 0x50, 0x5e, 0xde, + 0xf5, 0x5e, 0xea, 0x71, 0xcc, 0x14, 0x6c, 0x27, 0xe4, 0xac, 0x67, 0xa7, 0x9b, 0xe1, 0x7d, 0xa0, + 0xef, 0x11, 0x8e, 0xd1, 0x8c, 0x24, 0xf9, 0x2f, 0x4d, 0x22, 0x6a, 0x71, 0xa7, 0x84, 0xc1, 0x17, + 0xa0, 0x68, 0xe3, 0x23, 0xde, 0x08, 0x5d, 0x72, 0x86, 0x0a, 0x52, 0x95, 0x7f, 0xcc, 0xb1, 0xa5, + 0xcc, 0x51, 0xb1, 0x36, 0x7b, 0xde, 0xaf, 0x64, 0x2e, 0xfa, 0x15, 0xcd, 0x1e, 0x77, 0x94, 0x6b, + 0x60, 0x69, 0xda, 0x58, 0x70, 0x01, 0xe4, 0x8e, 0x49, 0x2f, 0x16, 0xc0, 0x16, 0x21, 0x5c, 0x02, + 0xf9, 0x53, 0xdc, 0xee, 0x0e, 0x1f, 0x35, 0xfe, 0x78, 0x96, 0x7d, 0xaa, 0x95, 0x9f, 0x80, 0xe2, + 0x68, 0xaa, 0xdb, 0x34, 0xae, 0x7c, 0x29, 0x80, 0x52, 0xc2, 0x36, 0x70, 0x0f, 0xe8, 0xaf, 0x82, + 0xd0, 0x55, 0xb2, 0x6f, 0x5e, 0xf5, 0x2b, 0x8f, 0x6f, 0x92, 0x1d, 0x7b, 0x24, 0xe4, 0x56, 0xc4, + 0x59, 0xd7, 0xe1, 0x91, 0xa9, 0x48, 0x04, 0x81, 0x2d, 0x69, 0xe0, 0x9c, 0xf4, 0x50, 0x7c, 0xaa, + 0x70, 0x05, 0x1a, 0xdb, 0x57, 0x69, 0x3c, 0x3c, 0x18, 0x02, 0xfd, 0x10, 0x7b, 0x11, 0xd2, 0xab, + 0x39, 0xe1, 0x17, 0x11, 0x27, 0x1d, 0x91, 0x9f, 0x74, 0xc4, 0xfb, 0xeb, 0x8a, 0xcf, 0x4b, 0xb1, + 0xd6, 0xa6, 0xaf, 0xc3, 0x54, 0xe1, 0x6b, 0xba, 0x90, 0xe3, 0xba, 0xfc, 0x8f, 0x26, 0xe4, 0xaf, + 0xfe, 0x82, 0x31, 0xed, 0x02, 0x08, 0xf4, 0x26, 0x65, 0x5c, 0x1a, 0x20, 0x6f, 0xcb, 0x58, 0x18, + 0xf7, 0x80, 0x3a, 0xc7, 0x84, 0x37, 0x31, 0xf7, 0xd1, 0x62, 0x6c, 0xdc, 0x71, 0x06, 0xae, 0x83, + 0xc2, 0x5b, 0x12, 0x78, 0x3e, 0x8f, 0xe4, 0x3a, 0x94, 0x36, 0x60, 0xe2, 0x30, 0x55, 0xb1, 0x87, + 0x10, 0xb8, 0x0e, 0x16, 0x77, 0x42, 0xdc, 0x6a, 0x93, 0x43, 0xec, 0xbd, 0x39, 0x25, 0x8c, 0x05, + 0x2e, 0x41, 0xc5, 0xaa, 0xb6, 0x3a, 0x6b, 0x5f, 0x2f, 0xc0, 0x4d, 0x90, 0x6f, 0x32, 0x7a, 0xd6, + 0x43, 0x25, 0xc9, 0xbc, 0x9c, 0x60, 0xde, 0xa6, 0x61, 0x48, 0x1c, 0x2e, 0xcb, 0xdb, 0x34, 0x3c, + 0x0a, 0x3c, 0xf5, 0x14, 0x71, 0x07, 0xdc, 0x04, 0x05, 0x05, 0x41, 0x7f, 0xc9, 0xe6, 0xe4, 0x0a, + 0xa8, 0xfb, 0x2b, 0x80, 0x6a, 0x1c, 0xe2, 0xe1, 0x4b, 0x50, 0x7e, 0x4d, 0x1d, 0xdc, 0x6e, 0xf7, + 0x6c, 0xe2, 0x05, 0x11, 0x27, 0x8c, 0xb8, 0x5b, 0xd1, 0x41, 0xe0, 0x12, 0x07, 0x33, 0xf4, 0xb7, + 0x1c, 0xf6, 0x06, 0x04, 0xdc, 0x05, 0x73, 0x3b, 0x62, 0xa7, 0x3b, 0x2c, 0x88, 0x88, 0x54, 0x61, + 0x41, 0x4e, 0x50, 0x4e, 0x2e, 0xd4, 0x24, 0x42, 0x8d, 0x90, 0xea, 0x9b, 0xdc, 0xca, 0xb9, 0x5b, + 0x6f, 0xe5, 0x87, 0xdf, 0xde, 0x4a, 0x2b, 0xb9, 0x5c, 0x53, 0xdf, 0x4a, 0x51, 0xfc, 0x89, 0x85, + 0xad, 0xed, 0x9d, 0x7f, 0x37, 0x32, 0xe7, 0x97, 0x86, 0x76, 0x71, 0x69, 0x68, 0xdf, 0x2e, 0x0d, + 0xed, 0xe3, 0xc0, 0xc8, 0x7c, 0x1a, 0x18, 0x99, 0x8b, 0x81, 0x91, 0xf9, 0x3a, 0x30, 0x32, 0xef, + 0xd6, 0x6e, 0x5a, 0xd8, 0xd4, 0xbf, 0x55, 0x6b, 0x46, 0x26, 0x1e, 0xfe, 0x0c, 0x00, 0x00, 0xff, + 0xff, 0xf9, 0x3e, 0x1b, 0x2c, 0x32, 0x07, 0x00, 0x00, } func (m *CheckServiceNode) Marshal() (dAtA []byte, err error) { diff --git a/proto/pbservice/node.proto b/proto/pbservice/node.proto index 2600e7b14..c07e809a8 100644 --- a/proto/pbservice/node.proto +++ b/proto/pbservice/node.proto @@ -4,7 +4,7 @@ package pbservice; option go_package = 
"github.com/hashicorp/consul/proto/pbservice"; -import "proto/pbcommon/common.proto"; +import "proto/pbcommongogo/common.proto"; import "proto/pbservice/healthcheck.proto"; import "proto/pbservice/service.proto"; @@ -42,7 +42,7 @@ message Node { map Meta = 6; // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - common.RaftIndex RaftIndex = 7 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; + commongogo.RaftIndex RaftIndex = 7 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; } // NodeService is a service provided by a node @@ -109,8 +109,8 @@ message NodeService { bool LocallyRegisteredAsSidecar = 13; // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - common.EnterpriseMeta EnterpriseMeta = 16 [(gogoproto.nullable) = false]; + commongogo.EnterpriseMeta EnterpriseMeta = 16 [(gogoproto.nullable) = false]; // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - common.RaftIndex RaftIndex = 14 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; + commongogo.RaftIndex RaftIndex = 14 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; } diff --git a/proto/pbservice/service.pb.go b/proto/pbservice/service.pb.go index 9bd4b381c..ca5761fdc 100644 --- a/proto/pbservice/service.pb.go +++ b/proto/pbservice/service.pb.go @@ -9,7 +9,7 @@ import ( types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" github_com_hashicorp_consul_agent_structs "github.com/hashicorp/consul/agent/structs" - pbcommon "github.com/hashicorp/consul/proto/pbcommon" + pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo" io "io" math "math" math_bits "math/bits" @@ -483,7 +483,7 @@ type ServiceDefinition struct { // mog: func-to=ConnectProxyConfigPtrToStructs func-from=NewConnectProxyConfigPtrFromStructs Proxy *ConnectProxyConfig `protobuf:"bytes,14,opt,name=Proxy,proto3" json:"Proxy,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - EnterpriseMeta pbcommon.EnterpriseMeta `protobuf:"bytes,17,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` + EnterpriseMeta pbcommongogo.EnterpriseMeta `protobuf:"bytes,17,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` // mog: func-to=ServiceConnectPtrToStructs func-from=NewServiceConnectPtrFromStructs Connect *ServiceConnect `protobuf:"bytes,15,opt,name=Connect,proto3" json:"Connect,omitempty"` } @@ -620,83 +620,83 @@ func init() { func init() { proto.RegisterFile("proto/pbservice/service.proto", fileDescriptor_cbb99233b75fb80b) } var fileDescriptor_cbb99233b75fb80b = []byte{ - // 1212 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x96, 0xcd, 0x6e, 0x1b, 0x37, - 0x10, 0xc7, 0xb5, 0xfa, 0xb0, 0x24, 0xda, 0xf5, 0x07, 0xad, 0xba, 0x5b, 0x37, 0x91, 0x1d, 0xa1, - 0x08, 0x8c, 0xd6, 0x90, 0x12, 0x1b, 0xa9, 0x9b, 0x00, 0x29, 0x50, 0x5b, 0x6e, 0x11, 0x24, 0x4e, - 0xd4, 0xb5, 0x8a, 0xa0, 0x05, 0x7a, 0xa0, 0x56, 0xf4, 0x6a, 0x61, 0x69, 0x29, 0x90, 0x94, 0x1b, - 0xbd, 0x45, 0x8f, 0xbd, 0xf6, 0xd0, 0x7b, 0x1f, 0xc3, 0x40, 0x2f, 0x39, 0xf6, 0x64, 0xb4, 0xf6, - 0x5b, 0xf8, 0x54, 0x70, 0xc8, 0x5d, 0xaf, 0x76, 0xb7, 0x46, 0x9a, 0x93, 0xc8, 0xf9, 0xcf, 0x0c, - 0xb9, 0x9c, 0x1f, 0x87, 0x42, 0x77, 0xc7, 0x9c, 0x49, 0xd6, 0x1a, 0xf7, 0x04, 0xe5, 0x67, 0xbe, - 0x4b, 0x5b, 0xe6, 0xb7, 0x09, 0x76, 0x5c, 0x8d, 0x84, 0xf5, 0x3b, 0x1e, 0x63, 0xde, 0x90, 0xb6, - 0x40, 0xe8, 0x4d, 0x4e, 0x5a, 0x42, 0xf2, 0x89, 0x2b, 0xb5, 0xe3, 0xfa, 0x27, 0x61, 0x1e, 0x97, - 0x8d, 0x46, 0x2c, 0x68, 0xe9, 
0x1f, 0x23, 0xde, 0x4b, 0x2e, 0x32, 0xa0, 0x64, 0x28, 0x07, 0xee, - 0x80, 0xba, 0xa7, 0xc6, 0xa5, 0xe6, 0x31, 0x8f, 0x69, 0x37, 0x35, 0xd2, 0xd6, 0xc6, 0xef, 0x25, - 0x84, 0x0f, 0x58, 0x10, 0x50, 0x57, 0x76, 0x38, 0x7b, 0x33, 0x3d, 0x60, 0xc1, 0x89, 0xef, 0xe1, - 0x2f, 0xd0, 0x5a, 0x9b, 0x0a, 0xe9, 0x07, 0x44, 0xfa, 0x2c, 0x38, 0xd6, 0x49, 0x5f, 0x92, 0x11, - 0xb5, 0xad, 0x4d, 0x6b, 0xab, 0xea, 0xfc, 0x87, 0x8a, 0x77, 0x50, 0x2d, 0xad, 0x3c, 0x6b, 0xdb, - 0x79, 0x88, 0xca, 0xd4, 0xf0, 0x03, 0xb4, 0xfa, 0x82, 0xb9, 0x64, 0x68, 0x2c, 0x5f, 0xf7, 0xfb, - 0x9c, 0x0a, 0x61, 0x17, 0x20, 0x24, 0x4b, 0xc2, 0x9f, 0xa1, 0xe5, 0xb8, 0xb9, 0xc3, 0xb8, 0xb4, - 0x8b, 0x9b, 0xd6, 0x56, 0xc9, 0x49, 0xd9, 0xf1, 0x23, 0x34, 0xa7, 0xbf, 0xc9, 0x2e, 0x6d, 0x5a, - 0x5b, 0xf3, 0x3b, 0x1f, 0x35, 0xf5, 0x29, 0x37, 0xc3, 0x53, 0x6e, 0x1e, 0xc3, 0x29, 0xef, 0x17, - 0xcf, 0x2f, 0x36, 0x2c, 0xc7, 0x38, 0xe3, 0x3d, 0x54, 0xfd, 0x7e, 0x2c, 0x24, 0xa7, 0x64, 0x24, - 0xec, 0xb9, 0xcd, 0xc2, 0xd6, 0xfc, 0xce, 0x6a, 0x33, 0x3a, 0xde, 0x66, 0xa8, 0x41, 0x54, 0xce, - 0xb9, 0xf1, 0xc5, 0x6d, 0x34, 0x7f, 0x44, 0xc5, 0xe0, 0x5b, 0x22, 0xe9, 0xcf, 0x64, 0x6a, 0x97, - 0x61, 0xd1, 0x3b, 0xb1, 0xd0, 0x98, 0xaa, 0xd7, 0x32, 0x39, 0xe2, 0x61, 0x6a, 0xd7, 0x87, 0x6f, - 0xc6, 0x4c, 0x50, 0xbb, 0x62, 0x76, 0x7d, 0x93, 0x40, 0x0b, 0x33, 0xb1, 0xc6, 0x19, 0x3f, 0x47, - 0xc5, 0x23, 0xd6, 0xa7, 0x76, 0x55, 0x9d, 0xdd, 0xfe, 0xde, 0xf5, 0xc5, 0xc6, 0xae, 0xe7, 0xcb, - 0xc1, 0xa4, 0xd7, 0x74, 0xd9, 0xa8, 0x35, 0x20, 0x62, 0xe0, 0xbb, 0x8c, 0x8f, 0x5b, 0x2e, 0x0b, - 0xc4, 0x64, 0xd8, 0x22, 0x1e, 0x0d, 0xa4, 0xa1, 0x4c, 0x34, 0xa1, 0xfe, 0x2a, 0xdc, 0x81, 0x24, - 0xf8, 0x18, 0x2d, 0x77, 0x39, 0x09, 0xc4, 0x98, 0x70, 0x1a, 0x68, 0x3a, 0x6c, 0x04, 0xbb, 0xb9, - 0x17, 0xdb, 0x4d, 0xd2, 0x65, 0x66, 0x5f, 0xa9, 0x04, 0x0a, 0xac, 0x78, 0x89, 0x8e, 0x99, 0x7b, - 0x4a, 0x65, 0x87, 0xc8, 0x81, 0x3d, 0xaf, 0xc1, 0xca, 0x56, 0x1b, 0x7f, 0x16, 0x51, 0x25, 0x3c, - 0x64, 0xbc, 0x85, 0x96, 0x62, 0x24, 0x75, 0xa7, 0xe3, 0x10, 0xcb, 0xa4, 0x39, 0xc1, 0xa3, 0x42, - 0x54, 0x8c, 0x89, 0x4b, 0x33, 0x78, 0x8c, 0xb4, 0x44, 0x76, 0x80, 0xbe, 0x90, 0xca, 0x0e, 0xb4, - 0xd7, 0x11, 0x6a, 0x13, 0x49, 0x5c, 0x1a, 0x48, 0xca, 0x81, 0xc0, 0xaa, 0x13, 0xb3, 0x44, 0x9c, - 0xee, 0xfb, 0x41, 0x3f, 0xc4, 0xba, 0x04, 0x5e, 0x29, 0x3b, 0xfe, 0x14, 0x7d, 0x10, 0xd9, 0x00, - 0xe8, 0x39, 0x00, 0x7a, 0xd6, 0x18, 0xa3, 0xb9, 0xfc, 0x7f, 0x68, 0x4e, 0x40, 0x59, 0x79, 0x3f, - 0x28, 0x1f, 0xa0, 0xd5, 0x03, 0x1a, 0x48, 0x4e, 0x86, 0x43, 0xe3, 0x35, 0xe1, 0xb4, 0x0f, 0xb0, - 0x55, 0x9c, 0x2c, 0x29, 0xba, 0xda, 0x6a, 0xff, 0xb1, 0x52, 0xa3, 0xd8, 0xd5, 0x9e, 0x95, 0x32, - 0x22, 0x00, 0xe8, 0xf9, 0xcc, 0x08, 0xc0, 0x74, 0xb6, 0xc4, 0x1d, 0xc2, 0xa5, 0xaf, 0x06, 0xf6, - 0x42, 0xaa, 0xc4, 0x91, 0xd6, 0x08, 0xd0, 0xa2, 0x41, 0xcc, 0xf4, 0x3e, 0xbc, 0x86, 0xe6, 0x5e, - 0x12, 0xe9, 0x9f, 0x69, 0x92, 0x2a, 0x8e, 0x99, 0xe1, 0x36, 0x5a, 0x3c, 0xf6, 0xfb, 0xd4, 0x25, - 0xdc, 0x04, 0x00, 0x0b, 0xb3, 0x87, 0x67, 0x94, 0x36, 0x3d, 0xf1, 0x03, 0xc8, 0xef, 0x24, 0x62, - 0x1a, 0x3f, 0xa0, 0x85, 0xf8, 0xad, 0x55, 0xab, 0x1d, 0xa8, 0xd6, 0x2c, 0xc2, 0xd5, 0xf4, 0x0c, - 0x3f, 0x44, 0x25, 0x75, 0x0a, 0xc2, 0xce, 0x43, 0xc7, 0xf9, 0x30, 0x75, 0xeb, 0x95, 0x6a, 0x4a, - 0xa3, 0x3d, 0x1b, 0x7f, 0x58, 0x08, 0xdd, 0x68, 0xb8, 0x81, 0x16, 0x5e, 0xf8, 0x42, 0xd2, 0x80, - 0x72, 0xa0, 0xc8, 0x02, 0x8a, 0x66, 0x6c, 0x18, 0xa3, 0x22, 0x94, 0x41, 0x5f, 0x02, 0x18, 0x47, - 0xf8, 0xa9, 0x09, 0x04, 0x16, 0x62, 0xf8, 0x85, 0x46, 0xbc, 0x8e, 0x2a, 0x1d, 0x05, 0x9a, 0xcb, - 0x86, 0x06, 0xf7, 0x68, 0xae, 0xae, 0x4d, 0x87, 0x70, 
0x41, 0xfb, 0xdf, 0x70, 0x36, 0x82, 0xef, - 0x01, 0xd6, 0x2b, 0x4e, 0xd2, 0xdc, 0x38, 0x41, 0x2b, 0x29, 0xde, 0xf0, 0x77, 0xa6, 0x75, 0xc1, - 0x45, 0xde, 0x7f, 0x7a, 0x7d, 0xb1, 0xf1, 0xf8, 0xdd, 0x5b, 0x57, 0x2c, 0xdd, 0x4d, 0x03, 0x6b, - 0x48, 0xb4, 0x96, 0xdd, 0x9d, 0x14, 0x33, 0xaf, 0x26, 0xb2, 0xc7, 0x26, 0x41, 0x3f, 0xe3, 0xb4, - 0x32, 0x35, 0x7c, 0x1f, 0x2d, 0xb6, 0x7d, 0x32, 0xa4, 0xfd, 0xb6, 0xcf, 0xa9, 0x2b, 0x87, 0x53, - 0x38, 0xbf, 0x8a, 0x93, 0xb0, 0x36, 0x7e, 0x2b, 0xa3, 0x95, 0x14, 0x11, 0xf8, 0x08, 0x15, 0x9f, - 0xfb, 0x41, 0xdf, 0x7c, 0xde, 0xe3, 0xeb, 0x8b, 0x8d, 0x47, 0xef, 0xfe, 0x79, 0x26, 0x9d, 0x4a, - 0xe0, 0x40, 0x1a, 0xbc, 0x88, 0xf2, 0xd1, 0xab, 0x9a, 0x7f, 0xd6, 0x56, 0x25, 0x8d, 0x35, 0x2a, - 0x18, 0x2b, 0x5b, 0x97, 0x78, 0xc2, 0x2e, 0x6e, 0x16, 0x94, 0x4d, 0x8d, 0xb1, 0x8d, 0xca, 0xb3, - 0x8d, 0x28, 0x9c, 0x62, 0x82, 0x96, 0xba, 0xc4, 0xf3, 0x68, 0xd8, 0x90, 0xa8, 0xb0, 0x97, 0x01, - 0xc2, 0x87, 0xb7, 0x91, 0xde, 0x4c, 0xc4, 0x1c, 0x06, 0x92, 0x4f, 0x0d, 0xa0, 0xc9, 0x7c, 0xf8, - 0x09, 0x2a, 0x1e, 0x51, 0x49, 0xcc, 0x73, 0x7a, 0xff, 0xd6, 0xbc, 0xca, 0x11, 0x92, 0x39, 0x10, - 0x03, 0xcc, 0xaa, 0x0a, 0x95, 0xa1, 0x42, 0x30, 0x56, 0xed, 0x37, 0xd6, 0x54, 0xb0, 0x6e, 0xbf, - 0x33, 0xbd, 0xa4, 0xa4, 0x39, 0xd4, 0xfd, 0xae, 0x16, 0x5b, 0x10, 0xec, 0xea, 0x85, 0x08, 0x2f, - 0x13, 0x18, 0xf0, 0x76, 0x74, 0x2f, 0xab, 0xb0, 0xc7, 0xcc, 0x90, 0xe8, 0xb6, 0x6e, 0xa3, 0xf2, - 0x6b, 0xea, 0x7b, 0x03, 0x29, 0xcc, 0xbb, 0x88, 0x63, 0xee, 0x46, 0x71, 0x42, 0x17, 0x5c, 0x43, - 0xa5, 0x2e, 0x3b, 0xa5, 0x81, 0xe9, 0x65, 0x7a, 0x82, 0xb7, 0xd1, 0xca, 0x61, 0x40, 0x7a, 0x43, - 0xda, 0x25, 0xde, 0xab, 0x33, 0xca, 0xb9, 0xdf, 0xa7, 0xd0, 0xba, 0x2a, 0x4e, 0x5a, 0xc0, 0xbb, - 0xa8, 0xa4, 0xdf, 0xe1, 0x45, 0x58, 0xef, 0x6e, 0x7c, 0x7b, 0xa9, 0x3f, 0x71, 0x8e, 0xf6, 0x55, - 0x2d, 0xec, 0x50, 0x3d, 0x47, 0x63, 0xee, 0x0b, 0x0a, 0x05, 0x58, 0x81, 0xe8, 0xb5, 0xa6, 0xf9, - 0x0b, 0x39, 0xab, 0x9a, 0x13, 0x49, 0xc4, 0xe0, 0x5d, 0x54, 0x36, 0x4b, 0xd8, 0x4b, 0x10, 0xfe, - 0x71, 0xba, 0x7e, 0xc6, 0xc1, 0x09, 0x3d, 0xd7, 0x7f, 0x42, 0xb5, 0x2c, 0x40, 0xf0, 0x32, 0x2a, - 0x9c, 0xd2, 0xa9, 0x79, 0xb4, 0xd5, 0x10, 0xb7, 0x50, 0xe9, 0x8c, 0x0c, 0x27, 0xfa, 0x65, 0xce, - 0x4c, 0x6e, 0x52, 0x38, 0xda, 0xef, 0x49, 0xfe, 0x4b, 0x6b, 0x7d, 0x0f, 0x55, 0x23, 0x4e, 0x32, - 0x72, 0xd6, 0xe2, 0x39, 0xab, 0xb1, 0xc0, 0xc6, 0x57, 0x51, 0xff, 0x0f, 0xf1, 0x8f, 0x5d, 0x0c, - 0x6b, 0xf6, 0x62, 0x84, 0xe4, 0xe5, 0x6f, 0xc8, 0x6b, 0x3c, 0x8d, 0x2a, 0xaf, 0x02, 0x3b, 0x44, - 0x08, 0x3f, 0xf0, 0x4c, 0xf7, 0x08, 0xa7, 0x4a, 0x79, 0x4d, 0x78, 0xa0, 0x14, 0x1d, 0x1b, 0x4e, - 0xf7, 0x8f, 0xce, 0xff, 0xa9, 0xe7, 0xce, 0x2f, 0xeb, 0xd6, 0xdb, 0xcb, 0xba, 0xf5, 0xf7, 0x65, - 0xdd, 0xfa, 0xe5, 0xaa, 0x9e, 0xfb, 0xf5, 0xaa, 0x9e, 0x7b, 0x7b, 0x55, 0xcf, 0xfd, 0x75, 0x55, - 0xcf, 0xfd, 0xf8, 0xf9, 0x6d, 0xcd, 0x21, 0xf1, 0x5f, 0xbf, 0x37, 0x07, 0x86, 0xdd, 0x7f, 0x03, - 0x00, 0x00, 0xff, 0xff, 0x93, 0xfb, 0xcc, 0xfb, 0x6a, 0x0c, 0x00, 0x00, + // 1216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x96, 0xdf, 0x6e, 0x13, 0x47, + 0x17, 0xc0, 0xb3, 0x89, 0x1d, 0xdb, 0x27, 0x7c, 0x21, 0x19, 0xf2, 0xd1, 0xad, 0x05, 0x4e, 0xb0, + 0x2a, 0x14, 0xb5, 0xc8, 0x06, 0x22, 0x4a, 0x41, 0xa2, 0x52, 0x13, 0xa7, 0x2d, 0x82, 0x80, 0xbb, + 0x71, 0x85, 0x5a, 0xa9, 0x17, 0xe3, 0xf5, 0x64, 0xbd, 0x8a, 0xbd, 0x63, 0xcd, 0x8c, 0x53, 0xfc, + 0x16, 0xbd, 0xec, 0x0b, 0x70, 0xdf, 0xc7, 0x40, 0xea, 0x0d, 0x97, 0xbd, 0x8a, 0x5a, 0xf2, 0x16, + 0x5c, 0x55, 0x73, 0x66, 0x76, 0xbd, 0xde, 0xdd, 0x22, 
0xda, 0x2b, 0xcf, 0x9c, 0x7f, 0x33, 0x7b, + 0xce, 0xef, 0x9c, 0x31, 0x5c, 0x9f, 0x08, 0xae, 0x78, 0x7b, 0xd2, 0x97, 0x4c, 0x9c, 0x85, 0x3e, + 0x6b, 0xdb, 0xdf, 0x16, 0xca, 0x49, 0x2d, 0x51, 0xd4, 0xaf, 0x05, 0x9c, 0x07, 0x23, 0xd6, 0x46, + 0x45, 0x7f, 0x7a, 0xd2, 0x96, 0x4a, 0x4c, 0x7d, 0x65, 0x0c, 0xeb, 0xdb, 0x71, 0x1c, 0x9f, 0x8f, + 0xc7, 0x3c, 0x0a, 0x78, 0xc0, 0xdb, 0x66, 0x69, 0x0d, 0x6e, 0x64, 0x0f, 0x1a, 0x32, 0x3a, 0x52, + 0x43, 0x7f, 0xc8, 0xfc, 0x53, 0x6b, 0xb2, 0xa5, 0xbd, 0x8c, 0x99, 0x5e, 0x19, 0x69, 0xf3, 0x55, + 0x19, 0xc8, 0x01, 0x8f, 0x22, 0xe6, 0xab, 0xae, 0xe0, 0x2f, 0x67, 0x07, 0x3c, 0x3a, 0x09, 0x03, + 0xf2, 0x39, 0x5c, 0xed, 0x30, 0xa9, 0xc2, 0x88, 0xaa, 0x90, 0x47, 0xc7, 0x26, 0xe8, 0x33, 0x3a, + 0x66, 0xae, 0xb3, 0xe3, 0xec, 0xd6, 0xbc, 0x7f, 0xd0, 0x92, 0xbb, 0xb0, 0x95, 0xd7, 0x3c, 0xee, + 0xb8, 0xcb, 0xe8, 0x55, 0xa8, 0x23, 0xb7, 0xe1, 0xca, 0x53, 0xee, 0xd3, 0x91, 0x95, 0x7c, 0x35, + 0x18, 0x08, 0x26, 0xa5, 0xbb, 0x82, 0x2e, 0x45, 0x2a, 0xf2, 0x29, 0x6c, 0xa4, 0xc5, 0x5d, 0x2e, + 0x94, 0x5b, 0xda, 0x71, 0x76, 0xcb, 0x5e, 0x4e, 0x4e, 0xee, 0xc1, 0xaa, 0xf9, 0x26, 0xb7, 0xbc, + 0xe3, 0xec, 0xae, 0xdd, 0xfd, 0xa8, 0x65, 0x32, 0xdd, 0x8a, 0x33, 0xdd, 0x3a, 0xc6, 0x4c, 0xef, + 0x97, 0x5e, 0x9f, 0x6f, 0x3b, 0x9e, 0x35, 0x26, 0xf7, 0xa1, 0xf6, 0xfd, 0x44, 0x2a, 0xc1, 0xe8, + 0x58, 0xba, 0xab, 0x3b, 0x2b, 0xbb, 0x6b, 0x77, 0xaf, 0xb4, 0x92, 0xf4, 0xb6, 0x62, 0x1d, 0x7a, + 0x2d, 0x79, 0x73, 0x5b, 0xd2, 0x81, 0xb5, 0x23, 0x26, 0x87, 0xdf, 0x50, 0xc5, 0x7e, 0xa6, 0x33, + 0xb7, 0x82, 0x87, 0x5e, 0x4b, 0xb9, 0xa6, 0xb4, 0xe6, 0x2c, 0x1b, 0x23, 0xed, 0xa6, 0x6f, 0x7d, + 0xf8, 0x72, 0xc2, 0x25, 0x73, 0xab, 0xf6, 0xd6, 0xf3, 0x00, 0x46, 0xb1, 0xe0, 0x6b, 0x8d, 0xc9, + 0x13, 0x28, 0x1d, 0xf1, 0x01, 0x73, 0x6b, 0x3a, 0x77, 0xfb, 0xf7, 0xdf, 0x9d, 0x6f, 0xef, 0x05, + 0xa1, 0x1a, 0x4e, 0xfb, 0x2d, 0x9f, 0x8f, 0xdb, 0x43, 0x2a, 0x87, 0xa1, 0xcf, 0xc5, 0xa4, 0xed, + 0xf3, 0x48, 0x4e, 0x47, 0x6d, 0x1a, 0xb0, 0x48, 0x59, 0xd2, 0x64, 0x0b, 0xeb, 0xaf, 0xdd, 0x3d, + 0x0c, 0x42, 0x8e, 0x61, 0xa3, 0x27, 0x68, 0x24, 0x27, 0x54, 0xb0, 0xc8, 0xd0, 0xe1, 0x02, 0xde, + 0xe6, 0x46, 0xea, 0x36, 0x59, 0x93, 0x85, 0x7b, 0xe5, 0x02, 0x68, 0xb0, 0xd2, 0x25, 0x3a, 0xe6, + 0xfe, 0x29, 0x53, 0x5d, 0xaa, 0x86, 0xee, 0x9a, 0x01, 0xab, 0x58, 0xdb, 0xfc, 0xbd, 0x04, 0xd5, + 0x38, 0xc9, 0x64, 0x17, 0x2e, 0xa7, 0x48, 0xea, 0xcd, 0x26, 0x31, 0x96, 0x59, 0x71, 0x86, 0x47, + 0x8d, 0xa8, 0x9c, 0x50, 0x9f, 0x15, 0xf0, 0x98, 0xe8, 0x32, 0xd1, 0x11, 0xfa, 0x95, 0x5c, 0x74, + 0xa4, 0xbd, 0x01, 0xd0, 0xa1, 0x8a, 0xfa, 0x2c, 0x52, 0x4c, 0x20, 0x81, 0x35, 0x2f, 0x25, 0x49, + 0x38, 0xdd, 0x0f, 0xa3, 0x41, 0x8c, 0x75, 0x19, 0xad, 0x72, 0x72, 0xf2, 0x09, 0xfc, 0x2f, 0x91, + 0x21, 0xd0, 0xab, 0x08, 0xf4, 0xa2, 0x30, 0x45, 0x73, 0xe5, 0xdf, 0xd0, 0x9c, 0x81, 0xb2, 0xfa, + 0xdf, 0xa0, 0xbc, 0x0d, 0x57, 0x0e, 0x58, 0xa4, 0x04, 0x1d, 0x8d, 0xac, 0xd5, 0x54, 0xb0, 0x01, + 0xc2, 0x56, 0xf5, 0x8a, 0x54, 0x49, 0x6b, 0xeb, 0xfb, 0xa7, 0x4a, 0x0d, 0xa9, 0xd6, 0x5e, 0x54, + 0x15, 0x78, 0x20, 0xd0, 0x6b, 0x85, 0x1e, 0x88, 0xe9, 0x62, 0x89, 0xbb, 0x54, 0xa8, 0x50, 0x2f, + 0xdc, 0x4b, 0xb9, 0x12, 0x27, 0xba, 0x66, 0x04, 0xeb, 0x16, 0x31, 0x3b, 0xfb, 0xc8, 0x55, 0x58, + 0x7d, 0x46, 0x55, 0x78, 0x66, 0x48, 0xaa, 0x7a, 0x76, 0x47, 0x3a, 0xb0, 0x7e, 0x1c, 0x0e, 0x98, + 0x4f, 0x85, 0x75, 0x40, 0x16, 0x16, 0x93, 0x67, 0x35, 0x1d, 0x76, 0x12, 0x46, 0x18, 0xdf, 0xcb, + 0xf8, 0x34, 0x7f, 0x80, 0x4b, 0xe9, 0xae, 0xd5, 0xa7, 0x1d, 0xe8, 0xd1, 0x2c, 0xe3, 0xd3, 0xcc, + 0x8e, 0xdc, 0x81, 0xb2, 0xce, 0x82, 0x74, 0x97, 0x71, 0xe2, 0xfc, 0x3f, 0xd7, 
0xf5, 0x5a, 0x6b, + 0x4b, 0x63, 0x2c, 0x9b, 0xbf, 0x39, 0x00, 0x73, 0x1d, 0x69, 0xc2, 0xa5, 0xa7, 0xa1, 0x54, 0x2c, + 0x62, 0x02, 0x29, 0x72, 0x90, 0xa2, 0x05, 0x19, 0x21, 0x50, 0xc2, 0x32, 0x98, 0x26, 0xc0, 0x75, + 0x82, 0x9f, 0xde, 0xa0, 0xe3, 0x4a, 0x0a, 0xbf, 0x58, 0x48, 0xea, 0x50, 0xed, 0x6a, 0xd0, 0x7c, + 0x3e, 0xb2, 0xb8, 0x27, 0x7b, 0xdd, 0x36, 0x5d, 0x2a, 0x24, 0x1b, 0x7c, 0x2d, 0xf8, 0x18, 0xbf, + 0x07, 0x59, 0xaf, 0x7a, 0x59, 0x71, 0xf3, 0x04, 0x36, 0x73, 0xbc, 0x91, 0xef, 0xec, 0xe8, 0xc2, + 0x46, 0xde, 0x7f, 0xf4, 0xee, 0x7c, 0xfb, 0xc1, 0x87, 0x8f, 0xae, 0x54, 0xb8, 0xf9, 0x00, 0x6b, + 0x2a, 0xb8, 0x5a, 0x3c, 0x9d, 0x34, 0x33, 0xcf, 0xa7, 0xaa, 0xcf, 0xa7, 0xd1, 0xa0, 0x20, 0x5b, + 0x85, 0x3a, 0x72, 0x13, 0xd6, 0x3b, 0x21, 0x1d, 0xb1, 0x41, 0x27, 0x14, 0xcc, 0x57, 0xa3, 0x19, + 0xe6, 0xaf, 0xea, 0x65, 0xa4, 0xcd, 0x57, 0x15, 0xd8, 0xcc, 0x11, 0x41, 0x8e, 0xa0, 0xf4, 0x24, + 0x8c, 0x06, 0xf6, 0xf3, 0x1e, 0xbc, 0x3b, 0xdf, 0xbe, 0xf7, 0xe1, 0x9f, 0x67, 0xc3, 0xe9, 0x00, + 0x1e, 0x86, 0x21, 0xeb, 0xb0, 0x9c, 0xbc, 0xaa, 0xcb, 0x8f, 0x3b, 0xba, 0xa4, 0xa9, 0x41, 0x85, + 0x6b, 0x2d, 0xeb, 0xd1, 0x40, 0xba, 0xa5, 0x9d, 0x15, 0x2d, 0xd3, 0x6b, 0xe2, 0x42, 0x65, 0x71, + 0x10, 0xc5, 0x5b, 0x42, 0xe1, 0x72, 0x8f, 0x06, 0x01, 0x8b, 0x07, 0x12, 0x93, 0xee, 0x06, 0x42, + 0x78, 0xe7, 0x7d, 0xa4, 0xb7, 0x32, 0x3e, 0x87, 0x91, 0x12, 0x33, 0x0b, 0x68, 0x36, 0x1e, 0x79, + 0x08, 0xa5, 0x23, 0xa6, 0xa8, 0x7d, 0x4e, 0x6f, 0xbe, 0x37, 0xae, 0x36, 0xc4, 0x60, 0x1e, 0xfa, + 0x20, 0xb3, 0xba, 0x42, 0x15, 0xac, 0x10, 0xae, 0xf5, 0xf8, 0x4d, 0x0d, 0x15, 0x62, 0xc6, 0xef, + 0xc2, 0x2c, 0x29, 0x1b, 0x0e, 0xcd, 0xbc, 0xdb, 0x4a, 0x1d, 0x88, 0x72, 0xfd, 0x42, 0xc4, 0xcd, + 0x84, 0x02, 0x72, 0x2b, 0xe9, 0xcb, 0x1a, 0xde, 0xb1, 0xd0, 0x25, 0xe9, 0xd6, 0x5b, 0x50, 0x79, + 0xc1, 0xc2, 0x60, 0xa8, 0xa4, 0x7d, 0x17, 0x49, 0xca, 0xdc, 0x6a, 0xbc, 0xd8, 0x84, 0x6c, 0x41, + 0xb9, 0xc7, 0x4f, 0x59, 0x64, 0x67, 0x99, 0xd9, 0x90, 0x5b, 0xb0, 0x79, 0x18, 0xd1, 0xfe, 0x88, + 0xf5, 0x68, 0xf0, 0xfc, 0x8c, 0x09, 0x11, 0x0e, 0x18, 0x8e, 0xae, 0xaa, 0x97, 0x57, 0x90, 0x3d, + 0x28, 0x9b, 0x77, 0x78, 0x1d, 0xcf, 0xbb, 0x9e, 0xbe, 0x5e, 0xee, 0x4f, 0x9c, 0x67, 0x6c, 0xc9, + 0xb7, 0xb0, 0x7e, 0xa8, 0x9f, 0xa3, 0x89, 0x08, 0x25, 0xc3, 0x02, 0x6c, 0xa2, 0x77, 0xbd, 0x35, + 0xff, 0x37, 0xd9, 0x5a, 0xb4, 0xb0, 0x59, 0xc9, 0xf8, 0x91, 0x3d, 0xa8, 0xd8, 0x63, 0xdc, 0xcb, + 0x18, 0xe2, 0xe3, 0x7c, 0x0d, 0xad, 0x81, 0x17, 0x5b, 0xd6, 0x7f, 0x82, 0xad, 0x22, 0x48, 0xc8, + 0x06, 0xac, 0x9c, 0xb2, 0x99, 0x7d, 0xb8, 0xf5, 0x92, 0xb4, 0xa1, 0x7c, 0x46, 0x47, 0x53, 0xf3, + 0x3a, 0x17, 0x06, 0xb7, 0x21, 0x3c, 0x63, 0xf7, 0x70, 0xf9, 0x0b, 0xa7, 0x7e, 0x1f, 0x6a, 0x09, + 0x2b, 0x05, 0x31, 0xb7, 0xd2, 0x31, 0x6b, 0x29, 0xc7, 0xe6, 0x97, 0xc9, 0x1b, 0x10, 0xb7, 0x40, + 0xaa, 0x39, 0x9c, 0xc5, 0xe6, 0x88, 0xe9, 0x5b, 0x9e, 0xd3, 0xd7, 0x7c, 0x94, 0x54, 0x5f, 0x3b, + 0x76, 0xa9, 0x94, 0x61, 0x14, 0xd8, 0x09, 0x12, 0x6f, 0xb5, 0xe6, 0x05, 0x15, 0x91, 0xd6, 0x18, + 0xdf, 0x78, 0xbb, 0x7f, 0xf4, 0xfa, 0xaf, 0xc6, 0xd2, 0xeb, 0xb7, 0x0d, 0xe7, 0xcd, 0xdb, 0x86, + 0xf3, 0xe7, 0xdb, 0x86, 0xf3, 0xcb, 0x45, 0x63, 0xe9, 0xd7, 0x8b, 0xc6, 0xd2, 0x9b, 0x8b, 0xc6, + 0xd2, 0x1f, 0x17, 0x8d, 0xa5, 0x1f, 0x3f, 0x7b, 0xdf, 0x80, 0xc8, 0xfc, 0xdf, 0xef, 0xaf, 0xa2, + 0x60, 0xef, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6d, 0x83, 0xcc, 0x12, 0x72, 0x0c, 0x00, 0x00, } func (m *ConnectProxyConfig) Marshal() (dAtA []byte, err error) { diff --git a/proto/pbservice/service.proto b/proto/pbservice/service.proto index 5faf9fe92..360e3e3ff 100644 --- 
a/proto/pbservice/service.proto +++ b/proto/pbservice/service.proto @@ -5,7 +5,7 @@ package pbservice; option go_package = "github.com/hashicorp/consul/proto/pbservice"; import "google/protobuf/struct.proto"; -import "proto/pbcommon/common.proto"; +import "proto/pbcommongogo/common.proto"; import "proto/pbservice/healthcheck.proto"; // This fake import path is replaced by the build script with a versioned path @@ -269,7 +269,7 @@ message ServiceDefinition { ConnectProxyConfig Proxy = 14; // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - common.EnterpriseMeta EnterpriseMeta = 17 [(gogoproto.nullable) = false]; + commongogo.EnterpriseMeta EnterpriseMeta = 17 [(gogoproto.nullable) = false]; // mog: func-to=ServiceConnectPtrToStructs func-from=NewServiceConnectPtrFromStructs ServiceConnect Connect = 15; From 1bf4571c8eedbc4524f1cac410ee7e9e0a5dfc81 Mon Sep 17 00:00:00 2001 From: Karl Cardenas Date: Tue, 22 Mar 2022 15:40:53 -0700 Subject: [PATCH 003/785] docs: add link to k8s cli install page --- website/content/docs/k8s/k8s-cli.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/content/docs/k8s/k8s-cli.mdx b/website/content/docs/k8s/k8s-cli.mdx index a5ae845db..186fde137 100644 --- a/website/content/docs/k8s/k8s-cli.mdx +++ b/website/content/docs/k8s/k8s-cli.mdx @@ -9,7 +9,9 @@ description: >- Consul K8s CLI is a tool for quickly installing and interacting with Consul on Kubernetes. The Consul K8s CLI allows you to manage the lifecycle of Consul without requiring the usage of `Helm`, [Consul CLI](/commands/index), and `kubectl`. -The Consul K8s CLI offers a Kubernetes native experience for managing Consul. +The Consul K8s CLI offers a Kubernetes native experience for managing Consul. + +-> **Note**: For guidance on how to install the Consul K8s CLI, visit the [Installing the Consul K8s CLI](/docs/k8s/installation/install-cli) documentation page. This topic describes the subcommands and available options for using Consul K8s CLI. From 626fe75167aa8e21a17a8c5903856e714f7345ee Mon Sep 17 00:00:00 2001 From: Karl Cardenas Date: Tue, 22 Mar 2022 15:51:04 -0700 Subject: [PATCH 004/785] docs: removed the word page --- website/content/docs/k8s/k8s-cli.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/k8s/k8s-cli.mdx b/website/content/docs/k8s/k8s-cli.mdx index 186fde137..f3757748b 100644 --- a/website/content/docs/k8s/k8s-cli.mdx +++ b/website/content/docs/k8s/k8s-cli.mdx @@ -11,7 +11,7 @@ Consul K8s CLI is a tool for quickly installing and interacting with Consul on K The Consul K8s CLI allows you to manage the lifecycle of Consul without requiring the usage of `Helm`, [Consul CLI](/commands/index), and `kubectl`. The Consul K8s CLI offers a Kubernetes native experience for managing Consul. --> **Note**: For guidance on how to install the Consul K8s CLI, visit the [Installing the Consul K8s CLI](/docs/k8s/installation/install-cli) documentation page. +-> **Note**: For guidance on how to install the Consul K8s CLI, visit the [Installing the Consul K8s CLI](/docs/k8s/installation/install-cli) documentation. This topic describes the subcommands and available options for using Consul K8s CLI. 
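The page touched by these two docs commits documents day-to-day use of the `consul-k8s` binary. As a point of reference, a minimal sketch of that workflow, assuming the CLI has already been installed per the linked guide; the available subcommands and flags vary by CLI version:

```shell-session
$ consul-k8s version     # confirm the CLI is on the PATH
$ consul-k8s install     # install Consul into the current Kubernetes context
$ consul-k8s status      # inspect an existing installation
$ consul-k8s uninstall   # remove the installation
```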
From 04f1d9bcc9dc69d42e8e3a6fc7d000fa856aecf1 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Tue, 22 Mar 2022 16:58:41 -0700 Subject: [PATCH 005/785] oss: Add overview UI internal endpoint --- agent/consul/internal_endpoint.go | 22 +++ agent/consul/internal_endpoint_test.go | 91 +++++++++++++ agent/consul/server.go | 7 + agent/consul/server_overview.go | 182 +++++++++++++++++++++++++ agent/consul/server_overview_test.go | 166 ++++++++++++++++++++++ agent/consul/state/catalog.go | 36 +++++ agent/http_register.go | 1 + agent/structs/catalog.go | 36 +++++ agent/structs/structs_oss.go | 4 + agent/ui_endpoint.go | 18 +++ 10 files changed, 563 insertions(+) create mode 100644 agent/consul/server_overview.go create mode 100644 agent/consul/server_overview_test.go diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index 62fb667b1..9c2f2c75d 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -147,6 +147,28 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. }) } +func (m *Internal) CatalogOverview(args *structs.DCSpecificRequest, reply *structs.CatalogSummary) error { + if done, err := m.srv.ForwardRPC("Internal.CatalogOverview", args, reply); done { + return err + } + + authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil) + if err != nil { + return err + } + + if authz.OperatorRead(nil) != acl.Allow { + return acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessRead) + } + + summary := m.srv.overviewManager.GetCurrentSummary() + if summary != nil { + *reply = *summary + } + + return nil +} + func (m *Internal) ServiceTopology(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceTopology) error { if done, err := m.srv.ForwardRPC("Internal.ServiceTopology", args, reply); done { return err diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index 601eb7cc4..25c9c75f4 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -6,6 +6,7 @@ import ( "os" "strings" "testing" + "time" msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/assert" @@ -2477,3 +2478,93 @@ service_prefix "mongo" { policy = "read" } }) }) } + +func TestInternal_CatalogOverview(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.MetricsReportingInterval = 100 * time.Millisecond + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + arg := structs.DCSpecificRequest{ + Datacenter: "dc1", + } + retry.Run(t, func(r *retry.R) { + var out structs.CatalogSummary + if err := msgpackrpc.CallWithCodec(codec, "Internal.CatalogOverview", &arg, &out); err != nil { + r.Fatalf("err: %v", err) + } + + expected := structs.CatalogSummary{ + Nodes: []structs.HealthSummary{ + { + Total: 1, + Passing: 1, + EnterpriseMeta: *structs.NodeEnterpriseMetaInDefaultPartition(), + }, + }, + Services: []structs.HealthSummary{ + { + Name: "consul", + Total: 1, + Passing: 1, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + }, + Checks: []structs.HealthSummary{ + { + Name: "Serf Health Status", + Total: 1, + Passing: 1, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + }, + } + require.Equal(r, expected, out) 
+ }) +} + +func TestInternal_CatalogOverview_ACLDeny(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = TestDefaultInitialManagementToken + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + arg := structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var out structs.CatalogSummary + err := msgpackrpc.CallWithCodec(codec, "Internal.CatalogOverview", &arg, &out) + require.True(t, acl.IsErrPermissionDenied(err)) + + opReadToken, err := upsertTestTokenWithPolicyRules( + codec, TestDefaultInitialManagementToken, "dc1", `operator = "read"`) + require.NoError(t, err) + + arg.Token = opReadToken.SecretID + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.CatalogOverview", &arg, &out)) +} diff --git a/agent/consul/server.go b/agent/consul/server.go index ba77c4517..da821ebc8 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -308,6 +308,10 @@ type Server struct { // Consul router. statsFetcher *StatsFetcher + // overviewManager is used to periodically update the cluster overview + // and emit node/service/check health metrics. + overviewManager *OverviewManager + // reassertLeaderCh is used to signal the leader loop should re-run // leadership actions after a snapshot restore. reassertLeaderCh chan chan error @@ -613,6 +617,9 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve } go reporter.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) + s.overviewManager = NewOverviewManager(s.logger, s.fsm, s.config.MetricsReportingInterval) + go s.overviewManager.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) + s.grpcHandler = newGRPCHandlerFromConfig(flat, config, s) s.grpcLeaderForwarder = flat.LeaderForwarder go s.trackLeaderChanges() diff --git a/agent/consul/server_overview.go b/agent/consul/server_overview.go new file mode 100644 index 000000000..149743d3f --- /dev/null +++ b/agent/consul/server_overview.go @@ -0,0 +1,182 @@ +package consul + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/hashicorp/consul/agent/consul/usagemetrics" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-hclog" +) + +type OverviewManager struct { + stateProvider usagemetrics.StateProvider + logger hclog.Logger + interval time.Duration + + currentSummary *structs.CatalogSummary + sync.RWMutex +} + +func NewOverviewManager(logger hclog.Logger, sp usagemetrics.StateProvider, interval time.Duration) *OverviewManager { + return &OverviewManager{ + stateProvider: sp, + logger: logger.Named("catalog-overview"), + interval: interval, + currentSummary: &structs.CatalogSummary{}, + } +} + +func (m *OverviewManager) GetCurrentSummary() *structs.CatalogSummary { + m.RLock() + defer m.RUnlock() + return m.currentSummary +} + +func (m *OverviewManager) Run(ctx context.Context) { + ticker := time.NewTicker(m.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + state := m.stateProvider.State() + catalog, err := state.CatalogDump() + if err != nil { + m.logger.Error("failed to update overview", "error", err) + continue + } + + summary := getCatalogOverview(catalog) + m.Lock() + m.currentSummary = summary + 
m.Unlock() + } + } +} + +// getCatalogOverview returns a breakdown of the number of nodes, services, and checks +// in the passing/warning/critical states. In Enterprise, it will also return this +// breakdown for each partition and namespace. +func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummary { + nodeChecks := make(map[string][]*structs.HealthCheck) + serviceInstanceChecks := make(map[string][]*structs.HealthCheck) + checkSummaries := make(map[string]structs.HealthSummary) + + // Compute the health check summaries by taking the pass/warn/fail counts + // of each unique part/ns/checkname combo and storing them. Also store the + // per-node and per-service instance checks for their respective summaries below. + for _, check := range catalog.Checks { + checkID := fmt.Sprintf("%s/%s", check.EnterpriseMeta.String(), check.Name) + summary, ok := checkSummaries[checkID] + if !ok { + summary = structs.HealthSummary{ + Name: check.Name, + EnterpriseMeta: check.EnterpriseMeta, + } + } + + summary.Add(check.Status) + checkSummaries[checkID] = summary + + if check.ServiceID != "" { + serviceInstanceID := fmt.Sprintf("%s/%s/%s", check.EnterpriseMeta.String(), check.Node, check.ServiceID) + serviceInstanceChecks[serviceInstanceID] = append(serviceInstanceChecks[serviceInstanceID], check) + } else { + nodeMeta := check.NodeIdentity().EnterpriseMeta + nodeID := fmt.Sprintf("%s/%s", nodeMeta.String(), check.Node) + nodeChecks[nodeID] = append(nodeChecks[nodeID], check) + } + } + + // Compute the service instance summaries by taking the unhealthiest check for + // a given service instance as its health status and totaling the counts for each + // partition/ns/service combination. + serviceSummaries := make(map[string]structs.HealthSummary) + for _, svc := range catalog.Services { + sid := structs.NewServiceID(svc.ServiceName, &svc.EnterpriseMeta) + summary, ok := serviceSummaries[sid.String()] + if !ok { + summary = structs.HealthSummary{ + Name: svc.ServiceName, + EnterpriseMeta: svc.EnterpriseMeta, + } + } + + // Compute whether this service instance is healthy based on its associated checks. + serviceInstanceID := fmt.Sprintf("%s/%s/%s", svc.EnterpriseMeta.String(), svc.Node, svc.ServiceID) + status := api.HealthPassing + for _, checks := range serviceInstanceChecks[serviceInstanceID] { + if checks.Status == api.HealthWarning && status == api.HealthPassing { + status = api.HealthWarning + } + if checks.Status == api.HealthCritical { + status = api.HealthCritical + } + } + + summary.Add(status) + serviceSummaries[sid.String()] = summary + } + + // Compute the node summaries by taking the unhealthiest check for each node + // as its health status and totaling the passing/warning/critical counts for + // each partition. + nodeSummaries := make(map[string]structs.HealthSummary) + for _, node := range catalog.Nodes { + nodeMeta := structs.NodeEnterpriseMetaInPartition(node.Partition) + summary, ok := nodeSummaries[nodeMeta.String()] + if !ok { + summary = structs.HealthSummary{ + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(node.Partition), + } + } + + // Compute whether this node is healthy based on its associated checks. 
+ status := api.HealthPassing + nodeID := fmt.Sprintf("%s/%s", nodeMeta.String(), node.Node) + for _, checks := range nodeChecks[nodeID] { + if checks.Status == api.HealthWarning && status == api.HealthPassing { + status = api.HealthWarning + } + if checks.Status == api.HealthCritical { + status = api.HealthCritical + } + } + + summary.Add(status) + nodeSummaries[nodeMeta.String()] = summary + } + + // Construct the summary. + summary := &structs.CatalogSummary{} + for _, healthSummary := range nodeSummaries { + summary.Nodes = append(summary.Nodes, healthSummary) + } + for _, healthSummary := range serviceSummaries { + summary.Services = append(summary.Services, healthSummary) + } + for _, healthSummary := range checkSummaries { + summary.Checks = append(summary.Checks, healthSummary) + } + + summarySort := func(slice []structs.HealthSummary) func(int, int) bool { + return func(i, j int) bool { + if slice[i].Name < slice[j].Name { + return true + } + return slice[i].EnterpriseMeta.String() < slice[j].EnterpriseMeta.String() + } + } + sort.Slice(summary.Nodes, summarySort(summary.Nodes)) + sort.Slice(summary.Services, summarySort(summary.Services)) + sort.Slice(summary.Checks, summarySort(summary.Checks)) + + return summary +} diff --git a/agent/consul/server_overview_test.go b/agent/consul/server_overview_test.go new file mode 100644 index 000000000..dc2d439e0 --- /dev/null +++ b/agent/consul/server_overview_test.go @@ -0,0 +1,166 @@ +package consul + +import ( + "testing" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" +) + +func TestCatalogOverview(t *testing.T) { + cases := []struct { + name string + nodes []*structs.Node + services []*structs.ServiceNode + checks []*structs.HealthCheck + expected structs.CatalogSummary + }{ + { + name: "empty", + expected: structs.CatalogSummary{}, + }, + { + name: "one node with no checks", + nodes: []*structs.Node{ + {Node: "node1"}, + }, + expected: structs.CatalogSummary{ + Nodes: []structs.HealthSummary{ + {Total: 1, Passing: 1}, + }, + }, + }, + { + name: "one service with no checks", + services: []*structs.ServiceNode{ + {Node: "node1", ServiceName: "service1"}, + }, + expected: structs.CatalogSummary{ + Services: []structs.HealthSummary{ + {Name: "service1", Total: 1, Passing: 1}, + }, + }, + }, + { + name: "three nodes with node checks", + nodes: []*structs.Node{ + {Node: "node1"}, + {Node: "node2"}, + {Node: "node3"}, + }, + checks: []*structs.HealthCheck{ + {Node: "node1", Name: "check1", CheckID: "check1", Status: api.HealthPassing}, + {Node: "node2", Name: "check1", CheckID: "check1", Status: api.HealthWarning}, + {Node: "node3", Name: "check1", CheckID: "check1", Status: api.HealthCritical}, + }, + expected: structs.CatalogSummary{ + Nodes: []structs.HealthSummary{ + {Total: 3, Passing: 1, Warning: 1, Critical: 1}, + }, + Checks: []structs.HealthSummary{ + {Name: "check1", Total: 3, Passing: 1, Warning: 1, Critical: 1}, + }, + }, + }, + { + name: "three instances of one service with checks", + nodes: []*structs.Node{ + {Node: "node1"}, + }, + services: []*structs.ServiceNode{ + {Node: "node1", ServiceName: "service1", ServiceID: "id1"}, + {Node: "node1", ServiceName: "service1", ServiceID: "id2"}, + {Node: "node1", ServiceName: "service1", ServiceID: "id3"}, + }, + checks: []*structs.HealthCheck{ + {Node: "node1", Name: "check1", CheckID: "check1", ServiceID: "id1", Status: api.HealthPassing}, + {Node: "node1", Name: "check1", CheckID: "check2", ServiceID: "id2", 
Status: api.HealthWarning}, + {Node: "node1", Name: "check1", CheckID: "check3", ServiceID: "id3", Status: api.HealthCritical}, + }, + expected: structs.CatalogSummary{ + Nodes: []structs.HealthSummary{ + {Total: 1, Passing: 1}, + }, + Services: []structs.HealthSummary{ + {Name: "service1", Total: 3, Passing: 1, Warning: 1, Critical: 1}, + }, + Checks: []structs.HealthSummary{ + {Name: "check1", Total: 3, Passing: 1, Warning: 1, Critical: 1}, + }, + }, + }, + { + name: "three instances of different services with checks", + nodes: []*structs.Node{ + {Node: "node1"}, + }, + services: []*structs.ServiceNode{ + {Node: "node1", ServiceName: "service1", ServiceID: "id1"}, + {Node: "node1", ServiceName: "service2", ServiceID: "id2"}, + {Node: "node1", ServiceName: "service3", ServiceID: "id3"}, + }, + checks: []*structs.HealthCheck{ + {Node: "node1", Name: "check1", CheckID: "check1", ServiceID: "id1", Status: api.HealthPassing}, + {Node: "node1", Name: "check1", CheckID: "check2", ServiceID: "id2", Status: api.HealthWarning}, + {Node: "node1", Name: "check1", CheckID: "check3", ServiceID: "id3", Status: api.HealthCritical}, + }, + expected: structs.CatalogSummary{ + Nodes: []structs.HealthSummary{ + {Total: 1, Passing: 1}, + }, + Services: []structs.HealthSummary{ + {Name: "service1", Total: 1, Passing: 1}, + {Name: "service2", Total: 1, Warning: 1}, + {Name: "service3", Total: 1, Critical: 1}, + }, + Checks: []structs.HealthSummary{ + {Name: "check1", Total: 3, Passing: 1, Warning: 1, Critical: 1}, + }, + }, + }, + { + name: "many instances of the same check", + checks: []*structs.HealthCheck{ + {Name: "check1", CheckID: "check1", Status: api.HealthPassing}, + {Name: "check1", CheckID: "check2", Status: api.HealthWarning}, + {Name: "check1", CheckID: "check3", Status: api.HealthCritical}, + {Name: "check1", CheckID: "check4", Status: api.HealthPassing}, + {Name: "check1", CheckID: "check5", Status: api.HealthCritical}, + }, + expected: structs.CatalogSummary{ + Checks: []structs.HealthSummary{ + {Name: "check1", Total: 5, Passing: 2, Warning: 1, Critical: 2}, + }, + }, + }, + { + name: "three different checks", + checks: []*structs.HealthCheck{ + {Name: "check1", CheckID: "check1", Status: api.HealthPassing}, + {Name: "check2", CheckID: "check2", Status: api.HealthWarning}, + {Name: "check3", CheckID: "check3", Status: api.HealthCritical}, + }, + expected: structs.CatalogSummary{ + Checks: []structs.HealthSummary{ + {Name: "check1", Total: 1, Passing: 1}, + {Name: "check2", Total: 1, Warning: 1}, + {Name: "check3", Total: 1, Critical: 1}, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + summary := getCatalogOverview(&structs.CatalogContents{ + Nodes: tc.nodes, + Services: tc.services, + Checks: tc.checks, + }) + require.ElementsMatch(t, tc.expected.Nodes, summary.Nodes) + require.ElementsMatch(t, tc.expected.Services, summary.Services) + require.ElementsMatch(t, tc.expected.Checks, summary.Checks) + }) + } +} diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index d8284d427..b882931d6 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -4106,3 +4106,39 @@ func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, k } return nil } + +// CatalogDump returns all the contents of the node, service and check tables. +// In Enterprise, this will return entries across all partitions and namespaces. 
+func (s *Store) CatalogDump() (*structs.CatalogContents, error) { + tx := s.db.Txn(false) + contents := &structs.CatalogContents{} + + nodes, err := tx.Get(tableNodes, indexID) + if err != nil { + return nil, fmt.Errorf("failed nodes lookup: %s", err) + } + for node := nodes.Next(); node != nil; node = nodes.Next() { + n := node.(*structs.Node) + contents.Nodes = append(contents.Nodes, n) + } + + services, err := tx.Get(tableServices, indexID) + if err != nil { + return nil, fmt.Errorf("failed services lookup: %s", err) + } + for service := services.Next(); service != nil; service = services.Next() { + svc := service.(*structs.ServiceNode) + contents.Services = append(contents.Services, svc) + } + + checks, err := tx.Get(tableChecks, indexID) + if err != nil { + return nil, fmt.Errorf("failed checks lookup: %s", err) + } + for check := checks.Next(); check != nil; check = checks.Next() { + c := check.(*structs.HealthCheck) + contents.Checks = append(contents.Checks, c) + } + + return contents, nil +} diff --git a/agent/http_register.go b/agent/http_register.go index df20cdfe3..47cdfcf1f 100644 --- a/agent/http_register.go +++ b/agent/http_register.go @@ -91,6 +91,7 @@ func init() { registerEndpoint("/v1/internal/ui/nodes", []string{"GET"}, (*HTTPHandlers).UINodes) registerEndpoint("/v1/internal/ui/node/", []string{"GET"}, (*HTTPHandlers).UINodeInfo) registerEndpoint("/v1/internal/ui/services", []string{"GET"}, (*HTTPHandlers).UIServices) + registerEndpoint("/v1/internal/ui/catalog-overview", []string{"GET"}, (*HTTPHandlers).UICatalogOverview) registerEndpoint("/v1/internal/ui/gateway-services-nodes/", []string{"GET"}, (*HTTPHandlers).UIGatewayServicesNodes) registerEndpoint("/v1/internal/ui/gateway-intentions/", []string{"GET"}, (*HTTPHandlers).UIGatewayIntentions) registerEndpoint("/v1/internal/ui/service-topology/", []string{"GET"}, (*HTTPHandlers).UIServiceTopology) diff --git a/agent/structs/catalog.go b/agent/structs/catalog.go index b118b9935..73cd0264f 100644 --- a/agent/structs/catalog.go +++ b/agent/structs/catalog.go @@ -1,6 +1,7 @@ package structs import ( + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" ) @@ -19,3 +20,38 @@ const ( ConsulServiceID = "consul" ConsulServiceName = "consul" ) + +type CatalogContents struct { + Nodes []*Node + Services []*ServiceNode + Checks []*HealthCheck +} + +type CatalogSummary struct { + Nodes []HealthSummary + Services []HealthSummary + Checks []HealthSummary +} + +type HealthSummary struct { + Name string `json:",omitempty"` + + Total int + Passing int + Warning int + Critical int + + EnterpriseMeta +} + +func (h *HealthSummary) Add(status string) { + h.Total++ + switch status { + case api.HealthPassing: + h.Passing++ + case api.HealthWarning: + h.Warning++ + case api.HealthCritical: + h.Critical++ + } +} diff --git a/agent/structs/structs_oss.go b/agent/structs/structs_oss.go index 669361802..7f56c4355 100644 --- a/agent/structs/structs_oss.go +++ b/agent/structs/structs_oss.go @@ -15,6 +15,10 @@ var emptyEnterpriseMeta = EnterpriseMeta{} // EnterpriseMeta stub type EnterpriseMeta struct{} +func (m *EnterpriseMeta) String() string { + return "" +} + func (m *EnterpriseMeta) ToEnterprisePolicyMeta() *acl.EnterprisePolicyMeta { return nil } diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index f794f2f66..1defb241b 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -172,6 +172,24 @@ RPC: return nil, nil } +// UICatalogOverview is used to get a high-level overview of the health of nodes, services, 
+// and checks in the datacenter. +func (s *HTTPHandlers) UICatalogOverview(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Parse arguments + args := structs.DCSpecificRequest{} + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + // Make the RPC request + var out structs.CatalogSummary + if err := s.agent.RPC("Internal.CatalogOverview", &args, &out); err != nil { + return nil, err + } + + return out, nil +} + // UIServices is used to list the services in a given datacenter. We return a // ServiceSummary which provides overview information for the service func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) { From 28c925f6d04a26cb2a54928e28d39fc53b6847b0 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Tue, 22 Mar 2022 20:41:13 -0700 Subject: [PATCH 006/785] Fixup dropped SecretID usage Looks like something got munged at some point. Not sure how it slipped in, but my best guess is that because TestTxn_Apply_ACLDeny is marked flaky we didn't block merge because it failed. Signed-off-by: Mark Anderson --- agent/consul/txn_endpoint_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index c48d1a035..4f82c98d6 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -875,12 +875,11 @@ func TestTxn_Read_ACLDeny(t *testing.T) { state.EnsureCheck(4, &check) token := createTokenFull(t, codec, testTxnRules) - id := token.AccessorID t.Run("simple read operations (results get filtered out)", func(t *testing.T) { arg := structs.TxnReadRequest{ Datacenter: "dc1", - QueryOptions: structs.QueryOptions{Token: id}, + QueryOptions: structs.QueryOptions{Token: token.SecretID}, Ops: structs.TxnOps{ { KV: &structs.TxnKVOp{ @@ -912,7 +911,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) { t.Run("complex operations (return permission denied errors)", func(t *testing.T) { arg := structs.TxnReadRequest{ Datacenter: "dc1", - QueryOptions: structs.QueryOptions{Token: id}, + QueryOptions: structs.QueryOptions{Token: token.SecretID}, Ops: structs.TxnOps{ { KV: &structs.TxnKVOp{ From 13c6959b2b933b211f6a437ed12a2324e7fca441 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 23 Mar 2022 10:34:26 +0000 Subject: [PATCH 007/785] ui: Tile CSS component (#12570) * ui: Tile CSS component * ui: Consul ServerCard component (#12576) --- .../components/consul/server/card/README.mdx | 118 ++++++++++++++++++ .../components/consul/server/card/index.hbs | 41 ++++++ .../components/consul/server/card/index.scss | 5 + .../components/consul/server/card/layout.scss | 24 ++++ .../components/consul/server/card/skin.scss | 40 ++++++ .../components/consul/server/list/index.hbs | 17 +++ .../components/consul/server/list/index.scss | 9 ++ .../consul-ui/app/components/tile/README.mdx | 20 +++ .../consul-ui/app/components/tile/debug.scss | 6 + .../consul-ui/app/components/tile/index.scss | 37 ++++++ .../app/styles/base/component/index.scss | 6 + .../base/decoration/base-variables.scss | 1 + .../app/styles/base/icons/icons/index.scss | 12 +- .../consul-ui/app/styles/base/index.scss | 1 + .../base/typography/base-keyframes.scss | 7 ++ .../base/typography/base-variables.scss | 1 + .../app/styles/base/typography/index.scss | 1 + .../consul-ui/app/styles/components.scss | 3 + ui/packages/consul-ui/app/styles/debug.scss | 1 + 19 files changed, 344 insertions(+), 6 deletions(-) create mode 100644 
ui/packages/consul-ui/app/components/consul/server/card/README.mdx create mode 100644 ui/packages/consul-ui/app/components/consul/server/card/index.hbs create mode 100644 ui/packages/consul-ui/app/components/consul/server/card/index.scss create mode 100644 ui/packages/consul-ui/app/components/consul/server/card/layout.scss create mode 100644 ui/packages/consul-ui/app/components/consul/server/card/skin.scss create mode 100644 ui/packages/consul-ui/app/components/consul/server/list/index.hbs create mode 100644 ui/packages/consul-ui/app/components/consul/server/list/index.scss create mode 100644 ui/packages/consul-ui/app/components/tile/README.mdx create mode 100644 ui/packages/consul-ui/app/components/tile/debug.scss create mode 100644 ui/packages/consul-ui/app/components/tile/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/component/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/typography/base-keyframes.scss diff --git a/ui/packages/consul-ui/app/components/consul/server/card/README.mdx b/ui/packages/consul-ui/app/components/consul/server/card/README.mdx new file mode 100644 index 000000000..a4b2ebc7d --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/card/README.mdx @@ -0,0 +1,118 @@ + +# Consul::Server::Card + + + +A presentational component representing a Consul Server + + +```hbs preview-template +
+
+ Read Replicas just show the name of the Server +
+ + +
+ +
+
+ Leaders have a special icon-tile +
+ + +
+ +
+
+ Unhealthy voters have a differently colored badge +
+ + +
+ +
+
+ Non-voters have different text and coloring +
+ + +
+
+ + +
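+{{!--
+  A minimal usage sketch, assuming @item is a hash carrying the Name, Status and
+  ReadReplica fields that the card template reads; the values are illustrative only:
+
+  <Consul::Server::Card @item={{hash Name="server-1" Status="leader" ReadReplica=false}} />
+--}}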
+``` + +## Attributes + + + + +## Arguments + + + +| Argument | Type | Default | Description | +| :------- | :----- | :------ | :----------------------------------------- | +| item | object | | Consul Server shaped object | + + + +## Slots + + + + +## CSS Parts + + + + +## CSS Properties + + + + +## Contextual Components + + + diff --git a/ui/packages/consul-ui/app/components/consul/server/card/index.hbs b/ui/packages/consul-ui/app/components/consul/server/card/index.hbs new file mode 100644 index 000000000..333ebd667 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/card/index.hbs @@ -0,0 +1,41 @@ +
+
+ +
+
+
+ {{@item.Name}} +
+ +{{#if (not @item.ReadReplica)}} +
+ Status +
+
+ {{if (contains @item.Status (array 'leader' 'voter')) 'Active voter' 'Backup voter'}} +
+{{/if}} + +
+
+ diff --git a/ui/packages/consul-ui/app/components/consul/server/card/index.scss b/ui/packages/consul-ui/app/components/consul/server/card/index.scss new file mode 100644 index 000000000..284860e70 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/card/index.scss @@ -0,0 +1,5 @@ +@import './skin'; +@import './layout'; +.consul-server-card { + @extend %consul-server-card; +} diff --git a/ui/packages/consul-ui/app/components/consul/server/card/layout.scss b/ui/packages/consul-ui/app/components/consul/server/card/layout.scss new file mode 100644 index 000000000..8ca116a8f --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/card/layout.scss @@ -0,0 +1,24 @@ +%consul-server-card { + position: relative; + overflow: hidden; + + @extend %panel; + --padding-x: 24px; + --padding-y: 24px; + padding: var(--padding-y) var(--padding-x); + + --tile-size: 3rem; /* 48px */ +} +%consul-server-card.voting-status-leader .name { + position: absolute !important; +} +%consul-server-card dd:not(:last-of-type) { + margin-bottom: calc(var(--padding-y) / 2); +} +%consul-server-card.voting-status-leader dd { + margin-left: calc(var(--tile-size) + var(--padding-x)); +} + + + + diff --git a/ui/packages/consul-ui/app/components/consul/server/card/skin.scss b/ui/packages/consul-ui/app/components/consul/server/card/skin.scss new file mode 100644 index 000000000..1478ac3fa --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/card/skin.scss @@ -0,0 +1,40 @@ +%consul-server-card dt:not(.name) { + @extend %visually-hidden; +} +%consul-server-card.voting-status-leader .name { + @extend %with-leader-tile; +} +%consul-server-card .name + dd { + @extend %h300; + color: rgb(var(--tone-gray-999)); + animation-name: typo-truncate; +} +%consul-server-card .health-status + dd { + @extend %pill-200; + font-size: var(--typo-size-700); +} +%consul-server-card.voting-status-non-voter .health-status + dd { + background-color: rgb(var(--tone-gray-100)); + color: rgb(var(--tone-gray-600)); +} +%consul-server-card:not(.voting-status-non-voter) .health-status.healthy + dd { + background-color: rgb(var(--tone-green-050)); + color: rgb(var(--tone-green-800)); +} +%consul-server-card:not(.voting-status-non-voter) .health-status:not(.healthy) + dd { + background-color: rgb(var(--tone-red-050)); + color: rgb(var(--tone-red-500)); +} +%consul-server-card .health-status + dd::before { + --icon-size: icon-000; + content: ''; +} +%consul-server-card .health-status.healthy + dd::before { + --icon-name: icon-check; + --icon-color: rgb(var(--tone-green-800)); +} +%consul-server-card .health-status:not(.healthy) + dd::before { + --icon-name: icon-x; + --icon-color: rgb(var(--tone-red-500)); +} + diff --git a/ui/packages/consul-ui/app/components/consul/server/list/index.hbs b/ui/packages/consul-ui/app/components/consul/server/list/index.hbs new file mode 100644 index 000000000..4f9d1b50a --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/list/index.hbs @@ -0,0 +1,17 @@ +
+
    +{{#each @items as |item|}} +
  • + +
  • +{{/each}} +
+
+ diff --git a/ui/packages/consul-ui/app/components/consul/server/list/index.scss b/ui/packages/consul-ui/app/components/consul/server/list/index.scss new file mode 100644 index 000000000..4f8256b91 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/server/list/index.scss @@ -0,0 +1,9 @@ +%consul-server-list ul { + display: grid; + grid-template-columns: repeat(4, minmax(215px, 25%)); + gap: 12px; +} + +.consul-server-list { + @extend %consul-server-list; +} diff --git a/ui/packages/consul-ui/app/components/tile/README.mdx b/ui/packages/consul-ui/app/components/tile/README.mdx new file mode 100644 index 000000000..be1361183 --- /dev/null +++ b/ui/packages/consul-ui/app/components/tile/README.mdx @@ -0,0 +1,20 @@ +# tile + + +```hbs preview-template +
+
+ Leader tile +
+
+ +
+
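+{{!--
+  A minimal usage sketch (assumed markup): an element with the `tile` class
+  picks up the %with-tile / %with-leader-tile placeholders via the CSS below, e.g.
+
+  <div class="tile"></div>
+--}}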
+ +``` + +```css +.tile { + @extend %with-tile, %with-leader-tile; +} +``` diff --git a/ui/packages/consul-ui/app/components/tile/debug.scss b/ui/packages/consul-ui/app/components/tile/debug.scss new file mode 100644 index 000000000..8b3e0797d --- /dev/null +++ b/ui/packages/consul-ui/app/components/tile/debug.scss @@ -0,0 +1,6 @@ +#docfy-demo-preview-tile { + .tile { + @extend %with-tile, %with-leader-tile; + } +} + diff --git a/ui/packages/consul-ui/app/components/tile/index.scss b/ui/packages/consul-ui/app/components/tile/index.scss new file mode 100644 index 000000000..7754657eb --- /dev/null +++ b/ui/packages/consul-ui/app/components/tile/index.scss @@ -0,0 +1,37 @@ + +%with-tile { + position: relative; + width: var(--tile-size, 3rem); + height: var(--tile-size, 3rem); +} +%with-tile::before { + display: block; + content: ''; + width: 100%; + height: 100%; + border-radius: var(--decor-radius-250); + border: var(--decor-border-100); +} +%with-tile::after { + content: ''; + position: absolute; + top: calc(var(--tile-size, 3rem) / 4); + left: calc(var(--tile-size, 3rem) / 4); +} + +%with-leader-tile { + @extend %with-tile; +} +%with-leader-tile::before { + background-image: linear-gradient(135deg, + rgb(var(--strawberry-010)) 0%, + rgb(var(--strawberry-200)) 100% + ); + border-color: rgb(var(--tone-gray-999) / 10%); +} +%with-leader-tile::after { + --icon-name: icon-star-circle; + --icon-size: icon-700; + color: rgb(var(--strawberry-500)); +} + diff --git a/ui/packages/consul-ui/app/styles/base/component/index.scss b/ui/packages/consul-ui/app/styles/base/component/index.scss new file mode 100644 index 000000000..821ce98da --- /dev/null +++ b/ui/packages/consul-ui/app/styles/base/component/index.scss @@ -0,0 +1,6 @@ +/* allows easy application of animation-name based composition */ +*, *::before, *::after { + animation-play-state: paused; + animation-fill-mode: forwards; +} + diff --git a/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss b/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss index 1aff329e2..e97f8df12 100644 --- a/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss +++ b/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss @@ -3,6 +3,7 @@ --decor-radius-000: 0; --decor-radius-100: 2px; --decor-radius-200: 4px; + --decor-radius-250: 6px; --decor-radius-300: 7px; --decor-radius-999: 9999px; --decor-radius-full: 100%; diff --git a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss index fc3360b03..8eb65d292 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss @@ -236,8 +236,8 @@ // @import './change/index.scss'; // @import './change-circle/index.scss'; // @import './change-square/index.scss'; -// @import './check/index.scss'; -// @import './check-circle/index.scss'; +@import './check/index.scss'; +@import './check-circle/index.scss'; @import './check-circle-fill/index.scss'; // @import './check-diamond/index.scss'; // @import './check-diamond-fill/index.scss'; @@ -392,7 +392,7 @@ // @import './identity-user/index.scss'; // @import './image/index.scss'; // @import './inbox/index.scss'; -// @import './info/index.scss'; +@import './info/index.scss'; // @import './jump-link/index.scss'; // @import './key/index.scss'; // @import './key-values/index.scss'; @@ -542,7 +542,7 @@ // @import './square/index.scss'; // @import './square-fill/index.scss'; // 
@import './star/index.scss'; -// @import './star-circle/index.scss'; +@import './star-circle/index.scss'; @import './star-fill/index.scss'; // @import './star-off/index.scss'; // @import './stop-circle/index.scss'; @@ -614,8 +614,8 @@ // @import './wifi/index.scss'; // @import './wifi-off/index.scss'; // @import './wrench/index.scss'; -// @import './x/index.scss'; -// @import './x-circle/index.scss'; +@import './x/index.scss'; +@import './x-circle/index.scss'; // @import './x-circle-fill/index.scss'; // @import './x-diamond/index.scss'; // @import './x-diamond-fill/index.scss'; diff --git a/ui/packages/consul-ui/app/styles/base/index.scss b/ui/packages/consul-ui/app/styles/base/index.scss index 8fed6a285..c352c682e 100644 --- a/ui/packages/consul-ui/app/styles/base/index.scss +++ b/ui/packages/consul-ui/app/styles/base/index.scss @@ -1,3 +1,4 @@ +@import './component/index'; @import './decoration/index'; @import './color/index'; @import './animation/index'; diff --git a/ui/packages/consul-ui/app/styles/base/typography/base-keyframes.scss b/ui/packages/consul-ui/app/styles/base/typography/base-keyframes.scss new file mode 100644 index 000000000..7f29930eb --- /dev/null +++ b/ui/packages/consul-ui/app/styles/base/typography/base-keyframes.scss @@ -0,0 +1,7 @@ +@keyframes typo-truncate { + 100% { + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + } +} diff --git a/ui/packages/consul-ui/app/styles/base/typography/base-variables.scss b/ui/packages/consul-ui/app/styles/base/typography/base-variables.scss index 0c271067e..0d4294737 100644 --- a/ui/packages/consul-ui/app/styles/base/typography/base-variables.scss +++ b/ui/packages/consul-ui/app/styles/base/typography/base-variables.scss @@ -11,6 +11,7 @@ /* also maybe use 100, 200 etc like colors */ --typo-size-100: 3.5rem; --typo-size-200: 1.8rem; + --typo-size-250: 1.750rem; /* 28px */ --typo-size-300: 1.3rem; /* mktg only ^ */ --typo-size-400: 1.2rem; /* $size-large? 
24 */ diff --git a/ui/packages/consul-ui/app/styles/base/typography/index.scss b/ui/packages/consul-ui/app/styles/base/typography/index.scss index 3fe0d277f..72abf8e5a 100644 --- a/ui/packages/consul-ui/app/styles/base/typography/index.scss +++ b/ui/packages/consul-ui/app/styles/base/typography/index.scss @@ -1,2 +1,3 @@ @import './base-variables'; +@import './base-keyframes'; @import './base-placeholders'; diff --git a/ui/packages/consul-ui/app/styles/components.scss b/ui/packages/consul-ui/app/styles/components.scss index 143c305e6..875e3a142 100644 --- a/ui/packages/consul-ui/app/styles/components.scss +++ b/ui/packages/consul-ui/app/styles/components.scss @@ -36,6 +36,7 @@ @import 'consul-ui/components/secret-button'; @import 'consul-ui/components/sliding-toggle'; @import 'consul-ui/components/table'; +@import 'consul-ui/components/tile'; @import 'consul-ui/components/toggle-button'; @import 'consul-ui/components/tabular-collection'; @import 'consul-ui/components/tabular-details'; @@ -91,6 +92,8 @@ @import 'consul-ui/components/consul/intention'; @import 'consul-ui/components/consul/lock-session/list'; @import 'consul-ui/components/consul/lock-session/form'; +@import 'consul-ui/components/consul/server/list'; +@import 'consul-ui/components/consul/server/card'; @import 'consul-ui/components/consul/auth-method'; @import 'consul-ui/components/role-selector'; diff --git a/ui/packages/consul-ui/app/styles/debug.scss b/ui/packages/consul-ui/app/styles/debug.scss index f99e263a2..8209e6ac4 100644 --- a/ui/packages/consul-ui/app/styles/debug.scss +++ b/ui/packages/consul-ui/app/styles/debug.scss @@ -4,6 +4,7 @@ @import 'consul-ui/components/main-nav-vertical/debug'; @import 'consul-ui/components/badge/debug'; @import 'consul-ui/components/panel/debug'; +@import 'consul-ui/components/tile/debug'; @import 'consul-ui/components/shadow-template/debug'; @import 'consul-ui/components/csv-list/debug'; @import 'consul-ui/components/horizontal-kv-list/debug'; From 467f771d74b06e76ea5b78fc1212b9152df9a323 Mon Sep 17 00:00:00 2001 From: Eric Date: Wed, 23 Mar 2022 09:25:56 -0400 Subject: [PATCH 008/785] remove gogo pbconnect, pbconfig and pbautoconf --- build-support/scripts/proto-gen-entry.sh | 9 + proto/pbautoconf/auto_config.pb.go | 869 +------ proto/pbcommon/common.gen.go | 14 + proto/pbcommon/common.pb.go | 7 + proto/pbcommon/common.proto | 7 + proto/pbconfig/config.pb.go | 2836 +--------------------- proto/pbconnect/connect.gen.go | 20 +- proto/pbconnect/connect.go | 32 +- proto/pbconnect/connect.pb.go | 1971 +-------------- proto/pbconnect/connect.proto | 22 +- 10 files changed, 288 insertions(+), 5499 deletions(-) diff --git a/build-support/scripts/proto-gen-entry.sh b/build-support/scripts/proto-gen-entry.sh index c02df1b5a..4deb2bf29 100644 --- a/build-support/scripts/proto-gen-entry.sh +++ b/build-support/scripts/proto-gen-entry.sh @@ -5,6 +5,15 @@ echo $PWD if [[ "$FILENAME" =~ .*pbcommon/.* ]]; then echo "$FILENAME no gogo" ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 +elif [[ "$FILENAME" =~ .*pbconnect/.* ]]; then + echo "$FILENAME no gogo" + ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 +elif [[ "$FILENAME" =~ .*pbconfig/.* ]]; then + echo "$FILENAME no gogo" + ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 +elif [[ "$FILENAME" =~ .*pbautoconf/.* ]]; then + echo "$FILENAME no gogo" + ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 else echo "$FILENAME gogo" ./build-support/scripts/proto-gen.sh $1 $2 $3 diff --git a/proto/pbautoconf/auto_config.pb.go 
b/proto/pbautoconf/auto_config.pb.go index 8a4c25b5d..0f64d9688 100644 --- a/proto/pbautoconf/auto_config.pb.go +++ b/proto/pbautoconf/auto_config.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbautoconf/auto_config.proto package pbautoconf @@ -8,9 +8,7 @@ import ( proto "github.com/golang/protobuf/proto" pbconfig "github.com/hashicorp/consul/proto/pbconfig" pbconnect "github.com/hashicorp/consul/proto/pbconnect" - io "io" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -58,26 +56,18 @@ func (*AutoConfigRequest) ProtoMessage() {} func (*AutoConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ccc5af992e5daf69, []int{0} } + func (m *AutoConfigRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_AutoConfigRequest.Unmarshal(m, b) } func (m *AutoConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AutoConfigRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_AutoConfigRequest.Marshal(b, m, deterministic) } func (m *AutoConfigRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_AutoConfigRequest.Merge(m, src) } func (m *AutoConfigRequest) XXX_Size() int { - return m.Size() + return xxx_messageInfo_AutoConfigRequest.Size(m) } func (m *AutoConfigRequest) XXX_DiscardUnknown() { xxx_messageInfo_AutoConfigRequest.DiscardUnknown(m) @@ -156,26 +146,18 @@ func (*AutoConfigResponse) ProtoMessage() {} func (*AutoConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ccc5af992e5daf69, []int{1} } + func (m *AutoConfigResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_AutoConfigResponse.Unmarshal(m, b) } func (m *AutoConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AutoConfigResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_AutoConfigResponse.Marshal(b, m, deterministic) } func (m *AutoConfigResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_AutoConfigResponse.Merge(m, src) } func (m *AutoConfigResponse) XXX_Size() int { - return m.Size() + return xxx_messageInfo_AutoConfigResponse.Size(m) } func (m *AutoConfigResponse) XXX_DiscardUnknown() { xxx_messageInfo_AutoConfigResponse.DiscardUnknown(m) @@ -221,814 +203,27 @@ func init() { } var fileDescriptor_ccc5af992e5daf69 = []byte{ - // 361 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcf, 0x4a, 0xc3, 0x40, - 0x10, 0xc6, 0x8d, 0xad, 0xfd, 0xb3, 0x01, 0xa9, 0xdb, 0xcb, 0x52, 0x35, 0x94, 0x1e, 0xa4, 0x88, - 0x24, 0x52, 0xf1, 0x2a, 0xd4, 0xe8, 0x41, 0x0f, 0x22, 0xdb, 0x82, 0xe0, 0x45, 0xd2, 0x74, 0xdb, - 0x06, 0xdb, 0x9d, 0x98, 0x9d, 0x80, 0x8f, 0xe2, 0x0b, 0x09, 0x1e, 0xf5, 0x0d, 0xa4, 0xbe, 0x88, - 0x64, 0x93, 0xe8, 0x22, 0x9e, 0xf2, 0xe5, 0xf7, 0xfb, 0xe6, 0x30, 0x93, 0x90, 0x5e, 0x9c, 0x00, - 0x82, 0x17, 0x4f, 0x82, 0x14, 0x21, 0x04, 0x39, 0xf3, 0xb2, 0xf0, 0x90, 0xa5, 0x68, 0xee, 0x6a, - 0x49, 0x1b, 0xa5, 0xeb, 0xec, 0x96, 0xed, 0xdc, 0x7b, 0x66, 0xad, 0xb3, 0x6f, 0x48, 0x29, 0x42, - 0xf4, 0x8a, 0x67, 
0xae, 0x7b, 0xaf, 0x16, 0xd9, 0x19, 0xa6, 0x08, 0xbe, 0x9e, 0xe1, 0xe2, 0x29, - 0x15, 0x0a, 0xa9, 0x43, 0xc8, 0x45, 0x80, 0x41, 0x28, 0x24, 0x8a, 0x84, 0x59, 0x5d, 0xab, 0xdf, - 0xe4, 0x06, 0xa1, 0x94, 0x54, 0x6f, 0x60, 0x2a, 0xd8, 0xa6, 0x36, 0x3a, 0x53, 0x46, 0xea, 0x23, - 0x31, 0x5f, 0x09, 0x89, 0xac, 0xaa, 0x71, 0xf9, 0x4a, 0xf7, 0x48, 0xf3, 0x36, 0x48, 0x30, 0xc2, - 0x08, 0x24, 0x6b, 0x68, 0xf7, 0x0b, 0x68, 0x8b, 0x54, 0xae, 0xef, 0xc6, 0x6c, 0x4b, 0xf3, 0x2c, - 0xd2, 0x2e, 0xb1, 0x7d, 0x90, 0x2a, 0x5d, 0x8e, 0xe1, 0x51, 0x48, 0x56, 0xd3, 0xc6, 0x44, 0xd9, - 0x8c, 0x3f, 0xe2, 0xac, 0x9e, 0xcf, 0xf8, 0x23, 0xde, 0xfb, 0xb0, 0x08, 0x35, 0xf7, 0x50, 0x31, - 0x48, 0x25, 0xe8, 0x01, 0xa9, 0xe5, 0x44, 0x2f, 0x61, 0x0f, 0xb6, 0xdd, 0xe2, 0x38, 0x45, 0xaf, - 0xb0, 0xf4, 0x90, 0xd4, 0xfd, 0x21, 0x07, 0x40, 0xa5, 0x77, 0xb2, 0x07, 0x2d, 0xb7, 0xbc, 0x53, - 0xc1, 0x79, 0x59, 0xa0, 0xa7, 0xc4, 0xf6, 0x45, 0x82, 0xd1, 0x2c, 0x0a, 0x03, 0x14, 0xac, 0xa2, - 0xfb, 0xed, 0x9f, 0xfe, 0x95, 0x52, 0xa9, 0x98, 0x66, 0x0d, 0x6e, 0xf6, 0xe8, 0x31, 0x69, 0x5f, - 0x3e, 0x63, 0x12, 0xf8, 0x43, 0x83, 0x2a, 0x56, 0xed, 0x56, 0xfa, 0x4d, 0xfe, 0x9f, 0x3a, 0x3f, - 0x7b, 0x5b, 0x3b, 0xd6, 0xfb, 0xda, 0xb1, 0x3e, 0xd7, 0x8e, 0xf5, 0xf2, 0xe5, 0x6c, 0xdc, 0x1f, - 0xcd, 0x23, 0x5c, 0xa4, 0x13, 0x37, 0x84, 0x95, 0xb7, 0x08, 0xd4, 0x22, 0x0a, 0x21, 0x89, 0xb3, - 0x2f, 0xaa, 0xd2, 0xa5, 0xf7, 0xf7, 0x9f, 0x99, 0xd4, 0x34, 0x39, 0xf9, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x65, 0x57, 0x2e, 0x30, 0x4e, 0x02, 0x00, 0x00, + // 340 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcd, 0x4a, 0xc3, 0x40, + 0x10, 0xc7, 0x89, 0xad, 0xfd, 0x98, 0x80, 0xd4, 0xed, 0x65, 0xa9, 0x1f, 0x94, 0x1e, 0xa4, 0x88, + 0x24, 0x52, 0xf1, 0x01, 0x6a, 0xf4, 0xa0, 0x07, 0x91, 0x6d, 0x41, 0xf0, 0x22, 0x69, 0xba, 0x6d, + 0x83, 0xed, 0x4e, 0xcc, 0xce, 0x82, 0x2f, 0xe8, 0x03, 0xf8, 0x46, 0x92, 0x4d, 0xa2, 0x8b, 0x78, + 0xca, 0x3f, 0xbf, 0xdf, 0x7f, 0x0e, 0x33, 0x09, 0x8c, 0xb2, 0x1c, 0x09, 0xc3, 0x6c, 0x11, 0x1b, + 0xc2, 0x04, 0xd5, 0x2a, 0x2c, 0xc2, 0x6b, 0x91, 0xd2, 0x75, 0x60, 0x25, 0xeb, 0xd4, 0x6e, 0x70, + 0x54, 0xb7, 0x4b, 0x1f, 0xba, 0xb5, 0xc1, 0x89, 0x23, 0x95, 0x4c, 0x28, 0xac, 0x9e, 0xa5, 0x1e, + 0x7d, 0x7a, 0x70, 0x38, 0x35, 0x84, 0x91, 0x9d, 0x11, 0xf2, 0xdd, 0x48, 0x4d, 0xec, 0x14, 0xe0, + 0x36, 0xa6, 0x38, 0x91, 0x8a, 0x64, 0xce, 0xbd, 0xa1, 0x37, 0xee, 0x0a, 0x87, 0x30, 0x06, 0xcd, + 0x47, 0x5c, 0x4a, 0xbe, 0x67, 0x8d, 0xcd, 0x8c, 0x43, 0x7b, 0x26, 0xd7, 0x3b, 0xa9, 0x88, 0x37, + 0x2d, 0xae, 0x5f, 0xd9, 0x31, 0x74, 0x9f, 0xe2, 0x9c, 0x52, 0x4a, 0x51, 0xf1, 0x8e, 0x75, 0xbf, + 0x80, 0xf5, 0xa0, 0xf1, 0xf0, 0x3c, 0xe7, 0xfb, 0x96, 0x17, 0x91, 0x0d, 0xc1, 0x8f, 0x50, 0x69, + 0xb3, 0x9d, 0xe3, 0x9b, 0x54, 0xbc, 0x65, 0x8d, 0x8b, 0x8a, 0x99, 0x68, 0x26, 0x78, 0xbb, 0x9c, + 0x89, 0x66, 0x62, 0xf4, 0xe5, 0x01, 0x73, 0xf7, 0xd0, 0x19, 0x2a, 0x2d, 0xd9, 0x19, 0xb4, 0x4a, + 0x62, 0x97, 0xf0, 0x27, 0x07, 0x41, 0x75, 0x9c, 0xaa, 0x57, 0x59, 0x76, 0x0e, 0xed, 0x68, 0x2a, + 0x10, 0x49, 0xdb, 0x9d, 0xfc, 0x49, 0x2f, 0xa8, 0xef, 0x54, 0x71, 0x51, 0x17, 0xd8, 0x35, 0xf8, + 0x91, 0xcc, 0x29, 0x5d, 0xa5, 0x49, 0x4c, 0x92, 0x37, 0x6c, 0xbf, 0xff, 0xd3, 0xbf, 0xd7, 0xda, + 0xc8, 0x65, 0xd1, 0x10, 0x6e, 0x8f, 0x5d, 0x42, 0xff, 0xee, 0x83, 0xf2, 0x38, 0x9a, 0x3a, 0x54, + 0xf3, 0xe6, 0xb0, 0x31, 0xee, 0x8a, 0xff, 0xd4, 0x4d, 0xf0, 0x72, 0xb1, 0x4e, 0x69, 0x63, 0x16, + 0x41, 0x82, 0xbb, 0x70, 0x13, 0xeb, 0x4d, 0x9a, 0x60, 0x9e, 0x15, 0x5f, 0x50, 0x9b, 0x6d, 0xf8, + 0xf7, 0x1f, 0x59, 0xb4, 0x2c, 0xb9, 
0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x29, 0xae, 0x66, 0x30, + 0x3e, 0x02, 0x00, 0x00, } - -func (m *AutoConfigRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AutoConfigRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AutoConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Partition) > 0 { - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.Partition))) - i-- - dAtA[i] = 0x42 - } - if len(m.CSR) > 0 { - i -= len(m.CSR) - copy(dAtA[i:], m.CSR) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.CSR))) - i-- - dAtA[i] = 0x3a - } - if len(m.ConsulToken) > 0 { - i -= len(m.ConsulToken) - copy(dAtA[i:], m.ConsulToken) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.ConsulToken))) - i-- - dAtA[i] = 0x32 - } - if len(m.JWT) > 0 { - i -= len(m.JWT) - copy(dAtA[i:], m.JWT) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.JWT))) - i-- - dAtA[i] = 0x2a - } - if len(m.Segment) > 0 { - i -= len(m.Segment) - copy(dAtA[i:], m.Segment) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.Segment))) - i-- - dAtA[i] = 0x22 - } - if len(m.Node) > 0 { - i -= len(m.Node) - copy(dAtA[i:], m.Node) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.Node))) - i-- - dAtA[i] = 0x12 - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AutoConfigResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AutoConfigResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AutoConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExtraCACertificates) > 0 { - for iNdEx := len(m.ExtraCACertificates) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExtraCACertificates[iNdEx]) - copy(dAtA[i:], m.ExtraCACertificates[iNdEx]) - i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.ExtraCACertificates[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.Certificate != nil { - { - size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAutoConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.CARoots != nil { - { - size, err := m.CARoots.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAutoConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Config != nil { - { - size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAutoConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintAutoConfig(dAtA []byte, offset int, v uint64) int { - offset -= sovAutoConfig(v) - 
base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AutoConfigRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - l = len(m.Node) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - l = len(m.Segment) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - l = len(m.JWT) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - l = len(m.ConsulToken) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - l = len(m.CSR) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - l = len(m.Partition) - if l > 0 { - n += 1 + l + sovAutoConfig(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AutoConfigResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovAutoConfig(uint64(l)) - } - if m.CARoots != nil { - l = m.CARoots.Size() - n += 1 + l + sovAutoConfig(uint64(l)) - } - if m.Certificate != nil { - l = m.Certificate.Size() - n += 1 + l + sovAutoConfig(uint64(l)) - } - if len(m.ExtraCACertificates) > 0 { - for _, s := range m.ExtraCACertificates { - l = len(s) - n += 1 + l + sovAutoConfig(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovAutoConfig(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAutoConfig(x uint64) (n int) { - return sovAutoConfig(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AutoConfigRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AutoConfigRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AutoConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := 
iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Node = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Segment", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Segment = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JWT", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JWT = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsulToken", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsulToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CSR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Partition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAutoConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAutoConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AutoConfigResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AutoConfigResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AutoConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Config == nil { - m.Config = &pbconfig.Config{} - } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CARoots", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CARoots == nil { - m.CARoots = &pbconnect.CARoots{} - } - if err := m.CARoots.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Certificate == nil { - m.Certificate = &pbconnect.IssuedCert{} - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ExtraCACertificates", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutoConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutoConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExtraCACertificates = append(m.ExtraCACertificates, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAutoConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAutoConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAutoConfig(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAutoConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAutoConfig - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAutoConfig - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAutoConfig - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAutoConfig = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAutoConfig = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAutoConfig = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbcommon/common.gen.go b/proto/pbcommon/common.gen.go index 14aa7128b..867e8089c 100644 --- a/proto/pbcommon/common.gen.go +++ b/proto/pbcommon/common.gen.go @@ -54,3 +54,17 @@ func QueryOptionsFromStructs(t *structs.QueryOptions, s *QueryOptions) { s.MustRevalidate = t.MustRevalidate s.Filter = t.Filter } +func RaftIndexToStructs(s *RaftIndex, t *structs.RaftIndex) { + if s == nil { + return + } + t.CreateIndex = s.CreateIndex + t.ModifyIndex = s.ModifyIndex +} +func RaftIndexFromStructs(t *structs.RaftIndex, s *RaftIndex) { + if s == nil { + return + } + s.CreateIndex = t.CreateIndex + s.ModifyIndex = t.ModifyIndex +} diff --git a/proto/pbcommon/common.pb.go b/proto/pbcommon/common.pb.go index 21f3b1dee..88a2d55b6 100644 --- a/proto/pbcommon/common.pb.go +++ 
b/proto/pbcommon/common.pb.go @@ -23,6 +23,13 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // RaftIndex is used to track the index used while creating // or modifying a given struct type. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.RaftIndex +// output=common.gen.go +// name=Structs +// ignore-fields=state,sizeCache,unknownFields type RaftIndex struct { // @gotags: bexpr:"-" CreateIndex uint64 `protobuf:"varint,1,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"` diff --git a/proto/pbcommon/common.proto b/proto/pbcommon/common.proto index b38e77fda..19efd232b 100644 --- a/proto/pbcommon/common.proto +++ b/proto/pbcommon/common.proto @@ -9,6 +9,13 @@ import "google/protobuf/duration.proto"; // RaftIndex is used to track the index used while creating // or modifying a given struct type. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.RaftIndex +// output=common.gen.go +// name=Structs +// ignore-fields=state,sizeCache,unknownFields message RaftIndex { // @gotags: bexpr:"-" uint64 CreateIndex = 1; diff --git a/proto/pbconfig/config.pb.go b/proto/pbconfig/config.pb.go index 5828b88ac..a3dfa4c81 100644 --- a/proto/pbconfig/config.pb.go +++ b/proto/pbconfig/config.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbconfig/config.proto package pbconfig @@ -6,9 +6,7 @@ package pbconfig import ( fmt "fmt" proto "github.com/golang/protobuf/proto" - io "io" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -43,26 +41,18 @@ func (*Config) ProtoMessage() {} func (*Config) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{0} } + func (m *Config) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Config.Unmarshal(m, b) } func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Config.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Config.Marshal(b, m, deterministic) } func (m *Config) XXX_Merge(src proto.Message) { xxx_messageInfo_Config.Merge(m, src) } func (m *Config) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Config.Size(m) } func (m *Config) XXX_DiscardUnknown() { xxx_messageInfo_Config.DiscardUnknown(m) @@ -147,26 +137,18 @@ func (*Gossip) ProtoMessage() {} func (*Gossip) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{1} } + func (m *Gossip) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Gossip.Unmarshal(m, b) } func (m *Gossip) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Gossip.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Gossip.Marshal(b, m, deterministic) } func (m *Gossip) XXX_Merge(src proto.Message) { xxx_messageInfo_Gossip.Merge(m, src) } func (m *Gossip) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Gossip.Size(m) } func (m *Gossip) XXX_DiscardUnknown() { xxx_messageInfo_Gossip.DiscardUnknown(m) @@ -203,26 +185,18 @@ func (*GossipEncryption) ProtoMessage() {} func 
(*GossipEncryption) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{2} } + func (m *GossipEncryption) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_GossipEncryption.Unmarshal(m, b) } func (m *GossipEncryption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GossipEncryption.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_GossipEncryption.Marshal(b, m, deterministic) } func (m *GossipEncryption) XXX_Merge(src proto.Message) { xxx_messageInfo_GossipEncryption.Merge(m, src) } func (m *GossipEncryption) XXX_Size() int { - return m.Size() + return xxx_messageInfo_GossipEncryption.Size(m) } func (m *GossipEncryption) XXX_DiscardUnknown() { xxx_messageInfo_GossipEncryption.DiscardUnknown(m) @@ -270,26 +244,18 @@ func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{3} } + func (m *TLS) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_TLS.Unmarshal(m, b) } func (m *TLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TLS.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_TLS.Marshal(b, m, deterministic) } func (m *TLS) XXX_Merge(src proto.Message) { xxx_messageInfo_TLS.Merge(m, src) } func (m *TLS) XXX_Size() int { - return m.Size() + return xxx_messageInfo_TLS.Size(m) } func (m *TLS) XXX_DiscardUnknown() { xxx_messageInfo_TLS.DiscardUnknown(m) @@ -358,26 +324,18 @@ func (*ACL) ProtoMessage() {} func (*ACL) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{4} } + func (m *ACL) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ACL.Unmarshal(m, b) } func (m *ACL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ACL.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ACL.Marshal(b, m, deterministic) } func (m *ACL) XXX_Merge(src proto.Message) { xxx_messageInfo_ACL.Merge(m, src) } func (m *ACL) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ACL.Size(m) } func (m *ACL) XXX_DiscardUnknown() { xxx_messageInfo_ACL.DiscardUnknown(m) @@ -481,26 +439,18 @@ func (*ACLTokens) ProtoMessage() {} func (*ACLTokens) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{5} } + func (m *ACLTokens) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ACLTokens.Unmarshal(m, b) } func (m *ACLTokens) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ACLTokens.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ACLTokens.Marshal(b, m, deterministic) } func (m *ACLTokens) XXX_Merge(src proto.Message) { xxx_messageInfo_ACLTokens.Merge(m, src) } func (m *ACLTokens) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ACLTokens.Size(m) } func (m *ACLTokens) XXX_DiscardUnknown() { 
xxx_messageInfo_ACLTokens.DiscardUnknown(m) @@ -564,26 +514,18 @@ func (*ACLServiceProviderToken) ProtoMessage() {} func (*ACLServiceProviderToken) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{6} } + func (m *ACLServiceProviderToken) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ACLServiceProviderToken.Unmarshal(m, b) } func (m *ACLServiceProviderToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ACLServiceProviderToken.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ACLServiceProviderToken.Marshal(b, m, deterministic) } func (m *ACLServiceProviderToken) XXX_Merge(src proto.Message) { xxx_messageInfo_ACLServiceProviderToken.Merge(m, src) } func (m *ACLServiceProviderToken) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ACLServiceProviderToken.Size(m) } func (m *ACLServiceProviderToken) XXX_DiscardUnknown() { xxx_messageInfo_ACLServiceProviderToken.DiscardUnknown(m) @@ -621,26 +563,18 @@ func (*AutoEncrypt) ProtoMessage() {} func (*AutoEncrypt) Descriptor() ([]byte, []int) { return fileDescriptor_aefa824db7b74d77, []int{7} } + func (m *AutoEncrypt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_AutoEncrypt.Unmarshal(m, b) } func (m *AutoEncrypt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AutoEncrypt.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_AutoEncrypt.Marshal(b, m, deterministic) } func (m *AutoEncrypt) XXX_Merge(src proto.Message) { xxx_messageInfo_AutoEncrypt.Merge(m, src) } func (m *AutoEncrypt) XXX_Size() int { - return m.Size() + return xxx_messageInfo_AutoEncrypt.Size(m) } func (m *AutoEncrypt) XXX_DiscardUnknown() { xxx_messageInfo_AutoEncrypt.DiscardUnknown(m) @@ -687,2655 +621,61 @@ func init() { proto.RegisterType((*AutoEncrypt)(nil), "config.AutoEncrypt") } -func init() { proto.RegisterFile("proto/pbconfig/config.proto", fileDescriptor_aefa824db7b74d77) } +func init() { + proto.RegisterFile("proto/pbconfig/config.proto", fileDescriptor_aefa824db7b74d77) +} var fileDescriptor_aefa824db7b74d77 = []byte{ - // 831 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdd, 0x8e, 0xdb, 0x44, - 0x14, 0xc6, 0x71, 0xeb, 0xdd, 0x4c, 0xa0, 0x6a, 0xa7, 0x65, 0xb1, 0xf8, 0x09, 0x91, 0x41, 0xd5, - 0x82, 0xd0, 0x2e, 0x5a, 0x04, 0x02, 0x89, 0x9b, 0xec, 0xa6, 0x82, 0xd0, 0x6c, 0x88, 0xec, 0x50, - 0x24, 0x6e, 0x90, 0xe3, 0x9c, 0x24, 0x23, 0x9c, 0x19, 0x6b, 0x3c, 0xd9, 0xca, 0xaf, 0xc0, 0x13, - 0xf0, 0x2e, 0xbc, 0x00, 0x77, 0xf0, 0x08, 0x68, 0x79, 0x0b, 0xae, 0xaa, 0x33, 0x33, 0xfe, 0xdb, - 0x26, 0x57, 0xc9, 0xf9, 0xbe, 0x6f, 0xce, 0x9c, 0x33, 0xe7, 0xc7, 0xe4, 0xbd, 0x4c, 0x0a, 0x25, - 0xce, 0xb3, 0x45, 0x22, 0xf8, 0x8a, 0xad, 0xcf, 0xcd, 0xcf, 0x99, 0x46, 0xa9, 0x67, 0xac, 0xe0, - 0xef, 0x0e, 0xf1, 0xae, 0xf4, 0x5f, 0xda, 0x27, 0x64, 0x14, 0xab, 0x38, 0x01, 0xae, 0x40, 0xfa, - 0xce, 0xc0, 0x39, 0xed, 0x86, 0x0d, 0x84, 0x7e, 0x46, 0x1e, 0xcd, 0x24, 0xdb, 0xc6, 0xb2, 0x68, - 0xc8, 0x3a, 0x5a, 0xf6, 0x3a, 0x41, 0xdf, 0x25, 0xc7, 0x53, 0xb1, 0x84, 0x69, 0xbc, 0x05, 0xdf, - 0xd5, 0xa2, 0xca, 0xa6, 0x03, 0xd2, 0x8b, 0x60, 0xbd, 0x05, 
0xae, 0x34, 0x7d, 0x4f, 0xd3, 0x4d, - 0x88, 0xbe, 0x4f, 0xba, 0xb3, 0x58, 0x2a, 0xa6, 0x98, 0xe0, 0x7e, 0x57, 0xf3, 0x35, 0x40, 0x3f, - 0x20, 0xee, 0xf0, 0x6a, 0xe2, 0xdf, 0x1f, 0x38, 0xa7, 0xbd, 0x8b, 0xde, 0x99, 0x4d, 0x6c, 0x78, - 0x35, 0x09, 0x11, 0xa7, 0x5f, 0x92, 0xde, 0x70, 0xa7, 0xc4, 0x33, 0x9e, 0xc8, 0x22, 0x53, 0xbe, - 0xa7, 0x65, 0x8f, 0x2b, 0x59, 0x4d, 0x85, 0x4d, 0x1d, 0x7d, 0x4a, 0xbc, 0xef, 0x44, 0x9e, 0xb3, - 0xcc, 0x3f, 0xd2, 0x27, 0x1e, 0x94, 0x27, 0x0c, 0x1a, 0x5a, 0x16, 0x6f, 0x9f, 0x4f, 0x22, 0xff, - 0xb8, 0x7d, 0xfb, 0x7c, 0x12, 0x85, 0x88, 0x07, 0xab, 0xd2, 0x0d, 0xfd, 0x9a, 0x10, 0xeb, 0x1b, - 0xb3, 0x70, 0xb4, 0xde, 0x6f, 0x3b, 0xad, 0xf9, 0xb0, 0xa1, 0xa5, 0x01, 0x79, 0x33, 0x04, 0x25, - 0x8b, 0x1f, 0x04, 0xe3, 0x93, 0xe1, 0xd4, 0xef, 0x0c, 0xdc, 0xd3, 0x6e, 0xd8, 0xc2, 0x02, 0x45, - 0x1e, 0xde, 0xf5, 0x41, 0x1f, 0x12, 0xf7, 0x39, 0x14, 0xb6, 0x76, 0xf8, 0x97, 0x3e, 0x25, 0x0f, - 0x5e, 0x80, 0x64, 0xab, 0x62, 0xcc, 0x13, 0xb1, 0x65, 0x7c, 0xad, 0x2b, 0x76, 0x1c, 0xde, 0x41, - 0x6b, 0xdd, 0x8f, 0x3b, 0xb5, 0x16, 0xa8, 0x73, 0x9b, 0xba, 0x12, 0x0d, 0xfe, 0x77, 0x74, 0xf6, - 0x7b, 0xf4, 0xce, 0x3e, 0x3d, 0xbd, 0x20, 0x4f, 0x0c, 0x12, 0x81, 0xbc, 0x01, 0xf9, 0xbd, 0xc8, - 0x15, 0xc7, 0x9a, 0x9b, 0x28, 0xf6, 0x72, 0x98, 0xfd, 0x15, 0xcb, 0x36, 0x20, 0xa3, 0x1d, 0x53, - 0x90, 0xdb, 0xf6, 0x69, 0x61, 0xd8, 0xac, 0xd7, 0x8c, 0xbf, 0x00, 0x99, 0xe3, 0xdb, 0x9a, 0x0e, - 0x6a, 0x20, 0x34, 0x22, 0x1f, 0x8d, 0x20, 0x93, 0x90, 0xc4, 0x0a, 0x96, 0xbf, 0xce, 0x24, 0xac, - 0x40, 0x9a, 0x6b, 0x5a, 0xae, 0xb1, 0x85, 0x8e, 0x2f, 0x3b, 0xbe, 0x13, 0x06, 0xb5, 0xfc, 0x90, - 0x3a, 0xf8, 0xd3, 0xd5, 0x8d, 0x47, 0x7d, 0x72, 0xf4, 0x8c, 0xc7, 0x8b, 0x14, 0x96, 0x36, 0xeb, - 0xd2, 0xd4, 0x7d, 0x2b, 0x52, 0x96, 0x14, 0xf3, 0xf9, 0xc4, 0xce, 0x46, 0x0d, 0xe0, 0xb9, 0x50, - 0xa4, 0x80, 0x9c, 0xc9, 0xa9, 0x34, 0x71, 0x5a, 0xe6, 0xe2, 0x37, 0xe0, 0x48, 0x99, 0x64, 0x2a, - 0x5b, 0xcf, 0xa5, 0x78, 0xc9, 0x8d, 0x1b, 0x1d, 0x31, 0xce, 0x65, 0x85, 0xd0, 0x8f, 0xc9, 0x5b, - 0x23, 0x58, 0xc5, 0xbb, 0x54, 0x59, 0x89, 0xa7, 0x25, 0x6d, 0x90, 0x7e, 0x4e, 0x1e, 0x9b, 0x20, - 0x9f, 0x43, 0x31, 0x61, 0x79, 0xa9, 0x3d, 0xd2, 0xf1, 0xef, 0xa3, 0xe8, 0x27, 0xc4, 0xd3, 0x31, - 0xe4, 0xb6, 0xd5, 0x1f, 0x35, 0x06, 0xcd, 0x10, 0xa1, 0x15, 0xd0, 0x6f, 0xc8, 0x49, 0xe3, 0xb5, - 0x47, 0x2c, 0xd7, 0xaf, 0x81, 0xc9, 0xe8, 0xd9, 0xd5, 0x0f, 0xfc, 0x76, 0xad, 0x68, 0x08, 0xe8, - 0x57, 0xe4, 0xc4, 0x5c, 0xae, 0x5d, 0xcd, 0xb0, 0x7c, 0xb9, 0x02, 0x9e, 0x80, 0x4f, 0x74, 0x68, - 0x07, 0x58, 0xcc, 0xe7, 0x3a, 0x9a, 0x59, 0x4f, 0x97, 0x42, 0xa8, 0x5c, 0xc9, 0x38, 0xf3, 0x7b, - 0x26, 0x9f, 0x3d, 0x54, 0xf0, 0x7b, 0x87, 0x74, 0xab, 0xd0, 0x71, 0x9b, 0x8d, 0x39, 0x53, 0x2c, - 0x4e, 0xaf, 0x63, 0x1e, 0xaf, 0x01, 0x57, 0x8f, 0x1d, 0x9c, 0xd7, 0x09, 0xdc, 0x58, 0x21, 0x64, - 0x29, 0x4b, 0x62, 0x3d, 0xcb, 0xa6, 0xb2, 0x4d, 0x08, 0xab, 0x30, 0x5c, 0x03, 0x57, 0x21, 0x24, - 0xe2, 0x06, 0x64, 0x61, 0x2b, 0xdc, 0x06, 0xb1, 0x03, 0x6c, 0x59, 0x6c, 0x99, 0x4b, 0x93, 0x3e, - 0x21, 0xf7, 0xb5, 0xd4, 0x16, 0xd8, 0x18, 0xf4, 0x67, 0x72, 0x62, 0xa2, 0x58, 0x62, 0x3b, 0xb2, - 0x04, 0x66, 0x52, 0xdc, 0xb0, 0x25, 0x48, 0xdf, 0x1b, 0xb8, 0xa7, 0xbd, 0x8b, 0x0f, 0x1b, 0x35, - 0xb9, 0xa3, 0xd0, 0x79, 0x86, 0x07, 0x8e, 0x07, 0x3f, 0x91, 0x77, 0x0e, 0x1c, 0xc1, 0x7e, 0x1b, - 0x26, 0x09, 0xe4, 0xb9, 0x90, 0xe3, 0x51, 0xf9, 0x1d, 0xa8, 0x11, 0xec, 0xd5, 0x08, 0x12, 0x09, - 0x6a, 0x3c, 0xb2, 0x0f, 0x51, 0xd9, 0x01, 0x6b, 0xad, 0x5e, 0xdc, 0x47, 0xb8, 0x2a, 0xcd, 0x90, - 0xe8, 0xbd, 0x71, 0x42, 0xbc, 0xd1, 0x34, 0x8a, 0xaa, 0x9d, 0x66, 0x2d, 0x4c, 0x7f, 
0x3c, 0x43, - 0xd8, 0xd5, 0xb0, 0x31, 0xf0, 0xaa, 0x61, 0x9a, 0x8a, 0x97, 0xe8, 0xe4, 0x9e, 0x76, 0x52, 0xd9, - 0x97, 0xdf, 0xfe, 0x75, 0xdb, 0x77, 0xfe, 0xb9, 0xed, 0x3b, 0xff, 0xde, 0xf6, 0x9d, 0x3f, 0xfe, - 0xeb, 0xbf, 0xf1, 0xcb, 0xa7, 0x6b, 0xa6, 0x36, 0xbb, 0xc5, 0x59, 0x22, 0xb6, 0xe7, 0x9b, 0x38, - 0xdf, 0xb0, 0x44, 0xc8, 0x0c, 0x3f, 0x7b, 0xf9, 0x2e, 0x3d, 0x6f, 0x7f, 0x0c, 0x17, 0x9e, 0xb6, - 0xbf, 0x78, 0x15, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xaf, 0xda, 0x68, 0x25, 0x07, 0x00, 0x00, + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdd, 0x8e, 0x22, 0x45, + 0x14, 0x0e, 0xf4, 0x6e, 0x0f, 0x14, 0xba, 0xd9, 0xad, 0x5d, 0xb1, 0xe3, 0x2f, 0x69, 0xcd, 0x06, + 0xcd, 0x66, 0x30, 0x63, 0x34, 0x7a, 0xc9, 0xc0, 0x46, 0x71, 0x19, 0x24, 0xdd, 0x38, 0x26, 0xde, + 0x98, 0xa6, 0x39, 0x40, 0xc5, 0xa6, 0xaa, 0x53, 0x5d, 0xcc, 0xa4, 0x5f, 0xc1, 0x57, 0xf2, 0x21, + 0x7c, 0x1f, 0xaf, 0xcc, 0xa9, 0xaa, 0xfe, 0x9b, 0x81, 0x2b, 0x38, 0xdf, 0xf7, 0xd5, 0xa9, 0x73, + 0xea, 0xfc, 0x34, 0xf9, 0x38, 0x95, 0x42, 0x89, 0x51, 0xba, 0x8e, 0x05, 0xdf, 0xb2, 0xdd, 0xc8, + 0xfc, 0x5c, 0x6a, 0x94, 0xba, 0xc6, 0xf2, 0xff, 0x6d, 0x13, 0x77, 0xa2, 0xff, 0xd2, 0xcf, 0x08, + 0x99, 0x46, 0x2a, 0x8a, 0x81, 0x2b, 0x90, 0x5e, 0x6b, 0xd0, 0x1a, 0x76, 0x83, 0x1a, 0x42, 0xdf, + 0x90, 0x17, 0x4b, 0xc9, 0x0e, 0x91, 0xcc, 0x6b, 0xb2, 0xb6, 0x96, 0x3d, 0x26, 0xe8, 0x47, 0xa4, + 0xb3, 0x10, 0x1b, 0x58, 0x44, 0x07, 0xf0, 0x1c, 0x2d, 0x2a, 0x6d, 0x3a, 0x20, 0xbd, 0x10, 0x76, + 0x07, 0xe0, 0x4a, 0xd3, 0x4f, 0x34, 0x5d, 0x87, 0xe8, 0x27, 0xa4, 0xbb, 0x8c, 0xa4, 0x62, 0x8a, + 0x09, 0xee, 0x75, 0x35, 0x5f, 0x01, 0xf4, 0x53, 0xe2, 0x8c, 0x27, 0x73, 0xef, 0xe9, 0xa0, 0x35, + 0xec, 0x5d, 0xf5, 0x2e, 0x6d, 0x62, 0xe3, 0xc9, 0x3c, 0x40, 0x9c, 0x7e, 0x47, 0x7a, 0xe3, 0xa3, + 0x12, 0x6f, 0x79, 0x2c, 0xf3, 0x54, 0x79, 0xae, 0x96, 0xbd, 0x2c, 0x65, 0x15, 0x15, 0xd4, 0x75, + 0xf4, 0x35, 0x71, 0x7f, 0x12, 0x59, 0xc6, 0x52, 0xef, 0x42, 0x9f, 0x78, 0x56, 0x9c, 0x30, 0x68, + 0x60, 0x59, 0xbc, 0x7d, 0x35, 0x0f, 0xbd, 0x4e, 0xf3, 0xf6, 0xd5, 0x3c, 0x0c, 0x10, 0xf7, 0xb7, + 0x85, 0x1b, 0xfa, 0x03, 0x21, 0xd6, 0x37, 0x66, 0xd1, 0xd2, 0x7a, 0xaf, 0xe9, 0xb4, 0xe2, 0x83, + 0x9a, 0x96, 0xfa, 0xe4, 0xbd, 0x00, 0x94, 0xcc, 0x7f, 0x11, 0x8c, 0xcf, 0xc7, 0x0b, 0xaf, 0x3d, + 0x70, 0x86, 0xdd, 0xa0, 0x81, 0xf9, 0x8a, 0x3c, 0x7f, 0xe8, 0x83, 0x3e, 0x27, 0xce, 0x3b, 0xc8, + 0x6d, 0xed, 0xf0, 0x2f, 0x7d, 0x4d, 0x9e, 0xdd, 0x82, 0x64, 0xdb, 0x7c, 0xc6, 0x63, 0x71, 0x60, + 0x7c, 0xa7, 0x2b, 0xd6, 0x09, 0x1e, 0xa0, 0x95, 0xee, 0xd7, 0xa3, 0xda, 0x09, 0xd4, 0x39, 0x75, + 0x5d, 0x81, 0xfa, 0xff, 0xb5, 0x74, 0xf6, 0x27, 0xf4, 0xad, 0x53, 0x7a, 0x7a, 0x45, 0x5e, 0x19, + 0x24, 0x04, 0x79, 0x07, 0xf2, 0x67, 0x91, 0x29, 0x8e, 0x35, 0x37, 0x51, 0x9c, 0xe4, 0x30, 0xfb, + 0x09, 0x4b, 0xf7, 0x20, 0xc3, 0x23, 0x53, 0x90, 0xd9, 0xf6, 0x69, 0x60, 0xd8, 0xac, 0x37, 0x8c, + 0xdf, 0x82, 0xcc, 0xf0, 0x6d, 0x4d, 0x07, 0xd5, 0x10, 0x1a, 0x92, 0x2f, 0xa6, 0x90, 0x4a, 0x88, + 0x23, 0x05, 0x9b, 0x3f, 0x97, 0x12, 0xb6, 0x20, 0xcd, 0x35, 0x0d, 0xd7, 0xd8, 0x42, 0x9d, 0xeb, + 0xb6, 0xd7, 0x0a, 0xfc, 0x4a, 0x7e, 0x4e, 0xed, 0xff, 0xe3, 0xe8, 0xc6, 0xa3, 0x1e, 0xb9, 0x78, + 0xcb, 0xa3, 0x75, 0x02, 0x1b, 0x9b, 0x75, 0x61, 0xea, 0xbe, 0x15, 0x09, 0x8b, 0xf3, 0xd5, 0x6a, + 0x6e, 0x67, 0xa3, 0x02, 0xf0, 0x5c, 0x20, 0x12, 0x40, 0xce, 0xe4, 0x54, 0x98, 0x38, 0x2d, 0x2b, + 0xf1, 0x17, 0x70, 0xa4, 0x4c, 0x32, 0xa5, 0xad, 0xe7, 0x52, 0xdc, 0x73, 0xe3, 0x46, 0x47, 0x8c, + 0x73, 0x59, 0x22, 0xf4, 0x4b, 0xf2, 0xfe, 0x14, 0xb6, 0xd1, 0x31, 
0x51, 0x56, 0xe2, 0x6a, 0x49, + 0x13, 0xa4, 0xdf, 0x90, 0x97, 0x26, 0xc8, 0x77, 0x90, 0xcf, 0x59, 0x56, 0x68, 0x2f, 0x74, 0xfc, + 0xa7, 0x28, 0xfa, 0x15, 0x71, 0x75, 0x0c, 0x99, 0x6d, 0xf5, 0x17, 0xb5, 0x41, 0x33, 0x44, 0x60, + 0x05, 0xf4, 0x47, 0xd2, 0xaf, 0xbd, 0xf6, 0x94, 0x65, 0xfa, 0x35, 0x30, 0x19, 0x3d, 0xbb, 0xfa, + 0x81, 0x3f, 0xa8, 0x14, 0x35, 0x01, 0xfd, 0x9e, 0xf4, 0xcd, 0xe5, 0xda, 0xd5, 0x12, 0xcb, 0x97, + 0x29, 0xe0, 0x31, 0x78, 0x44, 0x87, 0x76, 0x86, 0xc5, 0x7c, 0x6e, 0xc2, 0xa5, 0xf5, 0x74, 0x2d, + 0x84, 0xca, 0x94, 0x8c, 0x52, 0xaf, 0x67, 0xf2, 0x39, 0x41, 0xf9, 0x7f, 0xb7, 0x49, 0xb7, 0x0c, + 0x1d, 0xb7, 0xd9, 0x8c, 0x33, 0xc5, 0xa2, 0xe4, 0x26, 0xe2, 0xd1, 0x0e, 0x70, 0xf5, 0xd8, 0xc1, + 0x79, 0x4c, 0xe0, 0xc6, 0x0a, 0x20, 0x4d, 0x58, 0x1c, 0xe9, 0x59, 0x36, 0x95, 0xad, 0x43, 0x58, + 0x85, 0xf1, 0x0e, 0xb8, 0x0a, 0x20, 0x16, 0x77, 0x20, 0x73, 0x5b, 0xe1, 0x26, 0x88, 0x1d, 0x60, + 0xcb, 0x62, 0xcb, 0x5c, 0x98, 0xf4, 0x15, 0x79, 0xaa, 0xa5, 0xb6, 0xc0, 0xc6, 0xa0, 0xbf, 0x93, + 0xbe, 0x89, 0x62, 0x83, 0xed, 0xc8, 0x62, 0x58, 0x4a, 0x71, 0xc7, 0x36, 0x20, 0x3d, 0x77, 0xe0, + 0x0c, 0x7b, 0x57, 0x9f, 0xd7, 0x6a, 0xf2, 0x40, 0xa1, 0xf3, 0x0c, 0xce, 0x1c, 0xf7, 0x7f, 0x23, + 0x1f, 0x9e, 0x39, 0x82, 0xfd, 0x36, 0x8e, 0x63, 0xc8, 0x32, 0x21, 0x67, 0xd3, 0xe2, 0x3b, 0x50, + 0x21, 0xd8, 0xab, 0x21, 0xc4, 0x12, 0xd4, 0x6c, 0x6a, 0x1f, 0xa2, 0xb4, 0x7d, 0xd6, 0x58, 0xbd, + 0xb8, 0x8f, 0x70, 0x55, 0x9a, 0x21, 0xd1, 0x7b, 0xa3, 0x4f, 0xdc, 0xe9, 0x22, 0x0c, 0xcb, 0x9d, + 0x66, 0x2d, 0x4c, 0x7f, 0xb6, 0x44, 0xd8, 0xd1, 0xb0, 0x31, 0xf0, 0xaa, 0x71, 0x92, 0x88, 0x7b, + 0x74, 0xf2, 0x44, 0x3b, 0x29, 0xed, 0xeb, 0x37, 0x7f, 0x7c, 0xbd, 0x63, 0x6a, 0x7f, 0x5c, 0x5f, + 0xc6, 0xe2, 0x30, 0xda, 0x47, 0xd9, 0x9e, 0xc5, 0x42, 0xa6, 0xf8, 0x99, 0xcb, 0x8e, 0xc9, 0xa8, + 0xf9, 0xf1, 0x5b, 0xbb, 0xda, 0xfe, 0xf6, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x97, 0xb9, + 0xb8, 0x15, 0x07, 0x00, 0x00, } - -func (m *Config) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Config) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Partition) > 0 { - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Partition))) - i-- - dAtA[i] = 0x4a - } - if m.TLS != nil { - { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Gossip != nil { - { - size, err := m.Gossip.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.AutoEncrypt != nil { - { - size, err := m.AutoEncrypt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.ACL != nil { - { - size, err := m.ACL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.SegmentName) > 0 { - i -= len(m.SegmentName) 
- copy(dAtA[i:], m.SegmentName) - i = encodeVarintConfig(dAtA, i, uint64(len(m.SegmentName))) - i-- - dAtA[i] = 0x22 - } - if len(m.NodeName) > 0 { - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintConfig(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x1a - } - if len(m.PrimaryDatacenter) > 0 { - i -= len(m.PrimaryDatacenter) - copy(dAtA[i:], m.PrimaryDatacenter) - i = encodeVarintConfig(dAtA, i, uint64(len(m.PrimaryDatacenter))) - i-- - dAtA[i] = 0x12 - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Gossip) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Gossip) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Gossip) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RetryJoinLAN) > 0 { - for iNdEx := len(m.RetryJoinLAN) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RetryJoinLAN[iNdEx]) - copy(dAtA[i:], m.RetryJoinLAN[iNdEx]) - i = encodeVarintConfig(dAtA, i, uint64(len(m.RetryJoinLAN[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Encryption != nil { - { - size, err := m.Encryption.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GossipEncryption) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GossipEncryption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GossipEncryption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.VerifyOutgoing { - i-- - if m.VerifyOutgoing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.VerifyIncoming { - i-- - if m.VerifyIncoming { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TLS) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TLS) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Deprecated_PreferServerCipherSuites { - i-- - if m.Deprecated_PreferServerCipherSuites { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.MinVersion) > 0 { - i -= 
len(m.MinVersion) - copy(dAtA[i:], m.MinVersion) - i = encodeVarintConfig(dAtA, i, uint64(len(m.MinVersion))) - i-- - dAtA[i] = 0x22 - } - if len(m.CipherSuites) > 0 { - i -= len(m.CipherSuites) - copy(dAtA[i:], m.CipherSuites) - i = encodeVarintConfig(dAtA, i, uint64(len(m.CipherSuites))) - i-- - dAtA[i] = 0x1a - } - if m.VerifyServerHostname { - i-- - if m.VerifyServerHostname { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.VerifyOutgoing { - i-- - if m.VerifyOutgoing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ACL) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ACL) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ACL) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MSPDisableBootstrap { - i-- - if m.MSPDisableBootstrap { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - if m.EnableTokenPersistence { - i-- - if m.EnableTokenPersistence { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if len(m.Deprecated_DisabledTTL) > 0 { - i -= len(m.Deprecated_DisabledTTL) - copy(dAtA[i:], m.Deprecated_DisabledTTL) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Deprecated_DisabledTTL))) - i-- - dAtA[i] = 0x4a - } - if m.Tokens != nil { - { - size, err := m.Tokens.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.EnableKeyListPolicy { - i-- - if m.EnableKeyListPolicy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.DefaultPolicy) > 0 { - i -= len(m.DefaultPolicy) - copy(dAtA[i:], m.DefaultPolicy) - i = encodeVarintConfig(dAtA, i, uint64(len(m.DefaultPolicy))) - i-- - dAtA[i] = 0x32 - } - if len(m.DownPolicy) > 0 { - i -= len(m.DownPolicy) - copy(dAtA[i:], m.DownPolicy) - i = encodeVarintConfig(dAtA, i, uint64(len(m.DownPolicy))) - i-- - dAtA[i] = 0x2a - } - if len(m.TokenTTL) > 0 { - i -= len(m.TokenTTL) - copy(dAtA[i:], m.TokenTTL) - i = encodeVarintConfig(dAtA, i, uint64(len(m.TokenTTL))) - i-- - dAtA[i] = 0x22 - } - if len(m.RoleTTL) > 0 { - i -= len(m.RoleTTL) - copy(dAtA[i:], m.RoleTTL) - i = encodeVarintConfig(dAtA, i, uint64(len(m.RoleTTL))) - i-- - dAtA[i] = 0x1a - } - if len(m.PolicyTTL) > 0 { - i -= len(m.PolicyTTL) - copy(dAtA[i:], m.PolicyTTL) - i = encodeVarintConfig(dAtA, i, uint64(len(m.PolicyTTL))) - i-- - dAtA[i] = 0x12 - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ACLTokens) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ACLTokens) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ACLTokens) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.ManagedServiceProvider) > 0 { - for iNdEx := len(m.ManagedServiceProvider) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ManagedServiceProvider[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.Agent) > 0 { - i -= len(m.Agent) - copy(dAtA[i:], m.Agent) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Agent))) - i-- - dAtA[i] = 0x2a - } - if len(m.Default) > 0 { - i -= len(m.Default) - copy(dAtA[i:], m.Default) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Default))) - i-- - dAtA[i] = 0x22 - } - if len(m.AgentRecovery) > 0 { - i -= len(m.AgentRecovery) - copy(dAtA[i:], m.AgentRecovery) - i = encodeVarintConfig(dAtA, i, uint64(len(m.AgentRecovery))) - i-- - dAtA[i] = 0x1a - } - if len(m.Replication) > 0 { - i -= len(m.Replication) - copy(dAtA[i:], m.Replication) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Replication))) - i-- - dAtA[i] = 0x12 - } - if len(m.InitialManagement) > 0 { - i -= len(m.InitialManagement) - copy(dAtA[i:], m.InitialManagement) - i = encodeVarintConfig(dAtA, i, uint64(len(m.InitialManagement))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ACLServiceProviderToken) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ACLServiceProviderToken) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ACLServiceProviderToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SecretID) > 0 { - i -= len(m.SecretID) - copy(dAtA[i:], m.SecretID) - i = encodeVarintConfig(dAtA, i, uint64(len(m.SecretID))) - i-- - dAtA[i] = 0x12 - } - if len(m.AccessorID) > 0 { - i -= len(m.AccessorID) - copy(dAtA[i:], m.AccessorID) - i = encodeVarintConfig(dAtA, i, uint64(len(m.AccessorID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AutoEncrypt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AutoEncrypt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AutoEncrypt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.AllowTLS { - i-- - if m.AllowTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.IPSAN) > 0 { - for iNdEx := len(m.IPSAN) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.IPSAN[iNdEx]) - copy(dAtA[i:], m.IPSAN[iNdEx]) - i = encodeVarintConfig(dAtA, i, uint64(len(m.IPSAN[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.DNSSAN) > 0 { - for iNdEx := len(m.DNSSAN) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DNSSAN[iNdEx]) - copy(dAtA[i:], m.DNSSAN[iNdEx]) - i = encodeVarintConfig(dAtA, i, uint64(len(m.DNSSAN[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.TLS { - i-- - if m.TLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func 
encodeVarintConfig(dAtA []byte, offset int, v uint64) int { - offset -= sovConfig(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Config) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.PrimaryDatacenter) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.NodeName) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.SegmentName) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.ACL != nil { - l = m.ACL.Size() - n += 1 + l + sovConfig(uint64(l)) - } - if m.AutoEncrypt != nil { - l = m.AutoEncrypt.Size() - n += 1 + l + sovConfig(uint64(l)) - } - if m.Gossip != nil { - l = m.Gossip.Size() - n += 1 + l + sovConfig(uint64(l)) - } - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.Partition) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Gossip) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Encryption != nil { - l = m.Encryption.Size() - n += 1 + l + sovConfig(uint64(l)) - } - if len(m.RetryJoinLAN) > 0 { - for _, s := range m.RetryJoinLAN { - l = len(s) - n += 1 + l + sovConfig(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GossipEncryption) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.VerifyIncoming { - n += 2 - } - if m.VerifyOutgoing { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.VerifyOutgoing { - n += 2 - } - if m.VerifyServerHostname { - n += 2 - } - l = len(m.CipherSuites) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.MinVersion) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.Deprecated_PreferServerCipherSuites { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ACL) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Enabled { - n += 2 - } - l = len(m.PolicyTTL) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.RoleTTL) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.TokenTTL) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.DownPolicy) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.DefaultPolicy) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.EnableKeyListPolicy { - n += 2 - } - if m.Tokens != nil { - l = m.Tokens.Size() - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.Deprecated_DisabledTTL) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.EnableTokenPersistence { - n += 2 - } - if m.MSPDisableBootstrap { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ACLTokens) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.InitialManagement) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.Replication) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.AgentRecovery) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.Default) - if l > 0 { - n += 1 + 
l + sovConfig(uint64(l)) - } - l = len(m.Agent) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if len(m.ManagedServiceProvider) > 0 { - for _, e := range m.ManagedServiceProvider { - l = e.Size() - n += 1 + l + sovConfig(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ACLServiceProviderToken) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.AccessorID) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - l = len(m.SecretID) - if l > 0 { - n += 1 + l + sovConfig(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AutoEncrypt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TLS { - n += 2 - } - if len(m.DNSSAN) > 0 { - for _, s := range m.DNSSAN { - l = len(s) - n += 1 + l + sovConfig(uint64(l)) - } - } - if len(m.IPSAN) > 0 { - for _, s := range m.IPSAN { - l = len(s) - n += 1 + l + sovConfig(uint64(l)) - } - } - if m.AllowTLS { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovConfig(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozConfig(x uint64) (n int) { - return sovConfig(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Config) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Config: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryDatacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrimaryDatacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SegmentName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SegmentName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ACL", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ACL == nil { - m.ACL = &ACL{} - } - if err := m.ACL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoEncrypt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AutoEncrypt == nil { - m.AutoEncrypt = &AutoEncrypt{} - } - if err := m.AutoEncrypt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gossip", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Gossip == nil { - m.Gossip = &Gossip{} - } - if err := m.Gossip.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TLS == nil { - m.TLS = &TLS{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Partition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Gossip) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Gossip: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Gossip: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Encryption", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Encryption == nil { - m.Encryption = &GossipEncryption{} - } - if err := m.Encryption.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryJoinLAN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RetryJoinLAN = append(m.RetryJoinLAN, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GossipEncryption) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GossipEncryption: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GossipEncryption: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VerifyIncoming", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.VerifyIncoming = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VerifyOutgoing", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.VerifyOutgoing = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TLS) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TLS: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TLS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VerifyOutgoing", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.VerifyOutgoing = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VerifyServerHostname", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.VerifyServerHostname = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CipherSuites", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CipherSuites = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MinVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deprecated_PreferServerCipherSuites", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Deprecated_PreferServerCipherSuites = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig 
- } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ACL) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ACL: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ACL: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTTL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PolicyTTL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleTTL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RoleTTL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TokenTTL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TokenTTL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DownPolicy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultPolicy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableKeyListPolicy", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnableKeyListPolicy = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tokens", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tokens == nil { - m.Tokens = &ACLTokens{} - } - if err := m.Tokens.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deprecated_DisabledTTL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deprecated_DisabledTTL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableTokenPersistence", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnableTokenPersistence = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MSPDisableBootstrap", wireType) - } - var v int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MSPDisableBootstrap = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ACLTokens) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ACLTokens: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ACLTokens: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialManagement", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InitialManagement = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Replication", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Replication = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentRecovery", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentRecovery = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) 
- } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Default = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Agent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ManagedServiceProvider", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ManagedServiceProvider = append(m.ManagedServiceProvider, &ACLServiceProviderToken{}) - if err := m.ManagedServiceProvider[len(m.ManagedServiceProvider)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ACLServiceProviderToken) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ACLServiceProviderToken: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ACLServiceProviderToken: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessorID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessorID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AutoEncrypt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AutoEncrypt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AutoEncrypt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TLS = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSSAN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DNSSAN = append(m.DNSSAN, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPSAN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConfig - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IPSAN = append(m.IPSAN, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowTLS = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipConfig(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthConfig - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupConfig - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthConfig - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthConfig = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowConfig = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupConfig = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbconnect/connect.gen.go b/proto/pbconnect/connect.gen.go index 433a065b7..a9fbdfe24 100644 --- a/proto/pbconnect/connect.gen.go +++ b/proto/pbconnect/connect.gen.go @@ -13,14 +13,14 @@ func CARootToStructsCARoot(s *CARoot, t *structs.CARoot) { t.SerialNumber = s.SerialNumber t.SigningKeyID = s.SigningKeyID t.ExternalTrustDomain = s.ExternalTrustDomain - t.NotBefore = structs.TimeFromProtoGogo(s.NotBefore) - t.NotAfter = structs.TimeFromProtoGogo(s.NotAfter) + t.NotBefore = structs.TimeFromProto(s.NotBefore) + t.NotAfter = structs.TimeFromProto(s.NotAfter) t.RootCert = s.RootCert t.IntermediateCerts = s.IntermediateCerts t.SigningCert = s.SigningCert t.SigningKey = s.SigningKey t.Active = s.Active - t.RotatedOutAt = structs.TimeFromProtoGogo(s.RotatedOutAt) + t.RotatedOutAt = structs.TimeFromProto(s.RotatedOutAt) t.PrivateKeyType = s.PrivateKeyType t.PrivateKeyBits = int(s.PrivateKeyBits) t.RaftIndex = RaftIndexTo(s.RaftIndex) @@ -34,14 +34,14 @@ func CARootFromStructsCARoot(t *structs.CARoot, s *CARoot) { s.SerialNumber = t.SerialNumber s.SigningKeyID = t.SigningKeyID s.ExternalTrustDomain = t.ExternalTrustDomain - s.NotBefore = structs.TimeToProtoGogo(t.NotBefore) - s.NotAfter = structs.TimeToProtoGogo(t.NotAfter) + s.NotBefore = structs.TimeToProto(t.NotBefore) + s.NotAfter = structs.TimeToProto(t.NotAfter) s.RootCert = t.RootCert s.IntermediateCerts = t.IntermediateCerts s.SigningCert = t.SigningCert s.SigningKey = t.SigningKey s.Active = t.Active - s.RotatedOutAt = structs.TimeToProtoGogo(t.RotatedOutAt) + s.RotatedOutAt = structs.TimeToProto(t.RotatedOutAt) s.PrivateKeyType = t.PrivateKeyType s.PrivateKeyBits = int32(t.PrivateKeyBits) s.RaftIndex = RaftIndexFrom(t.RaftIndex) @@ -93,8 +93,8 @@ func IssuedCertToStructsIssuedCert(s *IssuedCert, t *structs.IssuedCert) { t.ServiceURI = s.ServiceURI t.Agent = 
s.Agent
 	t.AgentURI = s.AgentURI
-	t.ValidAfter = structs.TimeFromProtoGogo(s.ValidAfter)
-	t.ValidBefore = structs.TimeFromProtoGogo(s.ValidBefore)
+	t.ValidAfter = structs.TimeFromProto(s.ValidAfter)
+	t.ValidBefore = structs.TimeFromProto(s.ValidBefore)
 	t.EnterpriseMeta = EnterpriseMetaTo(s.EnterpriseMeta)
 	t.RaftIndex = RaftIndexTo(s.RaftIndex)
 }
@@ -109,8 +109,8 @@ func IssuedCertFromStructsIssuedCert(t *structs.IssuedCert, s *IssuedCert) {
 	s.ServiceURI = t.ServiceURI
 	s.Agent = t.Agent
 	s.AgentURI = t.AgentURI
-	s.ValidAfter = structs.TimeToProtoGogo(t.ValidAfter)
-	s.ValidBefore = structs.TimeToProtoGogo(t.ValidBefore)
+	s.ValidAfter = structs.TimeToProto(t.ValidAfter)
+	s.ValidBefore = structs.TimeToProto(t.ValidBefore)
 	s.EnterpriseMeta = EnterpriseMetaFrom(t.EnterpriseMeta)
 	s.RaftIndex = RaftIndexFrom(t.RaftIndex)
 }
diff --git a/proto/pbconnect/connect.go b/proto/pbconnect/connect.go
index c61ca7c8a..2b13a12b3 100644
--- a/proto/pbconnect/connect.go
+++ b/proto/pbconnect/connect.go
@@ -2,42 +2,42 @@ package pbconnect
 
 import (
 	"github.com/hashicorp/consul/agent/structs"
-	"github.com/hashicorp/consul/proto/pbcommongogo"
+	"github.com/hashicorp/consul/proto/pbcommon"
 )
 
-func QueryMetaFrom(f structs.QueryMeta) *pbcommongogo.QueryMeta {
-	t := new(pbcommongogo.QueryMeta)
-	pbcommongogo.QueryMetaFromStructs(&f, t)
+func QueryMetaFrom(f structs.QueryMeta) *pbcommon.QueryMeta {
+	t := new(pbcommon.QueryMeta)
+	pbcommon.QueryMetaFromStructs(&f, t)
 	return t
 }
 
-func QueryMetaTo(f *pbcommongogo.QueryMeta) structs.QueryMeta {
+func QueryMetaTo(f *pbcommon.QueryMeta) structs.QueryMeta {
 	t := new(structs.QueryMeta)
-	pbcommongogo.QueryMetaToStructs(f, t)
+	pbcommon.QueryMetaToStructs(f, t)
 	return *t
 }
 
-func RaftIndexFrom(f structs.RaftIndex) *pbcommongogo.RaftIndex {
-	t := new(pbcommongogo.RaftIndex)
-	pbcommongogo.RaftIndexFromStructs(&f, t)
+func RaftIndexFrom(f structs.RaftIndex) *pbcommon.RaftIndex {
+	t := new(pbcommon.RaftIndex)
+	pbcommon.RaftIndexFromStructs(&f, t)
 	return t
 }
 
-func RaftIndexTo(f *pbcommongogo.RaftIndex) structs.RaftIndex {
+func RaftIndexTo(f *pbcommon.RaftIndex) structs.RaftIndex {
 	t := new(structs.RaftIndex)
-	pbcommongogo.RaftIndexToStructs(f, t)
+	pbcommon.RaftIndexToStructs(f, t)
 	return *t
 }
 
-func EnterpriseMetaFrom(f structs.EnterpriseMeta) *pbcommongogo.EnterpriseMeta {
-	t := new(pbcommongogo.EnterpriseMeta)
-	pbcommongogo.EnterpriseMetaFromStructs(&f, t)
+func EnterpriseMetaFrom(f structs.EnterpriseMeta) *pbcommon.EnterpriseMeta {
+	t := new(pbcommon.EnterpriseMeta)
+	pbcommon.EnterpriseMetaFromStructs(&f, t)
 	return t
 }
 
-func EnterpriseMetaTo(f *pbcommongogo.EnterpriseMeta) structs.EnterpriseMeta {
+func EnterpriseMetaTo(f *pbcommon.EnterpriseMeta) structs.EnterpriseMeta {
 	t := new(structs.EnterpriseMeta)
-	pbcommongogo.EnterpriseMetaToStructs(f, t)
+	pbcommon.EnterpriseMetaToStructs(f, t)
 	return *t
 }
diff --git a/proto/pbconnect/connect.pb.go b/proto/pbconnect/connect.pb.go
index 64a6738f9..97ce17def 100644
--- a/proto/pbconnect/connect.pb.go
+++ b/proto/pbconnect/connect.pb.go
@@ -1,16 +1,14 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-go. DO NOT EDIT.
 // source: proto/pbconnect/connect.proto
 
 package pbconnect
 
 import (
 	fmt "fmt"
-	types "github.com/gogo/protobuf/types"
 	proto "github.com/golang/protobuf/proto"
-	pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo"
-	io "io"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	pbcommon "github.com/hashicorp/consul/proto/pbcommon"
 	math "math"
-	math_bits "math/bits"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -64,10 +62,10 @@ type CARoots struct {
 	// QueryMeta here is mainly used to contain the latest Raft Index that could
 	// be used to perform a blocking query.
 	// mog: func-to=QueryMetaTo func-from=QueryMetaFrom
-	QueryMeta *pbcommongogo.QueryMeta `protobuf:"bytes,4,opt,name=QueryMeta,proto3" json:"QueryMeta,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+	QueryMeta *pbcommon.QueryMeta `protobuf:"bytes,4,opt,name=QueryMeta,proto3" json:"QueryMeta,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
 func (m *CARoots) Reset() { *m = CARoots{} }
@@ -76,26 +74,18 @@ func (*CARoots) ProtoMessage() {}
 func (*CARoots) Descriptor() ([]byte, []int) {
 	return fileDescriptor_80627e709958eb04, []int{0}
 }
+
 func (m *CARoots) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
+	return xxx_messageInfo_CARoots.Unmarshal(m, b)
 }
 func (m *CARoots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_CARoots.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
+	return xxx_messageInfo_CARoots.Marshal(b, m, deterministic)
 }
 func (m *CARoots) XXX_Merge(src proto.Message) {
 	xxx_messageInfo_CARoots.Merge(m, src)
 }
 func (m *CARoots) XXX_Size() int {
-	return m.Size()
+	return xxx_messageInfo_CARoots.Size(m)
 }
 func (m *CARoots) XXX_DiscardUnknown() {
 	xxx_messageInfo_CARoots.DiscardUnknown(m)
@@ -124,7 +114,7 @@ func (m *CARoots) GetRoots() []*CARoot {
 	return nil
 }
 
-func (m *CARoots) GetQueryMeta() *pbcommongogo.QueryMeta {
+func (m *CARoots) GetQueryMeta() *pbcommon.QueryMeta {
 	if m != nil {
 		return m.QueryMeta
 	}
@@ -160,10 +150,10 @@ type CARoot struct {
 	// future flexibility.
 	ExternalTrustDomain string `protobuf:"bytes,5,opt,name=ExternalTrustDomain,proto3" json:"ExternalTrustDomain,omitempty"`
 	// Time validity bounds.
-	// mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo
-	NotBefore *types.Timestamp `protobuf:"bytes,6,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"`
-	// mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo
-	NotAfter *types.Timestamp `protobuf:"bytes,7,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"`
+	// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto
+	NotBefore *timestamp.Timestamp `protobuf:"bytes,6,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"`
+	// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto
+	NotAfter *timestamp.Timestamp `protobuf:"bytes,7,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"`
 	// RootCert is the PEM-encoded public certificate.
 	RootCert string `protobuf:"bytes,8,opt,name=RootCert,proto3" json:"RootCert,omitempty"`
 	// IntermediateCerts is a list of PEM-encoded intermediate certs to
@@ -182,8 +172,8 @@ type CARoot struct {
 	// RotatedOutAt is the time at which this CA was removed from the state.
 	// This will only be set on roots that have been rotated out from being the
 	// active root.
-	// mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo
-	RotatedOutAt *types.Timestamp `protobuf:"bytes,13,opt,name=RotatedOutAt,proto3" json:"RotatedOutAt,omitempty"`
+	// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto
+	RotatedOutAt *timestamp.Timestamp `protobuf:"bytes,13,opt,name=RotatedOutAt,proto3" json:"RotatedOutAt,omitempty"`
 	// PrivateKeyType is the type of the private key used to sign certificates. It
 	// may be "rsa" or "ec". This is provided as a convenience to avoid parsing
 	// the public key to from the certificate to infer the type.
@@ -194,10 +184,10 @@ type CARoot struct {
 	// mog: func-to=int func-from=int32
 	PrivateKeyBits int32 `protobuf:"varint,15,opt,name=PrivateKeyBits,proto3" json:"PrivateKeyBits,omitempty"`
 	// mog: func-to=RaftIndexTo func-from=RaftIndexFrom
-	RaftIndex *pbcommongogo.RaftIndex `protobuf:"bytes,16,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+	RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,16,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
 func (m *CARoot) Reset() { *m = CARoot{} }
@@ -206,26 +196,18 @@ func (*CARoot) ProtoMessage() {}
 func (*CARoot) Descriptor() ([]byte, []int) {
 	return fileDescriptor_80627e709958eb04, []int{1}
 }
+
 func (m *CARoot) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
+	return xxx_messageInfo_CARoot.Unmarshal(m, b)
 }
 func (m *CARoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_CARoot.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
+	return xxx_messageInfo_CARoot.Marshal(b, m, deterministic)
 }
 func (m *CARoot) XXX_Merge(src proto.Message) {
 	xxx_messageInfo_CARoot.Merge(m, src)
 }
 func (m *CARoot) XXX_Size() int {
-	return m.Size()
+	return xxx_messageInfo_CARoot.Size(m)
 }
 func (m *CARoot) XXX_DiscardUnknown() {
 	xxx_messageInfo_CARoot.DiscardUnknown(m)
@@ -268,14 +250,14 @@ func (m *CARoot) GetExternalTrustDomain() string {
 	return ""
 }
 
-func (m *CARoot) GetNotBefore() *types.Timestamp {
+func (m *CARoot) GetNotBefore() *timestamp.Timestamp {
 	if m != nil {
 		return m.NotBefore
 	}
 	return nil
 }
 
-func (m *CARoot) GetNotAfter() *types.Timestamp {
+func (m *CARoot) GetNotAfter() *timestamp.Timestamp {
 	if m != nil {
 		return m.NotAfter
 	}
@@ -317,7 +299,7 @@ func (m *CARoot) GetActive() bool {
 	return false
 }
 
-func (m *CARoot) GetRotatedOutAt() *types.Timestamp {
+func (m *CARoot) GetRotatedOutAt() *timestamp.Timestamp {
 	if m != nil {
 		return m.RotatedOutAt
 	}
@@ -338,13 +320,15 @@ func (m *CARoot) GetPrivateKeyBits() int32 {
 	return 0
 }
 
-func (m *CARoot) GetRaftIndex() *pbcommongogo.RaftIndex {
+func (m *CARoot) GetRaftIndex() *pbcommon.RaftIndex {
 	if m != nil {
 		return m.RaftIndex
 	}
 	return nil
 }
 
+// RaftIndex is used to track the index used while creating
+// or modifying a given struct type.
 //
 // mog annotation:
 //
@@ -370,18 +354,18 @@ type IssuedCert struct {
 	AgentURI string `protobuf:"bytes,7,opt,name=AgentURI,proto3" json:"AgentURI,omitempty"`
 	// ValidAfter and ValidBefore are the validity periods for the
 	// certificate.
- // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo - ValidAfter *types.Timestamp `protobuf:"bytes,8,opt,name=ValidAfter,proto3" json:"ValidAfter,omitempty"` - // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo - ValidBefore *types.Timestamp `protobuf:"bytes,9,opt,name=ValidBefore,proto3" json:"ValidBefore,omitempty"` + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto + ValidAfter *timestamp.Timestamp `protobuf:"bytes,8,opt,name=ValidAfter,proto3" json:"ValidAfter,omitempty"` + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto + ValidBefore *timestamp.Timestamp `protobuf:"bytes,9,opt,name=ValidBefore,proto3" json:"ValidBefore,omitempty"` // EnterpriseMeta is the Consul Enterprise specific metadata // mog: func-to=EnterpriseMetaTo func-from=EnterpriseMetaFrom - EnterpriseMeta *pbcommongogo.EnterpriseMeta `protobuf:"bytes,10,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` + EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,10,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=RaftIndexTo func-from=RaftIndexFrom - RaftIndex *pbcommongogo.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *IssuedCert) Reset() { *m = IssuedCert{} } @@ -390,26 +374,18 @@ func (*IssuedCert) ProtoMessage() {} func (*IssuedCert) Descriptor() ([]byte, []int) { return fileDescriptor_80627e709958eb04, []int{2} } + func (m *IssuedCert) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_IssuedCert.Unmarshal(m, b) } func (m *IssuedCert) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IssuedCert.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_IssuedCert.Marshal(b, m, deterministic) } func (m *IssuedCert) XXX_Merge(src proto.Message) { xxx_messageInfo_IssuedCert.Merge(m, src) } func (m *IssuedCert) XXX_Size() int { - return m.Size() + return xxx_messageInfo_IssuedCert.Size(m) } func (m *IssuedCert) XXX_DiscardUnknown() { xxx_messageInfo_IssuedCert.DiscardUnknown(m) @@ -466,28 +442,28 @@ func (m *IssuedCert) GetAgentURI() string { return "" } -func (m *IssuedCert) GetValidAfter() *types.Timestamp { +func (m *IssuedCert) GetValidAfter() *timestamp.Timestamp { if m != nil { return m.ValidAfter } return nil } -func (m *IssuedCert) GetValidBefore() *types.Timestamp { +func (m *IssuedCert) GetValidBefore() *timestamp.Timestamp { if m != nil { return m.ValidBefore } return nil } -func (m *IssuedCert) GetEnterpriseMeta() *pbcommongogo.EnterpriseMeta { +func (m *IssuedCert) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { if m != nil { return m.EnterpriseMeta } return nil } -func (m *IssuedCert) GetRaftIndex() *pbcommongogo.RaftIndex { +func (m *IssuedCert) GetRaftIndex() *pbcommon.RaftIndex { if m != nil { return m.RaftIndex } @@ -500,1811 +476,50 @@ func init() { proto.RegisterType((*IssuedCert)(nil), "connect.IssuedCert") } -func init() { proto.RegisterFile("proto/pbconnect/connect.proto", 
fileDescriptor_80627e709958eb04) } +func init() { + proto.RegisterFile("proto/pbconnect/connect.proto", fileDescriptor_80627e709958eb04) +} var fileDescriptor_80627e709958eb04 = []byte{ - // 661 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0x4d, 0xf3, 0xe3, 0x49, 0x9b, 0xc2, 0xf2, 0xa3, 0x55, 0x24, 0x52, 0x2b, 0x02, 0x14, - 0x09, 0x94, 0xa0, 0x56, 0x42, 0x08, 0x01, 0x52, 0xd2, 0xf4, 0x10, 0x55, 0x0d, 0x65, 0x5b, 0x38, - 0x70, 0x73, 0x92, 0x89, 0xbb, 0x52, 0xec, 0x8d, 0xd6, 0xeb, 0xaa, 0x39, 0xf2, 0x16, 0xbc, 0x03, - 0x2f, 0xc2, 0x0d, 0x1e, 0x01, 0x95, 0x17, 0x41, 0xbb, 0x76, 0x62, 0x3b, 0x45, 0x0a, 0xa7, 0xec, - 0x7c, 0xf3, 0xcd, 0x78, 0x66, 0xbf, 0x2f, 0x0b, 0x8f, 0xe7, 0x52, 0x28, 0xd1, 0x99, 0x8f, 0xc6, - 0x22, 0x08, 0x70, 0xac, 0x3a, 0xc9, 0x6f, 0xdb, 0xe0, 0xa4, 0x9c, 0x84, 0xf5, 0x7d, 0x4f, 0x08, - 0x6f, 0x86, 0x1d, 0x03, 0x8f, 0xa2, 0x69, 0x47, 0x71, 0x1f, 0x43, 0xe5, 0xfa, 0xf3, 0x98, 0x59, - 0xdf, 0x4f, 0x1b, 0xf9, 0xbe, 0x08, 0x3c, 0xe1, 0x89, 0x4e, 0x7c, 0x8c, 0x09, 0xcd, 0xef, 0x16, - 0x94, 0x8f, 0xba, 0x4c, 0x08, 0x15, 0x92, 0x26, 0xec, 0x74, 0xc7, 0x8a, 0x5f, 0xa1, 0x0e, 0x07, - 0x7d, 0x6a, 0x39, 0x56, 0xcb, 0x66, 0x39, 0x8c, 0x38, 0x50, 0xbd, 0x90, 0x51, 0xa8, 0xfa, 0xc2, - 0x77, 0x79, 0x40, 0xb7, 0x0c, 0x25, 0x0b, 0x91, 0xa7, 0x50, 0x34, 0xed, 0x68, 0xc1, 0x29, 0xb4, - 0xaa, 0x07, 0x7b, 0xed, 0xe5, 0xec, 0xf1, 0x67, 0x58, 0x9c, 0x25, 0x87, 0x60, 0x7f, 0x8c, 0x50, - 0x2e, 0x4e, 0x51, 0xb9, 0x74, 0xdb, 0xb1, 0x5a, 0xd5, 0x83, 0x87, 0xed, 0x74, 0xca, 0xf6, 0x2a, - 0xc9, 0x52, 0x5e, 0xf3, 0x6b, 0x11, 0x4a, 0x71, 0x1b, 0x52, 0x83, 0xad, 0xd5, 0x88, 0x5b, 0x83, - 0x3e, 0x21, 0xb0, 0x3d, 0x74, 0x7d, 0x4c, 0x26, 0x32, 0x67, 0xbd, 0xd0, 0x39, 0x4a, 0xee, 0xce, - 0x86, 0x91, 0x3f, 0x42, 0x49, 0x0b, 0x8e, 0xd5, 0xda, 0x66, 0x39, 0xcc, 0x70, 0xb8, 0x17, 0xf0, - 0xc0, 0x3b, 0xc1, 0xc5, 0xa0, 0x6f, 0x46, 0xb1, 0x59, 0x0e, 0x23, 0x2f, 0xe1, 0xfe, 0xf1, 0xb5, - 0x42, 0x19, 0xb8, 0xb3, 0xec, 0xf2, 0x45, 0x43, 0xfd, 0x57, 0x8a, 0xbc, 0x06, 0x7b, 0x28, 0x54, - 0x0f, 0xa7, 0x42, 0x22, 0x2d, 0x99, 0xed, 0xea, 0xed, 0x58, 0xac, 0xf6, 0x52, 0xac, 0xf6, 0xc5, - 0x52, 0x2c, 0x96, 0x92, 0xc9, 0x2b, 0xa8, 0x0c, 0x85, 0xea, 0x4e, 0x15, 0x4a, 0x5a, 0xde, 0x58, - 0xb8, 0xe2, 0x92, 0x3a, 0x54, 0xf4, 0xbd, 0x1c, 0xa1, 0x54, 0xb4, 0x62, 0x06, 0x5b, 0xc5, 0xe4, - 0x05, 0xdc, 0x1b, 0x04, 0x0a, 0xa5, 0x8f, 0x13, 0xee, 0x2a, 0xd4, 0x58, 0x48, 0x6d, 0xa7, 0xd0, - 0xb2, 0xd9, 0xed, 0x84, 0x96, 0x38, 0xd9, 0xde, 0x34, 0x83, 0x58, 0xe2, 0x0c, 0x44, 0x1a, 0x00, - 0xe9, 0xfd, 0xd0, 0xaa, 0x21, 0x64, 0x10, 0xf2, 0x08, 0x4a, 0xb1, 0x69, 0xe8, 0x8e, 0x63, 0xb5, - 0x2a, 0x2c, 0x89, 0xc8, 0x7b, 0xd8, 0x61, 0x42, 0xb9, 0x0a, 0x27, 0x1f, 0x22, 0xd5, 0x55, 0x74, - 0x77, 0xe3, 0x7e, 0x39, 0x3e, 0x79, 0x06, 0xb5, 0x33, 0xc9, 0xaf, 0x5c, 0x85, 0x27, 0xb8, 0xb8, - 0x58, 0xcc, 0x91, 0xd6, 0xcc, 0xb7, 0xd7, 0xd0, 0x3c, 0xaf, 0xc7, 0x55, 0x48, 0xf7, 0x1c, 0xab, - 0x55, 0x64, 0x6b, 0xa8, 0xf6, 0x20, 0x73, 0xa7, 0x6a, 0x10, 0x4c, 0xf0, 0x9a, 0xde, 0xbd, 0xed, - 0xc1, 0x55, 0x92, 0xa5, 0xbc, 0xe6, 0xcf, 0x02, 0xc0, 0x20, 0x0c, 0x23, 0x9c, 0x98, 0xbb, 0x58, - 0xf7, 0x58, 0xf2, 0xa7, 0xc9, 0x79, 0x8c, 0x42, 0x59, 0x73, 0xcf, 0x8e, 0x4f, 0x13, 0x7b, 0x2e, - 0x43, 0xf2, 0x04, 0x76, 0xd3, 0x99, 0x74, 0xbe, 0x60, 0xf2, 0x79, 0x50, 0xd7, 0x9f, 0xa3, 0xbc, - 0xe2, 0x63, 0x4c, 0xec, 0xb9, 0x0c, 0x8d, 0x12, 0xf1, 0xf1, 0x13, 0x1b, 0x24, 0x86, 0xcc, 0x20, - 0xe4, 0x01, 0x14, 0xbb, 0x1e, 0x06, 0xca, 0x78, 0xd0, 0x66, 0x71, 0xa0, 0xbd, 0x62, 
0x0e, 0xba, - 0xa6, 0x1c, 0x7b, 0x65, 0x19, 0x93, 0x37, 0x00, 0x9f, 0xdd, 0x19, 0x9f, 0xc4, 0x0e, 0xac, 0x6c, - 0x54, 0x28, 0xc3, 0x26, 0x6f, 0xa1, 0x6a, 0xa2, 0xc4, 0xf7, 0xf6, 0xc6, 0xe2, 0x2c, 0x9d, 0xf4, - 0xa0, 0x76, 0xac, 0xcd, 0x38, 0x97, 0x3c, 0x44, 0xf3, 0x2c, 0x40, 0xd2, 0x20, 0x23, 0x49, 0x9e, - 0xc1, 0xd6, 0x2a, 0xf2, 0x8a, 0x56, 0xff, 0x4f, 0xd1, 0xde, 0xbb, 0x1f, 0x37, 0x0d, 0xeb, 0xd7, - 0x4d, 0xc3, 0xfa, 0x7d, 0xd3, 0xb0, 0xbe, 0xfd, 0x69, 0xdc, 0xf9, 0xf2, 0xdc, 0xe3, 0xea, 0x32, - 0x1a, 0xe9, 0xca, 0xce, 0xa5, 0x1b, 0x5e, 0xf2, 0xb1, 0x90, 0x73, 0xfd, 0x08, 0x87, 0xd1, 0xac, - 0xb3, 0xf6, 0x36, 0x8f, 0x4a, 0x06, 0x38, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xda, 0x8a, 0x91, - 0x6e, 0xb5, 0x05, 0x00, 0x00, + // 632 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x55, 0xd7, 0xcf, 0xdc, 0x6e, 0x1d, 0x33, 0x68, 0xb2, 0x8a, 0x80, 0xa8, 0x02, 0x14, 0x09, + 0x68, 0xd0, 0x90, 0x10, 0x42, 0x68, 0x52, 0xb7, 0xee, 0x21, 0x9a, 0x56, 0x86, 0x37, 0x78, 0xe0, + 0x2d, 0x6d, 0x6f, 0x3b, 0x4b, 0x4d, 0x5c, 0x39, 0xce, 0xb4, 0xfe, 0x22, 0x7e, 0x0a, 0xbf, 0x0a, + 0x09, 0xd9, 0x4e, 0xda, 0xa4, 0x20, 0xf5, 0x29, 0xbe, 0xe7, 0x1e, 0x5f, 0xdf, 0xeb, 0x73, 0x62, + 0x78, 0xb6, 0x94, 0x42, 0x09, 0x7f, 0x39, 0x9e, 0x88, 0x38, 0xc6, 0x89, 0xf2, 0xb3, 0x6f, 0xdf, + 0xe0, 0xa4, 0x99, 0x85, 0xdd, 0x17, 0x73, 0x21, 0xe6, 0x0b, 0xf4, 0x0d, 0x3c, 0x4e, 0x67, 0xbe, + 0xe2, 0x11, 0x26, 0x2a, 0x8c, 0x96, 0x96, 0xd9, 0x7d, 0xba, 0x29, 0x14, 0x45, 0x22, 0xf6, 0xed, + 0xc7, 0x26, 0x7b, 0xbf, 0x2a, 0xd0, 0x3c, 0x1f, 0x30, 0x21, 0x54, 0x42, 0x7a, 0xb0, 0x3f, 0x98, + 0x28, 0x7e, 0x8f, 0x3a, 0x0c, 0x86, 0xb4, 0xe2, 0x56, 0x3c, 0x87, 0x95, 0x30, 0xe2, 0x42, 0xfb, + 0x56, 0xa6, 0x89, 0x1a, 0x8a, 0x28, 0xe4, 0x31, 0xdd, 0x33, 0x94, 0x22, 0x44, 0x5e, 0x41, 0xdd, + 0x94, 0xa3, 0x55, 0xb7, 0xea, 0xb5, 0x4f, 0x0e, 0xfb, 0x79, 0xdf, 0xf6, 0x18, 0x66, 0xb3, 0xc4, + 0x07, 0xe7, 0x5b, 0x8a, 0x72, 0x75, 0x85, 0x2a, 0xa4, 0x35, 0xb7, 0xe2, 0xb5, 0x4f, 0x8e, 0xfa, + 0x59, 0x6b, 0xeb, 0x04, 0xdb, 0x70, 0x7a, 0x7f, 0x6a, 0xd0, 0xb0, 0x25, 0x48, 0x07, 0xf6, 0xd6, + 0xed, 0xed, 0x05, 0x43, 0x42, 0xa0, 0x36, 0x0a, 0x23, 0xcc, 0xba, 0x31, 0x6b, 0x3d, 0xcc, 0x0d, + 0x4a, 0x1e, 0x2e, 0x46, 0x69, 0x34, 0x46, 0x49, 0xab, 0x6e, 0xc5, 0xab, 0xb1, 0x12, 0x66, 0x38, + 0x7c, 0x1e, 0xf3, 0x78, 0x7e, 0x89, 0xab, 0x60, 0x68, 0xda, 0x70, 0x58, 0x09, 0x23, 0xef, 0xe1, + 0xf1, 0xc5, 0x83, 0x42, 0x19, 0x87, 0x8b, 0xe2, 0xe0, 0x75, 0x43, 0xfd, 0x5f, 0x8a, 0x7c, 0x02, + 0x67, 0x24, 0xd4, 0x19, 0xce, 0x84, 0x44, 0xda, 0x30, 0x93, 0x75, 0xfb, 0x56, 0xa4, 0x7e, 0x2e, + 0x52, 0xff, 0x36, 0x17, 0x89, 0x6d, 0xc8, 0xe4, 0x23, 0xb4, 0x46, 0x42, 0x0d, 0x66, 0x0a, 0x25, + 0x6d, 0xee, 0xdc, 0xb8, 0xe6, 0x92, 0x2e, 0xb4, 0xf4, 0xbd, 0x9c, 0xa3, 0x54, 0xb4, 0x65, 0x1a, + 0x5b, 0xc7, 0xe4, 0x2d, 0x1c, 0x05, 0xb1, 0x42, 0x19, 0xe1, 0x94, 0x87, 0x0a, 0x35, 0x96, 0x50, + 0xc7, 0xad, 0x7a, 0x0e, 0xfb, 0x37, 0xa1, 0xe5, 0xcd, 0xa6, 0x37, 0xc5, 0xc0, 0xca, 0x5b, 0x80, + 0xc8, 0x73, 0x80, 0xcd, 0xfd, 0xd0, 0xb6, 0x21, 0x14, 0x10, 0x72, 0x0c, 0x0d, 0x6b, 0x18, 0xba, + 0xef, 0x56, 0xbc, 0x16, 0xcb, 0x22, 0x72, 0x0a, 0xfb, 0x4c, 0xa8, 0x50, 0xe1, 0xf4, 0x6b, 0xaa, + 0x06, 0x8a, 0x1e, 0xec, 0x9c, 0xaf, 0xc4, 0x27, 0xaf, 0xa1, 0x73, 0x2d, 0xf9, 0x7d, 0xa8, 0xf0, + 0x12, 0x57, 0xb7, 0xab, 0x25, 0xd2, 0x8e, 0x39, 0x7b, 0x0b, 0x2d, 0xf3, 0xce, 0xb8, 0x4a, 0xe8, + 0xa1, 0x5b, 0xf1, 0xea, 0x6c, 0x0b, 0xd5, 0xfe, 0x63, 0xe1, 0x4c, 0x05, 0xf1, 0x14, 0x1f, 0xe8, + 0xa3, 0xb2, 0xff, 0xd6, 0x09, 
0xb6, 0xe1, 0xf4, 0x7e, 0x57, 0x01, 0x82, 0x24, 0x49, 0x71, 0x6a, + 0xee, 0x61, 0xdb, 0x5f, 0xd9, 0xcf, 0x52, 0xf2, 0x17, 0x85, 0xa6, 0xe6, 0x5e, 0x5f, 0x5c, 0x65, + 0xd6, 0xcc, 0x43, 0xf2, 0x12, 0x0e, 0x36, 0xfd, 0xe8, 0x7c, 0xd5, 0xe4, 0xcb, 0xa0, 0xde, 0x7f, + 0x83, 0xf2, 0x9e, 0x4f, 0x30, 0xb3, 0x66, 0x1e, 0x1a, 0x15, 0xec, 0xf2, 0x3b, 0x0b, 0x32, 0x33, + 0x16, 0x10, 0xf2, 0x04, 0xea, 0x83, 0x39, 0xc6, 0xca, 0xf8, 0xcf, 0x61, 0x36, 0xd0, 0x3e, 0x31, + 0x0b, 0xbd, 0xa7, 0x69, 0x7d, 0x92, 0xc7, 0xe4, 0x33, 0xc0, 0x8f, 0x70, 0xc1, 0xa7, 0xd6, 0x7d, + 0xad, 0x9d, 0xea, 0x14, 0xd8, 0xe4, 0x0b, 0xb4, 0x4d, 0x94, 0x79, 0xde, 0xd9, 0xb9, 0xb9, 0x48, + 0x27, 0xa7, 0xd0, 0xb9, 0xd0, 0x46, 0x5c, 0x4a, 0x9e, 0xa0, 0x79, 0x0e, 0xc0, 0x14, 0x38, 0xce, + 0xe5, 0x28, 0x67, 0xd9, 0x16, 0xbb, 0xac, 0x64, 0x7b, 0xb7, 0x92, 0x67, 0xef, 0x7e, 0xbe, 0x99, + 0x73, 0x75, 0x97, 0x8e, 0x35, 0xcb, 0xbf, 0x0b, 0x93, 0x3b, 0x3e, 0x11, 0x72, 0xa9, 0x1f, 0xd8, + 0x24, 0x5d, 0xf8, 0x5b, 0xef, 0xee, 0xb8, 0x61, 0x80, 0x0f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, + 0x77, 0x18, 0x20, 0xcd, 0x91, 0x05, 0x00, 0x00, } - -func (m *CARoots) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CARoots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CARoots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.QueryMeta != nil { - { - size, err := m.QueryMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Roots) > 0 { - for iNdEx := len(m.Roots) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Roots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.TrustDomain) > 0 { - i -= len(m.TrustDomain) - copy(dAtA[i:], m.TrustDomain) - i = encodeVarintConnect(dAtA, i, uint64(len(m.TrustDomain))) - i-- - dAtA[i] = 0x12 - } - if len(m.ActiveRootID) > 0 { - i -= len(m.ActiveRootID) - copy(dAtA[i:], m.ActiveRootID) - i = encodeVarintConnect(dAtA, i, uint64(len(m.ActiveRootID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CARoot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CARoot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CARoot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RaftIndex != nil { - { - size, err := m.RaftIndex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.PrivateKeyBits != 0 { - i = encodeVarintConnect(dAtA, i, uint64(m.PrivateKeyBits)) - i-- - dAtA[i] = 0x78 - } - if len(m.PrivateKeyType) > 0 { - i -= len(m.PrivateKeyType) - copy(dAtA[i:], 
m.PrivateKeyType) - i = encodeVarintConnect(dAtA, i, uint64(len(m.PrivateKeyType))) - i-- - dAtA[i] = 0x72 - } - if m.RotatedOutAt != nil { - { - size, err := m.RotatedOutAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - if m.Active { - i-- - if m.Active { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 - } - if len(m.SigningKey) > 0 { - i -= len(m.SigningKey) - copy(dAtA[i:], m.SigningKey) - i = encodeVarintConnect(dAtA, i, uint64(len(m.SigningKey))) - i-- - dAtA[i] = 0x5a - } - if len(m.SigningCert) > 0 { - i -= len(m.SigningCert) - copy(dAtA[i:], m.SigningCert) - i = encodeVarintConnect(dAtA, i, uint64(len(m.SigningCert))) - i-- - dAtA[i] = 0x52 - } - if len(m.IntermediateCerts) > 0 { - for iNdEx := len(m.IntermediateCerts) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.IntermediateCerts[iNdEx]) - copy(dAtA[i:], m.IntermediateCerts[iNdEx]) - i = encodeVarintConnect(dAtA, i, uint64(len(m.IntermediateCerts[iNdEx]))) - i-- - dAtA[i] = 0x4a - } - } - if len(m.RootCert) > 0 { - i -= len(m.RootCert) - copy(dAtA[i:], m.RootCert) - i = encodeVarintConnect(dAtA, i, uint64(len(m.RootCert))) - i-- - dAtA[i] = 0x42 - } - if m.NotAfter != nil { - { - size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.NotBefore != nil { - { - size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.ExternalTrustDomain) > 0 { - i -= len(m.ExternalTrustDomain) - copy(dAtA[i:], m.ExternalTrustDomain) - i = encodeVarintConnect(dAtA, i, uint64(len(m.ExternalTrustDomain))) - i-- - dAtA[i] = 0x2a - } - if len(m.SigningKeyID) > 0 { - i -= len(m.SigningKeyID) - copy(dAtA[i:], m.SigningKeyID) - i = encodeVarintConnect(dAtA, i, uint64(len(m.SigningKeyID))) - i-- - dAtA[i] = 0x22 - } - if m.SerialNumber != 0 { - i = encodeVarintConnect(dAtA, i, uint64(m.SerialNumber)) - i-- - dAtA[i] = 0x18 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintConnect(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintConnect(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *IssuedCert) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IssuedCert) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IssuedCert) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RaftIndex != nil { - { - size, err := m.RaftIndex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.EnterpriseMeta != nil { - { - size, err := m.EnterpriseMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.ValidBefore 
!= nil { - { - size, err := m.ValidBefore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.ValidAfter != nil { - { - size, err := m.ValidAfter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintConnect(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.AgentURI) > 0 { - i -= len(m.AgentURI) - copy(dAtA[i:], m.AgentURI) - i = encodeVarintConnect(dAtA, i, uint64(len(m.AgentURI))) - i-- - dAtA[i] = 0x3a - } - if len(m.Agent) > 0 { - i -= len(m.Agent) - copy(dAtA[i:], m.Agent) - i = encodeVarintConnect(dAtA, i, uint64(len(m.Agent))) - i-- - dAtA[i] = 0x32 - } - if len(m.ServiceURI) > 0 { - i -= len(m.ServiceURI) - copy(dAtA[i:], m.ServiceURI) - i = encodeVarintConnect(dAtA, i, uint64(len(m.ServiceURI))) - i-- - dAtA[i] = 0x2a - } - if len(m.Service) > 0 { - i -= len(m.Service) - copy(dAtA[i:], m.Service) - i = encodeVarintConnect(dAtA, i, uint64(len(m.Service))) - i-- - dAtA[i] = 0x22 - } - if len(m.PrivateKeyPEM) > 0 { - i -= len(m.PrivateKeyPEM) - copy(dAtA[i:], m.PrivateKeyPEM) - i = encodeVarintConnect(dAtA, i, uint64(len(m.PrivateKeyPEM))) - i-- - dAtA[i] = 0x1a - } - if len(m.CertPEM) > 0 { - i -= len(m.CertPEM) - copy(dAtA[i:], m.CertPEM) - i = encodeVarintConnect(dAtA, i, uint64(len(m.CertPEM))) - i-- - dAtA[i] = 0x12 - } - if len(m.SerialNumber) > 0 { - i -= len(m.SerialNumber) - copy(dAtA[i:], m.SerialNumber) - i = encodeVarintConnect(dAtA, i, uint64(len(m.SerialNumber))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintConnect(dAtA []byte, offset int, v uint64) int { - offset -= sovConnect(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CARoots) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ActiveRootID) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.TrustDomain) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if len(m.Roots) > 0 { - for _, e := range m.Roots { - l = e.Size() - n += 1 + l + sovConnect(uint64(l)) - } - } - if m.QueryMeta != nil { - l = m.QueryMeta.Size() - n += 1 + l + sovConnect(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CARoot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if m.SerialNumber != 0 { - n += 1 + sovConnect(uint64(m.SerialNumber)) - } - l = len(m.SigningKeyID) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.ExternalTrustDomain) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if m.NotBefore != nil { - l = m.NotBefore.Size() - n += 1 + l + sovConnect(uint64(l)) - } - if m.NotAfter != nil { - l = m.NotAfter.Size() - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.RootCert) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if len(m.IntermediateCerts) > 0 { - for _, s := range m.IntermediateCerts { - l = len(s) - n += 1 + l + sovConnect(uint64(l)) - } - } - l = len(m.SigningCert) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.SigningKey) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if m.Active { - n += 2 - } - if m.RotatedOutAt != nil { - l = m.RotatedOutAt.Size() - n += 1 + l + 
sovConnect(uint64(l)) - } - l = len(m.PrivateKeyType) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if m.PrivateKeyBits != 0 { - n += 1 + sovConnect(uint64(m.PrivateKeyBits)) - } - if m.RaftIndex != nil { - l = m.RaftIndex.Size() - n += 2 + l + sovConnect(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *IssuedCert) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SerialNumber) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.CertPEM) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.PrivateKeyPEM) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.ServiceURI) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.Agent) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - l = len(m.AgentURI) - if l > 0 { - n += 1 + l + sovConnect(uint64(l)) - } - if m.ValidAfter != nil { - l = m.ValidAfter.Size() - n += 1 + l + sovConnect(uint64(l)) - } - if m.ValidBefore != nil { - l = m.ValidBefore.Size() - n += 1 + l + sovConnect(uint64(l)) - } - if m.EnterpriseMeta != nil { - l = m.EnterpriseMeta.Size() - n += 1 + l + sovConnect(uint64(l)) - } - if m.RaftIndex != nil { - l = m.RaftIndex.Size() - n += 1 + l + sovConnect(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovConnect(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozConnect(x uint64) (n int) { - return sovConnect(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CARoots) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CARoots: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CARoots: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveRootID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ActiveRootID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TrustDomain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TrustDomain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roots", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roots = append(m.Roots, &CARoot{}) - if err := m.Roots[len(m.Roots)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.QueryMeta == nil { - m.QueryMeta = &pbcommongogo.QueryMeta{} - } - if err := m.QueryMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConnect(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConnect - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CARoot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CARoot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CARoot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SerialNumber", wireType) - } - m.SerialNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SerialNumber |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SigningKeyID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SigningKeyID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrustDomain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift 
- if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalTrustDomain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NotBefore == nil { - m.NotBefore = &types.Timestamp{} - } - if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NotAfter == nil { - m.NotAfter = &types.Timestamp{} - } - if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootCert", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RootCert = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IntermediateCerts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IntermediateCerts = append(m.IntermediateCerts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SigningCert", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SigningCert = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SigningKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SigningKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Active = bool(v != 0) - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RotatedOutAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RotatedOutAt == nil { - m.RotatedOutAt = &types.Timestamp{} - } - if err := m.RotatedOutAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrivateKeyType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrivateKeyType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrivateKeyBits", wireType) - } - m.PrivateKeyBits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrivateKeyBits |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RaftIndex == nil { - m.RaftIndex = &pbcommongogo.RaftIndex{} - } - if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConnect(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConnect - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IssuedCert) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IssuedCert: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IssuedCert: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SerialNumber", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SerialNumber = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CertPEM", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CertPEM = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrivateKeyPEM", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrivateKeyPEM = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Agent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidAfter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValidAfter == nil { - m.ValidAfter = &types.Timestamp{} - } - if err := 
m.ValidAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidBefore", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValidBefore == nil { - m.ValidBefore = &types.Timestamp{} - } - if err := m.ValidBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnterpriseMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EnterpriseMeta == nil { - m.EnterpriseMeta = &pbcommongogo.EnterpriseMeta{} - } - if err := m.EnterpriseMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowConnect - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthConnect - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthConnect - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RaftIndex == nil { - m.RaftIndex = &pbcommongogo.RaftIndex{} - } - if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipConnect(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthConnect - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipConnect(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowConnect - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowConnect - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowConnect - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthConnect - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupConnect - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthConnect - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthConnect = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowConnect = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupConnect = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbconnect/connect.proto b/proto/pbconnect/connect.proto index 129fa5385..37ba5bcc9 100644 --- a/proto/pbconnect/connect.proto +++ b/proto/pbconnect/connect.proto @@ -5,7 +5,7 @@ package connect; option go_package = "github.com/hashicorp/consul/proto/pbconnect"; import "google/protobuf/timestamp.proto"; -import "proto/pbcommongogo/common.proto"; +import "proto/pbcommon/common.proto"; // CARoots is the list of all currently trusted CA Roots. // @@ -50,7 +50,7 @@ message CARoots { // QueryMeta here is mainly used to contain the latest Raft Index that could // be used to perform a blocking query. // mog: func-to=QueryMetaTo func-from=QueryMetaFrom - commongogo.QueryMeta QueryMeta = 4; + common.QueryMeta QueryMeta = 4; } // CARoot is the trusted CA Root. @@ -87,9 +87,9 @@ message CARoot { string ExternalTrustDomain = 5; // Time validity bounds. - // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto google.protobuf.Timestamp NotBefore = 6; - // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto google.protobuf.Timestamp NotAfter = 7; // RootCert is the PEM-encoded public certificate. @@ -114,7 +114,7 @@ message CARoot { // RotatedOutAt is the time at which this CA was removed from the state. // This will only be set on roots that have been rotated out from being the // active root. - // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto google.protobuf.Timestamp RotatedOutAt = 13; // PrivateKeyType is the type of the private key used to sign certificates. 
It @@ -129,9 +129,11 @@ message CARoot { int32 PrivateKeyBits = 15; // mog: func-to=RaftIndexTo func-from=RaftIndexFrom - commongogo.RaftIndex RaftIndex = 16; + common.RaftIndex RaftIndex = 16; } +// RaftIndex is used to track the index used while creating +// or modifying a given struct type. // // mog annotation: // @@ -161,15 +163,15 @@ message IssuedCert { // ValidAfter and ValidBefore are the validity periods for the // certificate. - // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto google.protobuf.Timestamp ValidAfter = 8; - // mog: func-to=structs.TimeFromProtoGogo func-from=structs.TimeToProtoGogo + // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto google.protobuf.Timestamp ValidBefore = 9; // EnterpriseMeta is the Consul Enterprise specific metadata // mog: func-to=EnterpriseMetaTo func-from=EnterpriseMetaFrom - commongogo.EnterpriseMeta EnterpriseMeta = 10; + common.EnterpriseMeta EnterpriseMeta = 10; // mog: func-to=RaftIndexTo func-from=RaftIndexFrom - commongogo.RaftIndex RaftIndex = 11; + common.RaftIndex RaftIndex = 11; } \ No newline at end of file From 31baed248decd708e4a12978d5bee4f22c3a7a3c Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Wed, 23 Mar 2022 11:46:56 -0400 Subject: [PATCH 009/785] docs: make gossip threat model more visible --- website/content/docs/security/security-models/core.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/content/docs/security/security-models/core.mdx b/website/content/docs/security/security-models/core.mdx index b11f5da30..f408a57f6 100644 --- a/website/content/docs/security/security-models/core.mdx +++ b/website/content/docs/security/security-models/core.mdx @@ -407,7 +407,9 @@ The following are not part of the threat model for client agents: configured identity, and extract information from Consul when ACLs are disabled. - **DNS** - Malicious actors with access to a Consul agent DNS endpoint may be able to extract service catalog - information. Gossip - Malicious actors with access to a Consul agent Serf gossip endpoint may be able to impersonate + information. + +- **Gossip** - Malicious actors with access to a Consul agent Serf gossip endpoint may be able to impersonate agents within a datacenter. Gossip encryption should be enabled, with a regularly rotated gossip key. 
- **Proxy (xDS)** - Malicious actors with access to a Consul agent xDS endpoint may be able to extract Envoy service From 98b733e41aab25e2eb7a56dffe132e5be5a0b81c Mon Sep 17 00:00:00 2001 From: Eric Date: Wed, 23 Mar 2022 12:10:03 -0400 Subject: [PATCH 010/785] remove gogo from pbservice --- agent/grpc/private/resolver/registry.go | 2 +- agent/grpc/private/resolver/resolver.go | 8 +- .../services/subscribe/subscribe_test.go | 124 +- agent/rpcclient/health/view_test.go | 9 +- agent/submatview/streaming_test.go | 10 +- build-support/scripts/proto-gen-entry.sh | 6 + go.mod | 2 +- go.sum | 4 +- proto/pbcommon/common_oss.go | 2 +- proto/pbservice/convert.go | 139 +- proto/pbservice/convert_oss.go | 8 +- proto/pbservice/convert_pbstruct.go | 3 +- proto/pbservice/healthcheck.gen.go | 150 +- proto/pbservice/healthcheck.pb.go | 4352 +++-------------- proto/pbservice/healthcheck.proto | 58 +- proto/pbservice/ids.go | 12 +- proto/pbservice/ids_test.go | 6 +- proto/pbservice/node.gen.go | 58 +- proto/pbservice/node.pb.go | 2380 ++------- proto/pbservice/node.proto | 28 +- proto/pbservice/service.gen.go | 184 +- proto/pbservice/service.pb.go | 4272 +++------------- proto/pbservice/service.proto | 41 +- proto/pbsubscribe/subscribe.pb.go | 1331 +---- 24 files changed, 2059 insertions(+), 11130 deletions(-) diff --git a/agent/grpc/private/resolver/registry.go b/agent/grpc/private/resolver/registry.go index d305b607d..14c93af2d 100644 --- a/agent/grpc/private/resolver/registry.go +++ b/agent/grpc/private/resolver/registry.go @@ -16,7 +16,7 @@ type registry struct { byAuthority map[string]*ServerResolverBuilder } -func (r *registry) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { +func (r *registry) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r.lock.RLock() defer r.lock.RUnlock() res, ok := r.byAuthority[target.Authority] diff --git a/agent/grpc/private/resolver/resolver.go b/agent/grpc/private/resolver/resolver.go index e77ee568d..c0c3b8938 100644 --- a/agent/grpc/private/resolver/resolver.go +++ b/agent/grpc/private/resolver/resolver.go @@ -85,7 +85,7 @@ func (s *ServerResolverBuilder) ServerForGlobalAddr(globalAddr string) (*metadat // Build returns a new serverResolver for the given ClientConn. The resolver // will keep the ClientConn's state updated based on updates from Serf. 
-func (s *ServerResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) { +func (s *ServerResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { s.lock.Lock() defer s.lock.Unlock() @@ -221,7 +221,6 @@ func (s *ServerResolverBuilder) getDCAddrs(dc string) []resolver.Address { addrs = append(addrs, resolver.Address{ // NOTE: the address persisted here is only dialable using our custom dialer Addr: DCPrefix(server.Datacenter, server.Addr.String()), - Type: resolver.Backend, ServerName: server.Name, }) } @@ -294,14 +293,14 @@ func (r *serverResolver) Close() { } // ResolveNow is not used -func (*serverResolver) ResolveNow(resolver.ResolveNowOption) {} +func (*serverResolver) ResolveNow(options resolver.ResolveNowOptions) {} type leaderResolver struct { globalAddr string clientConn resolver.ClientConn } -func (l leaderResolver) ResolveNow(resolver.ResolveNowOption) {} +func (l leaderResolver) ResolveNow(resolver.ResolveNowOptions) {} func (l leaderResolver) Close() {} @@ -313,7 +312,6 @@ func (l leaderResolver) updateClientConn() { { // NOTE: the address persisted here is only dialable using our custom dialer Addr: l.globalAddr, - Type: resolver.Backend, ServerName: "leader", }, } diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index a084b6f55..0a52c0f49 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -3,6 +3,8 @@ package subscribe import ( "context" "errors" + "github.com/golang/protobuf/ptypes/duration" + "github.com/hashicorp/consul/proto/pbcommon" "io" "net" "testing" @@ -154,12 +156,14 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Port: 8080, Weights: &pbservice.Weights{Passing: 1, Warning: 1}, // Sad empty state - Proxy: pbservice.ConnectProxyConfig{ - MeshGateway: pbservice.MeshGatewayConfig{}, - Expose: pbservice.ExposeConfig{}, + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, }, + Connect: &pbservice.ServiceConnect{}, RaftIndex: raftIndex(ids, "reg2", "reg2"), - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, }, }, }, @@ -185,12 +189,14 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Port: 8080, Weights: &pbservice.Weights{Passing: 1, Warning: 1}, // Sad empty state - Proxy: pbservice.ConnectProxyConfig{ - MeshGateway: pbservice.MeshGatewayConfig{}, - Expose: pbservice.ExposeConfig{}, + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, }, + Connect: &pbservice.ServiceConnect{}, RaftIndex: raftIndex(ids, "reg3", "reg3"), - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, }, }, }, @@ -235,12 +241,14 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Port: 8080, Weights: &pbservice.Weights{Passing: 1, Warning: 1}, // Sad empty state - Proxy: pbservice.ConnectProxyConfig{ - MeshGateway: pbservice.MeshGatewayConfig{}, - Expose: pbservice.ExposeConfig{}, + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + 
TransparentProxy: &pbservice.TransparentProxyConfig{}, }, + Connect: &pbservice.ServiceConnect{}, RaftIndex: raftIndex(ids, "reg3", "reg3"), - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, }, Checks: []*pbservice.HealthCheck{ { @@ -251,7 +259,13 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { ServiceID: "redis1", ServiceName: "redis", RaftIndex: raftIndex(ids, "update", "update"), - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + Definition: &pbservice.HealthCheckDefinition{ + Interval: &duration.Duration{}, + Timeout: &duration.Duration{}, + DeregisterCriticalServiceAfter: &duration.Duration{}, + TTL: &duration.Duration{}, + }, }, }, }, @@ -395,8 +409,8 @@ func newCounter() *counter { return &counter{labels: make(map[string]uint64)} } -func raftIndex(ids *counter, created, modified string) pbcommongogo.RaftIndex { - return pbcommongogo.RaftIndex{ +func raftIndex(ids *counter, created, modified string) *pbcommon.RaftIndex { + return &pbcommon.RaftIndex{ CreateIndex: ids.For(created), ModifyIndex: ids.For(modified), } @@ -507,11 +521,13 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { Port: 8080, Weights: &pbservice.Weights{Passing: 1, Warning: 1}, // Sad empty state - Proxy: pbservice.ConnectProxyConfig{ - MeshGateway: pbservice.MeshGatewayConfig{}, - Expose: pbservice.ExposeConfig{}, + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, }, - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + Connect: &pbservice.ServiceConnect{}, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, RaftIndex: raftIndex(ids, "reg2", "reg2"), }, }, @@ -538,11 +554,13 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { Port: 8080, Weights: &pbservice.Weights{Passing: 1, Warning: 1}, // Sad empty state - Proxy: pbservice.ConnectProxyConfig{ - MeshGateway: pbservice.MeshGatewayConfig{}, - Expose: pbservice.ExposeConfig{}, + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, }, - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + Connect: &pbservice.ServiceConnect{}, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, RaftIndex: raftIndex(ids, "reg3", "reg3"), }, }, @@ -589,11 +607,13 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { RaftIndex: raftIndex(ids, "reg3", "reg3"), Weights: &pbservice.Weights{Passing: 1, Warning: 1}, // Sad empty state - Proxy: pbservice.ConnectProxyConfig{ - MeshGateway: pbservice.MeshGatewayConfig{}, - Expose: pbservice.ExposeConfig{}, + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, }, - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + Connect: &pbservice.ServiceConnect{}, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, }, Checks: []*pbservice.HealthCheck{ { @@ -604,7 +624,13 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { ServiceID: "redis1", ServiceName: "redis", RaftIndex: raftIndex(ids, "update", "update"), - EnterpriseMeta: pbcommongogo.DefaultEnterpriseMeta, + EnterpriseMeta: pbcommon.DefaultEnterpriseMeta, + Definition: 
&pbservice.HealthCheckDefinition{ + Interval: &duration.Duration{}, + Timeout: &duration.Duration{}, + DeregisterCriticalServiceAfter: &duration.Duration{}, + TTL: &duration.Duration{}, + }, }, }, }, @@ -986,8 +1012,18 @@ func TestNewEventFromSteamEvent(t *testing.T) { ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ Op: pbsubscribe.CatalogOp_Register, CheckServiceNode: &pbservice.CheckServiceNode{ - Node: &pbservice.Node{Node: "node1"}, - Service: &pbservice.NodeService{Service: "web1"}, + Node: &pbservice.Node{Node: "node1", RaftIndex: &pbcommon.RaftIndex{}}, + Service: &pbservice.NodeService{ + Service: "web1", + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, + }, + Connect: &pbservice.ServiceConnect{}, + EnterpriseMeta: &pbcommon.EnterpriseMeta{}, + RaftIndex: &pbcommon.RaftIndex{}, + }, }, }, }, @@ -998,8 +1034,18 @@ func TestNewEventFromSteamEvent(t *testing.T) { ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ Op: pbsubscribe.CatalogOp_Deregister, CheckServiceNode: &pbservice.CheckServiceNode{ - Node: &pbservice.Node{Node: "node2"}, - Service: &pbservice.NodeService{Service: "web1"}, + Node: &pbservice.Node{Node: "node2", RaftIndex: &pbcommon.RaftIndex{}}, + Service: &pbservice.NodeService{ + Service: "web1", + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, + }, + Connect: &pbservice.ServiceConnect{}, + EnterpriseMeta: &pbcommon.EnterpriseMeta{}, + RaftIndex: &pbcommon.RaftIndex{}, + }, }, }, }, @@ -1027,8 +1073,18 @@ func TestNewEventFromSteamEvent(t *testing.T) { ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ Op: pbsubscribe.CatalogOp_Register, CheckServiceNode: &pbservice.CheckServiceNode{ - Node: &pbservice.Node{Node: "node1"}, - Service: &pbservice.NodeService{Service: "web1"}, + Node: &pbservice.Node{Node: "node1", RaftIndex: &pbcommon.RaftIndex{}}, + Service: &pbservice.NodeService{ + Service: "web1", + Proxy: &pbservice.ConnectProxyConfig{ + MeshGateway: &pbservice.MeshGatewayConfig{}, + Expose: &pbservice.ExposeConfig{}, + TransparentProxy: &pbservice.TransparentProxyConfig{}, + }, + Connect: &pbservice.ServiceConnect{}, + EnterpriseMeta: &pbcommon.EnterpriseMeta{}, + RaftIndex: &pbcommon.RaftIndex{}, + }, }, }, }, diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index bc5795c05..9dc00150f 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/hashicorp/consul/proto/pbcommon" "strings" "testing" "time" @@ -568,11 +569,11 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub Op: pbsubscribe.CatalogOp_Register, CheckServiceNode: &pbservice.CheckServiceNode{ Node: &pbservice.Node{ - ID: nodeID, + ID: string(nodeID), Node: node, Address: addr, Datacenter: "dc1", - RaftIndex: pbcommongogo.RaftIndex{ + RaftIndex: &pbcommon.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -581,7 +582,7 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub ID: svc, Service: svc, Port: 8080, - RaftIndex: pbcommongogo.RaftIndex{ + RaftIndex: &pbcommon.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -612,7 +613,7 @@ func newEventServiceHealthDeregister(index uint64, nodeNum int, svc string) *pbs Passing: 1, Warning: 1, }, - 
RaftIndex: pbcommongogo.RaftIndex{ + RaftIndex: &pbcommon.RaftIndex{ // The original insertion index since a delete doesn't update // this. This magic value came from state store tests where we // setup at index 10 and then mutate at index 100. It can be diff --git a/agent/submatview/streaming_test.go b/agent/submatview/streaming_test.go index be484cac6..764cd47dc 100644 --- a/agent/submatview/streaming_test.go +++ b/agent/submatview/streaming_test.go @@ -3,11 +3,11 @@ package submatview import ( "context" "fmt" + "github.com/hashicorp/consul/proto/pbcommon" "sync" "google.golang.org/grpc" - "github.com/hashicorp/consul/proto/pbcommongogo" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" @@ -116,11 +116,11 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub Op: pbsubscribe.CatalogOp_Register, CheckServiceNode: &pbservice.CheckServiceNode{ Node: &pbservice.Node{ - ID: nodeID, + ID: string(nodeID), Node: node, Address: addr, Datacenter: "dc1", - RaftIndex: pbcommongogo.RaftIndex{ + RaftIndex: &pbcommon.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -129,7 +129,7 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub ID: svc, Service: svc, Port: 8080, - RaftIndex: pbcommongogo.RaftIndex{ + RaftIndex: &pbcommon.RaftIndex{ CreateIndex: index, ModifyIndex: index, }, @@ -160,7 +160,7 @@ func newEventServiceHealthDeregister(index uint64, nodeNum int, svc string) *pbs Passing: 1, Warning: 1, }, - RaftIndex: pbcommongogo.RaftIndex{ + RaftIndex: &pbcommon.RaftIndex{ // The original insertion index since a delete doesn't update // this. This magic value came from state store tests where we // setup at index 10 and then mutate at index 100. 
It can be diff --git a/build-support/scripts/proto-gen-entry.sh b/build-support/scripts/proto-gen-entry.sh index 4deb2bf29..639f725cc 100644 --- a/build-support/scripts/proto-gen-entry.sh +++ b/build-support/scripts/proto-gen-entry.sh @@ -14,6 +14,12 @@ elif [[ "$FILENAME" =~ .*pbconfig/.* ]]; then elif [[ "$FILENAME" =~ .*pbautoconf/.* ]]; then echo "$FILENAME no gogo" ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 +elif [[ "$FILENAME" =~ .*pbservice/.* ]]; then + echo "$FILENAME no gogo" + ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 +elif [[ "$FILENAME" =~ .*pbsubscribe/.* ]]; then + echo "$FILENAME no gogo" + ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 else echo "$FILENAME gogo" ./build-support/scripts/proto-gen.sh $1 $2 $3 diff --git a/go.mod b/go.mod index 17fe9d28d..628a30202 100644 --- a/go.mod +++ b/go.mod @@ -94,7 +94,7 @@ require ( google.golang.org/api v0.9.0 // indirect google.golang.org/appengine v1.6.0 // indirect google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 - google.golang.org/grpc v1.25.1 + google.golang.org/grpc v1.27.1 gopkg.in/square/go-jose.v2 v2.5.1 gotest.tools/v3 v3.0.3 k8s.io/api v0.18.2 diff --git a/go.sum b/go.sum index 085684975..688dd2b8f 100644 --- a/go.sum +++ b/go.sum @@ -130,6 +130,7 @@ github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= @@ -714,8 +715,9 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/proto/pbcommon/common_oss.go b/proto/pbcommon/common_oss.go index e96c1a4b3..2dc2026e8 100644 --- a/proto/pbcommon/common_oss.go +++ b/proto/pbcommon/common_oss.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -var DefaultEnterpriseMeta = EnterpriseMeta{} +var DefaultEnterpriseMeta = &EnterpriseMeta{} func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *EnterpriseMeta { return &EnterpriseMeta{} 
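The pointer-valued `DefaultEnterpriseMeta` change above is one instance of the commit's broader pattern, which the conversion helpers in `proto/pbservice/convert.go` below follow as well: value-returning constructors are replaced by nil-safe, in-place converters that write into a caller-supplied destination pointer. A minimal sketch of that calling pattern is shown here; the type and function names are illustrative stand-ins, not the actual generated API.

```go
package example

// pbFoo stands in for a plain-protobuf message type; structsFoo stands in
// for the corresponding agent/structs type. Both are hypothetical.
type pbFoo struct{ Name string }
type structsFoo struct{ Name string }

// New-style converter: nil-safe and in-place, filling a destination the
// caller allocates. This composes cleanly now that nested message fields
// are pointers rather than embedded values.
func FooFromStructs(t *structsFoo, s *pbFoo) {
	if s == nil || t == nil {
		return
	}
	s.Name = t.Name
}

// newPBFoo shows the caller-side pattern used throughout this commit:
// allocate the destination, convert in place, return the pointer.
func newPBFoo(t *structsFoo) *pbFoo {
	s := new(pbFoo)
	FooFromStructs(t, s)
	return s
}
```

Compared with the earlier value-returning helpers, this shape avoids copying large structs, lets `nil` propagate naturally for optional nested messages, and matches how the mog-generated `.gen.go` files in this commit invoke one another.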
diff --git a/proto/pbservice/convert.go b/proto/pbservice/convert.go index a68c22b8f..c981d1cb3 100644 --- a/proto/pbservice/convert.go +++ b/proto/pbservice/convert.go @@ -2,24 +2,28 @@ package pbservice import ( "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbcommongogo" + "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/types" ) -func RaftIndexToStructs(s pbcommongogo.RaftIndex) structs.RaftIndex { +type CheckIDType = types.CheckID +type NodeIDType = types.NodeID + +func RaftIndexToStructs(s *pbcommon.RaftIndex) structs.RaftIndex { return structs.RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, } } -func NewRaftIndexFromStructs(s structs.RaftIndex) pbcommongogo.RaftIndex { - return pbcommongogo.RaftIndex{ +func NewRaftIndexFromStructs(s structs.RaftIndex) *pbcommon.RaftIndex { + return &pbcommon.RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, } } -func MapHeadersToStructs(s map[string]HeaderValue) map[string][]string { +func MapHeadersToStructs(s map[string]*HeaderValue) map[string][]string { t := make(map[string][]string, len(s)) for k, v := range s { t[k] = v.Value @@ -27,10 +31,10 @@ func MapHeadersToStructs(s map[string]HeaderValue) map[string][]string { return t } -func NewMapHeadersFromStructs(t map[string][]string) map[string]HeaderValue { - s := make(map[string]HeaderValue, len(t)) +func NewMapHeadersFromStructs(t map[string][]string) map[string]*HeaderValue { + s := make(map[string]*HeaderValue, len(t)) for k, v := range t { - s[k] = HeaderValue{Value: v} + s[k] = &HeaderValue{Value: v} } return s } @@ -42,23 +46,23 @@ func CheckServiceNodeToStructs(s *CheckServiceNode) (*structs.CheckServiceNode, } var t structs.CheckServiceNode if s.Node != nil { - n := NodeToStructs(*s.Node) - t.Node = &n + n := new(structs.Node) + NodeToStructs(s.Node, n) + t.Node = n } if s.Service != nil { - r := NodeServiceToStructs(*s.Service) - t.Service = &r + r := new(structs.NodeService) + NodeServiceToStructs(s.Service, r) + t.Service = r } t.Checks = make(structs.HealthChecks, len(s.Checks)) for i, c := range s.Checks { if c == nil { continue } - h, err := HealthCheckToStructs(*c) - if err != nil { - return &t, err - } - t.Checks[i] = &h + h := new(structs.HealthCheck) + HealthCheckToStructs(c, h) + t.Checks[i] = h } return &t, nil } @@ -70,20 +74,23 @@ func NewCheckServiceNodeFromStructs(t *structs.CheckServiceNode) *CheckServiceNo } var s CheckServiceNode if t.Node != nil { - n := NewNodeFromStructs(*t.Node) - s.Node = &n + n := new(Node) + NodeFromStructs(t.Node, n) + s.Node = n } if t.Service != nil { - r := NewNodeServiceFromStructs(*t.Service) - s.Service = &r + r := new(NodeService) + NodeServiceFromStructs(t.Service, r) + s.Service = r } s.Checks = make([]*HealthCheck, len(t.Checks)) for i, c := range t.Checks { if c == nil { continue } - h := NewHealthCheckFromStructs(*c) - s.Checks[i] = &h + h := new(HealthCheck) + HealthCheckFromStructs(c, h) + s.Checks[i] = h } return &s } @@ -111,7 +118,7 @@ func NewWeightsPtrFromStructs(t *structs.Weights) *Weights { } // TODO: handle this with mog -func MapStringServiceAddressToStructs(s map[string]ServiceAddress) map[string]structs.ServiceAddress { +func MapStringServiceAddressToStructs(s map[string]*ServiceAddress) map[string]structs.ServiceAddress { t := make(map[string]structs.ServiceAddress, len(s)) for k, v := range s { t[k] = structs.ServiceAddress{Address: v.Address, Port: int(v.Port)} @@ -120,64 +127,70 @@ func 
MapStringServiceAddressToStructs(s map[string]ServiceAddress) map[string]st } // TODO: handle this with mog -func NewMapStringServiceAddressFromStructs(t map[string]structs.ServiceAddress) map[string]ServiceAddress { - s := make(map[string]ServiceAddress, len(t)) +func NewMapStringServiceAddressFromStructs(t map[string]structs.ServiceAddress) map[string]*ServiceAddress { + s := make(map[string]*ServiceAddress, len(t)) for k, v := range t { - s[k] = ServiceAddress{Address: v.Address, Port: int32(v.Port)} + s[k] = &ServiceAddress{Address: v.Address, Port: int32(v.Port)} } return s } // TODO: handle this with mog -func ExposePathSliceToStructs(s []ExposePath) []structs.ExposePath { +func ExposePathSliceToStructs(s []*ExposePath) []structs.ExposePath { t := make([]structs.ExposePath, len(s)) for i, v := range s { - t[i] = ExposePathToStructs(v) + e := new(structs.ExposePath) + ExposePathToStructs(v, e) + t[i] = *e } return t } // TODO: handle this with mog -func NewExposePathSliceFromStructs(t []structs.ExposePath) []ExposePath { - s := make([]ExposePath, len(t)) +func NewExposePathSliceFromStructs(t []structs.ExposePath) []*ExposePath { + s := make([]*ExposePath, len(t)) for i, v := range t { - s[i] = NewExposePathFromStructs(v) + ep := new(ExposePath) + ExposePathFromStructs(&v, ep) + s[i] = ep } return s } // TODO: handle this with mog -func UpstreamsToStructs(s []Upstream) structs.Upstreams { +func UpstreamsToStructs(s []*Upstream) structs.Upstreams { t := make(structs.Upstreams, len(s)) for i, v := range s { - t[i] = UpstreamToStructs(v) + u := new(structs.Upstream) + UpstreamToStructs(v, u) + t[i] = *u } return t } // TODO: handle this with mog -func NewUpstreamsFromStructs(t structs.Upstreams) []Upstream { - s := make([]Upstream, len(t)) +func NewUpstreamsFromStructs(t structs.Upstreams) []*Upstream { + s := make([]*Upstream, len(t)) for i, v := range t { - s[i] = NewUpstreamFromStructs(v) + u := new(Upstream) + UpstreamFromStructs(&v, u) + s[i] = u } return s } // TODO: handle this with mog -func CheckTypesToStructs(s []*CheckType) (structs.CheckTypes, error) { +func CheckTypesToStructs(s []*CheckType) structs.CheckTypes { t := make(structs.CheckTypes, len(s)) for i, v := range s { if v == nil { continue } - newV, err := CheckTypeToStructs(*v) - if err != nil { - return t, err - } - t[i] = &newV + c := new(structs.CheckType) + CheckTypeToStructs(v, c) + t[i] = c } - return t, nil + return t } // TODO: handle this with mog @@ -187,8 +200,9 @@ func NewCheckTypesFromStructs(t structs.CheckTypes) []*CheckType { if v == nil { continue } - newV := NewCheckTypeFromStructs(*v) - s[i] = &newV + newV := new(CheckType) + CheckTypeFromStructs(v, newV) + s[i] = newV } return s } @@ -198,8 +212,9 @@ func ConnectProxyConfigPtrToStructs(s *ConnectProxyConfig) *structs.ConnectProxy if s == nil { return nil } - t := ConnectProxyConfigToStructs(*s) - return &t + c := new(structs.ConnectProxyConfig) + ConnectProxyConfigToStructs(s, c) + return c } // TODO: handle this with mog @@ -207,8 +222,9 @@ func NewConnectProxyConfigPtrFromStructs(t *structs.ConnectProxyConfig) *Connect if t == nil { return nil } - s := NewConnectProxyConfigFromStructs(*t) - return &s + cp := new(ConnectProxyConfig) + ConnectProxyConfigFromStructs(t, cp) + return cp } // TODO: handle this with mog @@ -216,8 +232,9 @@ func ServiceConnectPtrToStructs(s *ServiceConnect) *structs.ServiceConnect { if s == nil { return nil } - t := ServiceConnectToStructs(*s) - return &t + sc := new(structs.ServiceConnect) + ServiceConnectToStructs(s, sc) 
+ return sc } // TODO: handle this with mog @@ -225,8 +242,9 @@ func NewServiceConnectPtrFromStructs(t *structs.ServiceConnect) *ServiceConnect if t == nil { return nil } - s := NewServiceConnectFromStructs(*t) - return &s + sc := new(ServiceConnect) + ServiceConnectFromStructs(t, sc) + return sc } // TODO: handle this with mog @@ -234,11 +252,9 @@ func ServiceDefinitionPtrToStructs(s *ServiceDefinition) *structs.ServiceDefinit if s == nil { return nil } - t, err := ServiceDefinitionToStructs(*s) - if err != nil { - return nil - } - return &t + sd := new(structs.ServiceDefinition) + ServiceDefinitionToStructs(s, sd) + return sd } // TODO: handle this with mog @@ -246,6 +262,7 @@ func NewServiceDefinitionPtrFromStructs(t *structs.ServiceDefinition) *ServiceDe if t == nil { return nil } - s := NewServiceDefinitionFromStructs(*t) - return &s + sd := new(ServiceDefinition) + ServiceDefinitionFromStructs(t, sd) + return sd } diff --git a/proto/pbservice/convert_oss.go b/proto/pbservice/convert_oss.go index 214cf69ad..4efb78bef 100644 --- a/proto/pbservice/convert_oss.go +++ b/proto/pbservice/convert_oss.go @@ -5,13 +5,13 @@ package pbservice import ( "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbcommongogo" + "github.com/hashicorp/consul/proto/pbcommon" ) -func EnterpriseMetaToStructs(_ pbcommongogo.EnterpriseMeta) structs.EnterpriseMeta { +func EnterpriseMetaToStructs(_ *pbcommon.EnterpriseMeta) structs.EnterpriseMeta { return structs.EnterpriseMeta{} } -func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) pbcommongogo.EnterpriseMeta { - return pbcommongogo.EnterpriseMeta{} +func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *pbcommon.EnterpriseMeta { + return &pbcommon.EnterpriseMeta{} } diff --git a/proto/pbservice/convert_pbstruct.go b/proto/pbservice/convert_pbstruct.go index dbb0ea5a1..1a09d81ef 100644 --- a/proto/pbservice/convert_pbstruct.go +++ b/proto/pbservice/convert_pbstruct.go @@ -4,7 +4,8 @@ import ( fmt "fmt" "reflect" - types "github.com/gogo/protobuf/types" + //TODO(gogo-remove): remove the types alias + types "github.com/golang/protobuf/ptypes/struct" ) // ProtobufTypesStructToMapStringInterface converts a protobuf/types.Struct into a diff --git a/proto/pbservice/healthcheck.gen.go b/proto/pbservice/healthcheck.gen.go index 345b13719..6bdb63b4b 100644 --- a/proto/pbservice/healthcheck.gen.go +++ b/proto/pbservice/healthcheck.gen.go @@ -2,14 +2,13 @@ package pbservice -import ( - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbutil" -) +import "github.com/hashicorp/consul/agent/structs" -func CheckTypeToStructs(s CheckType) (structs.CheckType, error) { - var t structs.CheckType - t.CheckID = s.CheckID +func CheckTypeToStructs(s *CheckType, t *structs.CheckType) { + if s == nil { + return + } + t.CheckID = CheckIDType(s.CheckID) t.Name = s.Name t.Status = s.Status t.Notes = s.Notes @@ -21,12 +20,7 @@ func CheckTypeToStructs(s CheckType) (structs.CheckType, error) { t.Method = s.Method t.Body = s.Body t.TCP = s.TCP - interval, err := pbutil.DurationFromProto(&s.Interval) - if err != nil { - return t, err - } - t.Interval = interval - + t.Interval = structs.DurationFromProto(s.Interval) t.AliasNode = s.AliasNode t.AliasService = s.AliasService t.DockerContainerID = s.DockerContainerID @@ -35,32 +29,21 @@ func CheckTypeToStructs(s CheckType) (structs.CheckType, error) { t.GRPCUseTLS = s.GRPCUseTLS t.TLSServerName = s.TLSServerName t.TLSSkipVerify = s.TLSSkipVerify - timeout, err := 
pbutil.DurationFromProto(&s.Timeout) - if err != nil { - return t, err - } - t.Timeout = timeout - ttl, err := pbutil.DurationFromProto(&s.TTL) - if err != nil { - return t, err - } - t.TTL = ttl + t.Timeout = structs.DurationFromProto(s.Timeout) + t.TTL = structs.DurationFromProto(s.TTL) t.SuccessBeforePassing = int(s.SuccessBeforePassing) - t.FailuresBeforeCritical = int(s.FailuresBeforeCritical) t.FailuresBeforeWarning = int(s.FailuresBeforeWarning) + t.FailuresBeforeCritical = int(s.FailuresBeforeCritical) t.ProxyHTTP = s.ProxyHTTP t.ProxyGRPC = s.ProxyGRPC - deregisterCriticalServiceAfter, err := pbutil.DurationFromProto(&s.DeregisterCriticalServiceAfter) - if err != nil { - return t, err - } - t.DeregisterCriticalServiceAfter = deregisterCriticalServiceAfter + t.DeregisterCriticalServiceAfter = structs.DurationFromProto(s.DeregisterCriticalServiceAfter) t.OutputMaxSize = int(s.OutputMaxSize) - return t, nil } -func NewCheckTypeFromStructs(t structs.CheckType) CheckType { - var s CheckType - s.CheckID = t.CheckID +func CheckTypeFromStructs(t *structs.CheckType, s *CheckType) { + if s == nil { + return + } + s.CheckID = string(t.CheckID) s.Name = t.Name s.Status = t.Status s.Notes = t.Notes @@ -72,7 +55,7 @@ func NewCheckTypeFromStructs(t structs.CheckType) CheckType { s.Method = t.Method s.Body = t.Body s.TCP = t.TCP - s.Interval = *pbutil.DurationToProto(t.Interval) + s.Interval = structs.DurationToProto(t.Interval) s.AliasNode = t.AliasNode s.AliasService = t.AliasService s.DockerContainerID = t.DockerContainerID @@ -81,21 +64,22 @@ func NewCheckTypeFromStructs(t structs.CheckType) CheckType { s.GRPCUseTLS = t.GRPCUseTLS s.TLSServerName = t.TLSServerName s.TLSSkipVerify = t.TLSSkipVerify - s.Timeout = *pbutil.DurationToProto(t.Timeout) - s.TTL = *pbutil.DurationToProto(t.TTL) + s.Timeout = structs.DurationToProto(t.Timeout) + s.TTL = structs.DurationToProto(t.TTL) s.SuccessBeforePassing = int32(t.SuccessBeforePassing) - s.FailuresBeforeCritical = int32(t.FailuresBeforeCritical) s.FailuresBeforeWarning = int32(t.FailuresBeforeWarning) + s.FailuresBeforeCritical = int32(t.FailuresBeforeCritical) s.ProxyHTTP = t.ProxyHTTP s.ProxyGRPC = t.ProxyGRPC - s.DeregisterCriticalServiceAfter = *pbutil.DurationToProto(t.DeregisterCriticalServiceAfter) + s.DeregisterCriticalServiceAfter = structs.DurationToProto(t.DeregisterCriticalServiceAfter) s.OutputMaxSize = int32(t.OutputMaxSize) - return s } -func HealthCheckToStructs(s HealthCheck) (structs.HealthCheck, error) { - var t structs.HealthCheck +func HealthCheckToStructs(s *HealthCheck, t *structs.HealthCheck) { + if s == nil { + return + } t.Node = s.Node - t.CheckID = s.CheckID + t.CheckID = CheckIDType(s.CheckID) t.Name = s.Name t.Status = s.Status t.Notes = s.Notes @@ -104,22 +88,21 @@ func HealthCheckToStructs(s HealthCheck) (structs.HealthCheck, error) { t.ServiceName = s.ServiceName t.ServiceTags = s.ServiceTags t.Type = s.Type - t.ExposedPort = int(s.ExposedPort) - definition, err := HealthCheckDefinitionToStructs(s.Definition) - if err != nil { - return t, err - } - t.Definition = definition - t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta) - t.RaftIndex = RaftIndexToStructs(s.RaftIndex) t.Interval = s.Interval t.Timeout = s.Timeout - return t, nil + t.ExposedPort = int(s.ExposedPort) + if s.Definition != nil { + HealthCheckDefinitionToStructs(s.Definition, &t.Definition) + } + t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta) + t.RaftIndex = RaftIndexToStructs(s.RaftIndex) } -func NewHealthCheckFromStructs(t 
structs.HealthCheck) HealthCheck { - var s HealthCheck +func HealthCheckFromStructs(t *structs.HealthCheck, s *HealthCheck) { + if s == nil { + return + } s.Node = t.Node - s.CheckID = t.CheckID + s.CheckID = string(t.CheckID) s.Name = t.Name s.Status = t.Status s.Notes = t.Notes @@ -128,16 +111,21 @@ func NewHealthCheckFromStructs(t structs.HealthCheck) HealthCheck { s.ServiceName = t.ServiceName s.ServiceTags = t.ServiceTags s.Type = t.Type - s.ExposedPort = int32(t.ExposedPort) - s.Definition = NewHealthCheckDefinitionFromStructs(t.Definition) - s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta) - s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) s.Interval = t.Interval s.Timeout = t.Timeout - return s + s.ExposedPort = int32(t.ExposedPort) + { + var x HealthCheckDefinition + HealthCheckDefinitionFromStructs(&t.Definition, &x) + s.Definition = &x + } + s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta) + s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) } -func HealthCheckDefinitionToStructs(s HealthCheckDefinition) (structs.HealthCheckDefinition, error) { - var t structs.HealthCheckDefinition +func HealthCheckDefinitionToStructs(s *HealthCheckDefinition, t *structs.HealthCheckDefinition) { + if s == nil { + return + } t.HTTP = s.HTTP t.TLSServerName = s.TLSServerName t.TLSSkipVerify = s.TLSSkipVerify @@ -147,22 +135,10 @@ func HealthCheckDefinitionToStructs(s HealthCheckDefinition) (structs.HealthChec t.TCP = s.TCP t.H2PING = s.H2PING t.H2PingUseTLS = s.H2PingUseTLS - interval, err := pbutil.DurationFromProto(&s.Interval) - if err != nil { - return t, err - } - t.Interval = interval + t.Interval = structs.DurationFromProto(s.Interval) t.OutputMaxSize = uint(s.OutputMaxSize) - timeout, err := pbutil.DurationFromProto(&s.Timeout) - if err != nil { - return t, err - } - t.Timeout = timeout - deregisterCriticalServiceAfter, err := pbutil.DurationFromProto(&s.DeregisterCriticalServiceAfter) - if err != nil { - return t, err - } - t.DeregisterCriticalServiceAfter = deregisterCriticalServiceAfter + t.Timeout = structs.DurationFromProto(s.Timeout) + t.DeregisterCriticalServiceAfter = structs.DurationFromProto(s.DeregisterCriticalServiceAfter) t.ScriptArgs = s.ScriptArgs t.DockerContainerID = s.DockerContainerID t.Shell = s.Shell @@ -170,15 +146,12 @@ func HealthCheckDefinitionToStructs(s HealthCheckDefinition) (structs.HealthChec t.GRPCUseTLS = s.GRPCUseTLS t.AliasNode = s.AliasNode t.AliasService = s.AliasService - ttl, err := pbutil.DurationFromProto(&s.TTL) - if err != nil { - return t, err - } - t.TTL = ttl - return t, nil + t.TTL = structs.DurationFromProto(s.TTL) } -func NewHealthCheckDefinitionFromStructs(t structs.HealthCheckDefinition) HealthCheckDefinition { - var s HealthCheckDefinition +func HealthCheckDefinitionFromStructs(t *structs.HealthCheckDefinition, s *HealthCheckDefinition) { + if s == nil { + return + } s.HTTP = t.HTTP s.TLSServerName = t.TLSServerName s.TLSSkipVerify = t.TLSSkipVerify @@ -188,10 +161,10 @@ func NewHealthCheckDefinitionFromStructs(t structs.HealthCheckDefinition) Health s.TCP = t.TCP s.H2PING = t.H2PING s.H2PingUseTLS = t.H2PingUseTLS - s.Interval = *pbutil.DurationToProto(t.Interval) + s.Interval = structs.DurationToProto(t.Interval) s.OutputMaxSize = uint32(t.OutputMaxSize) - s.Timeout = *pbutil.DurationToProto(t.Timeout) - s.DeregisterCriticalServiceAfter = *pbutil.DurationToProto(t.DeregisterCriticalServiceAfter) + s.Timeout = structs.DurationToProto(t.Timeout) + s.DeregisterCriticalServiceAfter = 
structs.DurationToProto(t.DeregisterCriticalServiceAfter) s.ScriptArgs = t.ScriptArgs s.DockerContainerID = t.DockerContainerID s.Shell = t.Shell @@ -199,6 +172,5 @@ func NewHealthCheckDefinitionFromStructs(t structs.HealthCheckDefinition) Health s.GRPCUseTLS = t.GRPCUseTLS s.AliasNode = t.AliasNode s.AliasService = t.AliasService - s.TTL = *pbutil.DurationToProto(t.TTL) - return s + s.TTL = structs.DurationToProto(t.TTL) } diff --git a/proto/pbservice/healthcheck.pb.go b/proto/pbservice/healthcheck.pb.go index cb9eed6f8..3f5fe637b 100644 --- a/proto/pbservice/healthcheck.pb.go +++ b/proto/pbservice/healthcheck.pb.go @@ -1,18 +1,14 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbservice/healthcheck.proto package pbservice import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" - pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo" - github_com_hashicorp_consul_types "github.com/hashicorp/consul/types" - io "io" + duration "github.com/golang/protobuf/ptypes/duration" + pbcommon "github.com/hashicorp/consul/proto/pbcommon" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -34,25 +30,29 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // output=healthcheck.gen.go // name=Structs type HealthCheck struct { - Node string `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` - CheckID github_com_hashicorp_consul_types.CheckID `protobuf:"bytes,2,opt,name=CheckID,proto3,casttype=github.com/hashicorp/consul/types.CheckID" json:"CheckID,omitempty"` - Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` - Status string `protobuf:"bytes,4,opt,name=Status,proto3" json:"Status,omitempty"` - Notes string `protobuf:"bytes,5,opt,name=Notes,proto3" json:"Notes,omitempty"` - Output string `protobuf:"bytes,6,opt,name=Output,proto3" json:"Output,omitempty"` - ServiceID string `protobuf:"bytes,7,opt,name=ServiceID,proto3" json:"ServiceID,omitempty"` - ServiceName string `protobuf:"bytes,8,opt,name=ServiceName,proto3" json:"ServiceName,omitempty"` - ServiceTags []string `protobuf:"bytes,9,rep,name=ServiceTags,proto3" json:"ServiceTags,omitempty"` - Type string `protobuf:"bytes,12,opt,name=Type,proto3" json:"Type,omitempty"` - Definition HealthCheckDefinition `protobuf:"bytes,10,opt,name=Definition,proto3" json:"Definition"` + Node string `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` + // mog: func-to=CheckIDType func-from=string + CheckID string `protobuf:"bytes,2,opt,name=CheckID,proto3" json:"CheckID,omitempty"` + Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` + Status string `protobuf:"bytes,4,opt,name=Status,proto3" json:"Status,omitempty"` + Notes string `protobuf:"bytes,5,opt,name=Notes,proto3" json:"Notes,omitempty"` + Output string `protobuf:"bytes,6,opt,name=Output,proto3" json:"Output,omitempty"` + ServiceID string `protobuf:"bytes,7,opt,name=ServiceID,proto3" json:"ServiceID,omitempty"` + ServiceName string `protobuf:"bytes,8,opt,name=ServiceName,proto3" json:"ServiceName,omitempty"` + ServiceTags []string `protobuf:"bytes,9,rep,name=ServiceTags,proto3" json:"ServiceTags,omitempty"` + Type string `protobuf:"bytes,12,opt,name=Type,proto3" json:"Type,omitempty"` + Definition *HealthCheckDefinition `protobuf:"bytes,10,opt,name=Definition,proto3" 
json:"Definition,omitempty"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - pbcommongogo.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - EnterpriseMeta pbcommongogo.EnterpriseMeta `protobuf:"bytes,13,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` + EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,13,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=int func-from=int32 - ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` - Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` - Timeout string `protobuf:"bytes,16,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` + Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` + Timeout string `protobuf:"bytes,16,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *HealthCheck) Reset() { *m = HealthCheck{} } @@ -61,26 +61,18 @@ func (*HealthCheck) ProtoMessage() {} func (*HealthCheck) Descriptor() ([]byte, []int) { return fileDescriptor_8a6f7448747c9fbe, []int{0} } + func (m *HealthCheck) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_HealthCheck.Unmarshal(m, b) } func (m *HealthCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HealthCheck.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_HealthCheck.Marshal(b, m, deterministic) } func (m *HealthCheck) XXX_Merge(src proto.Message) { xxx_messageInfo_HealthCheck.Merge(m, src) } func (m *HealthCheck) XXX_Size() int { - return m.Size() + return xxx_messageInfo_HealthCheck.Size(m) } func (m *HealthCheck) XXX_DiscardUnknown() { xxx_messageInfo_HealthCheck.DiscardUnknown(m) @@ -88,8 +80,123 @@ func (m *HealthCheck) XXX_DiscardUnknown() { var xxx_messageInfo_HealthCheck proto.InternalMessageInfo +func (m *HealthCheck) GetNode() string { + if m != nil { + return m.Node + } + return "" +} + +func (m *HealthCheck) GetCheckID() string { + if m != nil { + return m.CheckID + } + return "" +} + +func (m *HealthCheck) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HealthCheck) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *HealthCheck) GetNotes() string { + if m != nil { + return m.Notes + } + return "" +} + +func (m *HealthCheck) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *HealthCheck) GetServiceID() string { + if m != nil { + return m.ServiceID + } + return "" +} + +func (m *HealthCheck) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *HealthCheck) GetServiceTags() []string { + if m != nil { + return m.ServiceTags + } + return nil +} + +func (m *HealthCheck) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HealthCheck) GetDefinition() *HealthCheckDefinition { + if 
m != nil { + return m.Definition + } + return nil +} + +func (m *HealthCheck) GetRaftIndex() *pbcommon.RaftIndex { + if m != nil { + return m.RaftIndex + } + return nil +} + +func (m *HealthCheck) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if m != nil { + return m.EnterpriseMeta + } + return nil +} + +func (m *HealthCheck) GetExposedPort() int32 { + if m != nil { + return m.ExposedPort + } + return 0 +} + +func (m *HealthCheck) GetInterval() string { + if m != nil { + return m.Interval + } + return "" +} + +func (m *HealthCheck) GetTimeout() string { + if m != nil { + return m.Timeout + } + return "" +} + type HeaderValue struct { - Value []string `protobuf:"bytes,1,rep,name=Value,proto3" json:"Value,omitempty"` + Value []string `protobuf:"bytes,1,rep,name=Value,proto3" json:"Value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *HeaderValue) Reset() { *m = HeaderValue{} } @@ -98,26 +205,18 @@ func (*HeaderValue) ProtoMessage() {} func (*HeaderValue) Descriptor() ([]byte, []int) { return fileDescriptor_8a6f7448747c9fbe, []int{1} } + func (m *HeaderValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_HeaderValue.Unmarshal(m, b) } func (m *HeaderValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HeaderValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_HeaderValue.Marshal(b, m, deterministic) } func (m *HeaderValue) XXX_Merge(src proto.Message) { xxx_messageInfo_HeaderValue.Merge(m, src) } func (m *HeaderValue) XXX_Size() int { - return m.Size() + return xxx_messageInfo_HeaderValue.Size(m) } func (m *HeaderValue) XXX_DiscardUnknown() { xxx_messageInfo_HeaderValue.DiscardUnknown(m) @@ -125,6 +224,13 @@ func (m *HeaderValue) XXX_DiscardUnknown() { var xxx_messageInfo_HeaderValue proto.InternalMessageInfo +func (m *HeaderValue) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + // HealthCheckDefinition of a single HealthCheck. 
// // mog annotation: @@ -137,25 +243,32 @@ type HealthCheckDefinition struct { TLSServerName string `protobuf:"bytes,19,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` TLSSkipVerify bool `protobuf:"varint,2,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs - Header map[string]HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Method string `protobuf:"bytes,4,opt,name=Method,proto3" json:"Method,omitempty"` - Body string `protobuf:"bytes,18,opt,name=Body,proto3" json:"Body,omitempty"` - TCP string `protobuf:"bytes,5,opt,name=TCP,proto3" json:"TCP,omitempty"` - Interval types.Duration `protobuf:"bytes,6,opt,name=Interval,proto3" json:"Interval"` + Header map[string]*HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Method string `protobuf:"bytes,4,opt,name=Method,proto3" json:"Method,omitempty"` + Body string `protobuf:"bytes,18,opt,name=Body,proto3" json:"Body,omitempty"` + TCP string `protobuf:"bytes,5,opt,name=TCP,proto3" json:"TCP,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + Interval *duration.Duration `protobuf:"bytes,6,opt,name=Interval,proto3" json:"Interval,omitempty"` // mog: func-to=uint func-from=uint32 - OutputMaxSize uint32 `protobuf:"varint,9,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` - Timeout types.Duration `protobuf:"bytes,7,opt,name=Timeout,proto3" json:"Timeout"` - DeregisterCriticalServiceAfter types.Duration `protobuf:"bytes,8,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter"` - ScriptArgs []string `protobuf:"bytes,10,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` - DockerContainerID string `protobuf:"bytes,11,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` - Shell string `protobuf:"bytes,12,opt,name=Shell,proto3" json:"Shell,omitempty"` - H2PING string `protobuf:"bytes,20,opt,name=H2PING,proto3" json:"H2PING,omitempty"` - H2PingUseTLS bool `protobuf:"varint,21,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` - GRPC string `protobuf:"bytes,13,opt,name=GRPC,proto3" json:"GRPC,omitempty"` - GRPCUseTLS bool `protobuf:"varint,14,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` - AliasNode string `protobuf:"bytes,15,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` - AliasService string `protobuf:"bytes,16,opt,name=AliasService,proto3" json:"AliasService,omitempty"` - TTL types.Duration `protobuf:"bytes,17,opt,name=TTL,proto3" json:"TTL"` + OutputMaxSize uint32 `protobuf:"varint,9,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + Timeout *duration.Duration `protobuf:"bytes,7,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + DeregisterCriticalServiceAfter *duration.Duration `protobuf:"bytes,8,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter,omitempty"` + ScriptArgs []string `protobuf:"bytes,10,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` + DockerContainerID string `protobuf:"bytes,11,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` + Shell string 
`protobuf:"bytes,12,opt,name=Shell,proto3" json:"Shell,omitempty"` + H2PING string `protobuf:"bytes,20,opt,name=H2PING,proto3" json:"H2PING,omitempty"` + H2PingUseTLS bool `protobuf:"varint,21,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` + GRPC string `protobuf:"bytes,13,opt,name=GRPC,proto3" json:"GRPC,omitempty"` + GRPCUseTLS bool `protobuf:"varint,14,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` + AliasNode string `protobuf:"bytes,15,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` + AliasService string `protobuf:"bytes,16,opt,name=AliasService,proto3" json:"AliasService,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + TTL *duration.Duration `protobuf:"bytes,17,opt,name=TTL,proto3" json:"TTL,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *HealthCheckDefinition) Reset() { *m = HealthCheckDefinition{} } @@ -164,26 +277,18 @@ func (*HealthCheckDefinition) ProtoMessage() {} func (*HealthCheckDefinition) Descriptor() ([]byte, []int) { return fileDescriptor_8a6f7448747c9fbe, []int{2} } + func (m *HealthCheckDefinition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_HealthCheckDefinition.Unmarshal(m, b) } func (m *HealthCheckDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HealthCheckDefinition.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_HealthCheckDefinition.Marshal(b, m, deterministic) } func (m *HealthCheckDefinition) XXX_Merge(src proto.Message) { xxx_messageInfo_HealthCheckDefinition.Merge(m, src) } func (m *HealthCheckDefinition) XXX_Size() int { - return m.Size() + return xxx_messageInfo_HealthCheckDefinition.Size(m) } func (m *HealthCheckDefinition) XXX_DiscardUnknown() { xxx_messageInfo_HealthCheckDefinition.DiscardUnknown(m) @@ -191,6 +296,153 @@ func (m *HealthCheckDefinition) XXX_DiscardUnknown() { var xxx_messageInfo_HealthCheckDefinition proto.InternalMessageInfo +func (m *HealthCheckDefinition) GetHTTP() string { + if m != nil { + return m.HTTP + } + return "" +} + +func (m *HealthCheckDefinition) GetTLSServerName() string { + if m != nil { + return m.TLSServerName + } + return "" +} + +func (m *HealthCheckDefinition) GetTLSSkipVerify() bool { + if m != nil { + return m.TLSSkipVerify + } + return false +} + +func (m *HealthCheckDefinition) GetHeader() map[string]*HeaderValue { + if m != nil { + return m.Header + } + return nil +} + +func (m *HealthCheckDefinition) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *HealthCheckDefinition) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HealthCheckDefinition) GetTCP() string { + if m != nil { + return m.TCP + } + return "" +} + +func (m *HealthCheckDefinition) GetInterval() *duration.Duration { + if m != nil { + return m.Interval + } + return nil +} + +func (m *HealthCheckDefinition) GetOutputMaxSize() uint32 { + if m != nil { + return m.OutputMaxSize + } + return 0 +} + +func (m *HealthCheckDefinition) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *HealthCheckDefinition) GetDeregisterCriticalServiceAfter() *duration.Duration { + if m != nil { + return m.DeregisterCriticalServiceAfter + } + return nil +} + +func 
(m *HealthCheckDefinition) GetScriptArgs() []string { + if m != nil { + return m.ScriptArgs + } + return nil +} + +func (m *HealthCheckDefinition) GetDockerContainerID() string { + if m != nil { + return m.DockerContainerID + } + return "" +} + +func (m *HealthCheckDefinition) GetShell() string { + if m != nil { + return m.Shell + } + return "" +} + +func (m *HealthCheckDefinition) GetH2PING() string { + if m != nil { + return m.H2PING + } + return "" +} + +func (m *HealthCheckDefinition) GetH2PingUseTLS() bool { + if m != nil { + return m.H2PingUseTLS + } + return false +} + +func (m *HealthCheckDefinition) GetGRPC() string { + if m != nil { + return m.GRPC + } + return "" +} + +func (m *HealthCheckDefinition) GetGRPCUseTLS() bool { + if m != nil { + return m.GRPCUseTLS + } + return false +} + +func (m *HealthCheckDefinition) GetAliasNode() string { + if m != nil { + return m.AliasNode + } + return "" +} + +func (m *HealthCheckDefinition) GetAliasService() string { + if m != nil { + return m.AliasService + } + return "" +} + +func (m *HealthCheckDefinition) GetTTL() *duration.Duration { + if m != nil { + return m.TTL + } + return nil +} + // CheckType is used to create either the CheckMonitor or the CheckTTL. // The following types are supported: Script, HTTP, TCP, Docker, TTL, GRPC, // Alias. Script, H2PING, @@ -204,30 +456,34 @@ var xxx_messageInfo_HealthCheckDefinition proto.InternalMessageInfo // output=healthcheck.gen.go // name=Structs type CheckType struct { - CheckID github_com_hashicorp_consul_types.CheckID `protobuf:"bytes,1,opt,name=CheckID,proto3,casttype=github.com/hashicorp/consul/types.CheckID" json:"CheckID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Status string `protobuf:"bytes,3,opt,name=Status,proto3" json:"Status,omitempty"` - Notes string `protobuf:"bytes,4,opt,name=Notes,proto3" json:"Notes,omitempty"` - ScriptArgs []string `protobuf:"bytes,5,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` - HTTP string `protobuf:"bytes,6,opt,name=HTTP,proto3" json:"HTTP,omitempty"` + // mog: func-to=CheckIDType func-from=string + CheckID string `protobuf:"bytes,1,opt,name=CheckID,proto3" json:"CheckID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + Status string `protobuf:"bytes,3,opt,name=Status,proto3" json:"Status,omitempty"` + Notes string `protobuf:"bytes,4,opt,name=Notes,proto3" json:"Notes,omitempty"` + ScriptArgs []string `protobuf:"bytes,5,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` + HTTP string `protobuf:"bytes,6,opt,name=HTTP,proto3" json:"HTTP,omitempty"` // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs - Header map[string]HeaderValue `protobuf:"bytes,20,rep,name=Header,proto3" json:"Header" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Method string `protobuf:"bytes,7,opt,name=Method,proto3" json:"Method,omitempty"` - Body string `protobuf:"bytes,26,opt,name=Body,proto3" json:"Body,omitempty"` - TCP string `protobuf:"bytes,8,opt,name=TCP,proto3" json:"TCP,omitempty"` - Interval types.Duration `protobuf:"bytes,9,opt,name=Interval,proto3" json:"Interval"` - AliasNode string `protobuf:"bytes,10,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` - AliasService string `protobuf:"bytes,11,opt,name=AliasService,proto3" json:"AliasService,omitempty"` - DockerContainerID string `protobuf:"bytes,12,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` - Shell string 
`protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"` - H2PING string `protobuf:"bytes,28,opt,name=H2PING,proto3" json:"H2PING,omitempty"` - H2PingUseTLS bool `protobuf:"varint,30,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` - GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"` - GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` - TLSServerName string `protobuf:"bytes,27,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` - TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` - Timeout types.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3" json:"Timeout"` - TTL types.Duration `protobuf:"bytes,18,opt,name=TTL,proto3" json:"TTL"` + Header map[string]*HeaderValue `protobuf:"bytes,20,rep,name=Header,proto3" json:"Header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Method string `protobuf:"bytes,7,opt,name=Method,proto3" json:"Method,omitempty"` + Body string `protobuf:"bytes,26,opt,name=Body,proto3" json:"Body,omitempty"` + TCP string `protobuf:"bytes,8,opt,name=TCP,proto3" json:"TCP,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + Interval *duration.Duration `protobuf:"bytes,9,opt,name=Interval,proto3" json:"Interval,omitempty"` + AliasNode string `protobuf:"bytes,10,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` + AliasService string `protobuf:"bytes,11,opt,name=AliasService,proto3" json:"AliasService,omitempty"` + DockerContainerID string `protobuf:"bytes,12,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` + Shell string `protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"` + H2PING string `protobuf:"bytes,28,opt,name=H2PING,proto3" json:"H2PING,omitempty"` + H2PingUseTLS bool `protobuf:"varint,30,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` + GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"` + GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` + TLSServerName string `protobuf:"bytes,27,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` + TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + Timeout *duration.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + TTL *duration.Duration `protobuf:"bytes,18,opt,name=TTL,proto3" json:"TTL,omitempty"` // mog: func-to=int func-from=int32 SuccessBeforePassing int32 `protobuf:"varint,21,opt,name=SuccessBeforePassing,proto3" json:"SuccessBeforePassing,omitempty"` // mog: func-to=int func-from=int32 @@ -240,9 +496,13 @@ type CheckType struct { // DeregisterCriticalServiceAfter, if >0, will cause the associated // service, if any, to be deregistered if this check is critical for // longer than this duration. 
- DeregisterCriticalServiceAfter types.Duration `protobuf:"bytes,19,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter"` + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + DeregisterCriticalServiceAfter *duration.Duration `protobuf:"bytes,19,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter,omitempty"` // mog: func-to=int func-from=int32 - OutputMaxSize int32 `protobuf:"varint,25,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` + OutputMaxSize int32 `protobuf:"varint,25,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CheckType) Reset() { *m = CheckType{} } @@ -251,26 +511,18 @@ func (*CheckType) ProtoMessage() {} func (*CheckType) Descriptor() ([]byte, []int) { return fileDescriptor_8a6f7448747c9fbe, []int{3} } + func (m *CheckType) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_CheckType.Unmarshal(m, b) } func (m *CheckType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckType.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_CheckType.Marshal(b, m, deterministic) } func (m *CheckType) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckType.Merge(m, src) } func (m *CheckType) XXX_Size() int { - return m.Size() + return xxx_messageInfo_CheckType.Size(m) } func (m *CheckType) XXX_DiscardUnknown() { xxx_messageInfo_CheckType.DiscardUnknown(m) @@ -278,3604 +530,292 @@ func (m *CheckType) XXX_DiscardUnknown() { var xxx_messageInfo_CheckType proto.InternalMessageInfo +func (m *CheckType) GetCheckID() string { + if m != nil { + return m.CheckID + } + return "" +} + +func (m *CheckType) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CheckType) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *CheckType) GetNotes() string { + if m != nil { + return m.Notes + } + return "" +} + +func (m *CheckType) GetScriptArgs() []string { + if m != nil { + return m.ScriptArgs + } + return nil +} + +func (m *CheckType) GetHTTP() string { + if m != nil { + return m.HTTP + } + return "" +} + +func (m *CheckType) GetHeader() map[string]*HeaderValue { + if m != nil { + return m.Header + } + return nil +} + +func (m *CheckType) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *CheckType) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *CheckType) GetTCP() string { + if m != nil { + return m.TCP + } + return "" +} + +func (m *CheckType) GetInterval() *duration.Duration { + if m != nil { + return m.Interval + } + return nil +} + +func (m *CheckType) GetAliasNode() string { + if m != nil { + return m.AliasNode + } + return "" +} + +func (m *CheckType) GetAliasService() string { + if m != nil { + return m.AliasService + } + return "" +} + +func (m *CheckType) GetDockerContainerID() string { + if m != nil { + return m.DockerContainerID + } + return "" +} + +func (m *CheckType) GetShell() string { + if m != nil { + return m.Shell + } + return "" +} + +func (m *CheckType) GetH2PING() string { + if m != nil { + return m.H2PING + } + return "" +} + +func (m *CheckType) GetH2PingUseTLS() bool { + if m != nil { + 
return m.H2PingUseTLS + } + return false +} + +func (m *CheckType) GetGRPC() string { + if m != nil { + return m.GRPC + } + return "" +} + +func (m *CheckType) GetGRPCUseTLS() bool { + if m != nil { + return m.GRPCUseTLS + } + return false +} + +func (m *CheckType) GetTLSServerName() string { + if m != nil { + return m.TLSServerName + } + return "" +} + +func (m *CheckType) GetTLSSkipVerify() bool { + if m != nil { + return m.TLSSkipVerify + } + return false +} + +func (m *CheckType) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *CheckType) GetTTL() *duration.Duration { + if m != nil { + return m.TTL + } + return nil +} + +func (m *CheckType) GetSuccessBeforePassing() int32 { + if m != nil { + return m.SuccessBeforePassing + } + return 0 +} + +func (m *CheckType) GetFailuresBeforeWarning() int32 { + if m != nil { + return m.FailuresBeforeWarning + } + return 0 +} + +func (m *CheckType) GetFailuresBeforeCritical() int32 { + if m != nil { + return m.FailuresBeforeCritical + } + return 0 +} + +func (m *CheckType) GetProxyHTTP() string { + if m != nil { + return m.ProxyHTTP + } + return "" +} + +func (m *CheckType) GetProxyGRPC() string { + if m != nil { + return m.ProxyGRPC + } + return "" +} + +func (m *CheckType) GetDeregisterCriticalServiceAfter() *duration.Duration { + if m != nil { + return m.DeregisterCriticalServiceAfter + } + return nil +} + +func (m *CheckType) GetOutputMaxSize() int32 { + if m != nil { + return m.OutputMaxSize + } + return 0 +} + func init() { proto.RegisterType((*HealthCheck)(nil), "pbservice.HealthCheck") proto.RegisterType((*HeaderValue)(nil), "pbservice.HeaderValue") proto.RegisterType((*HealthCheckDefinition)(nil), "pbservice.HealthCheckDefinition") - proto.RegisterMapType((map[string]HeaderValue)(nil), "pbservice.HealthCheckDefinition.HeaderEntry") + proto.RegisterMapType((map[string]*HeaderValue)(nil), "pbservice.HealthCheckDefinition.HeaderEntry") proto.RegisterType((*CheckType)(nil), "pbservice.CheckType") - proto.RegisterMapType((map[string]HeaderValue)(nil), "pbservice.CheckType.HeaderEntry") + proto.RegisterMapType((map[string]*HeaderValue)(nil), "pbservice.CheckType.HeaderEntry") } -func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) } +func init() { + proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) +} var fileDescriptor_8a6f7448747c9fbe = []byte{ - // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x4e, 0xe3, 0xc6, - 0x17, 0x8f, 0x09, 0x49, 0xf0, 0x64, 0x61, 0x97, 0x59, 0xe0, 0x3f, 0x9b, 0xff, 0xd6, 0xa4, 0x74, - 0x2f, 0xa8, 0x4a, 0x1d, 0x95, 0x56, 0x55, 0x3f, 0xd4, 0x4a, 0x84, 0xb0, 0x90, 0x0a, 0x68, 0xea, - 0xa4, 0x5b, 0xa9, 0x77, 0xc6, 0x99, 0x38, 0x16, 0x89, 0x27, 0x1a, 0x8f, 0x11, 0xe9, 0x53, 0xf4, - 0xb2, 0x0f, 0xd0, 0x87, 0xe1, 0x92, 0xcb, 0x5e, 0xa1, 0x16, 0x9e, 0xa1, 0x37, 0xbd, 0xaa, 0xe6, - 0x8c, 0x9d, 0xd8, 0x1b, 0x2f, 0x64, 0xa5, 0xed, 0x55, 0xce, 0xf9, 0x9d, 0x8f, 0x19, 0x9f, 0x73, - 0x7e, 0x67, 0x82, 0xde, 0x1f, 0x71, 0x26, 0x58, 0x6d, 0x74, 0x16, 0x50, 0x7e, 0xe1, 0x39, 0xb4, - 0xd6, 0xa7, 0xf6, 0x40, 0xf4, 0x9d, 0x3e, 0x75, 0xce, 0x4d, 0xb0, 0x61, 0x7d, 0x62, 0xac, 0x18, - 0x2e, 0x63, 0xee, 0x80, 0xd6, 0xc0, 0x70, 0x16, 0xf6, 0x6a, 0xdd, 0x90, 0xdb, 0xc2, 0x63, 0xbe, - 0x72, 0xad, 0x6c, 0xc6, 0xd9, 0x1c, 0x36, 0x1c, 0x32, 0xdf, 0x65, 0x2e, 0xab, 0x29, 0x31, 0x72, - 0x58, 0x93, 0x90, 0x72, 0x92, 0x92, 0x42, 0xb7, 
0xee, 0x16, 0x51, 0xf9, 0x08, 0xce, 0xdd, 0x97, - 0xe7, 0x62, 0x8c, 0x16, 0x4f, 0x59, 0x97, 0x12, 0xad, 0xaa, 0x6d, 0xeb, 0x16, 0xc8, 0xf8, 0x10, - 0x95, 0xc0, 0xd8, 0x6c, 0x90, 0x05, 0x09, 0xd7, 0x3f, 0xfe, 0xe7, 0x66, 0xf3, 0x43, 0xd7, 0x13, - 0xfd, 0xf0, 0xcc, 0x74, 0xd8, 0xb0, 0xd6, 0xb7, 0x83, 0xbe, 0xe7, 0x30, 0x3e, 0xaa, 0x39, 0xcc, - 0x0f, 0xc2, 0x41, 0x4d, 0x8c, 0x47, 0x34, 0x30, 0xa3, 0x20, 0x2b, 0x8e, 0x86, 0xe4, 0xf6, 0x90, - 0x92, 0x7c, 0x94, 0xdc, 0x1e, 0x52, 0xbc, 0x81, 0x8a, 0x6d, 0x61, 0x8b, 0x30, 0x20, 0x8b, 0x80, - 0x46, 0x1a, 0x5e, 0x43, 0x85, 0x53, 0x26, 0x68, 0x40, 0x0a, 0x00, 0x2b, 0x45, 0x7a, 0x7f, 0x1f, - 0x8a, 0x51, 0x28, 0x48, 0x51, 0x79, 0x2b, 0x0d, 0x3f, 0x47, 0x7a, 0x5b, 0x15, 0xaa, 0xd9, 0x20, - 0x25, 0x30, 0x4d, 0x01, 0x5c, 0x45, 0xe5, 0x48, 0x81, 0xe3, 0x97, 0xc0, 0x9e, 0x84, 0x12, 0x1e, - 0x1d, 0xdb, 0x0d, 0x88, 0x5e, 0xcd, 0x27, 0x3c, 0x24, 0x24, 0xef, 0xde, 0x19, 0x8f, 0x28, 0x79, - 0xa4, 0xee, 0x2e, 0x65, 0xfc, 0x12, 0xa1, 0x06, 0xed, 0x79, 0xbe, 0x27, 0xfb, 0x40, 0x50, 0x55, - 0xdb, 0x2e, 0xef, 0x56, 0xcd, 0x49, 0xcf, 0xcc, 0x44, 0x61, 0xa7, 0x7e, 0xf5, 0xc5, 0xab, 0x9b, - 0xcd, 0x9c, 0x95, 0x88, 0xc4, 0xdf, 0x20, 0xdd, 0xb2, 0x7b, 0xa2, 0xe9, 0x77, 0xe9, 0x25, 0x29, - 0x43, 0x9a, 0x75, 0x73, 0xda, 0x47, 0x73, 0x62, 0xac, 0x2f, 0xc9, 0xd8, 0xeb, 0x9b, 0x4d, 0xcd, - 0x9a, 0x46, 0xe0, 0x23, 0xb4, 0x72, 0xe0, 0x0b, 0xca, 0x47, 0xdc, 0x0b, 0xe8, 0x09, 0x15, 0x36, - 0x59, 0x86, 0x1c, 0x95, 0x64, 0x8e, 0xb4, 0x47, 0x74, 0x89, 0xd7, 0xe2, 0x64, 0x19, 0x0e, 0x2e, - 0x47, 0x2c, 0xa0, 0xdd, 0x16, 0xe3, 0x82, 0xac, 0x54, 0xb5, 0xed, 0x82, 0x95, 0x84, 0x70, 0x05, - 0x2d, 0x35, 0x65, 0xcc, 0x85, 0x3d, 0x20, 0x8f, 0xa1, 0x14, 0x13, 0x1d, 0x13, 0x54, 0xea, 0x78, - 0x43, 0xca, 0x42, 0x41, 0x9e, 0x80, 0x29, 0x56, 0xb7, 0x3e, 0x80, 0x21, 0xeb, 0x52, 0xfe, 0xca, - 0x1e, 0x84, 0x54, 0xf6, 0x16, 0x04, 0xa2, 0x41, 0x9d, 0x95, 0xb2, 0xf5, 0x7b, 0x09, 0xad, 0x67, - 0x56, 0x4c, 0xd6, 0xfe, 0xa8, 0xd3, 0x69, 0xc5, 0x43, 0x29, 0x65, 0xfc, 0x02, 0x2d, 0x77, 0x8e, - 0xdb, 0xb2, 0x43, 0x94, 0x43, 0x57, 0x9f, 0x82, 0x31, 0x0d, 0xc6, 0x5e, 0xe7, 0xde, 0xe8, 0x15, - 0xe5, 0x5e, 0x6f, 0x0c, 0x03, 0xbc, 0x64, 0xa5, 0x41, 0xfc, 0x1d, 0x2a, 0xaa, 0xeb, 0x91, 0x7c, - 0x35, 0xbf, 0x5d, 0xde, 0xdd, 0x79, 0xa8, 0x87, 0xa6, 0x72, 0x3f, 0xf0, 0x05, 0x1f, 0x47, 0xa5, - 0x8c, 0x32, 0xc8, 0x09, 0x3d, 0xa1, 0xa2, 0xcf, 0xba, 0xf1, 0x3c, 0x2b, 0x4d, 0x7e, 0x43, 0x9d, - 0x75, 0xc7, 0x04, 0xab, 0x6f, 0x90, 0x32, 0x7e, 0x82, 0xf2, 0x9d, 0xfd, 0x56, 0x34, 0xe1, 0x52, - 0xc4, 0x5f, 0x27, 0xca, 0x5b, 0x84, 0x26, 0x3e, 0x33, 0x15, 0xf1, 0xcd, 0x98, 0xf8, 0x66, 0x23, - 0x22, 0x7e, 0x74, 0xf0, 0xb4, 0xfe, 0x2f, 0xd0, 0xb2, 0xa2, 0xc3, 0x89, 0x7d, 0xd9, 0xf6, 0x7e, - 0xa1, 0x44, 0xaf, 0x6a, 0xdb, 0xcb, 0x56, 0x1a, 0xc4, 0x5f, 0x4e, 0xbb, 0x54, 0x9a, 0xef, 0x84, - 0xd8, 0x1f, 0xbb, 0xc8, 0x68, 0x50, 0x4e, 0x5d, 0x2f, 0x10, 0x94, 0xef, 0x73, 0x4f, 0x78, 0x8e, - 0x3d, 0x88, 0x48, 0xb2, 0xd7, 0x13, 0x94, 0x03, 0xb5, 0xe6, 0xc8, 0xf8, 0x40, 0x1a, 0x6c, 0x20, - 0xd4, 0x76, 0xb8, 0x37, 0x12, 0x7b, 0xdc, 0x0d, 0x08, 0x82, 0x29, 0x49, 0x20, 0x78, 0x07, 0xad, - 0x36, 0x98, 0x73, 0x4e, 0xf9, 0x3e, 0xf3, 0x85, 0xed, 0xf9, 0x94, 0x37, 0x1b, 0x40, 0x1c, 0xdd, - 0x9a, 0x35, 0xc8, 0x71, 0x6b, 0xf7, 0xe9, 0x60, 0x10, 0x71, 0x57, 0x29, 0xb2, 0x51, 0x47, 0xbb, - 0xad, 0xe6, 0xe9, 0x21, 0x59, 0x53, 0x8d, 0x52, 0x1a, 0xde, 0x42, 0x8f, 0x8e, 0x76, 0x5b, 0x9e, - 0xef, 0xfe, 0x18, 0xd0, 0xce, 0x71, 0x9b, 0xac, 0xc3, 0xc4, 0xa4, 0x30, 0xd9, 0xcc, 0x43, 0xab, - 0xb5, 0x0f, 0x3c, 0xd3, 0x2d, 0x90, 0xe5, 0x9d, 0xe5, 0x6f, 0x14, 0xb5, 
0x02, 0x51, 0x09, 0x44, - 0xae, 0xa8, 0xbd, 0x81, 0x67, 0x07, 0xb0, 0x5e, 0x15, 0x75, 0xa6, 0x80, 0x3c, 0x15, 0x94, 0xa8, - 0x0c, 0x11, 0x81, 0x52, 0x18, 0xfe, 0x04, 0xe5, 0x3b, 0x9d, 0x63, 0xb2, 0x3a, 0x5f, 0x8d, 0xa5, - 0x6f, 0xe5, 0x87, 0x98, 0x78, 0x30, 0xaa, 0x72, 0xe0, 0xce, 0xe9, 0x38, 0xe2, 0x91, 0x14, 0xf1, - 0x0e, 0x2a, 0x5c, 0x00, 0x15, 0x17, 0x20, 0xeb, 0x46, 0x7a, 0xf2, 0x63, 0xc6, 0x5a, 0xca, 0xe9, - 0xab, 0x85, 0x2f, 0xb4, 0xad, 0xbf, 0x75, 0xa4, 0x03, 0x1d, 0x60, 0x05, 0x26, 0xde, 0x06, 0xed, - 0x9d, 0xbc, 0x0d, 0x0b, 0x99, 0x6f, 0x43, 0x3e, 0xfb, 0x6d, 0x58, 0x4c, 0xbe, 0x0d, 0xe9, 0xa1, - 0x29, 0xcc, 0x0c, 0x4d, 0xbc, 0x45, 0x8a, 0x89, 0x2d, 0xf2, 0xed, 0x84, 0xf9, 0x6b, 0xc0, 0xfc, - 0xe4, 0xf6, 0x9e, 0x7c, 0xe4, 0x5c, 0x6c, 0x2f, 0x65, 0xb2, 0xbd, 0x32, 0xcb, 0xf6, 0xa5, 0x6c, - 0xb6, 0xeb, 0x6f, 0xcb, 0xf6, 0xd4, 0x3c, 0xa1, 0x87, 0xe6, 0xa9, 0x9c, 0x31, 0x4f, 0x99, 0x2c, - 0x7a, 0xf4, 0x20, 0x8b, 0x96, 0xb3, 0x59, 0xf4, 0xfc, 0x5e, 0x16, 0x19, 0xf7, 0xb0, 0x68, 0xe5, - 0x8d, 0x2c, 0x7a, 0x3c, 0xc3, 0xa2, 0x99, 0xb5, 0xff, 0xff, 0xb9, 0xd6, 0xfe, 0x93, 0xac, 0xb5, - 0x9f, 0xd8, 0x84, 0xab, 0x6f, 0xb9, 0x09, 0x23, 0x2a, 0xe2, 0xf9, 0xa9, 0x88, 0x77, 0xd1, 0x5a, - 0x3b, 0x74, 0x1c, 0x1a, 0x04, 0x75, 0xda, 0x63, 0x9c, 0xb6, 0xec, 0x20, 0xf0, 0x7c, 0x17, 0xf6, - 0x4b, 0xc1, 0xca, 0xb4, 0xe1, 0xcf, 0xd0, 0xfa, 0x4b, 0xdb, 0x1b, 0x84, 0x9c, 0x46, 0x86, 0x9f, - 0x6c, 0xee, 0xcb, 0xa0, 0xf7, 0x20, 0x28, 0xdb, 0x88, 0x3f, 0x47, 0x1b, 0x69, 0x43, 0xbc, 0x63, - 0xc9, 0x06, 0x84, 0xbd, 0xc1, 0x2a, 0x27, 0xaa, 0xc5, 0xd9, 0xe5, 0x18, 0x58, 0xf2, 0x3f, 0x35, - 0x51, 0x13, 0x60, 0x62, 0x85, 0x96, 0x91, 0x84, 0x15, 0xfa, 0xf6, 0xf0, 0xd3, 0xf0, 0xf4, 0xdd, - 0x3c, 0x0d, 0x33, 0x8f, 0xdc, 0x33, 0xf8, 0xa6, 0x34, 0xf8, 0x1f, 0xec, 0xbd, 0xfa, 0xc9, 0xd5, - 0x5f, 0x46, 0xee, 0xea, 0xd6, 0xd0, 0xae, 0x6f, 0x0d, 0xed, 0xcf, 0x5b, 0x43, 0xfb, 0xf5, 0xce, - 0xc8, 0xfd, 0x76, 0x67, 0xe4, 0xae, 0xef, 0x8c, 0xdc, 0x1f, 0x77, 0x46, 0xee, 0xe7, 0x8f, 0xee, - 0x5b, 0x7b, 0xaf, 0xfd, 0xd9, 0x3f, 0x2b, 0x02, 0xf0, 0xe9, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x87, 0x18, 0xc2, 0xd7, 0x06, 0x0c, 0x00, 0x00, + // 994 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xd1, 0x6e, 0x22, 0x37, + 0x14, 0x15, 0x21, 0x40, 0xc6, 0x84, 0x6c, 0xe2, 0x4d, 0x52, 0x2f, 0xbb, 0x8d, 0x28, 0xdd, 0x07, + 0xa4, 0x4d, 0x41, 0x62, 0xdb, 0x6a, 0xd5, 0x87, 0xaa, 0x09, 0xa4, 0x1b, 0xa4, 0x24, 0xa5, 0xc3, + 0x74, 0x2b, 0xf5, 0xcd, 0x19, 0x0c, 0x58, 0x19, 0xc6, 0x23, 0x8f, 0x27, 0x0a, 0xfd, 0x80, 0x7e, + 0x42, 0xbf, 0xa1, 0x9f, 0x59, 0xf9, 0x7a, 0x06, 0x66, 0x96, 0x49, 0x88, 0x54, 0xf5, 0x09, 0xdf, + 0x7b, 0xae, 0x8d, 0x7d, 0xef, 0x39, 0x07, 0xd0, 0x57, 0x81, 0x14, 0x4a, 0x74, 0x82, 0xdb, 0x90, + 0xc9, 0x7b, 0xee, 0xb2, 0xce, 0x8c, 0x51, 0x4f, 0xcd, 0xdc, 0x19, 0x73, 0xef, 0xda, 0x80, 0x61, + 0x6b, 0x09, 0xd6, 0x4f, 0xa6, 0x42, 0x4c, 0x3d, 0xd6, 0x01, 0xe0, 0x36, 0x9a, 0x74, 0xc6, 0x91, + 0xa4, 0x8a, 0x0b, 0xdf, 0x94, 0xd6, 0x5f, 0x27, 0xa7, 0xb9, 0x62, 0x3e, 0x17, 0x7e, 0xc7, 0x7c, + 0x18, 0xb0, 0xf9, 0xf7, 0x36, 0xaa, 0x5e, 0xc2, 0xe9, 0x3d, 0x7d, 0x3a, 0xc6, 0x68, 0xfb, 0x46, + 0x8c, 0x19, 0x29, 0x34, 0x0a, 0x2d, 0xcb, 0x86, 0x35, 0x26, 0xa8, 0x02, 0xe0, 0xa0, 0x4f, 0xb6, + 0x20, 0x9d, 0x84, 0x50, 0x4d, 0xe7, 0x8c, 0x14, 0xe3, 0x6a, 0x3a, 0x67, 0xf8, 0x18, 0x95, 0x47, + 0x8a, 0xaa, 0x28, 0x24, 0xdb, 0x90, 0x8d, 0x23, 0x7c, 0x88, 0x4a, 0x37, 0x42, 0xb1, 0x90, 0x94, + 0x20, 0x6d, 0x02, 0x5d, 0xfd, 0x4b, 0xa4, 0x82, 0x48, 0x91, 0xb2, 0xa9, 0x36, 0x11, 0x7e, 0x83, + 
0xac, 0x91, 0x79, 0xdf, 0xa0, 0x4f, 0x2a, 0x00, 0xad, 0x12, 0xb8, 0x81, 0xaa, 0x71, 0x00, 0x5f, + 0xbf, 0x03, 0x78, 0x3a, 0x95, 0xaa, 0x70, 0xe8, 0x34, 0x24, 0x56, 0xa3, 0x98, 0xaa, 0xd0, 0x29, + 0x7d, 0x77, 0x67, 0x11, 0x30, 0xb2, 0x6b, 0xee, 0xae, 0xd7, 0xf8, 0x27, 0x84, 0xfa, 0x6c, 0xc2, + 0x7d, 0xae, 0xdb, 0x47, 0x50, 0xa3, 0xd0, 0xaa, 0x76, 0x1b, 0xed, 0x65, 0xab, 0xdb, 0xa9, 0x4e, + 0xad, 0xea, 0xec, 0xd4, 0x1e, 0xdc, 0x41, 0x96, 0x4d, 0x27, 0x6a, 0xe0, 0x8f, 0xd9, 0x03, 0xa9, + 0xc2, 0x01, 0x07, 0xed, 0xb8, 0xe3, 0x4b, 0xc0, 0x5e, 0xd5, 0xe0, 0x1f, 0xd1, 0xde, 0x85, 0xaf, + 0x98, 0x0c, 0x24, 0x0f, 0xd9, 0x35, 0x53, 0x94, 0xd4, 0x60, 0xd7, 0x71, 0xb2, 0x2b, 0x8b, 0xda, + 0x9f, 0x55, 0xeb, 0x87, 0x5e, 0x3c, 0x04, 0x22, 0x64, 0xe3, 0xa1, 0x90, 0x8a, 0xec, 0x35, 0x0a, + 0xad, 0x92, 0x9d, 0x4e, 0xe1, 0x3a, 0xda, 0x19, 0xe8, 0x3d, 0xf7, 0xd4, 0x23, 0x2f, 0xe0, 0xb1, + 0xcb, 0x58, 0x8f, 0xd6, 0xe1, 0x73, 0x26, 0x22, 0x45, 0xf6, 0xcd, 0x68, 0xe3, 0xb0, 0xf9, 0x35, + 0xf0, 0x62, 0xcc, 0xe4, 0x27, 0xea, 0x45, 0x4c, 0x4f, 0x0f, 0x16, 0xa4, 0x00, 0x9d, 0x34, 0x41, + 0xf3, 0xaf, 0x0a, 0x3a, 0xca, 0xed, 0x89, 0xee, 0xee, 0xa5, 0xe3, 0x0c, 0x13, 0x1e, 0xe9, 0x35, + 0x7e, 0x8b, 0x6a, 0xce, 0xd5, 0x48, 0xcf, 0x80, 0x49, 0x98, 0xdb, 0x4b, 0x00, 0xb3, 0xc9, 0xa4, + 0xea, 0x8e, 0x07, 0x9f, 0x98, 0xe4, 0x93, 0x05, 0x70, 0x6e, 0xc7, 0xce, 0x26, 0x71, 0x1f, 0x95, + 0xcd, 0xf5, 0x48, 0xb1, 0x51, 0x6c, 0x55, 0xbb, 0xa7, 0x9b, 0xa6, 0xd4, 0x36, 0xe5, 0x17, 0xbe, + 0x92, 0x0b, 0x3b, 0xde, 0xab, 0xd9, 0x77, 0xcd, 0xd4, 0x4c, 0x8c, 0x13, 0xae, 0x9a, 0x48, 0xdf, + 0xfe, 0x5c, 0x8c, 0x17, 0x04, 0x9b, 0xdb, 0xeb, 0x35, 0xde, 0x47, 0x45, 0xa7, 0x37, 0x8c, 0xd9, + 0xab, 0x97, 0xf8, 0xbb, 0x54, 0x63, 0xcb, 0x30, 0xb4, 0x57, 0x6d, 0xa3, 0xc5, 0x76, 0xa2, 0xc5, + 0x76, 0x3f, 0xd6, 0x62, 0xaa, 0xe7, 0x6f, 0x51, 0xcd, 0x90, 0xfc, 0x9a, 0x3e, 0x8c, 0xf8, 0x9f, + 0x8c, 0x58, 0x8d, 0x42, 0xab, 0x66, 0x67, 0x93, 0xf8, 0xfd, 0x6a, 0x32, 0x95, 0x4d, 0x67, 0x27, + 0x95, 0x98, 0xa2, 0x93, 0x3e, 0x93, 0x6c, 0xca, 0x43, 0xc5, 0x64, 0x4f, 0x72, 0xc5, 0x5d, 0xea, + 0xc5, 0xa4, 0x3f, 0x9b, 0x28, 0x26, 0x41, 0x2a, 0x4f, 0x9e, 0xb5, 0xe1, 0x00, 0x7c, 0x82, 0xd0, + 0xc8, 0x95, 0x3c, 0x50, 0x67, 0x72, 0x1a, 0x12, 0x04, 0x6c, 0x48, 0x65, 0xf0, 0x29, 0x3a, 0xe8, + 0x0b, 0xf7, 0x8e, 0xc9, 0x9e, 0xf0, 0x15, 0xe5, 0x3e, 0x93, 0x83, 0x3e, 0x08, 0xc1, 0xb2, 0xd7, + 0x01, 0x4d, 0xab, 0xd1, 0x8c, 0x79, 0x5e, 0xac, 0x42, 0x13, 0xe8, 0xb1, 0x5c, 0x76, 0x87, 0x83, + 0x9b, 0x8f, 0xe4, 0xd0, 0x8c, 0xc5, 0x44, 0xb8, 0x89, 0x76, 0x2f, 0xbb, 0x43, 0xee, 0x4f, 0x7f, + 0x0b, 0x99, 0x73, 0x35, 0x22, 0x47, 0xc0, 0x8c, 0x4c, 0x4e, 0x8f, 0xee, 0xa3, 0x3d, 0xec, 0x81, + 0x8a, 0x2c, 0x1b, 0xd6, 0xfa, 0xce, 0xfa, 0x33, 0xde, 0xb5, 0x07, 0xbb, 0x52, 0x19, 0x6d, 0x36, + 0x67, 0x1e, 0xa7, 0x21, 0x38, 0x9f, 0x91, 0xc8, 0x2a, 0xa1, 0xbf, 0x15, 0x82, 0xb8, 0x0d, 0xb1, + 0x50, 0x32, 0x39, 0xfc, 0x0e, 0x15, 0x1d, 0xe7, 0x8a, 0x1c, 0x6c, 0xea, 0xae, 0xae, 0xaa, 0xff, + 0x9a, 0x48, 0x0b, 0xc8, 0xa8, 0x89, 0x75, 0xc7, 0x16, 0xb1, 0x52, 0xf4, 0x12, 0x9f, 0xa2, 0xd2, + 0x3d, 0x88, 0x6d, 0x2b, 0xb6, 0x82, 0x0c, 0xb7, 0x13, 0x4d, 0xda, 0xa6, 0xe8, 0x87, 0xad, 0x0f, + 0x85, 0xe6, 0x3f, 0x16, 0xb2, 0x80, 0xf0, 0x60, 0x63, 0x29, 0xc3, 0x2e, 0xe4, 0x1b, 0xf6, 0x56, + 0xae, 0x61, 0x17, 0xf3, 0x0d, 0x7b, 0x3b, 0x6d, 0xd8, 0xd9, 0xf9, 0x97, 0xd6, 0xe6, 0x9f, 0x08, + 0xbf, 0x9c, 0x12, 0xfe, 0x87, 0xa5, 0x58, 0x0f, 0x41, 0xac, 0x69, 0x4b, 0x5d, 0xde, 0x7a, 0x83, + 0x40, 0x2b, 0xb9, 0x02, 0xad, 0xaf, 0x0b, 0x74, 0x27, 0x5f, 0xa0, 0xd6, 0xf3, 0x05, 0x9a, 0xa1, + 0x03, 0xda, 0x44, 0x87, 
0x6a, 0x0e, 0x1d, 0x72, 0x45, 0xb0, 0xbb, 0x51, 0x04, 0xb5, 0x7c, 0x11, + 0xbc, 0x79, 0x52, 0x04, 0x27, 0x4f, 0x88, 0x60, 0xef, 0x51, 0x11, 0xbc, 0x58, 0x13, 0xc1, 0x9a, + 0x3b, 0xbf, 0x7e, 0x96, 0x3b, 0xef, 0xe7, 0xb9, 0x73, 0xca, 0xbc, 0x0e, 0x9e, 0x6d, 0x5e, 0xb1, + 0x86, 0xf0, 0x73, 0x34, 0x84, 0xbb, 0xe8, 0x70, 0x14, 0xb9, 0x2e, 0x0b, 0xc3, 0x73, 0x36, 0x11, + 0x92, 0x0d, 0x69, 0x18, 0x72, 0x7f, 0x0a, 0x96, 0x50, 0xb2, 0x73, 0x31, 0xfc, 0x2d, 0x3a, 0xfa, + 0x99, 0x72, 0x2f, 0x92, 0x2c, 0x06, 0x7e, 0xa7, 0xd2, 0xd7, 0x9b, 0xbe, 0x84, 0x4d, 0xf9, 0x20, + 0xfe, 0x1e, 0x1d, 0x67, 0x81, 0xc4, 0x16, 0xc9, 0x31, 0x6c, 0x7b, 0x04, 0xd5, 0x2c, 0x1a, 0x4a, + 0xf1, 0xb0, 0x00, 0x35, 0x7c, 0x61, 0x58, 0xb4, 0x4c, 0x2c, 0x51, 0x18, 0x13, 0x49, 0xa1, 0x30, + 0xab, 0xcd, 0x3e, 0xfe, 0xf2, 0xbf, 0xfa, 0xf8, 0xda, 0xaf, 0xd0, 0x2b, 0x78, 0x4d, 0x36, 0xf9, + 0x3f, 0x58, 0xd5, 0xf9, 0x37, 0x7f, 0xbc, 0x9b, 0x72, 0x35, 0x8b, 0x6e, 0xf5, 0x1f, 0x9c, 0xce, + 0x8c, 0x86, 0x33, 0xee, 0x0a, 0x19, 0x74, 0x5c, 0xe1, 0x87, 0x91, 0xd7, 0xf9, 0xec, 0xaf, 0xef, + 0x6d, 0x19, 0x12, 0xef, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x33, 0x6c, 0x46, 0x9d, 0x14, 0x0b, + 0x00, 0x00, } - -func (m *HealthCheck) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HealthCheck) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HealthCheck) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeout) > 0 { - i -= len(m.Timeout) - copy(dAtA[i:], m.Timeout) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Timeout))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if len(m.Interval) > 0 { - i -= len(m.Interval) - copy(dAtA[i:], m.Interval) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Interval))) - i-- - dAtA[i] = 0x7a - } - if m.ExposedPort != 0 { - i = encodeVarintHealthcheck(dAtA, i, uint64(m.ExposedPort)) - i-- - dAtA[i] = 0x70 - } - { - size, err := m.EnterpriseMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x62 - } - { - size, err := m.RaftIndex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - if len(m.ServiceTags) > 0 { - for iNdEx := len(m.ServiceTags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ServiceTags[iNdEx]) - copy(dAtA[i:], m.ServiceTags[iNdEx]) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ServiceTags[iNdEx]))) - i-- - dAtA[i] = 0x4a - } - } - if len(m.ServiceName) > 0 { - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0x42 - } - if len(m.ServiceID) > 0 { - i -= len(m.ServiceID) - copy(dAtA[i:], m.ServiceID) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ServiceID))) - i-- - dAtA[i] = 0x3a - } - if len(m.Output) > 0 { - i -= len(m.Output) - copy(dAtA[i:], m.Output) - i = 
encodeVarintHealthcheck(dAtA, i, uint64(len(m.Output))) - i-- - dAtA[i] = 0x32 - } - if len(m.Notes) > 0 { - i -= len(m.Notes) - copy(dAtA[i:], m.Notes) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Notes))) - i-- - dAtA[i] = 0x2a - } - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x22 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if len(m.CheckID) > 0 { - i -= len(m.CheckID) - copy(dAtA[i:], m.CheckID) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.CheckID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Node) > 0 { - i -= len(m.Node) - copy(dAtA[i:], m.Node) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Node))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HeaderValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HeaderValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HeaderValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Value[iNdEx]) - copy(dAtA[i:], m.Value[iNdEx]) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Value[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *HealthCheckDefinition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HealthCheckDefinition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HealthCheckDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.H2PingUseTLS { - i-- - if m.H2PingUseTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if len(m.H2PING) > 0 { - i -= len(m.H2PING) - copy(dAtA[i:], m.H2PING) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.H2PING))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - if len(m.TLSServerName) > 0 { - i -= len(m.TLSServerName) - copy(dAtA[i:], m.TLSServerName) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TLSServerName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - if len(m.Body) > 0 { - i -= len(m.Body) - copy(dAtA[i:], m.Body) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Body))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - { - size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - if len(m.AliasService) > 0 { - i -= len(m.AliasService) - copy(dAtA[i:], m.AliasService) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.AliasService))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if len(m.AliasNode) > 0 { - i -= len(m.AliasNode) - copy(dAtA[i:], m.AliasNode) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.AliasNode))) - i-- - dAtA[i] = 0x7a - } - if m.GRPCUseTLS { - i-- - if m.GRPCUseTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - 
dAtA[i] = 0x70 - } - if len(m.GRPC) > 0 { - i -= len(m.GRPC) - copy(dAtA[i:], m.GRPC) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.GRPC))) - i-- - dAtA[i] = 0x6a - } - if len(m.Shell) > 0 { - i -= len(m.Shell) - copy(dAtA[i:], m.Shell) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Shell))) - i-- - dAtA[i] = 0x62 - } - if len(m.DockerContainerID) > 0 { - i -= len(m.DockerContainerID) - copy(dAtA[i:], m.DockerContainerID) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.DockerContainerID))) - i-- - dAtA[i] = 0x5a - } - if len(m.ScriptArgs) > 0 { - for iNdEx := len(m.ScriptArgs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ScriptArgs[iNdEx]) - copy(dAtA[i:], m.ScriptArgs[iNdEx]) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ScriptArgs[iNdEx]))) - i-- - dAtA[i] = 0x52 - } - } - if m.OutputMaxSize != 0 { - i = encodeVarintHealthcheck(dAtA, i, uint64(m.OutputMaxSize)) - i-- - dAtA[i] = 0x48 - } - { - size, err := m.DeregisterCriticalServiceAfter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - { - size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - { - size, err := m.Interval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - if len(m.TCP) > 0 { - i -= len(m.TCP) - copy(dAtA[i:], m.TCP) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TCP))) - i-- - dAtA[i] = 0x2a - } - if len(m.Method) > 0 { - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0x22 - } - if len(m.Header) > 0 { - for k := range m.Header { - v := m.Header[k] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintHealthcheck(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if m.TLSSkipVerify { - i-- - if m.TLSSkipVerify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.HTTP) > 0 { - i -= len(m.HTTP) - copy(dAtA[i:], m.HTTP) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.HTTP))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CheckType) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckType) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckType) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.H2PingUseTLS { - i-- - if m.H2PingUseTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf0 - } - if m.FailuresBeforeWarning != 0 { - i = encodeVarintHealthcheck(dAtA, i, uint64(m.FailuresBeforeWarning)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe8 - } - if len(m.H2PING) > 0 { - i -= len(m.H2PING) - copy(dAtA[i:], m.H2PING) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.H2PING))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 - } - if 
len(m.TLSServerName) > 0 { - i -= len(m.TLSServerName) - copy(dAtA[i:], m.TLSServerName) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TLSServerName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda - } - if len(m.Body) > 0 { - i -= len(m.Body) - copy(dAtA[i:], m.Body) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Body))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - } - if m.OutputMaxSize != 0 { - i = encodeVarintHealthcheck(dAtA, i, uint64(m.OutputMaxSize)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc8 - } - if len(m.ProxyGRPC) > 0 { - i -= len(m.ProxyGRPC) - copy(dAtA[i:], m.ProxyGRPC) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ProxyGRPC))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.ProxyHTTP) > 0 { - i -= len(m.ProxyHTTP) - copy(dAtA[i:], m.ProxyHTTP) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ProxyHTTP))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - if m.FailuresBeforeCritical != 0 { - i = encodeVarintHealthcheck(dAtA, i, uint64(m.FailuresBeforeCritical)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.SuccessBeforePassing != 0 { - i = encodeVarintHealthcheck(dAtA, i, uint64(m.SuccessBeforePassing)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if len(m.Header) > 0 { - for k := range m.Header { - v := m.Header[k] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintHealthcheck(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - { - size, err := m.DeregisterCriticalServiceAfter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - { - size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - { - size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - if m.TLSSkipVerify { - i-- - if m.TLSSkipVerify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.GRPCUseTLS { - i-- - if m.GRPCUseTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x78 - } - if len(m.GRPC) > 0 { - i -= len(m.GRPC) - copy(dAtA[i:], m.GRPC) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.GRPC))) - i-- - dAtA[i] = 0x72 - } - if len(m.Shell) > 0 { - i -= len(m.Shell) - copy(dAtA[i:], m.Shell) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Shell))) - i-- - dAtA[i] = 0x6a - } - if len(m.DockerContainerID) > 0 { - i -= len(m.DockerContainerID) - copy(dAtA[i:], m.DockerContainerID) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.DockerContainerID))) - i-- - dAtA[i] = 0x62 - } - if len(m.AliasService) > 0 { - i -= len(m.AliasService) - copy(dAtA[i:], m.AliasService) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.AliasService))) - i-- - dAtA[i] = 0x5a - } - if len(m.AliasNode) > 0 { - i -= len(m.AliasNode) - copy(dAtA[i:], m.AliasNode) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.AliasNode))) - i-- - dAtA[i] = 0x52 - } - { - size, err := 
m.Interval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintHealthcheck(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - if len(m.TCP) > 0 { - i -= len(m.TCP) - copy(dAtA[i:], m.TCP) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TCP))) - i-- - dAtA[i] = 0x42 - } - if len(m.Method) > 0 { - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0x3a - } - if len(m.HTTP) > 0 { - i -= len(m.HTTP) - copy(dAtA[i:], m.HTTP) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.HTTP))) - i-- - dAtA[i] = 0x32 - } - if len(m.ScriptArgs) > 0 { - for iNdEx := len(m.ScriptArgs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ScriptArgs[iNdEx]) - copy(dAtA[i:], m.ScriptArgs[iNdEx]) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.ScriptArgs[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Notes) > 0 { - i -= len(m.Notes) - copy(dAtA[i:], m.Notes) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Notes))) - i-- - dAtA[i] = 0x22 - } - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.CheckID) > 0 { - i -= len(m.CheckID) - copy(dAtA[i:], m.CheckID) - i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.CheckID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintHealthcheck(dAtA []byte, offset int, v uint64) int { - offset -= sovHealthcheck(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *HealthCheck) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Node) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.CheckID) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Status) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Notes) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Output) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.ServiceID) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.ServiceName) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - if len(m.ServiceTags) > 0 { - for _, s := range m.ServiceTags { - l = len(s) - n += 1 + l + sovHealthcheck(uint64(l)) - } - } - l = m.Definition.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - l = m.RaftIndex.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - l = len(m.Type) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = m.EnterpriseMeta.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - if m.ExposedPort != 0 { - n += 1 + sovHealthcheck(uint64(m.ExposedPort)) - } - l = len(m.Interval) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Timeout) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - return n -} - -func (m *HeaderValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovHealthcheck(uint64(l)) - } - } - return n -} - -func (m *HealthCheckDefinition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HTTP) - if l > 0 { - n 
+= 1 + l + sovHealthcheck(uint64(l)) - } - if m.TLSSkipVerify { - n += 2 - } - if len(m.Header) > 0 { - for k, v := range m.Header { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovHealthcheck(uint64(len(k))) + 1 + l + sovHealthcheck(uint64(l)) - n += mapEntrySize + 1 + sovHealthcheck(uint64(mapEntrySize)) - } - } - l = len(m.Method) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.TCP) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = m.Interval.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - l = m.Timeout.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - l = m.DeregisterCriticalServiceAfter.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - if m.OutputMaxSize != 0 { - n += 1 + sovHealthcheck(uint64(m.OutputMaxSize)) - } - if len(m.ScriptArgs) > 0 { - for _, s := range m.ScriptArgs { - l = len(s) - n += 1 + l + sovHealthcheck(uint64(l)) - } - } - l = len(m.DockerContainerID) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Shell) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.GRPC) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - if m.GRPCUseTLS { - n += 2 - } - l = len(m.AliasNode) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.AliasService) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - l = m.TTL.Size() - n += 2 + l + sovHealthcheck(uint64(l)) - l = len(m.Body) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - l = len(m.TLSServerName) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - l = len(m.H2PING) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - if m.H2PingUseTLS { - n += 3 - } - return n -} - -func (m *CheckType) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.CheckID) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Status) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Notes) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - if len(m.ScriptArgs) > 0 { - for _, s := range m.ScriptArgs { - l = len(s) - n += 1 + l + sovHealthcheck(uint64(l)) - } - } - l = len(m.HTTP) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Method) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.TCP) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = m.Interval.Size() - n += 1 + l + sovHealthcheck(uint64(l)) - l = len(m.AliasNode) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.AliasService) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.DockerContainerID) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.Shell) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - l = len(m.GRPC) - if l > 0 { - n += 1 + l + sovHealthcheck(uint64(l)) - } - if m.GRPCUseTLS { - n += 2 - } - if m.TLSSkipVerify { - n += 3 - } - l = m.Timeout.Size() - n += 2 + l + sovHealthcheck(uint64(l)) - l = m.TTL.Size() - n += 2 + l + sovHealthcheck(uint64(l)) - l = m.DeregisterCriticalServiceAfter.Size() - n += 2 + l + sovHealthcheck(uint64(l)) - if len(m.Header) > 0 { - for k, v := range m.Header { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovHealthcheck(uint64(len(k))) + 1 + l + sovHealthcheck(uint64(l)) - n += mapEntrySize + 2 + sovHealthcheck(uint64(mapEntrySize)) - } - } - if m.SuccessBeforePassing != 0 { - n += 2 + 
sovHealthcheck(uint64(m.SuccessBeforePassing)) - } - if m.FailuresBeforeCritical != 0 { - n += 2 + sovHealthcheck(uint64(m.FailuresBeforeCritical)) - } - l = len(m.ProxyHTTP) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - l = len(m.ProxyGRPC) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - if m.OutputMaxSize != 0 { - n += 2 + sovHealthcheck(uint64(m.OutputMaxSize)) - } - l = len(m.Body) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - l = len(m.TLSServerName) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - l = len(m.H2PING) - if l > 0 { - n += 2 + l + sovHealthcheck(uint64(l)) - } - if m.FailuresBeforeWarning != 0 { - n += 2 + sovHealthcheck(uint64(m.FailuresBeforeWarning)) - } - if m.H2PingUseTLS { - n += 3 - } - return n -} - -func sovHealthcheck(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozHealthcheck(x uint64) (n int) { - return sovHealthcheck(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *HealthCheck) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HealthCheck: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HealthCheck: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Node = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CheckID = github_com_hashicorp_consul_types.CheckID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Notes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Notes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Output = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen 
- if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceTags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceTags = append(m.ServiceTags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnterpriseMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.EnterpriseMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExposedPort", wireType) - } - m.ExposedPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExposedPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Interval = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHealthcheck(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHealthcheck - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HeaderValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HeaderValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HeaderValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHealthcheck(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHealthcheck - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HealthCheckDefinition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HealthCheckDefinition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HealthCheckDefinition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HTTP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSSkipVerify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TLSSkipVerify = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = make(map[string]HeaderValue) - } - var mapkey string - mapvalue := &HeaderValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthHealthcheck - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthHealthcheck - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthHealthcheck - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &HeaderValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipHealthcheck(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHealthcheck - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Header[mapkey] = *mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TCP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeregisterCriticalServiceAfter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeregisterCriticalServiceAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OutputMaxSize", wireType) - } - m.OutputMaxSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OutputMaxSize |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScriptArgs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScriptArgs = append(m.ScriptArgs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shell", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GRPC", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GRPC = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GRPCUseTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.GRPCUseTLS = bool(v != 0) - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AliasNode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AliasNode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AliasService", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AliasService = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TTL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 
18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Body = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSServerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLSServerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field H2PING", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.H2PING = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field H2PingUseTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.H2PingUseTLS = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipHealthcheck(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHealthcheck - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckType) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckType: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckType: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field CheckID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CheckID = github_com_hashicorp_consul_types.CheckID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Notes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Notes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScriptArgs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScriptArgs = append(m.ScriptArgs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HTTP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TCP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AliasNode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AliasNode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AliasService", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AliasService = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shell", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GRPC", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GRPC = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GRPCUseTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.GRPCUseTLS = bool(v != 0) - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSSkipVerify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TLSSkipVerify = bool(v != 0) - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Timeout", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TTL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeregisterCriticalServiceAfter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeregisterCriticalServiceAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = make(map[string]HeaderValue) - } - var mapkey string - mapvalue := &HeaderValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthHealthcheck - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if 
postStringIndexmapkey < 0 { - return ErrInvalidLengthHealthcheck - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthHealthcheck - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &HeaderValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipHealthcheck(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHealthcheck - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Header[mapkey] = *mapvalue - iNdEx = postIndex - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessBeforePassing", wireType) - } - m.SuccessBeforePassing = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SuccessBeforePassing |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailuresBeforeCritical", wireType) - } - m.FailuresBeforeCritical = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FailuresBeforeCritical |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProxyHTTP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProxyHTTP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProxyGRPC", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProxyGRPC = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
OutputMaxSize", wireType) - } - m.OutputMaxSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OutputMaxSize |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Body = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSServerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLSServerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 28: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field H2PING", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHealthcheck - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHealthcheck - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.H2PING = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailuresBeforeWarning", wireType) - } - m.FailuresBeforeWarning = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FailuresBeforeWarning |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field H2PingUseTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.H2PingUseTLS = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipHealthcheck(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHealthcheck - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF 
- } - return nil -} -func skipHealthcheck(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHealthcheck - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthHealthcheck - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupHealthcheck - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthHealthcheck - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthHealthcheck = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHealthcheck = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupHealthcheck = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbservice/healthcheck.proto b/proto/pbservice/healthcheck.proto index 15a5d3dce..6d709d874 100644 --- a/proto/pbservice/healthcheck.proto +++ b/proto/pbservice/healthcheck.proto @@ -5,15 +5,7 @@ package pbservice; option go_package = "github.com/hashicorp/consul/proto/pbservice"; import "google/protobuf/duration.proto"; -import "proto/pbcommongogo/common.proto"; - -// This fake import path is replaced by the build script with a versioned path -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_unkeyed_all) = false; -option (gogoproto.goproto_unrecognized_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_sizecache_all) = false; +import "proto/pbcommon/common.proto"; // HealthCheck represents a single check on a given node // @@ -24,7 +16,8 @@ option (gogoproto.goproto_sizecache_all) = false; // name=Structs message HealthCheck { string Node = 1; - string CheckID = 2 [(gogoproto.casttype) = "github.com/hashicorp/consul/types.CheckID"]; + // mog: func-to=CheckIDType func-from=string + string CheckID = 2; string Name = 3; string Status = 4; // The current check status string Notes = 5; // Additional notes with the status @@ -34,13 +27,13 @@ message HealthCheck { repeated string ServiceTags = 9; // optional service tags string Type = 12; // Check type: http/ttl/tcp/etc - HealthCheckDefinition Definition = 10 [(gogoproto.nullable) = false]; + HealthCheckDefinition Definition = 10; // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - commongogo.RaftIndex RaftIndex = 11 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; + common.RaftIndex RaftIndex = 11; // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - commongogo.EnterpriseMeta EnterpriseMeta = 13 [(gogoproto.nullable) = false]; + common.EnterpriseMeta EnterpriseMeta = 13; // mog: 
func-to=int func-from=int32
   int32 ExposedPort = 14;
@@ -66,19 +59,19 @@ message HealthCheckDefinition {
   bool TLSSkipVerify = 2;
   // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs
-  map<string, HeaderValue> Header = 3 [(gogoproto.nullable) = false];
+  map<string, HeaderValue> Header = 3;
   string Method = 4;
   string Body = 18;
   string TCP = 5;
-  google.protobuf.Duration Interval = 6
-      [(gogoproto.nullable) = false];
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration Interval = 6;
   // mog: func-to=uint func-from=uint32
   uint32 OutputMaxSize = 9;
-  google.protobuf.Duration Timeout = 7
-      [(gogoproto.nullable) = false];
-  google.protobuf.Duration DeregisterCriticalServiceAfter = 8
-      [(gogoproto.nullable) = false];
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration Timeout = 7;
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration DeregisterCriticalServiceAfter = 8;
   repeated string ScriptArgs = 10;
   string DockerContainerID = 11;
   string Shell = 12;
@@ -88,8 +81,8 @@ message HealthCheckDefinition {
   bool GRPCUseTLS = 14;
   string AliasNode = 15;
   string AliasService = 16;
-  google.protobuf.Duration TTL = 17
-      [(gogoproto.nullable) = false];
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration TTL = 17;
 }
 // CheckType is used to create either the CheckMonitor or the CheckTTL.
@@ -105,7 +98,8 @@ message HealthCheckDefinition {
 // output=healthcheck.gen.go
 // name=Structs
 message CheckType {
-  string CheckID = 1 [(gogoproto.casttype) = "github.com/hashicorp/consul/types.CheckID"];
+  // mog: func-to=CheckIDType func-from=string
+  string CheckID = 1;
   string Name = 2;
   string Status = 3;
   string Notes = 4;
@@ -113,12 +107,12 @@ message CheckType {
   repeated string ScriptArgs = 5;
   string HTTP = 6;
   // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs
-  map<string, HeaderValue> Header = 20 [(gogoproto.nullable) = false];
+  map<string, HeaderValue> Header = 20;
   string Method = 7;
   string Body = 26;
   string TCP = 8;
-  google.protobuf.Duration Interval = 9
-      [(gogoproto.nullable) = false];
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration Interval = 9;
   string AliasNode = 10;
   string AliasService = 11;
@@ -130,10 +124,10 @@ message CheckType {
   bool GRPCUseTLS = 15;
   string TLSServerName = 27;
   bool TLSSkipVerify = 16;
-  google.protobuf.Duration Timeout = 17
-      [(gogoproto.nullable) = false];
-  google.protobuf.Duration TTL = 18
-      [(gogoproto.nullable) = false];
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration Timeout = 17;
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration TTL = 18;
   // mog: func-to=int func-from=int32
   int32 SuccessBeforePassing = 21;
@@ -149,8 +143,8 @@ message CheckType {
   // DeregisterCriticalServiceAfter, if >0, will cause the associated
   // service, if any, to be deregistered if this check is critical for
   // longer than this duration.
-  google.protobuf.Duration DeregisterCriticalServiceAfter = 19
-      [(gogoproto.nullable) = false];
+  // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto
+  google.protobuf.Duration DeregisterCriticalServiceAfter = 19;
   // mog: func-to=int func-from=int32
   int32 OutputMaxSize = 25;
diff --git a/proto/pbservice/ids.go b/proto/pbservice/ids.go
index 7fac22f42..ef46d3eaa 100644
--- a/proto/pbservice/ids.go
+++ b/proto/pbservice/ids.go
@@ -23,14 +23,22 @@ func (m *CheckServiceNode) UniqueID() string {
 	case m.Node != nil:
 		builder.WriteString(m.Node.Partition + "/")
 	case m.Service != nil:
-		builder.WriteString(m.Service.EnterpriseMeta.Partition + "/")
+		partition := ""
+		if m.Service.EnterpriseMeta != nil {
+			partition = m.Service.EnterpriseMeta.Partition
+		}
+		builder.WriteString(partition + "/")
 	}
 	if m.Node != nil {
 		builder.WriteString(m.Node.Node + "/")
 	}
 	if m.Service != nil {
-		builder.WriteString(m.Service.EnterpriseMeta.Namespace + "/")
+		namespace := ""
+		if m.Service.EnterpriseMeta != nil {
+			namespace = m.Service.EnterpriseMeta.Namespace
+		}
+		builder.WriteString(namespace + "/")
 		builder.WriteString(m.Service.ID)
 	}
 	return builder.String()
 }
diff --git a/proto/pbservice/ids_test.go b/proto/pbservice/ids_test.go
index 2856aa70a..2d534e902 100644
--- a/proto/pbservice/ids_test.go
+++ b/proto/pbservice/ids_test.go
@@ -5,7 +5,7 @@ import (
 	"github.com/stretchr/testify/require"
-	"github.com/hashicorp/consul/proto/pbcommongogo"
+	"github.com/hashicorp/consul/proto/pbcommon"
 )
 func TestCheckServiceNode_UniqueID(t *testing.T) {
@@ -25,7 +25,7 @@ func TestCheckServiceNode_UniqueID(t *testing.T) {
 			Node: &Node{Node: "the-node-name"},
 			Service: &NodeService{
 				ID: "the-service-id",
-				EnterpriseMeta: pbcommongogo.EnterpriseMeta{Namespace: "the-namespace"},
+				EnterpriseMeta: &pbcommon.EnterpriseMeta{Namespace: "the-namespace"},
 			},
 		},
 		expected: "/the-node-name/the-namespace/the-service-id",
@@ -35,7 +35,7 @@ func TestCheckServiceNode_UniqueID(t *testing.T) {
 		csn: CheckServiceNode{
 			Service: &NodeService{
 				ID: "the-service-id",
-				EnterpriseMeta: pbcommongogo.EnterpriseMeta{Namespace: "the-namespace"},
+				EnterpriseMeta: &pbcommon.EnterpriseMeta{Namespace: "the-namespace"},
 			},
 		},
 		expected: "/the-namespace/the-service-id",
diff --git a/proto/pbservice/node.gen.go b/proto/pbservice/node.gen.go
index 0d9b1f641..cadf2c7e9 100644
--- a/proto/pbservice/node.gen.go
+++ b/proto/pbservice/node.gen.go
@@ -2,11 +2,13 @@
 package pbservice
-import structs "github.com/hashicorp/consul/agent/structs"
+import "github.com/hashicorp/consul/agent/structs"
-func NodeToStructs(s Node) structs.Node {
-	var t structs.Node
-	t.ID = s.ID
+func NodeToStructs(s *Node, t *structs.Node) {
+	if s == nil {
+		return
+	}
+	t.ID = NodeIDType(s.ID)
 	t.Node = s.Node
 	t.Address = s.Address
 	t.Datacenter = s.Datacenter
@@ -14,11 +16,12 @@ func NodeToStructs(s Node) structs.Node {
 	t.TaggedAddresses = s.TaggedAddresses
 	t.Meta = s.Meta
 	t.RaftIndex = RaftIndexToStructs(s.RaftIndex)
-	return t
 }
-func NewNodeFromStructs(t structs.Node) Node {
-	var s Node
-	s.ID = t.ID
+func NodeFromStructs(t *structs.Node, s *Node) {
+	if s == nil {
+		return
+	}
+	s.ID = string(t.ID)
 	s.Node = t.Node
 	s.Address = t.Address
 	s.Datacenter = t.Datacenter
@@ -26,11 +29,12 @@ func NewNodeFromStructs(t structs.Node) Node {
 	s.TaggedAddresses = t.TaggedAddresses
 	s.Meta = t.Meta
 	s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex)
-	return s
 }
-func NodeServiceToStructs(s NodeService) structs.NodeService {
-	var t structs.NodeService
-	t.Kind = s.Kind
+func 
NodeServiceToStructs(s *NodeService, t *structs.NodeService) { + if s == nil { + return + } + t.Kind = structs.ServiceKind(s.Kind) t.ID = s.ID t.Service = s.Service t.Tags = s.Tags @@ -41,16 +45,21 @@ func NodeServiceToStructs(s NodeService) structs.NodeService { t.SocketPath = s.SocketPath t.Weights = WeightsPtrToStructs(s.Weights) t.EnableTagOverride = s.EnableTagOverride - t.Proxy = ConnectProxyConfigToStructs(s.Proxy) - t.Connect = ServiceConnectToStructs(s.Connect) + if s.Proxy != nil { + ConnectProxyConfigToStructs(s.Proxy, &t.Proxy) + } + if s.Connect != nil { + ServiceConnectToStructs(s.Connect, &t.Connect) + } t.LocallyRegisteredAsSidecar = s.LocallyRegisteredAsSidecar t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta) t.RaftIndex = RaftIndexToStructs(s.RaftIndex) - return t } -func NewNodeServiceFromStructs(t structs.NodeService) NodeService { - var s NodeService - s.Kind = t.Kind +func NodeServiceFromStructs(t *structs.NodeService, s *NodeService) { + if s == nil { + return + } + s.Kind = string(t.Kind) s.ID = t.ID s.Service = t.Service s.Tags = t.Tags @@ -61,10 +70,17 @@ func NewNodeServiceFromStructs(t structs.NodeService) NodeService { s.SocketPath = t.SocketPath s.Weights = NewWeightsPtrFromStructs(t.Weights) s.EnableTagOverride = t.EnableTagOverride - s.Proxy = NewConnectProxyConfigFromStructs(t.Proxy) - s.Connect = NewServiceConnectFromStructs(t.Connect) + { + var x ConnectProxyConfig + ConnectProxyConfigFromStructs(&t.Proxy, &x) + s.Proxy = &x + } + { + var x ServiceConnect + ServiceConnectFromStructs(&t.Connect, &x) + s.Connect = &x + } s.LocallyRegisteredAsSidecar = t.LocallyRegisteredAsSidecar s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta) s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) - return s } diff --git a/proto/pbservice/node.pb.go b/proto/pbservice/node.pb.go index a93dc6da1..556f37d57 100644 --- a/proto/pbservice/node.pb.go +++ b/proto/pbservice/node.pb.go @@ -1,18 +1,13 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbservice/node.proto package pbservice import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" - github_com_hashicorp_consul_agent_structs "github.com/hashicorp/consul/agent/structs" - pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo" - github_com_hashicorp_consul_types "github.com/hashicorp/consul/types" - io "io" + pbcommon "github.com/hashicorp/consul/proto/pbcommon" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -29,9 +24,12 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. 
type CheckServiceNode struct { - Node *Node `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` - Service *NodeService `protobuf:"bytes,2,opt,name=Service,proto3" json:"Service,omitempty"` - Checks []*HealthCheck `protobuf:"bytes,3,rep,name=Checks,proto3" json:"Checks,omitempty"` + Node *Node `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` + Service *NodeService `protobuf:"bytes,2,opt,name=Service,proto3" json:"Service,omitempty"` + Checks []*HealthCheck `protobuf:"bytes,3,rep,name=Checks,proto3" json:"Checks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CheckServiceNode) Reset() { *m = CheckServiceNode{} } @@ -40,26 +38,18 @@ func (*CheckServiceNode) ProtoMessage() {} func (*CheckServiceNode) Descriptor() ([]byte, []int) { return fileDescriptor_bbc215b78fa95fe5, []int{0} } + func (m *CheckServiceNode) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_CheckServiceNode.Unmarshal(m, b) } func (m *CheckServiceNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckServiceNode.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_CheckServiceNode.Marshal(b, m, deterministic) } func (m *CheckServiceNode) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckServiceNode.Merge(m, src) } func (m *CheckServiceNode) XXX_Size() int { - return m.Size() + return xxx_messageInfo_CheckServiceNode.Size(m) } func (m *CheckServiceNode) XXX_DiscardUnknown() { xxx_messageInfo_CheckServiceNode.DiscardUnknown(m) @@ -67,6 +57,27 @@ func (m *CheckServiceNode) XXX_DiscardUnknown() { var xxx_messageInfo_CheckServiceNode proto.InternalMessageInfo +func (m *CheckServiceNode) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *CheckServiceNode) GetService() *NodeService { + if m != nil { + return m.Service + } + return nil +} + +func (m *CheckServiceNode) GetChecks() []*HealthCheck { + if m != nil { + return m.Checks + } + return nil +} + // Node contains information about a node. 
// // mog annotation: @@ -75,15 +86,19 @@ var xxx_messageInfo_CheckServiceNode proto.InternalMessageInfo // output=node.gen.go // name=Structs type Node struct { - ID github_com_hashicorp_consul_types.NodeID `protobuf:"bytes,1,opt,name=ID,proto3,casttype=github.com/hashicorp/consul/types.NodeID" json:"ID,omitempty"` - Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` - Partition string `protobuf:"bytes,8,opt,name=Partition,proto3" json:"Partition,omitempty"` - Address string `protobuf:"bytes,3,opt,name=Address,proto3" json:"Address,omitempty"` - Datacenter string `protobuf:"bytes,4,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - TaggedAddresses map[string]string `protobuf:"bytes,5,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // mog: func-to=NodeIDType func-from=string + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` + Partition string `protobuf:"bytes,8,opt,name=Partition,proto3" json:"Partition,omitempty"` + Address string `protobuf:"bytes,3,opt,name=Address,proto3" json:"Address,omitempty"` + Datacenter string `protobuf:"bytes,4,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + TaggedAddresses map[string]string `protobuf:"bytes,5,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - pbcommongogo.RaftIndex `protobuf:"bytes,7,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,7,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Node) Reset() { *m = Node{} } @@ -92,26 +107,18 @@ func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor_bbc215b78fa95fe5, []int{1} } + func (m *Node) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Node.Unmarshal(m, b) } func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Node.Marshal(b, m, deterministic) } func (m *Node) XXX_Merge(src proto.Message) { xxx_messageInfo_Node.Merge(m, src) } func (m *Node) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Node.Size(m) } func (m *Node) XXX_DiscardUnknown() { xxx_messageInfo_Node.DiscardUnknown(m) @@ -119,6 +126,62 @@ func (m *Node) XXX_DiscardUnknown() { var xxx_messageInfo_Node proto.InternalMessageInfo +func (m *Node) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Node) GetNode() string { + if m != nil { + return m.Node + } + return "" +} + +func (m *Node) GetPartition() string { + if m != nil 
{ + return m.Partition + } + return "" +} + +func (m *Node) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Node) GetDatacenter() string { + if m != nil { + return m.Datacenter + } + return "" +} + +func (m *Node) GetTaggedAddresses() map[string]string { + if m != nil { + return m.TaggedAddresses + } + return nil +} + +func (m *Node) GetMeta() map[string]string { + if m != nil { + return m.Meta + } + return nil +} + +func (m *Node) GetRaftIndex() *pbcommon.RaftIndex { + if m != nil { + return m.RaftIndex + } + return nil +} + // NodeService is a service provided by a node // // mog annotation: @@ -130,14 +193,15 @@ type NodeService struct { // Kind is the kind of service this is. Different kinds of services may // have differing validation, DNS behavior, etc. An empty kind will default // to the Default kind. See ServiceKind for the full list of kinds. - Kind github_com_hashicorp_consul_agent_structs.ServiceKind `protobuf:"bytes,1,opt,name=Kind,proto3,casttype=github.com/hashicorp/consul/agent/structs.ServiceKind" json:"Kind,omitempty"` - ID string `protobuf:"bytes,2,opt,name=ID,proto3" json:"ID,omitempty"` - Service string `protobuf:"bytes,3,opt,name=Service,proto3" json:"Service,omitempty"` - Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` - Address string `protobuf:"bytes,5,opt,name=Address,proto3" json:"Address,omitempty"` + // mog: func-to=structs.ServiceKind func-from=string + Kind string `protobuf:"bytes,1,opt,name=Kind,proto3" json:"Kind,omitempty"` + ID string `protobuf:"bytes,2,opt,name=ID,proto3" json:"ID,omitempty"` + Service string `protobuf:"bytes,3,opt,name=Service,proto3" json:"Service,omitempty"` + Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` + Address string `protobuf:"bytes,5,opt,name=Address,proto3" json:"Address,omitempty"` // mog: func-to=MapStringServiceAddressToStructs func-from=NewMapStringServiceAddressFromStructs - TaggedAddresses map[string]ServiceAddress `protobuf:"bytes,15,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TaggedAddresses map[string]*ServiceAddress `protobuf:"bytes,15,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // mog: func-to=int func-from=int32 Port int32 `protobuf:"varint,7,opt,name=Port,proto3" json:"Port,omitempty"` SocketPath string `protobuf:"bytes,17,opt,name=SocketPath,proto3" json:"SocketPath,omitempty"` @@ -154,10 +218,10 @@ type NodeService struct { // in the other case. ProxyConfig may be a more natural name here, but it's // confusing for the UX because one of the fields in ConnectProxyConfig is // also called just "Config" - Proxy ConnectProxyConfig `protobuf:"bytes,11,opt,name=Proxy,proto3" json:"Proxy"` + Proxy *ConnectProxyConfig `protobuf:"bytes,11,opt,name=Proxy,proto3" json:"Proxy,omitempty"` // Connect are the Connect settings for a service. This is purposely NOT // a pointer so that we never have to nil-check this. 
- Connect ServiceConnect `protobuf:"bytes,12,opt,name=Connect,proto3" json:"Connect"` + Connect *ServiceConnect `protobuf:"bytes,12,opt,name=Connect,proto3" json:"Connect,omitempty"` // LocallyRegisteredAsSidecar is private as it is only used by a local agent // state to track if the service was registered from a nested sidecar_service // block. We need to track that so we can know whether we need to deregister @@ -177,9 +241,12 @@ type NodeService struct { // somewhere this is used in API output. LocallyRegisteredAsSidecar bool `protobuf:"varint,13,opt,name=LocallyRegisteredAsSidecar,proto3" json:"LocallyRegisteredAsSidecar,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - EnterpriseMeta pbcommongogo.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` + EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - pbcommongogo.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *NodeService) Reset() { *m = NodeService{} } @@ -188,26 +255,18 @@ func (*NodeService) ProtoMessage() {} func (*NodeService) Descriptor() ([]byte, []int) { return fileDescriptor_bbc215b78fa95fe5, []int{2} } + func (m *NodeService) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_NodeService.Unmarshal(m, b) } func (m *NodeService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodeService.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_NodeService.Marshal(b, m, deterministic) } func (m *NodeService) XXX_Merge(src proto.Message) { xxx_messageInfo_NodeService.Merge(m, src) } func (m *NodeService) XXX_Size() int { - return m.Size() + return xxx_messageInfo_NodeService.Size(m) } func (m *NodeService) XXX_DiscardUnknown() { xxx_messageInfo_NodeService.DiscardUnknown(m) @@ -215,6 +274,118 @@ func (m *NodeService) XXX_DiscardUnknown() { var xxx_messageInfo_NodeService proto.InternalMessageInfo +func (m *NodeService) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *NodeService) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *NodeService) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +func (m *NodeService) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *NodeService) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *NodeService) GetTaggedAddresses() map[string]*ServiceAddress { + if m != nil { + return m.TaggedAddresses + } + return nil +} + +func (m *NodeService) GetMeta() map[string]string { + if m != nil { + return m.Meta + } + return nil +} + +func (m *NodeService) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *NodeService) GetSocketPath() string { + if m != nil { + return m.SocketPath + } + return "" +} + +func (m *NodeService) GetWeights() *Weights { + if m != nil { + return m.Weights + } + return nil +} + +func (m 
*NodeService) GetEnableTagOverride() bool { + if m != nil { + return m.EnableTagOverride + } + return false +} + +func (m *NodeService) GetProxy() *ConnectProxyConfig { + if m != nil { + return m.Proxy + } + return nil +} + +func (m *NodeService) GetConnect() *ServiceConnect { + if m != nil { + return m.Connect + } + return nil +} + +func (m *NodeService) GetLocallyRegisteredAsSidecar() bool { + if m != nil { + return m.LocallyRegisteredAsSidecar + } + return false +} + +func (m *NodeService) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if m != nil { + return m.EnterpriseMeta + } + return nil +} + +func (m *NodeService) GetRaftIndex() *pbcommon.RaftIndex { + if m != nil { + return m.RaftIndex + } + return nil +} + func init() { proto.RegisterType((*CheckServiceNode)(nil), "pbservice.CheckServiceNode") proto.RegisterType((*Node)(nil), "pbservice.Node") @@ -222,2041 +393,54 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "pbservice.Node.TaggedAddressesEntry") proto.RegisterType((*NodeService)(nil), "pbservice.NodeService") proto.RegisterMapType((map[string]string)(nil), "pbservice.NodeService.MetaEntry") - proto.RegisterMapType((map[string]ServiceAddress)(nil), "pbservice.NodeService.TaggedAddressesEntry") + proto.RegisterMapType((map[string]*ServiceAddress)(nil), "pbservice.NodeService.TaggedAddressesEntry") } -func init() { proto.RegisterFile("proto/pbservice/node.proto", fileDescriptor_bbc215b78fa95fe5) } +func init() { + proto.RegisterFile("proto/pbservice/node.proto", fileDescriptor_bbc215b78fa95fe5) +} var fileDescriptor_bbc215b78fa95fe5 = []byte{ - // 777 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x13, 0xa7, 0x69, 0x26, 0xf7, 0xf6, 0x67, 0xd4, 0x7b, 0x35, 0x37, 0xba, 0x75, 0x42, - 0x61, 0x51, 0xa9, 0xc5, 0x46, 0x05, 0x04, 0x45, 0x80, 0xd4, 0x34, 0x95, 0x1a, 0x41, 0x4b, 0xe4, - 0x56, 0x42, 0x02, 0xb1, 0x98, 0xd8, 0x53, 0xdb, 0x6a, 0xea, 0x89, 0xc6, 0x93, 0xaa, 0x79, 0x0b, - 0x96, 0xf0, 0x12, 0x3c, 0x47, 0x97, 0x5d, 0xb2, 0x8a, 0xa0, 0x59, 0xf2, 0x06, 0x5d, 0xa1, 0x19, - 0x4f, 0x12, 0xc7, 0x0d, 0x15, 0x95, 0x58, 0xe5, 0xf8, 0x9c, 0xef, 0x7c, 0x73, 0x66, 0xbe, 0xef, - 0x28, 0xa0, 0xdc, 0x61, 0x94, 0x53, 0xab, 0xd3, 0x8a, 0x08, 0x3b, 0x0d, 0x1c, 0x62, 0x85, 0xd4, - 0x25, 0xa6, 0x4c, 0xc2, 0xe2, 0x28, 0x5b, 0xae, 0x0c, 0x61, 0x0e, 0x3d, 0x39, 0xa1, 0xa1, 0x47, - 0x3d, 0x6a, 0xc5, 0x61, 0x8c, 0x2d, 0xdf, 0x49, 0xf3, 0xf8, 0x04, 0xb7, 0xb9, 0xef, 0xf8, 0xc4, - 0x39, 0x56, 0x90, 0xe5, 0x34, 0x44, 0xfd, 0xaa, 0xf2, 0x92, 0x20, 0x8d, 0x21, 0x22, 0x8a, 0xb3, - 0x2b, 0x9f, 0x35, 0xb0, 0xb0, 0x2d, 0x48, 0x0e, 0x62, 0xf0, 0x3e, 0x75, 0x09, 0xbc, 0x0b, 0x74, - 0xf1, 0x8b, 0xb4, 0xaa, 0xb6, 0x5a, 0xda, 0x98, 0x37, 0x47, 0x94, 0xa6, 0x48, 0xdb, 0xb2, 0x08, - 0x1f, 0x80, 0x82, 0xea, 0x41, 0x59, 0x89, 0xfb, 0x37, 0x85, 0x53, 0x55, 0x7b, 0x08, 0x83, 0x26, - 0x98, 0x91, 0x47, 0x45, 0x28, 0x57, 0xcd, 0xa5, 0x1a, 0x76, 0xe5, 0x75, 0x64, 0xd9, 0x56, 0xa8, - 0x95, 0x1f, 0xb9, 0x78, 0x0e, 0xf8, 0x1c, 0x64, 0x1b, 0x75, 0x39, 0x4d, 0xb1, 0xb6, 0x7e, 0xd5, - 0xaf, 0xac, 0x7a, 0x01, 0xf7, 0xbb, 0x2d, 0xd3, 0xa1, 0x27, 0x96, 0x8f, 0x23, 0x3f, 0x70, 0x28, - 0xeb, 0x58, 0x0e, 0x0d, 0xa3, 0x6e, 0xdb, 0xe2, 0xbd, 0x0e, 0x89, 0xe4, 0x00, 0x8d, 0xba, 0x9d, - 0x6d, 0xd4, 0x21, 0x54, 0xb7, 0x11, 0x53, 0x16, 0xd5, 0xf0, 0xff, 0x83, 0x62, 0x13, 0x33, 0x1e, - 0xf0, 0x80, 0x86, 0x68, 0x56, 0x16, 0xc6, 0x09, 0x88, 0x40, 0x61, 0xcb, 0x75, 0x19, 0x89, 0xc4, - 0xa4, 0xa2, 0x36, 0xfc, 0x84, 0x06, 0x00, 
0x75, 0xcc, 0xb1, 0x43, 0x42, 0x4e, 0x18, 0xd2, 0x65, - 0x31, 0x91, 0x81, 0xfb, 0x60, 0xfe, 0x10, 0x7b, 0x1e, 0x71, 0x55, 0x03, 0x89, 0x50, 0x5e, 0xde, - 0xf5, 0x5e, 0xea, 0x71, 0xcc, 0x14, 0x6c, 0x27, 0xe4, 0xac, 0x67, 0xa7, 0x9b, 0xe1, 0x7d, 0xa0, - 0xef, 0x11, 0x8e, 0xd1, 0x8c, 0x24, 0xf9, 0x2f, 0x4d, 0x22, 0x6a, 0x71, 0xa7, 0x84, 0xc1, 0x17, - 0xa0, 0x68, 0xe3, 0x23, 0xde, 0x08, 0x5d, 0x72, 0x86, 0x0a, 0x52, 0x95, 0x7f, 0xcc, 0xb1, 0xa5, - 0xcc, 0x51, 0xb1, 0x36, 0x7b, 0xde, 0xaf, 0x64, 0x2e, 0xfa, 0x15, 0xcd, 0x1e, 0x77, 0x94, 0x6b, - 0x60, 0x69, 0xda, 0x58, 0x70, 0x01, 0xe4, 0x8e, 0x49, 0x2f, 0x16, 0xc0, 0x16, 0x21, 0x5c, 0x02, - 0xf9, 0x53, 0xdc, 0xee, 0x0e, 0x1f, 0x35, 0xfe, 0x78, 0x96, 0x7d, 0xaa, 0x95, 0x9f, 0x80, 0xe2, - 0x68, 0xaa, 0xdb, 0x34, 0xae, 0x7c, 0x29, 0x80, 0x52, 0xc2, 0x36, 0x70, 0x0f, 0xe8, 0xaf, 0x82, - 0xd0, 0x55, 0xb2, 0x6f, 0x5e, 0xf5, 0x2b, 0x8f, 0x6f, 0x92, 0x1d, 0x7b, 0x24, 0xe4, 0x56, 0xc4, - 0x59, 0xd7, 0xe1, 0x91, 0xa9, 0x48, 0x04, 0x81, 0x2d, 0x69, 0xe0, 0x9c, 0xf4, 0x50, 0x7c, 0xaa, - 0x70, 0x05, 0x1a, 0xdb, 0x57, 0x69, 0x3c, 0x3c, 0x18, 0x02, 0xfd, 0x10, 0x7b, 0x11, 0xd2, 0xab, - 0x39, 0xe1, 0x17, 0x11, 0x27, 0x1d, 0x91, 0x9f, 0x74, 0xc4, 0xfb, 0xeb, 0x8a, 0xcf, 0x4b, 0xb1, - 0xd6, 0xa6, 0xaf, 0xc3, 0x54, 0xe1, 0x6b, 0xba, 0x90, 0xe3, 0xba, 0xfc, 0x8f, 0x26, 0xe4, 0xaf, - 0xfe, 0x82, 0x31, 0xed, 0x02, 0x08, 0xf4, 0x26, 0x65, 0x5c, 0x1a, 0x20, 0x6f, 0xcb, 0x58, 0x18, - 0xf7, 0x80, 0x3a, 0xc7, 0x84, 0x37, 0x31, 0xf7, 0xd1, 0x62, 0x6c, 0xdc, 0x71, 0x06, 0xae, 0x83, - 0xc2, 0x5b, 0x12, 0x78, 0x3e, 0x8f, 0xe4, 0x3a, 0x94, 0x36, 0x60, 0xe2, 0x30, 0x55, 0xb1, 0x87, - 0x10, 0xb8, 0x0e, 0x16, 0x77, 0x42, 0xdc, 0x6a, 0x93, 0x43, 0xec, 0xbd, 0x39, 0x25, 0x8c, 0x05, - 0x2e, 0x41, 0xc5, 0xaa, 0xb6, 0x3a, 0x6b, 0x5f, 0x2f, 0xc0, 0x4d, 0x90, 0x6f, 0x32, 0x7a, 0xd6, - 0x43, 0x25, 0xc9, 0xbc, 0x9c, 0x60, 0xde, 0xa6, 0x61, 0x48, 0x1c, 0x2e, 0xcb, 0xdb, 0x34, 0x3c, - 0x0a, 0x3c, 0xf5, 0x14, 0x71, 0x07, 0xdc, 0x04, 0x05, 0x05, 0x41, 0x7f, 0xc9, 0xe6, 0xe4, 0x0a, - 0xa8, 0xfb, 0x2b, 0x80, 0x6a, 0x1c, 0xe2, 0xe1, 0x4b, 0x50, 0x7e, 0x4d, 0x1d, 0xdc, 0x6e, 0xf7, - 0x6c, 0xe2, 0x05, 0x11, 0x27, 0x8c, 0xb8, 0x5b, 0xd1, 0x41, 0xe0, 0x12, 0x07, 0x33, 0xf4, 0xb7, - 0x1c, 0xf6, 0x06, 0x04, 0xdc, 0x05, 0x73, 0x3b, 0x62, 0xa7, 0x3b, 0x2c, 0x88, 0x88, 0x54, 0x61, - 0x41, 0x4e, 0x50, 0x4e, 0x2e, 0xd4, 0x24, 0x42, 0x8d, 0x90, 0xea, 0x9b, 0xdc, 0xca, 0xb9, 0x5b, - 0x6f, 0xe5, 0x87, 0xdf, 0xde, 0x4a, 0x2b, 0xb9, 0x5c, 0x53, 0xdf, 0x4a, 0x51, 0xfc, 0x89, 0x85, - 0xad, 0xed, 0x9d, 0x7f, 0x37, 0x32, 0xe7, 0x97, 0x86, 0x76, 0x71, 0x69, 0x68, 0xdf, 0x2e, 0x0d, - 0xed, 0xe3, 0xc0, 0xc8, 0x7c, 0x1a, 0x18, 0x99, 0x8b, 0x81, 0x91, 0xf9, 0x3a, 0x30, 0x32, 0xef, - 0xd6, 0x6e, 0x5a, 0xd8, 0xd4, 0xbf, 0x55, 0x6b, 0x46, 0x26, 0x1e, 0xfe, 0x0c, 0x00, 0x00, 0xff, - 0xff, 0xf9, 0x3e, 0x1b, 0x2c, 0x32, 0x07, 0x00, 0x00, + // 646 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdb, 0x6e, 0xd3, 0x40, + 0x10, 0x55, 0xee, 0xcd, 0x04, 0x7a, 0x59, 0x55, 0x68, 0x09, 0x14, 0x85, 0xc2, 0x43, 0xa5, 0xb6, + 0x31, 0x6a, 0x91, 0x40, 0x3c, 0x54, 0xea, 0x4d, 0xa2, 0x02, 0x4a, 0xb4, 0x2d, 0x42, 0x42, 0xe2, + 0x61, 0x63, 0x4f, 0x6d, 0xab, 0xa9, 0x37, 0x5a, 0x6f, 0xab, 0xe6, 0x53, 0xf8, 0x0a, 0xbe, 0x83, + 0xbf, 0x42, 0x3b, 0xde, 0x24, 0x8e, 0x5b, 0x50, 0x90, 0x78, 0xf2, 0x7a, 0xce, 0x39, 0xb3, 0xe3, + 0x39, 0x33, 0x86, 0xf6, 0x50, 0x2b, 0xa3, 0xbc, 0x61, 0x3f, 0x45, 0x7d, 0x13, 0xfb, 0xe8, 0x25, + 0x2a, 0xc0, 0x2e, 0x05, 0x59, 0x73, 0x12, 0x6d, 0x3f, 0x19, 
0xd3, 0x7c, 0x75, 0x75, 0xa5, 0x12, + 0x2f, 0x7b, 0x64, 0xbc, 0xf6, 0xf3, 0x62, 0x8e, 0x08, 0xe5, 0xc0, 0x44, 0x7e, 0x84, 0xfe, 0xa5, + 0xa3, 0xac, 0x15, 0x29, 0xee, 0x99, 0xc1, 0xeb, 0x3f, 0x4a, 0xb0, 0x7c, 0x68, 0xe9, 0x67, 0x59, + 0xf8, 0x54, 0x05, 0xc8, 0x5e, 0x40, 0xd5, 0x3e, 0x79, 0xa9, 0x53, 0xda, 0x68, 0xed, 0x2c, 0x75, + 0x27, 0xe2, 0xae, 0x0d, 0x0b, 0x02, 0xd9, 0x2b, 0x68, 0x38, 0x0d, 0x2f, 0x13, 0xef, 0x51, 0x81, + 0xe7, 0x50, 0x31, 0xa6, 0xb1, 0x2e, 0xd4, 0xe9, 0xaa, 0x94, 0x57, 0x3a, 0x95, 0x82, 0xe0, 0x3d, + 0x15, 0x4e, 0xb0, 0x70, 0xac, 0xf5, 0x9f, 0x95, 0xac, 0x0e, 0xb6, 0x08, 0xe5, 0x93, 0x23, 0xaa, + 0xa6, 0x29, 0xca, 0x27, 0x47, 0x8c, 0xb9, 0xfa, 0xca, 0x14, 0xc9, 0x38, 0x4f, 0xa1, 0xd9, 0x93, + 0xda, 0xc4, 0x26, 0x56, 0x09, 0x5f, 0x20, 0x60, 0x1a, 0x60, 0x1c, 0x1a, 0xfb, 0x41, 0xa0, 0x31, + 0xb5, 0x77, 0x5b, 0x6c, 0xfc, 0xca, 0x9e, 0x01, 0x1c, 0x49, 0x23, 0x7d, 0x4c, 0x0c, 0x6a, 0x5e, + 0x25, 0x30, 0x17, 0x61, 0xa7, 0xb0, 0x74, 0x2e, 0xc3, 0x10, 0x03, 0x27, 0xc0, 0x94, 0xd7, 0xa8, + 0xfa, 0x97, 0x85, 0xcf, 0xed, 0x16, 0x68, 0xc7, 0x89, 0xd1, 0x23, 0x51, 0x14, 0xb3, 0x6d, 0xa8, + 0x7e, 0x42, 0x23, 0x79, 0x9d, 0x92, 0x3c, 0x2e, 0x26, 0xb1, 0x58, 0xa6, 0x24, 0x1a, 0xf3, 0xa0, + 0x29, 0xe4, 0x85, 0x39, 0x49, 0x02, 0xbc, 0xe5, 0x0d, 0xea, 0xf3, 0x4a, 0xd7, 0xcd, 0xc0, 0x04, + 0x10, 0x53, 0x4e, 0xfb, 0x00, 0x56, 0xef, 0x2b, 0x84, 0x2d, 0x43, 0xe5, 0x12, 0x47, 0xae, 0x89, + 0xf6, 0xc8, 0x56, 0xa1, 0x76, 0x23, 0x07, 0xd7, 0xe3, 0x36, 0x66, 0x2f, 0xef, 0xca, 0x6f, 0x4b, + 0xed, 0x37, 0xd0, 0x9c, 0xd4, 0xf1, 0x2f, 0xc2, 0xf5, 0x5f, 0x75, 0x68, 0xe5, 0xac, 0xb7, 0x46, + 0x7d, 0x88, 0x93, 0xc0, 0x89, 0xe9, 0xec, 0xcc, 0x2c, 0x4f, 0xcc, 0xe4, 0xd3, 0x39, 0x72, 0xd6, + 0xe4, 0xd4, 0xe7, 0x32, 0x4c, 0x79, 0xb5, 0x53, 0xb1, 0x6a, 0x7b, 0xce, 0x1b, 0x59, 0x9b, 0x35, + 0xf2, 0xcb, 0x5d, 0xa3, 0x96, 0xa8, 0xc7, 0x9b, 0xf7, 0xcf, 0xe5, 0x9c, 0x7e, 0xbd, 0x9e, 0xf1, + 0xab, 0xf3, 0x87, 0x5c, 0x45, 0xdb, 0x18, 0x54, 0x7b, 0x4a, 0x1b, 0x72, 0xac, 0x26, 0xe8, 0x6c, + 0x27, 0xed, 0x4c, 0xf9, 0x97, 0x68, 0x7a, 0xd2, 0x44, 0x7c, 0x25, 0x9b, 0xb4, 0x69, 0x84, 0x6d, + 0x41, 0xe3, 0x2b, 0xc6, 0x61, 0x64, 0x52, 0x9a, 0xdf, 0xd6, 0x0e, 0xcb, 0x5d, 0xe6, 0x10, 0x31, + 0xa6, 0xb0, 0x2d, 0x58, 0x39, 0x4e, 0x64, 0x7f, 0x80, 0xe7, 0x32, 0xfc, 0x7c, 0x83, 0x5a, 0xc7, + 0x01, 0xf2, 0x66, 0xa7, 0xb4, 0xb1, 0x20, 0xee, 0x02, 0x6c, 0x17, 0x6a, 0x3d, 0xad, 0x6e, 0x47, + 0xbc, 0x45, 0x99, 0xd7, 0x72, 0x99, 0x0f, 0x55, 0x92, 0xa0, 0x6f, 0x08, 0x3e, 0x54, 0xc9, 0x45, + 0x1c, 0x8a, 0x8c, 0xcb, 0x76, 0xa1, 0xe1, 0x40, 0xfe, 0x80, 0x64, 0xf9, 0x69, 0x75, 0x5f, 0xee, + 0x08, 0x62, 0xcc, 0x64, 0x7b, 0xd0, 0xfe, 0xa8, 0x7c, 0x39, 0x18, 0x8c, 0x04, 0x86, 0x71, 0x6a, + 0x50, 0x63, 0xb0, 0x9f, 0x9e, 0xc5, 0x01, 0xfa, 0x52, 0xf3, 0x87, 0x54, 0xe0, 0x5f, 0x18, 0x6c, + 0x0f, 0x16, 0x8f, 0xed, 0xe2, 0x0d, 0x75, 0x9c, 0x22, 0x75, 0x7e, 0xd9, 0xfd, 0x5d, 0xdc, 0xd4, + 0xcf, 0xa2, 0xa2, 0xc0, 0x9e, 0x5d, 0x98, 0xc5, 0x39, 0x16, 0xe6, 0xfb, 0xdc, 0x0b, 0xe3, 0xe5, + 0xe7, 0xfe, 0xde, 0x6e, 0xb8, 0x14, 0xff, 0x63, 0x97, 0x0e, 0xb6, 0xbf, 0x6d, 0x86, 0xb1, 0x89, + 0xae, 0xfb, 0xb6, 0x7a, 0x2f, 0x92, 0x69, 0x14, 0xfb, 0x4a, 0x0f, 0x3d, 0x5f, 0x25, 0xe9, 0xf5, + 0xc0, 0x2b, 0xfc, 0xd6, 0xfb, 0x75, 0x0a, 0xec, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x69, + 0xf0, 0xb9, 0x57, 0x06, 0x00, 0x00, } - -func (m *CheckServiceNode) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*CheckServiceNode) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckServiceNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Checks) > 0 { - for iNdEx := len(m.Checks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Checks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Node != nil { - { - size, err := m.Node.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Node) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Node) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Partition) > 0 { - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintNode(dAtA, i, uint64(len(m.Partition))) - i-- - dAtA[i] = 0x42 - } - { - size, err := m.RaftIndex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.Meta) > 0 { - for k := range m.Meta { - v := m.Meta[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNode(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNode(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNode(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if len(m.TaggedAddresses) > 0 { - for k := range m.TaggedAddresses { - v := m.TaggedAddresses[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNode(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNode(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNode(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintNode(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0x22 - } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintNode(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0x1a - } - if len(m.Node) > 0 { - i -= len(m.Node) - copy(dAtA[i:], m.Node) - i = encodeVarintNode(dAtA, i, uint64(len(m.Node))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintNode(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NodeService) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeService) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*NodeService) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SocketPath) > 0 { - i -= len(m.SocketPath) - copy(dAtA[i:], m.SocketPath) - i = encodeVarintNode(dAtA, i, uint64(len(m.SocketPath))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - { - size, err := m.EnterpriseMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - if len(m.TaggedAddresses) > 0 { - for k := range m.TaggedAddresses { - v := m.TaggedAddresses[k] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNode(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNode(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x7a - } - } - { - size, err := m.RaftIndex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - if m.LocallyRegisteredAsSidecar { - i-- - if m.LocallyRegisteredAsSidecar { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x68 - } - { - size, err := m.Connect.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.Proxy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - if m.EnableTagOverride { - i-- - if m.EnableTagOverride { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if m.Weights != nil { - { - size, err := m.Weights.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNode(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Port != 0 { - i = encodeVarintNode(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x38 - } - if len(m.Meta) > 0 { - for k := range m.Meta { - v := m.Meta[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNode(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNode(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNode(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintNode(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0x2a - } - if len(m.Tags) > 0 { - for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tags[iNdEx]) - copy(dAtA[i:], m.Tags[iNdEx]) - i = encodeVarintNode(dAtA, i, uint64(len(m.Tags[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Service) > 0 { - i -= len(m.Service) - copy(dAtA[i:], m.Service) - i = encodeVarintNode(dAtA, i, uint64(len(m.Service))) - i-- - dAtA[i] = 0x1a - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintNode(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Kind) > 0 { - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintNode(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintNode(dAtA []byte, offset int, v uint64) int { - offset -= sovNode(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = 
uint8(v) - return base -} -func (m *CheckServiceNode) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Node != nil { - l = m.Node.Size() - n += 1 + l + sovNode(uint64(l)) - } - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovNode(uint64(l)) - } - if len(m.Checks) > 0 { - for _, e := range m.Checks { - l = e.Size() - n += 1 + l + sovNode(uint64(l)) - } - } - return n -} - -func (m *Node) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - l = len(m.Node) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - l = len(m.Address) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - if len(m.TaggedAddresses) > 0 { - for k, v := range m.TaggedAddresses { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNode(uint64(len(k))) + 1 + len(v) + sovNode(uint64(len(v))) - n += mapEntrySize + 1 + sovNode(uint64(mapEntrySize)) - } - } - if len(m.Meta) > 0 { - for k, v := range m.Meta { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNode(uint64(len(k))) + 1 + len(v) + sovNode(uint64(len(v))) - n += mapEntrySize + 1 + sovNode(uint64(mapEntrySize)) - } - } - l = m.RaftIndex.Size() - n += 1 + l + sovNode(uint64(l)) - l = len(m.Partition) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - return n -} - -func (m *NodeService) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Kind) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sovNode(uint64(l)) - } - } - l = len(m.Address) - if l > 0 { - n += 1 + l + sovNode(uint64(l)) - } - if len(m.Meta) > 0 { - for k, v := range m.Meta { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNode(uint64(len(k))) + 1 + len(v) + sovNode(uint64(len(v))) - n += mapEntrySize + 1 + sovNode(uint64(mapEntrySize)) - } - } - if m.Port != 0 { - n += 1 + sovNode(uint64(m.Port)) - } - if m.Weights != nil { - l = m.Weights.Size() - n += 1 + l + sovNode(uint64(l)) - } - if m.EnableTagOverride { - n += 2 - } - l = m.Proxy.Size() - n += 1 + l + sovNode(uint64(l)) - l = m.Connect.Size() - n += 1 + l + sovNode(uint64(l)) - if m.LocallyRegisteredAsSidecar { - n += 2 - } - l = m.RaftIndex.Size() - n += 1 + l + sovNode(uint64(l)) - if len(m.TaggedAddresses) > 0 { - for k, v := range m.TaggedAddresses { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovNode(uint64(len(k))) + 1 + l + sovNode(uint64(l)) - n += mapEntrySize + 1 + sovNode(uint64(mapEntrySize)) - } - } - l = m.EnterpriseMeta.Size() - n += 2 + l + sovNode(uint64(l)) - l = len(m.SocketPath) - if l > 0 { - n += 2 + l + sovNode(uint64(l)) - } - return n -} - -func sovNode(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozNode(x uint64) (n int) { - return sovNode(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CheckServiceNode) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 
4 { - return fmt.Errorf("proto: CheckServiceNode: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckServiceNode: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Node == nil { - m.Node = &Node{} - } - if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &NodeService{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checks = append(m.Checks, &HealthCheck{}) - if err := m.Checks[len(m.Checks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Node) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Node: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = github_com_hashicorp_consul_types.NodeID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Node = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TaggedAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TaggedAddresses == nil { - m.TaggedAddresses = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 
3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.TaggedAddresses[mapkey] = mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Meta[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Partition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeService) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeService: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeService: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen 
|= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = github_com_hashicorp_consul_agent_structs.ServiceKind(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen 
- if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Meta[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Weights", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Weights == nil { - m.Weights = &Weights{} - } - if err := m.Weights.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableTagOverride", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnableTagOverride = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proxy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Proxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Connect", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Connect.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocallyRegisteredAsSidecar", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocallyRegisteredAsSidecar = bool(v != 0) - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RaftIndex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TaggedAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TaggedAddresses == nil { - m.TaggedAddresses = make(map[string]ServiceAddress) - } - var mapkey string - mapvalue := &ServiceAddress{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNode - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNode - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthNode - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthNode - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ServiceAddress{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.TaggedAddresses[mapkey] = *mapvalue - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnterpriseMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EnterpriseMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SocketPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNode - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNode - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNode - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SocketPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNode(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNode - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipNode(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for 
iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNode - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNode - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNode - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNode - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNode - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNode - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthNode = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNode = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNode = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbservice/node.proto b/proto/pbservice/node.proto index c07e809a8..4b8389352 100644 --- a/proto/pbservice/node.proto +++ b/proto/pbservice/node.proto @@ -4,18 +4,10 @@ package pbservice; option go_package = "github.com/hashicorp/consul/proto/pbservice"; -import "proto/pbcommongogo/common.proto"; +import "proto/pbcommon/common.proto"; import "proto/pbservice/healthcheck.proto"; import "proto/pbservice/service.proto"; -// This fake import path is replaced by the build script with a versioned path -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_unkeyed_all) = false; -option (gogoproto.goproto_unrecognized_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_sizecache_all) = false; - // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. message CheckServiceNode { @@ -32,7 +24,8 @@ message CheckServiceNode { // output=node.gen.go // name=Structs message Node { - string ID = 1 [(gogoproto.casttype) = "github.com/hashicorp/consul/types.NodeID"]; + // mog: func-to=NodeIDType func-from=string + string ID = 1; string Node = 2; string Partition = 8; @@ -42,7 +35,7 @@ map<string, string> Meta = 6; // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - commongogo.RaftIndex RaftIndex = 7 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; + common.RaftIndex RaftIndex = 7; } // NodeService is a service provided by a node @@ -56,14 +49,15 @@ message NodeService { // Kind is the kind of service this is. Different kinds of services may // have differing validation, DNS behavior, etc. An empty kind will default // to the Default kind. See ServiceKind for the full list of kinds. 
- string Kind = 1 [(gogoproto.casttype) = "github.com/hashicorp/consul/agent/structs.ServiceKind"]; + // mog: func-to=structs.ServiceKind func-from=string + string Kind = 1; string ID = 2; string Service = 3; repeated string Tags = 4; string Address = 5; // mog: func-to=MapStringServiceAddressToStructs func-from=NewMapStringServiceAddressFromStructs - map<string, ServiceAddress> TaggedAddresses = 15 [(gogoproto.nullable) = false]; + map<string, ServiceAddress> TaggedAddresses = 15; map<string, string> Meta = 6; // mog: func-to=int func-from=int32 int32 Port = 7; @@ -83,11 +77,11 @@ // in the other case. ProxyConfig may be a more natural name here, but it's // confusing for the UX because one of the fields in ConnectProxyConfig is // also called just "Config" - ConnectProxyConfig Proxy = 11 [(gogoproto.nullable) = false]; + ConnectProxyConfig Proxy = 11; // Connect are the Connect settings for a service. This is purposely NOT // a pointer so that we never have to nil-check this. - ServiceConnect Connect = 12 [(gogoproto.nullable) = false]; + ServiceConnect Connect = 12; // LocallyRegisteredAsSidecar is private as it is only used by a local agent // state to track if the service was registered from a nested sidecar_service @@ -109,8 +103,8 @@ bool LocallyRegisteredAsSidecar = 13; // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - commongogo.EnterpriseMeta EnterpriseMeta = 16 [(gogoproto.nullable) = false]; + common.EnterpriseMeta EnterpriseMeta = 16; // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - commongogo.RaftIndex RaftIndex = 14 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; + common.RaftIndex RaftIndex = 14; } diff --git a/proto/pbservice/service.gen.go b/proto/pbservice/service.gen.go index 88dfe596d..9644f7c87 100644 --- a/proto/pbservice/service.gen.go +++ b/proto/pbservice/service.gen.go @@ -2,93 +2,123 @@ package pbservice -import structs "github.com/hashicorp/consul/agent/structs" +import "github.com/hashicorp/consul/agent/structs" -func ConnectProxyConfigToStructs(s ConnectProxyConfig) structs.ConnectProxyConfig { - var t structs.ConnectProxyConfig +func ConnectProxyConfigToStructs(s *ConnectProxyConfig, t *structs.ConnectProxyConfig) { + if s == nil { + return + } t.DestinationServiceName = s.DestinationServiceName t.DestinationServiceID = s.DestinationServiceID t.LocalServiceAddress = s.LocalServiceAddress t.LocalServicePort = int(s.LocalServicePort) t.LocalServiceSocketPath = s.LocalServiceSocketPath - t.Mode = s.Mode + t.Mode = structs.ProxyMode(s.Mode) t.Config = ProtobufTypesStructToMapStringInterface(s.Config) t.Upstreams = UpstreamsToStructs(s.Upstreams) - t.MeshGateway = MeshGatewayConfigToStructs(s.MeshGateway) - t.Expose = ExposeConfigToStructs(s.Expose) - t.TransparentProxy = TransparentProxyConfigToStructs(s.TransparentProxy) - return t + if s.MeshGateway != nil { + MeshGatewayConfigToStructs(s.MeshGateway, &t.MeshGateway) + } + if s.Expose != nil { + ExposeConfigToStructs(s.Expose, &t.Expose) + } + if s.TransparentProxy != nil { + TransparentProxyConfigToStructs(s.TransparentProxy, &t.TransparentProxy) + } } -func NewConnectProxyConfigFromStructs(t structs.ConnectProxyConfig) ConnectProxyConfig { - var s ConnectProxyConfig +func ConnectProxyConfigFromStructs(t *structs.ConnectProxyConfig, s *ConnectProxyConfig) { + if s == nil { + return + } s.DestinationServiceName = t.DestinationServiceName s.DestinationServiceID = t.DestinationServiceID s.LocalServiceAddress = t.LocalServiceAddress s.LocalServicePort = 
int32(t.LocalServicePort) s.LocalServiceSocketPath = t.LocalServiceSocketPath - s.Mode = t.Mode + s.Mode = string(t.Mode) s.Config = MapStringInterfaceToProtobufTypesStruct(t.Config) s.Upstreams = NewUpstreamsFromStructs(t.Upstreams) - s.MeshGateway = NewMeshGatewayConfigFromStructs(t.MeshGateway) - s.Expose = NewExposeConfigFromStructs(t.Expose) - s.TransparentProxy = NewTransparentProxyConfigFromStructs(t.TransparentProxy) - return s + { + var x MeshGatewayConfig + MeshGatewayConfigFromStructs(&t.MeshGateway, &x) + s.MeshGateway = &x + } + { + var x ExposeConfig + ExposeConfigFromStructs(&t.Expose, &x) + s.Expose = &x + } + { + var x TransparentProxyConfig + TransparentProxyConfigFromStructs(&t.TransparentProxy, &x) + s.TransparentProxy = &x + } } -func ExposeConfigToStructs(s ExposeConfig) structs.ExposeConfig { - var t structs.ExposeConfig +func ExposeConfigToStructs(s *ExposeConfig, t *structs.ExposeConfig) { + if s == nil { + return + } t.Checks = s.Checks t.Paths = ExposePathSliceToStructs(s.Paths) - return t } -func NewExposeConfigFromStructs(t structs.ExposeConfig) ExposeConfig { - var s ExposeConfig +func ExposeConfigFromStructs(t *structs.ExposeConfig, s *ExposeConfig) { + if s == nil { + return + } s.Checks = t.Checks s.Paths = NewExposePathSliceFromStructs(t.Paths) - return s } -func ExposePathToStructs(s ExposePath) structs.ExposePath { - var t structs.ExposePath +func ExposePathToStructs(s *ExposePath, t *structs.ExposePath) { + if s == nil { + return + } t.ListenerPort = int(s.ListenerPort) t.Path = s.Path t.LocalPathPort = int(s.LocalPathPort) t.Protocol = s.Protocol t.ParsedFromCheck = s.ParsedFromCheck - return t } -func NewExposePathFromStructs(t structs.ExposePath) ExposePath { - var s ExposePath +func ExposePathFromStructs(t *structs.ExposePath, s *ExposePath) { + if s == nil { + return + } s.ListenerPort = int32(t.ListenerPort) s.Path = t.Path s.LocalPathPort = int32(t.LocalPathPort) s.Protocol = t.Protocol s.ParsedFromCheck = t.ParsedFromCheck - return s } -func MeshGatewayConfigToStructs(s MeshGatewayConfig) structs.MeshGatewayConfig { - var t structs.MeshGatewayConfig - t.Mode = s.Mode - return t +func MeshGatewayConfigToStructs(s *MeshGatewayConfig, t *structs.MeshGatewayConfig) { + if s == nil { + return + } + t.Mode = structs.MeshGatewayMode(s.Mode) } -func NewMeshGatewayConfigFromStructs(t structs.MeshGatewayConfig) MeshGatewayConfig { - var s MeshGatewayConfig - s.Mode = t.Mode - return s +func MeshGatewayConfigFromStructs(t *structs.MeshGatewayConfig, s *MeshGatewayConfig) { + if s == nil { + return + } + s.Mode = string(t.Mode) } -func ServiceConnectToStructs(s ServiceConnect) structs.ServiceConnect { - var t structs.ServiceConnect +func ServiceConnectToStructs(s *ServiceConnect, t *structs.ServiceConnect) { + if s == nil { + return + } t.Native = s.Native t.SidecarService = ServiceDefinitionPtrToStructs(s.SidecarService) - return t } -func NewServiceConnectFromStructs(t structs.ServiceConnect) ServiceConnect { - var s ServiceConnect +func ServiceConnectFromStructs(t *structs.ServiceConnect, s *ServiceConnect) { + if s == nil { + return + } s.Native = t.Native s.SidecarService = NewServiceDefinitionPtrFromStructs(t.SidecarService) - return s } -func ServiceDefinitionToStructs(s ServiceDefinition) (structs.ServiceDefinition, error) { - var t structs.ServiceDefinition - t.Kind = s.Kind +func ServiceDefinitionToStructs(s *ServiceDefinition, t *structs.ServiceDefinition) { + if s == nil { + return + } + t.Kind = structs.ServiceKind(s.Kind) t.ID = s.ID t.Name = 
s.Name t.Tags = s.Tags @@ -97,27 +127,22 @@ func ServiceDefinitionToStructs(s ServiceDefinition) (structs.ServiceDefinition, t.Meta = s.Meta t.Port = int(s.Port) t.SocketPath = s.SocketPath - check, err := CheckTypeToStructs(s.Check) - if err != nil { - return t, err + if s.Check != nil { + CheckTypeToStructs(s.Check, &t.Check) } - t.Check = check - checks, err := CheckTypesToStructs(s.Checks) - if err != nil { - return t, err - } - t.Checks = checks + t.Checks = CheckTypesToStructs(s.Checks) t.Weights = WeightsPtrToStructs(s.Weights) t.Token = s.Token t.EnableTagOverride = s.EnableTagOverride t.Proxy = ConnectProxyConfigPtrToStructs(s.Proxy) t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta) t.Connect = ServiceConnectPtrToStructs(s.Connect) - return t, nil } -func NewServiceDefinitionFromStructs(t structs.ServiceDefinition) ServiceDefinition { - var s ServiceDefinition - s.Kind = t.Kind +func ServiceDefinitionFromStructs(t *structs.ServiceDefinition, s *ServiceDefinition) { + if s == nil { + return + } + s.Kind = string(t.Kind) s.ID = t.ID s.Name = t.Name s.Tags = t.Tags @@ -126,7 +151,11 @@ func NewServiceDefinitionFromStructs(t structs.ServiceDefinition) ServiceDefinit s.Meta = t.Meta s.Port = int32(t.Port) s.SocketPath = t.SocketPath - s.Check = NewCheckTypeFromStructs(t.Check) + { + var x CheckType + CheckTypeFromStructs(&t.Check, &x) + s.Check = &x + } s.Checks = NewCheckTypesFromStructs(t.Checks) s.Weights = NewWeightsPtrFromStructs(t.Weights) s.Token = t.Token @@ -134,22 +163,25 @@ func NewServiceDefinitionFromStructs(t structs.ServiceDefinition) ServiceDefinit s.Proxy = NewConnectProxyConfigPtrFromStructs(t.Proxy) s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta) s.Connect = NewServiceConnectPtrFromStructs(t.Connect) - return s } -func TransparentProxyConfigToStructs(s TransparentProxyConfig) structs.TransparentProxyConfig { - var t structs.TransparentProxyConfig +func TransparentProxyConfigToStructs(s *TransparentProxyConfig, t *structs.TransparentProxyConfig) { + if s == nil { + return + } t.OutboundListenerPort = int(s.OutboundListenerPort) t.DialedDirectly = s.DialedDirectly - return t } -func NewTransparentProxyConfigFromStructs(t structs.TransparentProxyConfig) TransparentProxyConfig { - var s TransparentProxyConfig +func TransparentProxyConfigFromStructs(t *structs.TransparentProxyConfig, s *TransparentProxyConfig) { + if s == nil { + return + } s.OutboundListenerPort = int32(t.OutboundListenerPort) s.DialedDirectly = t.DialedDirectly - return s } -func UpstreamToStructs(s Upstream) structs.Upstream { - var t structs.Upstream +func UpstreamToStructs(s *Upstream, t *structs.Upstream) { + if s == nil { + return + } t.DestinationType = s.DestinationType t.DestinationNamespace = s.DestinationNamespace t.DestinationPartition = s.DestinationPartition @@ -160,12 +192,15 @@ func UpstreamToStructs(s Upstream) structs.Upstream { t.LocalBindSocketPath = s.LocalBindSocketPath t.LocalBindSocketMode = s.LocalBindSocketMode t.Config = ProtobufTypesStructToMapStringInterface(s.Config) - t.MeshGateway = MeshGatewayConfigToStructs(s.MeshGateway) + if s.MeshGateway != nil { + MeshGatewayConfigToStructs(s.MeshGateway, &t.MeshGateway) + } t.CentrallyConfigured = s.CentrallyConfigured - return t } -func NewUpstreamFromStructs(t structs.Upstream) Upstream { - var s Upstream +func UpstreamFromStructs(t *structs.Upstream, s *Upstream) { + if s == nil { + return + } s.DestinationType = t.DestinationType s.DestinationNamespace = t.DestinationNamespace s.DestinationPartition 
= t.DestinationPartition @@ -176,7 +211,10 @@ func NewUpstreamFromStructs(t structs.Upstream) Upstream { s.LocalBindSocketPath = t.LocalBindSocketPath s.LocalBindSocketMode = t.LocalBindSocketMode s.Config = MapStringInterfaceToProtobufTypesStruct(t.Config) - s.MeshGateway = NewMeshGatewayConfigFromStructs(t.MeshGateway) + { + var x MeshGatewayConfig + MeshGatewayConfigFromStructs(&t.MeshGateway, &x) + s.MeshGateway = &x + } s.CentrallyConfigured = t.CentrallyConfigured - return s } diff --git a/proto/pbservice/service.pb.go b/proto/pbservice/service.pb.go index ca5761fdc..b71a0a387 100644 --- a/proto/pbservice/service.pb.go +++ b/proto/pbservice/service.pb.go @@ -1,18 +1,14 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbservice/service.proto package pbservice import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" - github_com_hashicorp_consul_agent_structs "github.com/hashicorp/consul/agent/structs" - pbcommongogo "github.com/hashicorp/consul/proto/pbcommongogo" - io "io" + _struct "github.com/golang/protobuf/ptypes/struct" + pbcommon "github.com/hashicorp/consul/proto/pbcommon" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -62,22 +58,26 @@ type ConnectProxyConfig struct { // Config is the arbitrary configuration data provided with the proxy // registration. // mog: func-to=ProtobufTypesStructToMapStringInterface func-from=MapStringInterfaceToProtobufTypesStruct - Config *types.Struct `protobuf:"bytes,5,opt,name=Config,proto3" json:"Config,omitempty"` + Config *_struct.Struct `protobuf:"bytes,5,opt,name=Config,proto3" json:"Config,omitempty"` // Upstreams describes any upstream dependencies the proxy instance should // setup. // mog: func-to=UpstreamsToStructs func-from=NewUpstreamsFromStructs - Upstreams []Upstream `protobuf:"bytes,6,rep,name=Upstreams,proto3" json:"Upstreams"` + Upstreams []*Upstream `protobuf:"bytes,6,rep,name=Upstreams,proto3" json:"Upstreams,omitempty"` // MeshGateway defines the mesh gateway configuration for upstreams - MeshGateway MeshGatewayConfig `protobuf:"bytes,7,opt,name=MeshGateway,proto3" json:"MeshGateway"` + MeshGateway *MeshGatewayConfig `protobuf:"bytes,7,opt,name=MeshGateway,proto3" json:"MeshGateway,omitempty"` // Expose defines whether checks or paths are exposed through the proxy - Expose ExposeConfig `protobuf:"bytes,8,opt,name=Expose,proto3" json:"Expose"` + Expose *ExposeConfig `protobuf:"bytes,8,opt,name=Expose,proto3" json:"Expose,omitempty"` // Mode represents how the proxy's inbound and upstream listeners are dialed. - Mode github_com_hashicorp_consul_agent_structs.ProxyMode `protobuf:"bytes,9,opt,name=Mode,proto3,casttype=github.com/hashicorp/consul/agent/structs.ProxyMode" json:"Mode,omitempty"` + // mog: func-to=structs.ProxyMode func-from=string + Mode string `protobuf:"bytes,9,opt,name=Mode,proto3" json:"Mode,omitempty"` // TransparentProxy defines configuration for when the proxy is in // transparent mode. 
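// Editorial note (illustrative sketch, not part of the patch): the mog-generated converters in
// service.gen.go above switch from value-returning functions to in-place, pointer-based ones with
// a nil guard. A minimal sketch of how a caller uses the new shape, where pbCfg is just an
// illustrative *ConnectProxyConfig:
//
//	var cfg structs.ConnectProxyConfig
//	ConnectProxyConfigToStructs(pbCfg, &cfg) // a nil pbCfg is a no-op; cfg keeps its zero value
//
// Embedded value fields (MeshGateway, Expose, TransparentProxy) are converted through a temporary
// on the FromStructs side, as the `var x MeshGatewayConfig` blocks above show.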
- TransparentProxy TransparentProxyConfig `protobuf:"bytes,10,opt,name=TransparentProxy,proto3" json:"TransparentProxy"` + TransparentProxy *TransparentProxyConfig `protobuf:"bytes,10,opt,name=TransparentProxy,proto3" json:"TransparentProxy,omitempty"` // LocalServiceSocketPath is the path to the unix domain socket for the local service instance - LocalServiceSocketPath string `protobuf:"bytes,11,opt,name=LocalServiceSocketPath,proto3" json:"LocalServiceSocketPath,omitempty"` + LocalServiceSocketPath string `protobuf:"bytes,11,opt,name=LocalServiceSocketPath,proto3" json:"LocalServiceSocketPath,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ConnectProxyConfig) Reset() { *m = ConnectProxyConfig{} } @@ -86,26 +86,18 @@ func (*ConnectProxyConfig) ProtoMessage() {} func (*ConnectProxyConfig) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{0} } + func (m *ConnectProxyConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ConnectProxyConfig.Unmarshal(m, b) } func (m *ConnectProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConnectProxyConfig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ConnectProxyConfig.Marshal(b, m, deterministic) } func (m *ConnectProxyConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_ConnectProxyConfig.Merge(m, src) } func (m *ConnectProxyConfig) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ConnectProxyConfig.Size(m) } func (m *ConnectProxyConfig) XXX_DiscardUnknown() { xxx_messageInfo_ConnectProxyConfig.DiscardUnknown(m) @@ -113,6 +105,83 @@ func (m *ConnectProxyConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ConnectProxyConfig proto.InternalMessageInfo +func (m *ConnectProxyConfig) GetDestinationServiceName() string { + if m != nil { + return m.DestinationServiceName + } + return "" +} + +func (m *ConnectProxyConfig) GetDestinationServiceID() string { + if m != nil { + return m.DestinationServiceID + } + return "" +} + +func (m *ConnectProxyConfig) GetLocalServiceAddress() string { + if m != nil { + return m.LocalServiceAddress + } + return "" +} + +func (m *ConnectProxyConfig) GetLocalServicePort() int32 { + if m != nil { + return m.LocalServicePort + } + return 0 +} + +func (m *ConnectProxyConfig) GetConfig() *_struct.Struct { + if m != nil { + return m.Config + } + return nil +} + +func (m *ConnectProxyConfig) GetUpstreams() []*Upstream { + if m != nil { + return m.Upstreams + } + return nil +} + +func (m *ConnectProxyConfig) GetMeshGateway() *MeshGatewayConfig { + if m != nil { + return m.MeshGateway + } + return nil +} + +func (m *ConnectProxyConfig) GetExpose() *ExposeConfig { + if m != nil { + return m.Expose + } + return nil +} + +func (m *ConnectProxyConfig) GetMode() string { + if m != nil { + return m.Mode + } + return "" +} + +func (m *ConnectProxyConfig) GetTransparentProxy() *TransparentProxyConfig { + if m != nil { + return m.TransparentProxy + } + return nil +} + +func (m *ConnectProxyConfig) GetLocalServiceSocketPath() string { + if m != nil { + return m.LocalServiceSocketPath + } + return "" +} + // Upstream represents a single upstream dependency for a service or proxy. 
It // describes the mechanism used to discover instances to communicate with (the // Target) as well as any potential client configuration that may be useful such @@ -150,16 +219,19 @@ type Upstream struct { // It can be used to pass arbitrary configuration for this specific upstream // to the proxy. // mog: func-to=ProtobufTypesStructToMapStringInterface func-from=MapStringInterfaceToProtobufTypesStruct - Config *types.Struct `protobuf:"bytes,7,opt,name=Config,proto3" json:"Config,omitempty"` + Config *_struct.Struct `protobuf:"bytes,7,opt,name=Config,proto3" json:"Config,omitempty"` // MeshGateway is the configuration for mesh gateway usage of this upstream - MeshGateway MeshGatewayConfig `protobuf:"bytes,8,opt,name=MeshGateway,proto3" json:"MeshGateway"` + MeshGateway *MeshGatewayConfig `protobuf:"bytes,8,opt,name=MeshGateway,proto3" json:"MeshGateway,omitempty"` // CentrallyConfigured indicates whether the upstream was defined in a proxy // instance registration or whether it was generated from a config entry. CentrallyConfigured bool `protobuf:"varint,9,opt,name=CentrallyConfigured,proto3" json:"CentrallyConfigured,omitempty"` // LocalBindSocketPath is the socket to create to connect to the upstream service - LocalBindSocketPath string `protobuf:"bytes,10,opt,name=LocalBindSocketPath,proto3" json:"LocalBindSocketPath,omitempty"` - LocalBindSocketMode string `protobuf:"bytes,11,opt,name=LocalBindSocketMode,proto3" json:"LocalBindSocketMode,omitempty"` - DestinationPartition string `protobuf:"bytes,12,opt,name=DestinationPartition,proto3" json:"DestinationPartition,omitempty"` + LocalBindSocketPath string `protobuf:"bytes,10,opt,name=LocalBindSocketPath,proto3" json:"LocalBindSocketPath,omitempty"` + LocalBindSocketMode string `protobuf:"bytes,11,opt,name=LocalBindSocketMode,proto3" json:"LocalBindSocketMode,omitempty"` + DestinationPartition string `protobuf:"bytes,12,opt,name=DestinationPartition,proto3" json:"DestinationPartition,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Upstream) Reset() { *m = Upstream{} } @@ -168,26 +240,18 @@ func (*Upstream) ProtoMessage() {} func (*Upstream) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{1} } + func (m *Upstream) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Upstream.Unmarshal(m, b) } func (m *Upstream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Upstream.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Upstream.Marshal(b, m, deterministic) } func (m *Upstream) XXX_Merge(src proto.Message) { xxx_messageInfo_Upstream.Merge(m, src) } func (m *Upstream) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Upstream.Size(m) } func (m *Upstream) XXX_DiscardUnknown() { xxx_messageInfo_Upstream.DiscardUnknown(m) @@ -195,6 +259,90 @@ func (m *Upstream) XXX_DiscardUnknown() { var xxx_messageInfo_Upstream proto.InternalMessageInfo +func (m *Upstream) GetDestinationType() string { + if m != nil { + return m.DestinationType + } + return "" +} + +func (m *Upstream) GetDestinationNamespace() string { + if m != nil { + return m.DestinationNamespace + } + return "" +} + +func (m *Upstream) GetDestinationName() string { + if m != nil { + return m.DestinationName + } + return "" +} + +func (m *Upstream) 
GetDatacenter() string { + if m != nil { + return m.Datacenter + } + return "" +} + +func (m *Upstream) GetLocalBindAddress() string { + if m != nil { + return m.LocalBindAddress + } + return "" +} + +func (m *Upstream) GetLocalBindPort() int32 { + if m != nil { + return m.LocalBindPort + } + return 0 +} + +func (m *Upstream) GetConfig() *_struct.Struct { + if m != nil { + return m.Config + } + return nil +} + +func (m *Upstream) GetMeshGateway() *MeshGatewayConfig { + if m != nil { + return m.MeshGateway + } + return nil +} + +func (m *Upstream) GetCentrallyConfigured() bool { + if m != nil { + return m.CentrallyConfigured + } + return false +} + +func (m *Upstream) GetLocalBindSocketPath() string { + if m != nil { + return m.LocalBindSocketPath + } + return "" +} + +func (m *Upstream) GetLocalBindSocketMode() string { + if m != nil { + return m.LocalBindSocketMode + } + return "" +} + +func (m *Upstream) GetDestinationPartition() string { + if m != nil { + return m.DestinationPartition + } + return "" +} + // ServiceConnect are the shared Connect settings between all service // definitions from the agent to the state store. // mog annotation: @@ -213,7 +361,10 @@ type ServiceConnect struct { // result is identical to just making a second service registration via any // other means. // mog: func-to=ServiceDefinitionPtrToStructs func-from=NewServiceDefinitionPtrFromStructs - SidecarService *ServiceDefinition `protobuf:"bytes,3,opt,name=SidecarService,proto3" json:"SidecarService,omitempty"` + SidecarService *ServiceDefinition `protobuf:"bytes,3,opt,name=SidecarService,proto3" json:"SidecarService,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ServiceConnect) Reset() { *m = ServiceConnect{} } @@ -222,26 +373,18 @@ func (*ServiceConnect) ProtoMessage() {} func (*ServiceConnect) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{2} } + func (m *ServiceConnect) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ServiceConnect.Unmarshal(m, b) } func (m *ServiceConnect) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceConnect.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ServiceConnect.Marshal(b, m, deterministic) } func (m *ServiceConnect) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceConnect.Merge(m, src) } func (m *ServiceConnect) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ServiceConnect.Size(m) } func (m *ServiceConnect) XXX_DiscardUnknown() { xxx_messageInfo_ServiceConnect.DiscardUnknown(m) @@ -249,6 +392,20 @@ func (m *ServiceConnect) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceConnect proto.InternalMessageInfo +func (m *ServiceConnect) GetNative() bool { + if m != nil { + return m.Native + } + return false +} + +func (m *ServiceConnect) GetSidecarService() *ServiceDefinition { + if m != nil { + return m.SidecarService + } + return nil +} + // ExposeConfig describes HTTP paths to expose through Envoy outside of Connect. // Users can expose individual paths and/or all HTTP/GRPC paths for checks. // @@ -263,7 +420,10 @@ type ExposeConfig struct { Checks bool `protobuf:"varint,1,opt,name=Checks,proto3" json:"Checks,omitempty"` // Paths is the list of paths exposed through the proxy. 
// mog: func-to=ExposePathSliceToStructs func-from=NewExposePathSliceFromStructs - Paths []ExposePath `protobuf:"bytes,2,rep,name=Paths,proto3" json:"Paths"` + Paths []*ExposePath `protobuf:"bytes,2,rep,name=Paths,proto3" json:"Paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ExposeConfig) Reset() { *m = ExposeConfig{} } @@ -272,26 +432,18 @@ func (*ExposeConfig) ProtoMessage() {} func (*ExposeConfig) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{3} } + func (m *ExposeConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ExposeConfig.Unmarshal(m, b) } func (m *ExposeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExposeConfig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ExposeConfig.Marshal(b, m, deterministic) } func (m *ExposeConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_ExposeConfig.Merge(m, src) } func (m *ExposeConfig) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ExposeConfig.Size(m) } func (m *ExposeConfig) XXX_DiscardUnknown() { xxx_messageInfo_ExposeConfig.DiscardUnknown(m) @@ -299,6 +451,20 @@ func (m *ExposeConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ExposeConfig proto.InternalMessageInfo +func (m *ExposeConfig) GetChecks() bool { + if m != nil { + return m.Checks + } + return false +} + +func (m *ExposeConfig) GetPaths() []*ExposePath { + if m != nil { + return m.Paths + } + return nil +} + // mog annotation: // // target=github.com/hashicorp/consul/agent/structs.ExposePath @@ -317,7 +483,10 @@ type ExposePath struct { // Valid values are "http" and "http2", defaults to "http" Protocol string `protobuf:"bytes,4,opt,name=Protocol,proto3" json:"Protocol,omitempty"` // ParsedFromCheck is set if this path was parsed from a registered check - ParsedFromCheck bool `protobuf:"varint,5,opt,name=ParsedFromCheck,proto3" json:"ParsedFromCheck,omitempty"` + ParsedFromCheck bool `protobuf:"varint,5,opt,name=ParsedFromCheck,proto3" json:"ParsedFromCheck,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ExposePath) Reset() { *m = ExposePath{} } @@ -326,26 +495,18 @@ func (*ExposePath) ProtoMessage() {} func (*ExposePath) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{4} } + func (m *ExposePath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ExposePath.Unmarshal(m, b) } func (m *ExposePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExposePath.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ExposePath.Marshal(b, m, deterministic) } func (m *ExposePath) XXX_Merge(src proto.Message) { xxx_messageInfo_ExposePath.Merge(m, src) } func (m *ExposePath) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ExposePath.Size(m) } func (m *ExposePath) XXX_DiscardUnknown() { xxx_messageInfo_ExposePath.DiscardUnknown(m) @@ -353,13 +514,52 @@ func (m *ExposePath) XXX_DiscardUnknown() { var xxx_messageInfo_ExposePath proto.InternalMessageInfo +func (m *ExposePath) 
GetListenerPort() int32 { + if m != nil { + return m.ListenerPort + } + return 0 +} + +func (m *ExposePath) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *ExposePath) GetLocalPathPort() int32 { + if m != nil { + return m.LocalPathPort + } + return 0 +} + +func (m *ExposePath) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func (m *ExposePath) GetParsedFromCheck() bool { + if m != nil { + return m.ParsedFromCheck + } + return false +} + // mog annotation: // // target=github.com/hashicorp/consul/agent/structs.MeshGatewayConfig // output=service.gen.go // name=Structs type MeshGatewayConfig struct { - Mode github_com_hashicorp_consul_agent_structs.MeshGatewayMode `protobuf:"bytes,1,opt,name=Mode,proto3,casttype=github.com/hashicorp/consul/agent/structs.MeshGatewayMode" json:"Mode,omitempty"` + // mog: func-to=structs.MeshGatewayMode func-from=string + Mode string `protobuf:"bytes,1,opt,name=Mode,proto3" json:"Mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *MeshGatewayConfig) Reset() { *m = MeshGatewayConfig{} } @@ -368,26 +568,18 @@ func (*MeshGatewayConfig) ProtoMessage() {} func (*MeshGatewayConfig) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{5} } + func (m *MeshGatewayConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_MeshGatewayConfig.Unmarshal(m, b) } func (m *MeshGatewayConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MeshGatewayConfig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_MeshGatewayConfig.Marshal(b, m, deterministic) } func (m *MeshGatewayConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_MeshGatewayConfig.Merge(m, src) } func (m *MeshGatewayConfig) XXX_Size() int { - return m.Size() + return xxx_messageInfo_MeshGatewayConfig.Size(m) } func (m *MeshGatewayConfig) XXX_DiscardUnknown() { xxx_messageInfo_MeshGatewayConfig.DiscardUnknown(m) @@ -395,6 +587,13 @@ func (m *MeshGatewayConfig) XXX_DiscardUnknown() { var xxx_messageInfo_MeshGatewayConfig proto.InternalMessageInfo +func (m *MeshGatewayConfig) GetMode() string { + if m != nil { + return m.Mode + } + return "" +} + // mog annotation: // // target=github.com/hashicorp/consul/agent/structs.TransparentProxyConfig @@ -406,7 +605,10 @@ type TransparentProxyConfig struct { // DialedDirectly indicates whether transparent proxies can dial this proxy instance directly. // The discovery chain is not considered when dialing a service instance directly. // This setting is useful when addressing stateful services, such as a database cluster with a leader node. 
- DialedDirectly bool `protobuf:"varint,2,opt,name=DialedDirectly,proto3" json:"DialedDirectly,omitempty"` + DialedDirectly bool `protobuf:"varint,2,opt,name=DialedDirectly,proto3" json:"DialedDirectly,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TransparentProxyConfig) Reset() { *m = TransparentProxyConfig{} } @@ -415,26 +617,18 @@ func (*TransparentProxyConfig) ProtoMessage() {} func (*TransparentProxyConfig) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{6} } + func (m *TransparentProxyConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_TransparentProxyConfig.Unmarshal(m, b) } func (m *TransparentProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TransparentProxyConfig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_TransparentProxyConfig.Marshal(b, m, deterministic) } func (m *TransparentProxyConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_TransparentProxyConfig.Merge(m, src) } func (m *TransparentProxyConfig) XXX_Size() int { - return m.Size() + return xxx_messageInfo_TransparentProxyConfig.Size(m) } func (m *TransparentProxyConfig) XXX_DiscardUnknown() { xxx_messageInfo_TransparentProxyConfig.DiscardUnknown(m) @@ -442,6 +636,20 @@ func (m *TransparentProxyConfig) XXX_DiscardUnknown() { var xxx_messageInfo_TransparentProxyConfig proto.InternalMessageInfo +func (m *TransparentProxyConfig) GetOutboundListenerPort() int32 { + if m != nil { + return m.OutboundListenerPort + } + return 0 +} + +func (m *TransparentProxyConfig) GetDialedDirectly() bool { + if m != nil { + return m.DialedDirectly + } + return false +} + // ServiceDefinition is used to JSON decode the Service definitions. For // documentation on specific fields see NodeService which is better documented. 
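// Editorial note (illustrative sketch, not part of the patch): because these messages are now
// produced by protoc-gen-go rather than protoc-gen-gogo, nested messages become pointers and
// nil-safe getters are emitted, so callers can chain accessors without explicit nil checks:
//
//	mode := def.GetProxy().GetMeshGateway().GetMode() // "" if Proxy or MeshGateway is nil
//
// where def is a *ServiceDefinition as declared below; each generated getter returns the zero
// value when its receiver is nil.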
// @@ -451,19 +659,20 @@ var xxx_messageInfo_TransparentProxyConfig proto.InternalMessageInfo // output=service.gen.go // name=Structs type ServiceDefinition struct { - Kind github_com_hashicorp_consul_agent_structs.ServiceKind `protobuf:"bytes,1,opt,name=Kind,proto3,casttype=github.com/hashicorp/consul/agent/structs.ServiceKind" json:"Kind,omitempty"` - ID string `protobuf:"bytes,2,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` - Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` - Address string `protobuf:"bytes,5,opt,name=Address,proto3" json:"Address,omitempty"` + // mog: func-to=structs.ServiceKind func-from=string + Kind string `protobuf:"bytes,1,opt,name=Kind,proto3" json:"Kind,omitempty"` + ID string `protobuf:"bytes,2,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` + Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` + Address string `protobuf:"bytes,5,opt,name=Address,proto3" json:"Address,omitempty"` // mog: func-to=MapStringServiceAddressToStructs func-from=NewMapStringServiceAddressFromStructs - TaggedAddresses map[string]ServiceAddress `protobuf:"bytes,16,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TaggedAddresses map[string]*ServiceAddress `protobuf:"bytes,16,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // mog: func-to=int func-from=int32 Port int32 `protobuf:"varint,7,opt,name=Port,proto3" json:"Port,omitempty"` // Path for socket - SocketPath string `protobuf:"bytes,18,opt,name=SocketPath,proto3" json:"SocketPath,omitempty"` - Check CheckType `protobuf:"bytes,8,opt,name=Check,proto3" json:"Check"` + SocketPath string `protobuf:"bytes,18,opt,name=SocketPath,proto3" json:"SocketPath,omitempty"` + Check *CheckType `protobuf:"bytes,8,opt,name=Check,proto3" json:"Check,omitempty"` // mog: func-to=CheckTypesToStructs func-from=NewCheckTypesFromStructs Checks []*CheckType `protobuf:"bytes,9,rep,name=Checks,proto3" json:"Checks,omitempty"` // mog: func-to=WeightsPtrToStructs func-from=NewWeightsPtrFromStructs @@ -483,9 +692,12 @@ type ServiceDefinition struct { // mog: func-to=ConnectProxyConfigPtrToStructs func-from=NewConnectProxyConfigPtrFromStructs Proxy *ConnectProxyConfig `protobuf:"bytes,14,opt,name=Proxy,proto3" json:"Proxy,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - EnterpriseMeta pbcommongogo.EnterpriseMeta `protobuf:"bytes,17,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"` + EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,17,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=ServiceConnectPtrToStructs func-from=NewServiceConnectPtrFromStructs - Connect *ServiceConnect `protobuf:"bytes,15,opt,name=Connect,proto3" json:"Connect,omitempty"` + Connect *ServiceConnect `protobuf:"bytes,15,opt,name=Connect,proto3" 
json:"Connect,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ServiceDefinition) Reset() { *m = ServiceDefinition{} } @@ -494,26 +706,18 @@ func (*ServiceDefinition) ProtoMessage() {} func (*ServiceDefinition) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{7} } + func (m *ServiceDefinition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ServiceDefinition.Unmarshal(m, b) } func (m *ServiceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceDefinition.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ServiceDefinition.Marshal(b, m, deterministic) } func (m *ServiceDefinition) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceDefinition.Merge(m, src) } func (m *ServiceDefinition) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ServiceDefinition.Size(m) } func (m *ServiceDefinition) XXX_DiscardUnknown() { xxx_messageInfo_ServiceDefinition.DiscardUnknown(m) @@ -521,11 +725,133 @@ func (m *ServiceDefinition) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceDefinition proto.InternalMessageInfo +func (m *ServiceDefinition) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *ServiceDefinition) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *ServiceDefinition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ServiceDefinition) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *ServiceDefinition) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *ServiceDefinition) GetTaggedAddresses() map[string]*ServiceAddress { + if m != nil { + return m.TaggedAddresses + } + return nil +} + +func (m *ServiceDefinition) GetMeta() map[string]string { + if m != nil { + return m.Meta + } + return nil +} + +func (m *ServiceDefinition) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ServiceDefinition) GetSocketPath() string { + if m != nil { + return m.SocketPath + } + return "" +} + +func (m *ServiceDefinition) GetCheck() *CheckType { + if m != nil { + return m.Check + } + return nil +} + +func (m *ServiceDefinition) GetChecks() []*CheckType { + if m != nil { + return m.Checks + } + return nil +} + +func (m *ServiceDefinition) GetWeights() *Weights { + if m != nil { + return m.Weights + } + return nil +} + +func (m *ServiceDefinition) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *ServiceDefinition) GetEnableTagOverride() bool { + if m != nil { + return m.EnableTagOverride + } + return false +} + +func (m *ServiceDefinition) GetProxy() *ConnectProxyConfig { + if m != nil { + return m.Proxy + } + return nil +} + +func (m *ServiceDefinition) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if m != nil { + return m.EnterpriseMeta + } + return nil +} + +func (m *ServiceDefinition) GetConnect() *ServiceConnect { + if m != nil { + return m.Connect + } + return nil +} + // Type to hold an address and port of a service type ServiceAddress struct { Address string `protobuf:"bytes,1,opt,name=Address,proto3" json:"Address,omitempty"` // mog: func-to=int func-from=int32 - Port int32 `protobuf:"varint,2,opt,name=Port,proto3" 
json:"Port,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=Port,proto3" json:"Port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ServiceAddress) Reset() { *m = ServiceAddress{} } @@ -534,26 +860,18 @@ func (*ServiceAddress) ProtoMessage() {} func (*ServiceAddress) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{8} } + func (m *ServiceAddress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ServiceAddress.Unmarshal(m, b) } func (m *ServiceAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceAddress.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ServiceAddress.Marshal(b, m, deterministic) } func (m *ServiceAddress) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceAddress.Merge(m, src) } func (m *ServiceAddress) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ServiceAddress.Size(m) } func (m *ServiceAddress) XXX_DiscardUnknown() { xxx_messageInfo_ServiceAddress.DiscardUnknown(m) @@ -561,12 +879,29 @@ func (m *ServiceAddress) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceAddress proto.InternalMessageInfo +func (m *ServiceAddress) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *ServiceAddress) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + // Weights represent the weight used by DNS for a given status type Weights struct { // mog: func-to=int func-from=int32 Passing int32 `protobuf:"varint,1,opt,name=Passing,proto3" json:"Passing,omitempty"` // mog: func-to=int func-from=int32 - Warning int32 `protobuf:"varint,2,opt,name=Warning,proto3" json:"Warning,omitempty"` + Warning int32 `protobuf:"varint,2,opt,name=Warning,proto3" json:"Warning,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Weights) Reset() { *m = Weights{} } @@ -575,26 +910,18 @@ func (*Weights) ProtoMessage() {} func (*Weights) Descriptor() ([]byte, []int) { return fileDescriptor_cbb99233b75fb80b, []int{9} } + func (m *Weights) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Weights.Unmarshal(m, b) } func (m *Weights) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Weights.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Weights.Marshal(b, m, deterministic) } func (m *Weights) XXX_Merge(src proto.Message) { xxx_messageInfo_Weights.Merge(m, src) } func (m *Weights) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Weights.Size(m) } func (m *Weights) XXX_DiscardUnknown() { xxx_messageInfo_Weights.DiscardUnknown(m) @@ -602,6 +929,20 @@ func (m *Weights) XXX_DiscardUnknown() { var xxx_messageInfo_Weights proto.InternalMessageInfo +func (m *Weights) GetPassing() int32 { + if m != nil { + return m.Passing + } + return 0 +} + +func (m *Weights) GetWarning() int32 { + if m != nil { + return m.Warning + } + return 0 +} + func init() { proto.RegisterType((*ConnectProxyConfig)(nil), "pbservice.ConnectProxyConfig") proto.RegisterType((*Upstream)(nil), "pbservice.Upstream") @@ -612,3554 
+953,83 @@ func init() { proto.RegisterType((*TransparentProxyConfig)(nil), "pbservice.TransparentProxyConfig") proto.RegisterType((*ServiceDefinition)(nil), "pbservice.ServiceDefinition") proto.RegisterMapType((map[string]string)(nil), "pbservice.ServiceDefinition.MetaEntry") - proto.RegisterMapType((map[string]ServiceAddress)(nil), "pbservice.ServiceDefinition.TaggedAddressesEntry") + proto.RegisterMapType((map[string]*ServiceAddress)(nil), "pbservice.ServiceDefinition.TaggedAddressesEntry") proto.RegisterType((*ServiceAddress)(nil), "pbservice.ServiceAddress") proto.RegisterType((*Weights)(nil), "pbservice.Weights") } -func init() { proto.RegisterFile("proto/pbservice/service.proto", fileDescriptor_cbb99233b75fb80b) } +func init() { + proto.RegisterFile("proto/pbservice/service.proto", fileDescriptor_cbb99233b75fb80b) +} var fileDescriptor_cbb99233b75fb80b = []byte{ - // 1216 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x96, 0xdf, 0x6e, 0x13, 0x47, - 0x17, 0xc0, 0xb3, 0x89, 0x1d, 0xdb, 0x27, 0x7c, 0x21, 0x19, 0xf2, 0xd1, 0xad, 0x05, 0x4e, 0xb0, - 0x2a, 0x14, 0xb5, 0xc8, 0x06, 0x22, 0x4a, 0x41, 0xa2, 0x52, 0x13, 0xa7, 0x2d, 0x82, 0x80, 0xbb, - 0x71, 0x85, 0x5a, 0xa9, 0x17, 0xe3, 0xf5, 0x64, 0xbd, 0x8a, 0xbd, 0x63, 0xcd, 0x8c, 0x53, 0xfc, - 0x16, 0xbd, 0xec, 0x0b, 0x70, 0xdf, 0xc7, 0x40, 0xea, 0x0d, 0x97, 0xbd, 0x8a, 0x5a, 0xf2, 0x16, - 0x5c, 0x55, 0x73, 0x66, 0x76, 0xbd, 0xde, 0xdd, 0x22, 0xda, 0x2b, 0xcf, 0x9c, 0x7f, 0x33, 0x7b, - 0xce, 0xef, 0x9c, 0x31, 0x5c, 0x9f, 0x08, 0xae, 0x78, 0x7b, 0xd2, 0x97, 0x4c, 0x9c, 0x85, 0x3e, - 0x6b, 0xdb, 0xdf, 0x16, 0xca, 0x49, 0x2d, 0x51, 0xd4, 0xaf, 0x05, 0x9c, 0x07, 0x23, 0xd6, 0x46, - 0x45, 0x7f, 0x7a, 0xd2, 0x96, 0x4a, 0x4c, 0x7d, 0x65, 0x0c, 0xeb, 0xdb, 0x71, 0x1c, 0x9f, 0x8f, - 0xc7, 0x3c, 0x0a, 0x78, 0xc0, 0xdb, 0x66, 0x69, 0x0d, 0x6e, 0x64, 0x0f, 0x1a, 0x32, 0x3a, 0x52, - 0x43, 0x7f, 0xc8, 0xfc, 0x53, 0x6b, 0xb2, 0xa5, 0xbd, 0x8c, 0x99, 0x5e, 0x19, 0x69, 0xf3, 0x55, - 0x19, 0xc8, 0x01, 0x8f, 0x22, 0xe6, 0xab, 0xae, 0xe0, 0x2f, 0x67, 0x07, 0x3c, 0x3a, 0x09, 0x03, - 0xf2, 0x39, 0x5c, 0xed, 0x30, 0xa9, 0xc2, 0x88, 0xaa, 0x90, 0x47, 0xc7, 0x26, 0xe8, 0x33, 0x3a, - 0x66, 0xae, 0xb3, 0xe3, 0xec, 0xd6, 0xbc, 0x7f, 0xd0, 0x92, 0xbb, 0xb0, 0x95, 0xd7, 0x3c, 0xee, - 0xb8, 0xcb, 0xe8, 0x55, 0xa8, 0x23, 0xb7, 0xe1, 0xca, 0x53, 0xee, 0xd3, 0x91, 0x95, 0x7c, 0x35, - 0x18, 0x08, 0x26, 0xa5, 0xbb, 0x82, 0x2e, 0x45, 0x2a, 0xf2, 0x29, 0x6c, 0xa4, 0xc5, 0x5d, 0x2e, - 0x94, 0x5b, 0xda, 0x71, 0x76, 0xcb, 0x5e, 0x4e, 0x4e, 0xee, 0xc1, 0xaa, 0xf9, 0x26, 0xb7, 0xbc, - 0xe3, 0xec, 0xae, 0xdd, 0xfd, 0xa8, 0x65, 0x32, 0xdd, 0x8a, 0x33, 0xdd, 0x3a, 0xc6, 0x4c, 0xef, - 0x97, 0x5e, 0x9f, 0x6f, 0x3b, 0x9e, 0x35, 0x26, 0xf7, 0xa1, 0xf6, 0xfd, 0x44, 0x2a, 0xc1, 0xe8, - 0x58, 0xba, 0xab, 0x3b, 0x2b, 0xbb, 0x6b, 0x77, 0xaf, 0xb4, 0x92, 0xf4, 0xb6, 0x62, 0x1d, 0x7a, - 0x2d, 0x79, 0x73, 0x5b, 0xd2, 0x81, 0xb5, 0x23, 0x26, 0x87, 0xdf, 0x50, 0xc5, 0x7e, 0xa6, 0x33, - 0xb7, 0x82, 0x87, 0x5e, 0x4b, 0xb9, 0xa6, 0xb4, 0xe6, 0x2c, 0x1b, 0x23, 0xed, 0xa6, 0x6f, 0x7d, - 0xf8, 0x72, 0xc2, 0x25, 0x73, 0xab, 0xf6, 0xd6, 0xf3, 0x00, 0x46, 0xb1, 0xe0, 0x6b, 0x8d, 0xc9, - 0x13, 0x28, 0x1d, 0xf1, 0x01, 0x73, 0x6b, 0x3a, 0x77, 0xfb, 0xf7, 0xdf, 0x9d, 0x6f, 0xef, 0x05, - 0xa1, 0x1a, 0x4e, 0xfb, 0x2d, 0x9f, 0x8f, 0xdb, 0x43, 0x2a, 0x87, 0xa1, 0xcf, 0xc5, 0xa4, 0xed, - 0xf3, 0x48, 0x4e, 0x47, 0x6d, 0x1a, 0xb0, 0x48, 0x59, 0xd2, 0x64, 0x0b, 0xeb, 0xaf, 0xdd, 0x3d, - 0x0c, 0x42, 0x8e, 0x61, 0xa3, 0x27, 0x68, 0x24, 0x27, 0x54, 0xb0, 0xc8, 
0xd0, 0xe1, 0x02, 0xde, - 0xe6, 0x46, 0xea, 0x36, 0x59, 0x93, 0x85, 0x7b, 0xe5, 0x02, 0x68, 0xb0, 0xd2, 0x25, 0x3a, 0xe6, - 0xfe, 0x29, 0x53, 0x5d, 0xaa, 0x86, 0xee, 0x9a, 0x01, 0xab, 0x58, 0xdb, 0xfc, 0xbd, 0x04, 0xd5, - 0x38, 0xc9, 0x64, 0x17, 0x2e, 0xa7, 0x48, 0xea, 0xcd, 0x26, 0x31, 0x96, 0x59, 0x71, 0x86, 0x47, - 0x8d, 0xa8, 0x9c, 0x50, 0x9f, 0x15, 0xf0, 0x98, 0xe8, 0x32, 0xd1, 0x11, 0xfa, 0x95, 0x5c, 0x74, - 0xa4, 0xbd, 0x01, 0xd0, 0xa1, 0x8a, 0xfa, 0x2c, 0x52, 0x4c, 0x20, 0x81, 0x35, 0x2f, 0x25, 0x49, - 0x38, 0xdd, 0x0f, 0xa3, 0x41, 0x8c, 0x75, 0x19, 0xad, 0x72, 0x72, 0xf2, 0x09, 0xfc, 0x2f, 0x91, - 0x21, 0xd0, 0xab, 0x08, 0xf4, 0xa2, 0x30, 0x45, 0x73, 0xe5, 0xdf, 0xd0, 0x9c, 0x81, 0xb2, 0xfa, - 0xdf, 0xa0, 0xbc, 0x0d, 0x57, 0x0e, 0x58, 0xa4, 0x04, 0x1d, 0x8d, 0xac, 0xd5, 0x54, 0xb0, 0x01, - 0xc2, 0x56, 0xf5, 0x8a, 0x54, 0x49, 0x6b, 0xeb, 0xfb, 0xa7, 0x4a, 0x0d, 0xa9, 0xd6, 0x5e, 0x54, - 0x15, 0x78, 0x20, 0xd0, 0x6b, 0x85, 0x1e, 0x88, 0xe9, 0x62, 0x89, 0xbb, 0x54, 0xa8, 0x50, 0x2f, - 0xdc, 0x4b, 0xb9, 0x12, 0x27, 0xba, 0x66, 0x04, 0xeb, 0x16, 0x31, 0x3b, 0xfb, 0xc8, 0x55, 0x58, - 0x7d, 0x46, 0x55, 0x78, 0x66, 0x48, 0xaa, 0x7a, 0x76, 0x47, 0x3a, 0xb0, 0x7e, 0x1c, 0x0e, 0x98, - 0x4f, 0x85, 0x75, 0x40, 0x16, 0x16, 0x93, 0x67, 0x35, 0x1d, 0x76, 0x12, 0x46, 0x18, 0xdf, 0xcb, - 0xf8, 0x34, 0x7f, 0x80, 0x4b, 0xe9, 0xae, 0xd5, 0xa7, 0x1d, 0xe8, 0xd1, 0x2c, 0xe3, 0xd3, 0xcc, - 0x8e, 0xdc, 0x81, 0xb2, 0xce, 0x82, 0x74, 0x97, 0x71, 0xe2, 0xfc, 0x3f, 0xd7, 0xf5, 0x5a, 0x6b, - 0x4b, 0x63, 0x2c, 0x9b, 0xbf, 0x39, 0x00, 0x73, 0x1d, 0x69, 0xc2, 0xa5, 0xa7, 0xa1, 0x54, 0x2c, - 0x62, 0x02, 0x29, 0x72, 0x90, 0xa2, 0x05, 0x19, 0x21, 0x50, 0xc2, 0x32, 0x98, 0x26, 0xc0, 0x75, - 0x82, 0x9f, 0xde, 0xa0, 0xe3, 0x4a, 0x0a, 0xbf, 0x58, 0x48, 0xea, 0x50, 0xed, 0x6a, 0xd0, 0x7c, - 0x3e, 0xb2, 0xb8, 0x27, 0x7b, 0xdd, 0x36, 0x5d, 0x2a, 0x24, 0x1b, 0x7c, 0x2d, 0xf8, 0x18, 0xbf, - 0x07, 0x59, 0xaf, 0x7a, 0x59, 0x71, 0xf3, 0x04, 0x36, 0x73, 0xbc, 0x91, 0xef, 0xec, 0xe8, 0xc2, - 0x46, 0xde, 0x7f, 0xf4, 0xee, 0x7c, 0xfb, 0xc1, 0x87, 0x8f, 0xae, 0x54, 0xb8, 0xf9, 0x00, 0x6b, - 0x2a, 0xb8, 0x5a, 0x3c, 0x9d, 0x34, 0x33, 0xcf, 0xa7, 0xaa, 0xcf, 0xa7, 0xd1, 0xa0, 0x20, 0x5b, - 0x85, 0x3a, 0x72, 0x13, 0xd6, 0x3b, 0x21, 0x1d, 0xb1, 0x41, 0x27, 0x14, 0xcc, 0x57, 0xa3, 0x19, - 0xe6, 0xaf, 0xea, 0x65, 0xa4, 0xcd, 0x57, 0x15, 0xd8, 0xcc, 0x11, 0x41, 0x8e, 0xa0, 0xf4, 0x24, - 0x8c, 0x06, 0xf6, 0xf3, 0x1e, 0xbc, 0x3b, 0xdf, 0xbe, 0xf7, 0xe1, 0x9f, 0x67, 0xc3, 0xe9, 0x00, - 0x1e, 0x86, 0x21, 0xeb, 0xb0, 0x9c, 0xbc, 0xaa, 0xcb, 0x8f, 0x3b, 0xba, 0xa4, 0xa9, 0x41, 0x85, - 0x6b, 0x2d, 0xeb, 0xd1, 0x40, 0xba, 0xa5, 0x9d, 0x15, 0x2d, 0xd3, 0x6b, 0xe2, 0x42, 0x65, 0x71, - 0x10, 0xc5, 0x5b, 0x42, 0xe1, 0x72, 0x8f, 0x06, 0x01, 0x8b, 0x07, 0x12, 0x93, 0xee, 0x06, 0x42, - 0x78, 0xe7, 0x7d, 0xa4, 0xb7, 0x32, 0x3e, 0x87, 0x91, 0x12, 0x33, 0x0b, 0x68, 0x36, 0x1e, 0x79, - 0x08, 0xa5, 0x23, 0xa6, 0xa8, 0x7d, 0x4e, 0x6f, 0xbe, 0x37, 0xae, 0x36, 0xc4, 0x60, 0x1e, 0xfa, - 0x20, 0xb3, 0xba, 0x42, 0x15, 0xac, 0x10, 0xae, 0xf5, 0xf8, 0x4d, 0x0d, 0x15, 0x62, 0xc6, 0xef, - 0xc2, 0x2c, 0x29, 0x1b, 0x0e, 0xcd, 0xbc, 0xdb, 0x4a, 0x1d, 0x88, 0x72, 0xfd, 0x42, 0xc4, 0xcd, - 0x84, 0x02, 0x72, 0x2b, 0xe9, 0xcb, 0x1a, 0xde, 0xb1, 0xd0, 0x25, 0xe9, 0xd6, 0x5b, 0x50, 0x79, - 0xc1, 0xc2, 0x60, 0xa8, 0xa4, 0x7d, 0x17, 0x49, 0xca, 0xdc, 0x6a, 0xbc, 0xd8, 0x84, 0x6c, 0x41, - 0xb9, 0xc7, 0x4f, 0x59, 0x64, 0x67, 0x99, 0xd9, 0x90, 0x5b, 0xb0, 0x79, 0x18, 0xd1, 0xfe, 0x88, - 0xf5, 0x68, 0xf0, 0xfc, 0x8c, 0x09, 0x11, 0x0e, 0x18, 0x8e, 0xae, 0xaa, 0x97, 0x57, 0x90, 0x3d, - 
0x28, 0x9b, 0x77, 0x78, 0x1d, 0xcf, 0xbb, 0x9e, 0xbe, 0x5e, 0xee, 0x4f, 0x9c, 0x67, 0x6c, 0xc9, - 0xb7, 0xb0, 0x7e, 0xa8, 0x9f, 0xa3, 0x89, 0x08, 0x25, 0xc3, 0x02, 0x6c, 0xa2, 0x77, 0xbd, 0x35, - 0xff, 0x37, 0xd9, 0x5a, 0xb4, 0xb0, 0x59, 0xc9, 0xf8, 0x91, 0x3d, 0xa8, 0xd8, 0x63, 0xdc, 0xcb, - 0x18, 0xe2, 0xe3, 0x7c, 0x0d, 0xad, 0x81, 0x17, 0x5b, 0xd6, 0x7f, 0x82, 0xad, 0x22, 0x48, 0xc8, - 0x06, 0xac, 0x9c, 0xb2, 0x99, 0x7d, 0xb8, 0xf5, 0x92, 0xb4, 0xa1, 0x7c, 0x46, 0x47, 0x53, 0xf3, - 0x3a, 0x17, 0x06, 0xb7, 0x21, 0x3c, 0x63, 0xf7, 0x70, 0xf9, 0x0b, 0xa7, 0x7e, 0x1f, 0x6a, 0x09, - 0x2b, 0x05, 0x31, 0xb7, 0xd2, 0x31, 0x6b, 0x29, 0xc7, 0xe6, 0x97, 0xc9, 0x1b, 0x10, 0xb7, 0x40, - 0xaa, 0x39, 0x9c, 0xc5, 0xe6, 0x88, 0xe9, 0x5b, 0x9e, 0xd3, 0xd7, 0x7c, 0x94, 0x54, 0x5f, 0x3b, - 0x76, 0xa9, 0x94, 0x61, 0x14, 0xd8, 0x09, 0x12, 0x6f, 0xb5, 0xe6, 0x05, 0x15, 0x91, 0xd6, 0x18, - 0xdf, 0x78, 0xbb, 0x7f, 0xf4, 0xfa, 0xaf, 0xc6, 0xd2, 0xeb, 0xb7, 0x0d, 0xe7, 0xcd, 0xdb, 0x86, - 0xf3, 0xe7, 0xdb, 0x86, 0xf3, 0xcb, 0x45, 0x63, 0xe9, 0xd7, 0x8b, 0xc6, 0xd2, 0x9b, 0x8b, 0xc6, - 0xd2, 0x1f, 0x17, 0x8d, 0xa5, 0x1f, 0x3f, 0x7b, 0xdf, 0x80, 0xc8, 0xfc, 0xdf, 0xef, 0xaf, 0xa2, - 0x60, 0xef, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6d, 0x83, 0xcc, 0x12, 0x72, 0x0c, 0x00, 0x00, + // 1086 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xef, 0x6e, 0x1b, 0x45, + 0x10, 0x97, 0xff, 0xdb, 0x93, 0xe0, 0x24, 0x9b, 0x10, 0x8e, 0xd0, 0xa2, 0xf4, 0x84, 0x4a, 0xd4, + 0x06, 0xbb, 0x4d, 0x24, 0x40, 0x95, 0x88, 0x44, 0xe3, 0x80, 0x2a, 0x9a, 0xd6, 0xba, 0x18, 0x55, + 0x02, 0xf1, 0x61, 0x7d, 0xb7, 0xb1, 0x4f, 0xb9, 0xec, 0x59, 0xbb, 0xeb, 0x50, 0xbf, 0x15, 0x6f, + 0xc0, 0x1b, 0xf0, 0x89, 0x07, 0x42, 0x3b, 0xbb, 0x77, 0x59, 0xdf, 0x1d, 0x15, 0x7c, 0xf2, 0xee, + 0xfc, 0xe6, 0x37, 0xbb, 0x9e, 0xf9, 0xcd, 0xec, 0xc1, 0xc3, 0x85, 0x48, 0x55, 0x3a, 0x5c, 0x4c, + 0x25, 0x13, 0x77, 0x71, 0xc8, 0x86, 0xf6, 0x77, 0x80, 0x76, 0xd2, 0xcb, 0x81, 0x83, 0x07, 0xb3, + 0x34, 0x9d, 0x25, 0x6c, 0x88, 0xc0, 0x74, 0x79, 0x3d, 0x94, 0x4a, 0x2c, 0x43, 0x65, 0x1c, 0x0f, + 0x3e, 0xcb, 0xe2, 0x84, 0xe9, 0xed, 0x6d, 0xca, 0x87, 0xe6, 0xc7, 0x82, 0x8f, 0x8a, 0x87, 0xcc, + 0x19, 0x4d, 0xd4, 0x3c, 0x9c, 0xb3, 0xf0, 0xc6, 0xb8, 0xf8, 0x7f, 0x35, 0x81, 0x9c, 0xa7, 0x9c, + 0xb3, 0x50, 0x8d, 0x45, 0xfa, 0x7e, 0x75, 0x9e, 0xf2, 0xeb, 0x78, 0x46, 0xbe, 0x86, 0xfd, 0x11, + 0x93, 0x2a, 0xe6, 0x54, 0xc5, 0x29, 0xbf, 0x32, 0xf4, 0x37, 0xf4, 0x96, 0x79, 0xb5, 0xc3, 0xda, + 0x51, 0x2f, 0xf8, 0x17, 0x94, 0x9c, 0xc0, 0x5e, 0x19, 0x79, 0x35, 0xf2, 0xea, 0xc8, 0xaa, 0xc4, + 0xc8, 0x33, 0xd8, 0x7d, 0x9d, 0x86, 0x34, 0xb1, 0x96, 0xef, 0xa3, 0x48, 0x30, 0x29, 0xbd, 0x06, + 0x52, 0xaa, 0x20, 0xf2, 0x04, 0xb6, 0x5d, 0xf3, 0x38, 0x15, 0xca, 0x6b, 0x1e, 0xd6, 0x8e, 0x5a, + 0x41, 0xc9, 0x4e, 0x86, 0xd0, 0x36, 0xff, 0xc9, 0x6b, 0x1d, 0xd6, 0x8e, 0x36, 0x4e, 0x3e, 0x19, + 0x98, 0x7c, 0x0e, 0xb2, 0x7c, 0x0e, 0xae, 0x30, 0x9f, 0x81, 0x75, 0x23, 0xcf, 0xa1, 0xf7, 0xf3, + 0x42, 0x2a, 0xc1, 0xe8, 0xad, 0xf4, 0xda, 0x87, 0x8d, 0xa3, 0x8d, 0x93, 0xdd, 0x41, 0x9e, 0xc2, + 0x41, 0x86, 0x05, 0xf7, 0x5e, 0xe4, 0x0c, 0x36, 0x2e, 0x99, 0x9c, 0xff, 0x48, 0x15, 0xfb, 0x9d, + 0xae, 0xbc, 0x0e, 0x1e, 0xf4, 0xc0, 0x21, 0x39, 0xa8, 0x39, 0x25, 0x70, 0x09, 0xfa, 0x8e, 0x17, + 0xef, 0x17, 0xa9, 0x64, 0x5e, 0xd7, 0xde, 0xf1, 0x9e, 0x6a, 0x00, 0xcb, 0xb2, 0x6e, 0x84, 0x40, + 0xf3, 0x32, 0x8d, 0x98, 0xd7, 0xc3, 0x1c, 0xe1, 0x9a, 0x5c, 0xc2, 0xf6, 0x44, 0x50, 0x2e, 0x17, + 0x54, 0x30, 0x6e, 0x8a, 0xe9, 0x01, 0x86, 0x7b, 0xe4, 0x84, 0x2b, 0xba, 
0xd8, 0xc0, 0x25, 0xaa, + 0x56, 0x80, 0x9b, 0xcb, 0xab, 0x34, 0xbc, 0x61, 0x6a, 0x4c, 0xd5, 0xdc, 0xdb, 0x30, 0x0a, 0xa8, + 0x46, 0xfd, 0x3f, 0x9b, 0xd0, 0xcd, 0x32, 0x43, 0x8e, 0x60, 0xcb, 0x29, 0xf9, 0x64, 0xb5, 0xc8, + 0xf4, 0x53, 0x34, 0x17, 0x84, 0xa3, 0xb5, 0x24, 0x17, 0x34, 0x64, 0x15, 0xc2, 0xc9, 0xb1, 0x42, + 0x74, 0x54, 0x67, 0xa3, 0x14, 0x1d, 0x65, 0xf9, 0x39, 0xc0, 0x88, 0x2a, 0x1a, 0x32, 0xae, 0x98, + 0x40, 0xa9, 0xf4, 0x02, 0xc7, 0x92, 0x0b, 0xea, 0x65, 0xcc, 0xa3, 0x4c, 0x7f, 0x2d, 0xf4, 0x2a, + 0xd9, 0xc9, 0x17, 0xf0, 0x51, 0x6e, 0x43, 0xe5, 0xb5, 0x51, 0x79, 0xeb, 0x46, 0x47, 0x76, 0x9d, + 0xff, 0x26, 0xbb, 0x82, 0x86, 0xba, 0xff, 0x57, 0x43, 0xcf, 0x60, 0xf7, 0x9c, 0x71, 0x25, 0x68, + 0x92, 0x58, 0x7c, 0x29, 0x58, 0x84, 0x0a, 0xe9, 0x06, 0x55, 0x50, 0xde, 0x77, 0xfa, 0xce, 0x4e, + 0x79, 0xc1, 0xe9, 0xbb, 0x75, 0xa8, 0x82, 0x81, 0x2a, 0xdc, 0xa8, 0x64, 0xa0, 0x28, 0xd7, 0xcb, + 0x3a, 0xa6, 0x42, 0xc5, 0x7a, 0xe1, 0x6d, 0x96, 0xca, 0x9a, 0x63, 0x3e, 0x87, 0xbe, 0x95, 0x95, + 0x1d, 0x4c, 0x64, 0x1f, 0xda, 0x6f, 0xa8, 0x8a, 0xef, 0x8c, 0x7a, 0xba, 0x81, 0xdd, 0x91, 0x11, + 0xf4, 0xaf, 0xe2, 0x88, 0x85, 0x54, 0x58, 0x02, 0xd6, 0x7f, 0x3d, 0x6d, 0x16, 0x19, 0xb1, 0xeb, + 0x98, 0x63, 0xfc, 0xa0, 0xc0, 0xf1, 0xaf, 0x60, 0xd3, 0x6d, 0x32, 0x7d, 0xda, 0xb9, 0x9e, 0x90, + 0x32, 0x3b, 0xcd, 0xec, 0xc8, 0x53, 0x68, 0xe9, 0x2c, 0x48, 0xaf, 0x8e, 0x43, 0xe1, 0xe3, 0x52, + 0x93, 0x6a, 0x34, 0x30, 0x3e, 0xfe, 0x1f, 0x35, 0x80, 0x7b, 0x2b, 0xf1, 0x61, 0xf3, 0x75, 0x2c, + 0x15, 0xe3, 0x4c, 0xa0, 0x66, 0x6a, 0xa8, 0x99, 0x35, 0x9b, 0x6e, 0x6a, 0x2c, 0x80, 0x91, 0x3c, + 0xae, 0x73, 0xb1, 0xe9, 0x0d, 0x12, 0x1b, 0x8e, 0xd8, 0x32, 0x23, 0x39, 0x80, 0xee, 0x58, 0xcb, + 0x2a, 0x4c, 0x13, 0x2b, 0xee, 0x7c, 0xaf, 0x9b, 0x64, 0x4c, 0x85, 0x64, 0xd1, 0x0f, 0x22, 0xbd, + 0xc5, 0x7f, 0x82, 0xca, 0xee, 0x06, 0x45, 0xb3, 0xff, 0x25, 0xec, 0x94, 0x34, 0x96, 0x4f, 0x9a, + 0xda, 0xfd, 0xa4, 0xf1, 0x15, 0xec, 0x57, 0x8f, 0x11, 0x5d, 0xee, 0xb7, 0x4b, 0x35, 0x4d, 0x97, + 0x3c, 0xaa, 0xf8, 0xbb, 0x95, 0x18, 0x79, 0x0c, 0xfd, 0x51, 0x4c, 0x13, 0x16, 0x8d, 0x62, 0xc1, + 0x42, 0x95, 0xac, 0x30, 0x01, 0xdd, 0xa0, 0x60, 0xf5, 0xff, 0x6e, 0xc3, 0x4e, 0xa9, 0x98, 0xfa, + 0x7e, 0x3f, 0xc5, 0x3c, 0xca, 0xee, 0xa7, 0xd7, 0xa4, 0x0f, 0xf5, 0xfc, 0xc9, 0xa9, 0xbf, 0x1a, + 0x69, 0x1f, 0x67, 0x38, 0xe0, 0x5a, 0xdb, 0x26, 0x74, 0x26, 0xbd, 0xe6, 0x61, 0x43, 0xdb, 0xf4, + 0x9a, 0x78, 0xd0, 0x59, 0x6f, 0xfe, 0x6c, 0x4b, 0x7e, 0x85, 0xad, 0x09, 0x9d, 0xcd, 0x58, 0x36, + 0x04, 0x98, 0xf4, 0xb6, 0x51, 0x04, 0xcf, 0x3f, 0xa4, 0xb4, 0x41, 0x81, 0x73, 0xc1, 0x95, 0x58, + 0x05, 0xc5, 0x48, 0xe4, 0x05, 0x34, 0x2f, 0x99, 0xa2, 0xf6, 0xad, 0x79, 0xfc, 0xc1, 0x88, 0xda, + 0xd1, 0x84, 0x41, 0x0e, 0x6a, 0x46, 0x27, 0xb8, 0x83, 0x09, 0xc6, 0xb5, 0x1e, 0x76, 0x4e, 0x3b, + 0x13, 0x33, 0xec, 0x9c, 0x2e, 0x7e, 0x02, 0x2d, 0xa3, 0x03, 0x33, 0x63, 0xf6, 0x9c, 0x03, 0xd1, + 0xae, 0xe7, 0x71, 0x60, 0x5c, 0xc8, 0x71, 0xde, 0x0b, 0x3d, 0xbc, 0x5d, 0xb5, 0x73, 0xd6, 0x21, + 0xc7, 0xd0, 0x79, 0xc7, 0xe2, 0xd9, 0x5c, 0x49, 0xfb, 0xf2, 0x10, 0xc7, 0xdd, 0x22, 0x41, 0xe6, + 0x42, 0xf6, 0xa0, 0x35, 0x49, 0x6f, 0x18, 0xb7, 0xf3, 0xc3, 0x6c, 0xc8, 0x31, 0xec, 0x5c, 0x70, + 0x3a, 0x4d, 0xd8, 0x84, 0xce, 0xde, 0xde, 0x31, 0x21, 0xe2, 0x88, 0xe1, 0xb8, 0xe8, 0x06, 0x65, + 0x80, 0x9c, 0x42, 0xcb, 0xbc, 0x74, 0x7d, 0x3c, 0xef, 0xa1, 0x7b, 0xbd, 0xd2, 0x57, 0x4d, 0x60, + 0x7c, 0xc9, 0x19, 0xf4, 0x2f, 0xf4, 0xd8, 0x5f, 0x88, 0x58, 0x32, 0x4c, 0xfd, 0x0e, 0xb2, 0xf7, + 0x07, 0xf6, 0xeb, 0x69, 0x1d, 0x0d, 0x0a, 0xde, 0xe4, 0x14, 0x3a, 0x36, 0xb8, 0xb7, 0x85, 0xc4, + 
0x4f, 0xcb, 0x35, 0xb3, 0x0e, 0x41, 0xe6, 0x79, 0xf0, 0x1b, 0xec, 0x55, 0xc9, 0x81, 0x6c, 0x43, + 0xe3, 0x86, 0xad, 0xac, 0x7e, 0xf5, 0x92, 0x0c, 0xa1, 0x75, 0x47, 0x93, 0xa5, 0x79, 0xfb, 0x2a, + 0x83, 0xdb, 0x10, 0x81, 0xf1, 0x7b, 0x51, 0xff, 0xb6, 0x76, 0xf0, 0x0d, 0xf4, 0x72, 0x6d, 0x54, + 0xc4, 0xdc, 0x73, 0x63, 0xf6, 0x1c, 0xa2, 0x7f, 0x96, 0x4f, 0xdb, 0x4c, 0xec, 0x4e, 0x1b, 0xd4, + 0xd6, 0xdb, 0x20, 0x53, 0x5b, 0xfd, 0x5e, 0x6d, 0xfe, 0x77, 0x79, 0xcd, 0x35, 0x71, 0x4c, 0xa5, + 0x8c, 0xf9, 0xcc, 0x36, 0x7c, 0xb6, 0xd5, 0xc8, 0x3b, 0x2a, 0xb8, 0x46, 0x0c, 0x37, 0xdb, 0xbe, + 0xfc, 0xea, 0x97, 0xa7, 0xb3, 0x58, 0xcd, 0x97, 0x53, 0x9d, 0xfb, 0xe1, 0x9c, 0xca, 0x79, 0x1c, + 0xa6, 0x62, 0x31, 0x0c, 0x53, 0x2e, 0x97, 0xc9, 0xb0, 0xf0, 0x01, 0x3b, 0x6d, 0xa3, 0xe1, 0xf4, + 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0x23, 0xd7, 0x20, 0x3f, 0x0b, 0x00, 0x00, } - -func (m *ConnectProxyConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConnectProxyConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConnectProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LocalServiceSocketPath) > 0 { - i -= len(m.LocalServiceSocketPath) - copy(dAtA[i:], m.LocalServiceSocketPath) - i = encodeVarintService(dAtA, i, uint64(len(m.LocalServiceSocketPath))) - i-- - dAtA[i] = 0x5a - } - { - size, err := m.TransparentProxy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - if len(m.Mode) > 0 { - i -= len(m.Mode) - copy(dAtA[i:], m.Mode) - i = encodeVarintService(dAtA, i, uint64(len(m.Mode))) - i-- - dAtA[i] = 0x4a - } - { - size, err := m.Expose.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - { - size, err := m.MeshGateway.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.Upstreams) > 0 { - for iNdEx := len(m.Upstreams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Upstreams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if m.Config != nil { - { - size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.LocalServicePort != 0 { - i = encodeVarintService(dAtA, i, uint64(m.LocalServicePort)) - i-- - dAtA[i] = 0x20 - } - if len(m.LocalServiceAddress) > 0 { - i -= len(m.LocalServiceAddress) - copy(dAtA[i:], m.LocalServiceAddress) - i = encodeVarintService(dAtA, i, uint64(len(m.LocalServiceAddress))) - i-- - dAtA[i] = 0x1a - } - if len(m.DestinationServiceID) > 0 { - i -= len(m.DestinationServiceID) - copy(dAtA[i:], m.DestinationServiceID) - i = encodeVarintService(dAtA, i, uint64(len(m.DestinationServiceID))) - i-- - dAtA[i] = 0x12 - } - if len(m.DestinationServiceName) > 0 { - i -= len(m.DestinationServiceName) - copy(dAtA[i:], m.DestinationServiceName) - i = encodeVarintService(dAtA, i, uint64(len(m.DestinationServiceName))) - i-- - 
dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Upstream) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Upstream) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Upstream) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DestinationPartition) > 0 { - i -= len(m.DestinationPartition) - copy(dAtA[i:], m.DestinationPartition) - i = encodeVarintService(dAtA, i, uint64(len(m.DestinationPartition))) - i-- - dAtA[i] = 0x62 - } - if len(m.LocalBindSocketMode) > 0 { - i -= len(m.LocalBindSocketMode) - copy(dAtA[i:], m.LocalBindSocketMode) - i = encodeVarintService(dAtA, i, uint64(len(m.LocalBindSocketMode))) - i-- - dAtA[i] = 0x5a - } - if len(m.LocalBindSocketPath) > 0 { - i -= len(m.LocalBindSocketPath) - copy(dAtA[i:], m.LocalBindSocketPath) - i = encodeVarintService(dAtA, i, uint64(len(m.LocalBindSocketPath))) - i-- - dAtA[i] = 0x52 - } - if m.CentrallyConfigured { - i-- - if m.CentrallyConfigured { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - { - size, err := m.MeshGateway.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if m.Config != nil { - { - size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.LocalBindPort != 0 { - i = encodeVarintService(dAtA, i, uint64(m.LocalBindPort)) - i-- - dAtA[i] = 0x30 - } - if len(m.LocalBindAddress) > 0 { - i -= len(m.LocalBindAddress) - copy(dAtA[i:], m.LocalBindAddress) - i = encodeVarintService(dAtA, i, uint64(len(m.LocalBindAddress))) - i-- - dAtA[i] = 0x2a - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintService(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0x22 - } - if len(m.DestinationName) > 0 { - i -= len(m.DestinationName) - copy(dAtA[i:], m.DestinationName) - i = encodeVarintService(dAtA, i, uint64(len(m.DestinationName))) - i-- - dAtA[i] = 0x1a - } - if len(m.DestinationNamespace) > 0 { - i -= len(m.DestinationNamespace) - copy(dAtA[i:], m.DestinationNamespace) - i = encodeVarintService(dAtA, i, uint64(len(m.DestinationNamespace))) - i-- - dAtA[i] = 0x12 - } - if len(m.DestinationType) > 0 { - i -= len(m.DestinationType) - copy(dAtA[i:], m.DestinationType) - i = encodeVarintService(dAtA, i, uint64(len(m.DestinationType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServiceConnect) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceConnect) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceConnect) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SidecarService != nil { - { - size, err := m.SidecarService.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Native { - i-- - if m.Native { - 
dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExposeConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExposeConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExposeConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Checks { - i-- - if m.Checks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExposePath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExposePath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExposePath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ParsedFromCheck { - i-- - if m.ParsedFromCheck { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Protocol) > 0 { - i -= len(m.Protocol) - copy(dAtA[i:], m.Protocol) - i = encodeVarintService(dAtA, i, uint64(len(m.Protocol))) - i-- - dAtA[i] = 0x22 - } - if m.LocalPathPort != 0 { - i = encodeVarintService(dAtA, i, uint64(m.LocalPathPort)) - i-- - dAtA[i] = 0x18 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintService(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - } - if m.ListenerPort != 0 { - i = encodeVarintService(dAtA, i, uint64(m.ListenerPort)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MeshGatewayConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MeshGatewayConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MeshGatewayConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Mode) > 0 { - i -= len(m.Mode) - copy(dAtA[i:], m.Mode) - i = encodeVarintService(dAtA, i, uint64(len(m.Mode))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TransparentProxyConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransparentProxyConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransparentProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DialedDirectly { - i-- - if m.DialedDirectly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.OutboundListenerPort != 0 { - i = encodeVarintService(dAtA, i, 
uint64(m.OutboundListenerPort)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ServiceDefinition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceDefinition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SocketPath) > 0 { - i -= len(m.SocketPath) - copy(dAtA[i:], m.SocketPath) - i = encodeVarintService(dAtA, i, uint64(len(m.SocketPath))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - { - size, err := m.EnterpriseMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - if len(m.TaggedAddresses) > 0 { - for k := range m.TaggedAddresses { - v := m.TaggedAddresses[k] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintService(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintService(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - } - if m.Connect != nil { - { - size, err := m.Connect.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - if m.Proxy != nil { - { - size, err := m.Proxy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.EnableTagOverride { - i-- - if m.EnableTagOverride { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintService(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0x5a - } - if m.Weights != nil { - { - size, err := m.Weights.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if len(m.Checks) > 0 { - for iNdEx := len(m.Checks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Checks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - { - size, err := m.Check.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if m.Port != 0 { - i = encodeVarintService(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x38 - } - if len(m.Meta) > 0 { - for k := range m.Meta { - v := m.Meta[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintService(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintService(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintService(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintService(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0x2a - } - if len(m.Tags) > 0 { - for iNdEx 
:= len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tags[iNdEx]) - copy(dAtA[i:], m.Tags[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Tags[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintService(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Kind) > 0 { - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintService(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServiceAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceAddress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Port != 0 { - i = encodeVarintService(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x10 - } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintService(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Weights) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Weights) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Weights) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Warning != 0 { - i = encodeVarintService(dAtA, i, uint64(m.Warning)) - i-- - dAtA[i] = 0x10 - } - if m.Passing != 0 { - i = encodeVarintService(dAtA, i, uint64(m.Passing)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintService(dAtA []byte, offset int, v uint64) int { - offset -= sovService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ConnectProxyConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DestinationServiceName) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.DestinationServiceID) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.LocalServiceAddress) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.LocalServicePort != 0 { - n += 1 + sovService(uint64(m.LocalServicePort)) - } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovService(uint64(l)) - } - if len(m.Upstreams) > 0 { - for _, e := range m.Upstreams { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - l = m.MeshGateway.Size() - n += 1 + l + sovService(uint64(l)) - l = m.Expose.Size() - n += 1 + l + sovService(uint64(l)) - l = len(m.Mode) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = m.TransparentProxy.Size() - n += 1 + l + sovService(uint64(l)) - l = len(m.LocalServiceSocketPath) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *Upstream) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DestinationType) - if l > 0 { - n += 1 + l + 
sovService(uint64(l)) - } - l = len(m.DestinationNamespace) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.DestinationName) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.LocalBindAddress) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.LocalBindPort != 0 { - n += 1 + sovService(uint64(m.LocalBindPort)) - } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovService(uint64(l)) - } - l = m.MeshGateway.Size() - n += 1 + l + sovService(uint64(l)) - if m.CentrallyConfigured { - n += 2 - } - l = len(m.LocalBindSocketPath) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.LocalBindSocketMode) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.DestinationPartition) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *ServiceConnect) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Native { - n += 2 - } - if m.SidecarService != nil { - l = m.SidecarService.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *ExposeConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Checks { - n += 2 - } - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *ExposePath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ListenerPort != 0 { - n += 1 + sovService(uint64(m.ListenerPort)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.LocalPathPort != 0 { - n += 1 + sovService(uint64(m.LocalPathPort)) - } - l = len(m.Protocol) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.ParsedFromCheck { - n += 2 - } - return n -} - -func (m *MeshGatewayConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Mode) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *TransparentProxyConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OutboundListenerPort != 0 { - n += 1 + sovService(uint64(m.OutboundListenerPort)) - } - if m.DialedDirectly { - n += 2 - } - return n -} - -func (m *ServiceDefinition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Kind) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - l = len(m.Address) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if len(m.Meta) > 0 { - for k, v := range m.Meta { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovService(uint64(len(k))) + 1 + len(v) + sovService(uint64(len(v))) - n += mapEntrySize + 1 + sovService(uint64(mapEntrySize)) - } - } - if m.Port != 0 { - n += 1 + sovService(uint64(m.Port)) - } - l = m.Check.Size() - n += 1 + l + sovService(uint64(l)) - if len(m.Checks) > 0 { - for _, e := range m.Checks { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - if m.Weights != nil { - l = m.Weights.Size() - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Token) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.EnableTagOverride { - n += 2 - } - if m.Proxy != nil { - l = m.Proxy.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.Connect != nil 
{ - l = m.Connect.Size() - n += 1 + l + sovService(uint64(l)) - } - if len(m.TaggedAddresses) > 0 { - for k, v := range m.TaggedAddresses { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovService(uint64(len(k))) + 1 + l + sovService(uint64(l)) - n += mapEntrySize + 2 + sovService(uint64(mapEntrySize)) - } - } - l = m.EnterpriseMeta.Size() - n += 2 + l + sovService(uint64(l)) - l = len(m.SocketPath) - if l > 0 { - n += 2 + l + sovService(uint64(l)) - } - return n -} - -func (m *ServiceAddress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Address) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.Port != 0 { - n += 1 + sovService(uint64(m.Port)) - } - return n -} - -func (m *Weights) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Passing != 0 { - n += 1 + sovService(uint64(m.Passing)) - } - if m.Warning != 0 { - n += 1 + sovService(uint64(m.Warning)) - } - return n -} - -func sovService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozService(x uint64) (n int) { - return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ConnectProxyConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConnectProxyConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConnectProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DestinationServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationServiceID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DestinationServiceID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalServiceAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalServiceAddress = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalServicePort", wireType) - } - m.LocalServicePort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocalServicePort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Config == nil { - m.Config = &types.Struct{} - } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Upstreams", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Upstreams = append(m.Upstreams, Upstream{}) - if err := m.Upstreams[len(m.Upstreams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MeshGateway", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MeshGateway.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expose", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
if err := m.Expose.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mode = github_com_hashicorp_consul_agent_structs.ProxyMode(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TransparentProxy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TransparentProxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalServiceSocketPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalServiceSocketPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Upstream) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Upstream: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Upstream: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DestinationType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DestinationNamespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DestinationName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalBindAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalBindAddress = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalBindPort", wireType) - } - m.LocalBindPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocalBindPort |= int32(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Config == nil { - m.Config = &types.Struct{} - } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MeshGateway", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MeshGateway.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CentrallyConfigured", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CentrallyConfigured = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalBindSocketPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalBindSocketPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalBindSocketMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalBindSocketMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationPartition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DestinationPartition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceConnect) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceConnect: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceConnect: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Native", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Native = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SidecarService", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SidecarService == nil { - m.SidecarService = &ServiceDefinition{} - } - if err := m.SidecarService.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExposeConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExposeConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
ExposeConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Checks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Checks = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, ExposePath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExposePath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExposePath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExposePath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ListenerPort", wireType) - } - m.ListenerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ListenerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalPathPort", wireType) - } - m.LocalPathPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocalPathPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ParsedFromCheck", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ParsedFromCheck = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MeshGatewayConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MeshGatewayConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MeshGatewayConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mode = github_com_hashicorp_consul_agent_structs.MeshGatewayMode(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransparentProxyConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransparentProxyConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransparentProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OutboundListenerPort", wireType) - } - m.OutboundListenerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OutboundListenerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DialedDirectly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DialedDirectly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceDefinition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceDefinition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = github_com_hashicorp_consul_agent_structs.ServiceKind(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthService - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthService - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthService - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthService - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Meta[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Check", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Check.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checks = append(m.Checks, &CheckType{}) - if err := m.Checks[len(m.Checks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Weights", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Weights == nil { - m.Weights = &Weights{} - } - if err := m.Weights.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableTagOverride", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnableTagOverride = bool(v != 0) - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proxy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Proxy == nil { - m.Proxy = &ConnectProxyConfig{} - } - if err := m.Proxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Connect", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Connect == nil { - m.Connect = &ServiceConnect{} - } - if err := m.Connect.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TaggedAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TaggedAddresses == nil { - m.TaggedAddresses = 
make(map[string]ServiceAddress) - } - var mapkey string - mapvalue := &ServiceAddress{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthService - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthService - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthService - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthService - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ServiceAddress{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.TaggedAddresses[mapkey] = *mapvalue - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnterpriseMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EnterpriseMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SocketPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SocketPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Weights) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Weights: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Weights: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Passing", wireType) - } - m.Passing = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Passing |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) - } - m.Warning = 0 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Warning |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbservice/service.proto b/proto/pbservice/service.proto index 360e3e3ff..64860625b 100644 --- a/proto/pbservice/service.proto +++ b/proto/pbservice/service.proto @@ -5,17 +5,9 @@ package pbservice; option go_package = "github.com/hashicorp/consul/proto/pbservice"; import "google/protobuf/struct.proto"; -import "proto/pbcommongogo/common.proto"; +import "proto/pbcommon/common.proto"; import "proto/pbservice/healthcheck.proto"; -// This fake import path is replaced by the build script with a versioned path -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_unkeyed_all) = false; -option (gogoproto.goproto_unrecognized_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_sizecache_all) = false; - // ConnectProxyConfig describes the configuration needed for any proxy managed // or unmanaged. It describes a single logical service's listener and optionally @@ -57,25 +49,26 @@ message ConnectProxyConfig { // Config is the arbitrary configuration data provided with the proxy // registration. // mog: func-to=ProtobufTypesStructToMapStringInterface func-from=MapStringInterfaceToProtobufTypesStruct - google.protobuf.Struct Config = 5 [(gogoproto.nullable) = true]; + google.protobuf.Struct Config = 5; // Upstreams describes any upstream dependencies the proxy instance should // setup. 
// mog: func-to=UpstreamsToStructs func-from=NewUpstreamsFromStructs - repeated Upstream Upstreams = 6 [(gogoproto.nullable) = false]; + repeated Upstream Upstreams = 6; // MeshGateway defines the mesh gateway configuration for upstreams - MeshGatewayConfig MeshGateway = 7 [(gogoproto.nullable) = false]; + MeshGatewayConfig MeshGateway = 7; // Expose defines whether checks or paths are exposed through the proxy - ExposeConfig Expose = 8 [(gogoproto.nullable) = false]; + ExposeConfig Expose = 8; // Mode represents how the proxy's inbound and upstream listeners are dialed. - string Mode = 9 [(gogoproto.casttype) = "github.com/hashicorp/consul/agent/structs.ProxyMode"]; + // mog: func-to=structs.ProxyMode func-from=string + string Mode = 9; // TransparentProxy defines configuration for when the proxy is in // transparent mode. - TransparentProxyConfig TransparentProxy = 10 [(gogoproto.nullable) = false]; + TransparentProxyConfig TransparentProxy = 10; // LocalServiceSocketPath is the path to the unix domain socket for the local service instance string LocalServiceSocketPath = 11; @@ -122,10 +115,10 @@ message Upstream { // It can be used to pass arbitrary configuration for this specific upstream // to the proxy. // mog: func-to=ProtobufTypesStructToMapStringInterface func-from=MapStringInterfaceToProtobufTypesStruct - google.protobuf.Struct Config = 7 [(gogoproto.nullable) = true]; + google.protobuf.Struct Config = 7; // MeshGateway is the configuration for mesh gateway usage of this upstream - MeshGatewayConfig MeshGateway = 8 [(gogoproto.nullable) = false]; + MeshGatewayConfig MeshGateway = 8; // CentrallyConfigured indicates whether the upstream was defined in a proxy // instance registration or whether it was generated from a config entry. @@ -174,7 +167,7 @@ message ExposeConfig { // Paths is the list of paths exposed through the proxy. 
// mog: func-to=ExposePathSliceToStructs func-from=NewExposePathSliceFromStructs - repeated ExposePath Paths = 2 [(gogoproto.nullable) = false]; + repeated ExposePath Paths = 2; } // mog annotation: @@ -208,7 +201,8 @@ message ExposePath { // output=service.gen.go // name=Structs message MeshGatewayConfig { - string Mode = 1 [(gogoproto.casttype) = "github.com/hashicorp/consul/agent/structs.MeshGatewayMode"]; + // mog: func-to=structs.MeshGatewayMode func-from=string + string Mode = 1; } // mog annotation: @@ -235,19 +229,20 @@ message TransparentProxyConfig { // output=service.gen.go // name=Structs message ServiceDefinition { - string Kind = 1 [(gogoproto.casttype) = "github.com/hashicorp/consul/agent/structs.ServiceKind"]; + // mog: func-to=structs.ServiceKind func-from=string + string Kind = 1; string ID = 2; string Name = 3; repeated string Tags = 4; string Address = 5; // mog: func-to=MapStringServiceAddressToStructs func-from=NewMapStringServiceAddressFromStructs - map<string, ServiceAddress> TaggedAddresses = 16 [(gogoproto.nullable) = false]; + map<string, ServiceAddress> TaggedAddresses = 16; map<string, string> Meta = 6; // mog: func-to=int func-from=int32 int32 Port = 7; // Path for socket string SocketPath = 18; - CheckType Check = 8 [(gogoproto.nullable) = false]; + CheckType Check = 8; // mog: func-to=CheckTypesToStructs func-from=NewCheckTypesFromStructs repeated CheckType Checks = 9; // mog: func-to=WeightsPtrToStructs func-from=NewWeightsPtrFromStructs @@ -269,7 +264,7 @@ message ServiceDefinition { ConnectProxyConfig Proxy = 14; // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs - commongogo.EnterpriseMeta EnterpriseMeta = 17 [(gogoproto.nullable) = false]; + common.EnterpriseMeta EnterpriseMeta = 17; // mog: func-to=ServiceConnectPtrToStructs func-from=NewServiceConnectPtrFromStructs ServiceConnect Connect = 15; diff --git a/proto/pbsubscribe/subscribe.pb.go b/proto/pbsubscribe/subscribe.pb.go index 98ad79087..9df5bba25 100644 --- a/proto/pbsubscribe/subscribe.pb.go +++ b/proto/pbsubscribe/subscribe.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbsubscribe/subscribe.proto package pbsubscribe @@ -11,9 +11,7 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - io "io" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used.
@@ -129,26 +127,18 @@ func (*SubscribeRequest) ProtoMessage() {} func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ab3eb8c810e315fb, []int{0} } + func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_SubscribeRequest.Unmarshal(m, b) } func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic) } func (m *SubscribeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SubscribeRequest.Merge(m, src) } func (m *SubscribeRequest) XXX_Size() int { - return m.Size() + return xxx_messageInfo_SubscribeRequest.Size(m) } func (m *SubscribeRequest) XXX_DiscardUnknown() { xxx_messageInfo_SubscribeRequest.DiscardUnknown(m) @@ -234,26 +224,18 @@ func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor_ab3eb8c810e315fb, []int{1} } + func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Event.Unmarshal(m, b) } func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Event.Marshal(b, m, deterministic) } func (m *Event) XXX_Merge(src proto.Message) { xxx_messageInfo_Event.Merge(m, src) } func (m *Event) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Event.Size(m) } func (m *Event) XXX_DiscardUnknown() { xxx_messageInfo_Event.DiscardUnknown(m) @@ -261,29 +243,40 @@ func (m *Event) XXX_DiscardUnknown() { var xxx_messageInfo_Event proto.InternalMessageInfo +func (m *Event) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + type isEvent_Payload interface { isEvent_Payload() - MarshalTo([]byte) (int, error) - Size() int } type Event_EndOfSnapshot struct { - EndOfSnapshot bool `protobuf:"varint,2,opt,name=EndOfSnapshot,proto3,oneof" json:"EndOfSnapshot,omitempty"` -} -type Event_NewSnapshotToFollow struct { - NewSnapshotToFollow bool `protobuf:"varint,3,opt,name=NewSnapshotToFollow,proto3,oneof" json:"NewSnapshotToFollow,omitempty"` -} -type Event_EventBatch struct { - EventBatch *EventBatch `protobuf:"bytes,4,opt,name=EventBatch,proto3,oneof" json:"EventBatch,omitempty"` -} -type Event_ServiceHealth struct { - ServiceHealth *ServiceHealthUpdate `protobuf:"bytes,10,opt,name=ServiceHealth,proto3,oneof" json:"ServiceHealth,omitempty"` + EndOfSnapshot bool `protobuf:"varint,2,opt,name=EndOfSnapshot,proto3,oneof"` } -func (*Event_EndOfSnapshot) isEvent_Payload() {} +type Event_NewSnapshotToFollow struct { + NewSnapshotToFollow bool `protobuf:"varint,3,opt,name=NewSnapshotToFollow,proto3,oneof"` +} + +type Event_EventBatch struct { + EventBatch *EventBatch `protobuf:"bytes,4,opt,name=EventBatch,proto3,oneof"` +} + +type Event_ServiceHealth struct { + ServiceHealth *ServiceHealthUpdate `protobuf:"bytes,10,opt,name=ServiceHealth,proto3,oneof"` +} + +func (*Event_EndOfSnapshot) isEvent_Payload() {} + func (*Event_NewSnapshotToFollow) isEvent_Payload() {} -func (*Event_EventBatch) isEvent_Payload() {} -func (*Event_ServiceHealth) isEvent_Payload() {} + +func 
(*Event_EventBatch) isEvent_Payload() {} + +func (*Event_ServiceHealth) isEvent_Payload() {} func (m *Event) GetPayload() isEvent_Payload { if m != nil { @@ -292,13 +285,6 @@ func (m *Event) GetPayload() isEvent_Payload { return nil } -func (m *Event) GetIndex() uint64 { - if m != nil { - return m.Index - } - return 0 -} - func (m *Event) GetEndOfSnapshot() bool { if x, ok := m.GetPayload().(*Event_EndOfSnapshot); ok { return x.EndOfSnapshot @@ -350,26 +336,18 @@ func (*EventBatch) ProtoMessage() {} func (*EventBatch) Descriptor() ([]byte, []int) { return fileDescriptor_ab3eb8c810e315fb, []int{2} } + func (m *EventBatch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_EventBatch.Unmarshal(m, b) } func (m *EventBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EventBatch.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_EventBatch.Marshal(b, m, deterministic) } func (m *EventBatch) XXX_Merge(src proto.Message) { xxx_messageInfo_EventBatch.Merge(m, src) } func (m *EventBatch) XXX_Size() int { - return m.Size() + return xxx_messageInfo_EventBatch.Size(m) } func (m *EventBatch) XXX_DiscardUnknown() { xxx_messageInfo_EventBatch.DiscardUnknown(m) @@ -398,26 +376,18 @@ func (*ServiceHealthUpdate) ProtoMessage() {} func (*ServiceHealthUpdate) Descriptor() ([]byte, []int) { return fileDescriptor_ab3eb8c810e315fb, []int{3} } + func (m *ServiceHealthUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ServiceHealthUpdate.Unmarshal(m, b) } func (m *ServiceHealthUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceHealthUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ServiceHealthUpdate.Marshal(b, m, deterministic) } func (m *ServiceHealthUpdate) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceHealthUpdate.Merge(m, src) } func (m *ServiceHealthUpdate) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ServiceHealthUpdate.Size(m) } func (m *ServiceHealthUpdate) XXX_DiscardUnknown() { xxx_messageInfo_ServiceHealthUpdate.DiscardUnknown(m) @@ -448,54 +418,54 @@ func init() { proto.RegisterType((*ServiceHealthUpdate)(nil), "subscribe.ServiceHealthUpdate") } -func init() { proto.RegisterFile("proto/pbsubscribe/subscribe.proto", fileDescriptor_ab3eb8c810e315fb) } +func init() { + proto.RegisterFile("proto/pbsubscribe/subscribe.proto", fileDescriptor_ab3eb8c810e315fb) +} var fileDescriptor_ab3eb8c810e315fb = []byte{ - // 550 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcb, 0x6e, 0xd3, 0x4c, - 0x14, 0xf6, 0xa4, 0x57, 0x9f, 0xfc, 0xad, 0xfc, 0x4f, 0x83, 0xb0, 0x52, 0x64, 0x85, 0x08, 0x55, - 0xa1, 0x12, 0x31, 0x0a, 0x12, 0xec, 0x40, 0x24, 0x6d, 0x09, 0x42, 0x4a, 0x2a, 0xa7, 0x5d, 0xc0, - 0x6e, 0x62, 0x1f, 0x62, 0x2b, 0xee, 0x8c, 0xb1, 0x27, 0x0d, 0xdd, 0xc3, 0x3b, 0xf0, 0x48, 0x2c, - 0x59, 0xf0, 0x00, 0x28, 0xbc, 0x08, 0xf2, 0xc4, 0x71, 0x9c, 0xa4, 0xbb, 0x39, 0xdf, 0x65, 0xce, - 0xcc, 0xb9, 0xc0, 0xe3, 0x28, 0x16, 0x52, 0xd8, 0xd1, 0x30, 0x99, 0x0c, 0x13, 0x37, 0x0e, 0x86, - 0x68, 0xe7, 0xa7, 0xa6, 0xe2, 0xa8, 0x9e, 0x03, 0xd5, 0x6a, 0xae, 0xc6, 0xf8, 0x36, 
0x70, 0xd1, - 0xe6, 0xc2, 0xcb, 0x64, 0xf5, 0xdf, 0x04, 0x8c, 0xc1, 0x42, 0xe9, 0xe0, 0x97, 0x09, 0x26, 0x92, - 0x9e, 0xc0, 0xce, 0x95, 0x88, 0x02, 0xd7, 0x24, 0x35, 0xd2, 0x38, 0x6c, 0x19, 0xcd, 0xe5, 0xe5, - 0x0a, 0x77, 0xe6, 0x34, 0x35, 0x60, 0xeb, 0x03, 0xde, 0x99, 0xa5, 0x1a, 0x69, 0xe8, 0x4e, 0x7a, - 0xa4, 0x95, 0xd4, 0x39, 0x46, 0x6e, 0x6e, 0x29, 0x6c, 0x1e, 0xa4, 0xe8, 0x7b, 0xee, 0xe1, 0x57, - 0x73, 0xbb, 0x46, 0x1a, 0xdb, 0xce, 0x3c, 0xa0, 0x16, 0xc0, 0x19, 0x93, 0xcc, 0x45, 0x2e, 0x31, - 0x36, 0x77, 0x94, 0xa1, 0x80, 0xd0, 0x47, 0xa0, 0xf7, 0xd8, 0x0d, 0x26, 0x11, 0x73, 0xd1, 0xdc, - 0x55, 0xf4, 0x12, 0x48, 0xd9, 0x4b, 0x16, 0xcb, 0x40, 0x06, 0x82, 0x9b, 0x7b, 0x73, 0x36, 0x07, - 0xea, 0xdf, 0x4b, 0xb0, 0x73, 0x7e, 0x8b, 0x5c, 0x2e, 0x73, 0x93, 0x62, 0xee, 0x13, 0x38, 0x38, - 0xe7, 0x5e, 0xff, 0xf3, 0x80, 0xb3, 0x28, 0xf1, 0x85, 0x54, 0x7f, 0xd8, 0xef, 0x6a, 0xce, 0x2a, - 0x4c, 0x5b, 0x70, 0xd4, 0xc3, 0xe9, 0x22, 0xbc, 0x12, 0x17, 0x22, 0x0c, 0xc5, 0x54, 0xfd, 0x2e, - 0x55, 0xdf, 0x47, 0xd2, 0x57, 0x00, 0x2a, 0x75, 0x9b, 0x49, 0xd7, 0x57, 0x5f, 0x2e, 0xb7, 0x1e, - 0x14, 0x4a, 0xb8, 0x24, 0xbb, 0x9a, 0x53, 0x90, 0xd2, 0x0b, 0x38, 0x18, 0xcc, 0x3b, 0xd4, 0x45, - 0x16, 0x4a, 0xdf, 0x04, 0xe5, 0xb5, 0x0a, 0xde, 0x15, 0xfe, 0x3a, 0xf2, 0x98, 0xc4, 0xf4, 0xd1, - 0x2b, 0x70, 0x5b, 0x87, 0xbd, 0x4b, 0x76, 0x17, 0x0a, 0xe6, 0xd5, 0x5f, 0x16, 0xdf, 0x42, 0x1b, - 0xb0, 0xab, 0xa2, 0xc4, 0x24, 0xb5, 0xad, 0x46, 0x79, 0xa5, 0xb1, 0x8a, 0x70, 0x32, 0xbe, 0xfe, - 0x8d, 0xc0, 0xd1, 0x3d, 0xb9, 0xe8, 0x13, 0x28, 0xf5, 0xa3, 0x6c, 0x2c, 0x2a, 0x05, 0x77, 0x87, - 0x49, 0x16, 0x8a, 0x51, 0x3f, 0x72, 0x4a, 0xfd, 0x88, 0xbe, 0x03, 0xa3, 0xe3, 0xa3, 0x3b, 0xce, - 0x6e, 0xe8, 0x09, 0x0f, 0x55, 0x81, 0xcb, 0xad, 0xe3, 0x66, 0x3e, 0x85, 0xcd, 0x75, 0x89, 0xb3, - 0x61, 0x3a, 0x7d, 0x9b, 0x0d, 0x22, 0x2d, 0xc3, 0xde, 0x35, 0x1f, 0x73, 0x31, 0xe5, 0x86, 0x46, - 0xff, 0x5f, 0xab, 0x93, 0x41, 0xa8, 0x09, 0x95, 0x15, 0xa8, 0x23, 0x38, 0x47, 0x57, 0x1a, 0xa5, - 0xd3, 0xa7, 0xa0, 0xe7, 0x8f, 0xa3, 0xff, 0xc1, 0xbe, 0x83, 0xa3, 0x20, 0x91, 0x18, 0x1b, 0x1a, - 0x3d, 0x04, 0x38, 0xc3, 0x78, 0x11, 0x93, 0xd6, 0x47, 0x78, 0x38, 0x90, 0x4c, 0x62, 0xc7, 0x67, - 0x7c, 0x84, 0xd9, 0x56, 0x44, 0xe9, 0x3c, 0xd1, 0xd7, 0xa0, 0xe7, 0x5b, 0x42, 0x8f, 0x8b, 0x0d, - 0x59, 0xdb, 0x9d, 0xea, 0x46, 0x4d, 0xeb, 0xda, 0x73, 0xd2, 0x7e, 0xf3, 0x73, 0x66, 0x91, 0x5f, - 0x33, 0x8b, 0xfc, 0x99, 0x59, 0xe4, 0xc7, 0x5f, 0x4b, 0xfb, 0xf4, 0x6c, 0x14, 0x48, 0x7f, 0x32, - 0x6c, 0xba, 0xe2, 0xc6, 0xf6, 0x59, 0xe2, 0x07, 0xae, 0x88, 0x23, 0xdb, 0x15, 0x3c, 0x99, 0x84, - 0xf6, 0xc6, 0x7a, 0x0f, 0x77, 0x15, 0xf4, 0xe2, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xb6, - 0x48, 0xa0, 0xfa, 0x03, 0x00, 0x00, + // 527 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6f, 0xda, 0x3c, + 0x14, 0xc6, 0xb4, 0x85, 0xe6, 0xf0, 0xb6, 0xca, 0xeb, 0x32, 0x2d, 0xa2, 0x53, 0xc5, 0xd0, 0x54, + 0xb1, 0x4a, 0x23, 0x13, 0x93, 0xb6, 0xbb, 0x49, 0x83, 0xb6, 0x63, 0x9a, 0x04, 0x55, 0x68, 0x2f, + 0xb6, 0x3b, 0xe3, 0x9c, 0x91, 0x88, 0xd4, 0xf6, 0x12, 0x53, 0xd6, 0xfb, 0xed, 0x1f, 0xee, 0x07, + 0x4d, 0x31, 0x21, 0x04, 0xe8, 0x9d, 0xcf, 0xf3, 0xe1, 0x63, 0x9f, 0x0f, 0x78, 0xa9, 0x62, 0xa9, + 0xa5, 0xab, 0x26, 0xc9, 0x7c, 0x92, 0xf0, 0x38, 0x9c, 0xa0, 0x9b, 0x9f, 0x3a, 0x86, 0xa3, 0x56, + 0x0e, 0x34, 0x1a, 0xb9, 0x1a, 0xe3, 0x87, 0x90, 0xa3, 0x2b, 0xa4, 0x9f, 0xc9, 0x5a, 0x7f, 0x09, + 0xd8, 0xe3, 0x95, 0xd2, 0xc3, 0x9f, 0x73, 0x4c, 0x34, 0x3d, 0x87, 0x83, 0x5b, 0xa9, 0x42, 0xee, + 0x90, 0x26, 0x69, 0x1f, 
0x77, 0xed, 0xce, 0xfa, 0x72, 0x83, 0x7b, 0x4b, 0x9a, 0xda, 0xb0, 0xf7, + 0x15, 0x1f, 0x9d, 0x72, 0x93, 0xb4, 0x2d, 0x2f, 0x3d, 0xd2, 0x7a, 0xea, 0x9c, 0xa1, 0x70, 0xf6, + 0x0c, 0xb6, 0x0c, 0x52, 0xf4, 0x8b, 0xf0, 0xf1, 0x97, 0xb3, 0xdf, 0x24, 0xed, 0x7d, 0x6f, 0x19, + 0xd0, 0x33, 0x80, 0x4b, 0xa6, 0x19, 0x47, 0xa1, 0x31, 0x76, 0x0e, 0x8c, 0xa1, 0x80, 0xd0, 0x17, + 0x60, 0x0d, 0xd9, 0x3d, 0x26, 0x8a, 0x71, 0x74, 0x2a, 0x86, 0x5e, 0x03, 0x29, 0x7b, 0xc3, 0x62, + 0x1d, 0xea, 0x50, 0x0a, 0xa7, 0xba, 0x64, 0x73, 0xa0, 0xf5, 0xa7, 0x0c, 0x07, 0x57, 0x0f, 0x28, + 0xf4, 0x3a, 0x37, 0x29, 0xe6, 0x3e, 0x87, 0xa3, 0x2b, 0xe1, 0x8f, 0x7e, 0x8c, 0x05, 0x53, 0x49, + 0x20, 0xb5, 0xf9, 0xc3, 0xe1, 0xa0, 0xe4, 0x6d, 0xc2, 0xb4, 0x0b, 0x27, 0x43, 0x5c, 0xac, 0xc2, + 0x5b, 0x79, 0x2d, 0xa3, 0x48, 0x2e, 0xcc, 0xef, 0x52, 0xf5, 0x53, 0x24, 0xfd, 0x00, 0x60, 0x52, + 0xf7, 0x98, 0xe6, 0x81, 0xf9, 0x72, 0xad, 0xfb, 0xac, 0x50, 0xc2, 0x35, 0x39, 0x28, 0x79, 0x05, + 0x29, 0xbd, 0x86, 0xa3, 0xf1, 0xb2, 0x43, 0x03, 0x64, 0x91, 0x0e, 0x1c, 0x30, 0xde, 0xb3, 0x82, + 0x77, 0x83, 0xbf, 0x53, 0x3e, 0xd3, 0x98, 0x3e, 0x7a, 0x03, 0xee, 0x59, 0x50, 0xbd, 0x61, 0x8f, + 0x91, 0x64, 0x7e, 0xeb, 0x7d, 0xf1, 0x2d, 0xb4, 0x0d, 0x15, 0x13, 0x25, 0x0e, 0x69, 0xee, 0xb5, + 0x6b, 0x1b, 0x8d, 0x35, 0x84, 0x97, 0xf1, 0xad, 0xdf, 0x04, 0x4e, 0x9e, 0xc8, 0x45, 0x5f, 0x41, + 0x79, 0xa4, 0xb2, 0xb1, 0xa8, 0x17, 0xdc, 0x7d, 0xa6, 0x59, 0x24, 0xa7, 0x23, 0xe5, 0x95, 0x47, + 0x8a, 0x7e, 0x06, 0xbb, 0x1f, 0x20, 0x9f, 0x65, 0x37, 0x0c, 0xa5, 0x8f, 0xa6, 0xc0, 0xb5, 0xee, + 0x69, 0x27, 0x9f, 0xc2, 0xce, 0xb6, 0xc4, 0xdb, 0x31, 0x5d, 0x7c, 0xca, 0x06, 0x91, 0xd6, 0xa0, + 0x7a, 0x27, 0x66, 0x42, 0x2e, 0x84, 0x5d, 0xa2, 0xff, 0x6f, 0xd5, 0xc9, 0x26, 0xd4, 0x81, 0xfa, + 0x06, 0xd4, 0x97, 0x42, 0x20, 0xd7, 0x76, 0xf9, 0xe2, 0x35, 0x58, 0xf9, 0xe3, 0xe8, 0x7f, 0x70, + 0xe8, 0xe1, 0x34, 0x4c, 0x34, 0xc6, 0x76, 0x89, 0x1e, 0x03, 0x5c, 0x62, 0xbc, 0x8a, 0x49, 0xf7, + 0x1b, 0x3c, 0x1f, 0x6b, 0xa6, 0xb1, 0x1f, 0x30, 0x31, 0xc5, 0x6c, 0x2b, 0x54, 0x3a, 0x4f, 0xf4, + 0x23, 0x58, 0xf9, 0x96, 0xd0, 0xd3, 0x62, 0x43, 0xb6, 0x76, 0xa7, 0xb1, 0x53, 0xd3, 0x56, 0xe9, + 0x2d, 0xe9, 0xb9, 0xdf, 0xdf, 0x4c, 0x43, 0x1d, 0xcc, 0x27, 0x1d, 0x2e, 0xef, 0xdd, 0x80, 0x25, + 0x41, 0xc8, 0x65, 0xac, 0x5c, 0x2e, 0x45, 0x32, 0x8f, 0xdc, 0x9d, 0x75, 0x9e, 0x54, 0x0c, 0xf4, + 0xee, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa0, 0xb3, 0x69, 0x51, 0xea, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // StateChangeSubscriptionClient is the client API for StateChangeSubscription service. 
// @@ -524,10 +494,10 @@ type StateChangeSubscriptionClient interface { } type stateChangeSubscriptionClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewStateChangeSubscriptionClient(cc *grpc.ClientConn) StateChangeSubscriptionClient { +func NewStateChangeSubscriptionClient(cc grpc.ClientConnInterface) StateChangeSubscriptionClient { return &stateChangeSubscriptionClient{cc} } @@ -633,1136 +603,3 @@ var _StateChangeSubscription_serviceDesc = grpc.ServiceDesc{ }, Metadata: "proto/pbsubscribe/subscribe.proto", } - -func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubscribeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Partition) > 0 { - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintSubscribe(dAtA, i, uint64(len(m.Partition))) - i-- - dAtA[i] = 0x3a - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintSubscribe(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x32 - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintSubscribe(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0x2a - } - if m.Index != 0 { - i = encodeVarintSubscribe(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x20 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintSubscribe(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSubscribe(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if m.Topic != 0 { - i = encodeVarintSubscribe(dAtA, i, uint64(m.Topic)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Payload != nil { - { - size := m.Payload.Size() - i -= size - if _, err := m.Payload.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Index != 0 { - i = encodeVarintSubscribe(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Event_EndOfSnapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event_EndOfSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i-- - if m.EndOfSnapshot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *Event_NewSnapshotToFollow) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*Event_NewSnapshotToFollow) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i-- - if m.NewSnapshotToFollow { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - return len(dAtA) - i, nil -} -func (m *Event_EventBatch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event_EventBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EventBatch != nil { - { - size, err := m.EventBatch.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSubscribe(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *Event_ServiceHealth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event_ServiceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ServiceHealth != nil { - { - size, err := m.ServiceHealth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSubscribe(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - return len(dAtA) - i, nil -} -func (m *EventBatch) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventBatch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSubscribe(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ServiceHealthUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceHealthUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceHealthUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.CheckServiceNode != nil { - { - size, err := m.CheckServiceNode.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSubscribe(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Op != 0 { - i = encodeVarintSubscribe(dAtA, i, uint64(m.Op)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintSubscribe(dAtA []byte, offset int, v uint64) int { - offset -= sovSubscribe(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SubscribeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Topic != 0 { - n += 1 + sovSubscribe(uint64(m.Topic)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSubscribe(uint64(l)) - } - l = len(m.Token) - if l > 0 { - 
n += 1 + l + sovSubscribe(uint64(l)) - } - if m.Index != 0 { - n += 1 + sovSubscribe(uint64(m.Index)) - } - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovSubscribe(uint64(l)) - } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovSubscribe(uint64(l)) - } - l = len(m.Partition) - if l > 0 { - n += 1 + l + sovSubscribe(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Index != 0 { - n += 1 + sovSubscribe(uint64(m.Index)) - } - if m.Payload != nil { - n += m.Payload.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Event_EndOfSnapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} -func (m *Event_NewSnapshotToFollow) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} -func (m *Event_EventBatch) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EventBatch != nil { - l = m.EventBatch.Size() - n += 1 + l + sovSubscribe(uint64(l)) - } - return n -} -func (m *Event_ServiceHealth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ServiceHealth != nil { - l = m.ServiceHealth.Size() - n += 1 + l + sovSubscribe(uint64(l)) - } - return n -} -func (m *EventBatch) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovSubscribe(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ServiceHealthUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Op != 0 { - n += 1 + sovSubscribe(uint64(m.Op)) - } - if m.CheckServiceNode != nil { - l = m.CheckServiceNode.Size() - n += 1 + l + sovSubscribe(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSubscribe(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSubscribe(x uint64) (n int) { - return sovSubscribe(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *SubscribeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubscribeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) - } - m.Topic = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Topic |= Topic(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Partition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex 
- default: - iNdEx = preIndex - skippy, err := skipSubscribe(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscribe - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndOfSnapshot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Payload = &Event_EndOfSnapshot{b} - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NewSnapshotToFollow", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Payload = &Event_NewSnapshotToFollow{b} - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventBatch", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &EventBatch{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &Event_EventBatch{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceHealth", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ServiceHealthUpdate{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &Event_ServiceHealth{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSubscribe(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscribe - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventBatch) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventBatch: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventBatch: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSubscribe(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscribe - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceHealthUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceHealthUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceHealthUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - m.Op = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Op |= CatalogOp(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckServiceNode", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubscribe - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSubscribe - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSubscribe - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CheckServiceNode == nil { - m.CheckServiceNode = &pbservice.CheckServiceNode{} - } - if err := m.CheckServiceNode.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSubscribe(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSubscribe - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSubscribe(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscribe - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscribe - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSubscribe - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSubscribe - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSubscribe - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSubscribe - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSubscribe = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSubscribe = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSubscribe = fmt.Errorf("proto: unexpected end of group") -) From 6553bf4a2a0fdc93953cfe6dcc5abbbfe999b68f Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Wed, 23 Mar 2022 10:22:08 -0700 Subject: [PATCH 011/785] Lkysow/docs updates 2 (#12604) * Document intermediate_cert_ttl --- website/content/docs/agent/options.mdx | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/website/content/docs/agent/options.mdx b/website/content/docs/agent/options.mdx index 452b2e51c..11eae98ae 100644 --- a/website/content/docs/agent/options.mdx +++ b/website/content/docs/agent/options.mdx @@ -1408,19 +1408,25 @@ There are also a number of common configuration options supported by all provide if servers have more than one CPU core. Setting this to zero disables rate limiting. Added in 1.4.1. - - `leaf_cert_ttl` ((#ca_leaf_cert_ttl)) The upper bound on the lease - duration of a leaf certificate issued for a service. In most cases a new leaf + - `leaf_cert_ttl` ((#ca_leaf_cert_ttl)) Specifies the upper bound on the expiry + of a leaf certificate issued for a service. In most cases a new leaf certificate will be requested by a proxy before this limit is reached. This is also the effective limit on how long a server outage can last (with no leader) before network connections will start being rejected. Defaults to `72h`. - This value cannot be lower than 1 hour or higher than 1 year. + + You can specify a range from one hour (minimum) up to one year (maximum) using + the following units: `h`, `m`, `s`, `ms`, `us` (or `µs`), `ns`, or a combination + of those units, e.g. `1h5m`. This value is also used when rotating out old root certificates from the cluster. When a root certificate has been inactive (rotated out) for more than twice the _current_ `leaf_cert_ttl`, it will be removed from the trusted list. 
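As a quick illustration of the duration syntax above together with the TTL options documented in this section, a CA configuration might look like the following. The nesting under `connect.ca_config` is assumed here for illustration and is not shown in this hunk; the values simply restate the defaults quoted in this section and respect its constraints (the intermediate TTL is at least three times the leaf TTL, and the root TTL is higher than the intermediate TTL):

```json
{
  "connect": {
    "ca_config": {
      "leaf_cert_ttl": "72h",
      "intermediate_cert_ttl": "8760h",
      "root_cert_ttl": "87600h"
    }
  }
}
```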
- - `root_cert_ttl` ((#ca_root_cert_ttl)) The time to live (TTL) for a root certificate. + - `intermediate_cert_ttl` ((#ca_intermediate_cert_ttl)) Specifies the expiry for the + intermediate certificates. Defaults to `8760h` (1 year). Must be at least 3 times `leaf_cert_ttl`. + + - `root_cert_ttl` ((#ca_root_cert_ttl)) Specifies the expiry for a root certificate. Defaults to 10 years as `87600h`. This value, if provided, needs to be higher than the intermediate certificate TTL. @@ -2212,7 +2218,11 @@ There are also a number of common configuration options supported by all provide ```json { "telemetry": { - "prefix_filter": ["+consul.raft.apply", "-consul.http", "+consul.http.GET"] + "prefix_filter": [ + "+consul.raft.apply", + "-consul.http", + "+consul.http.GET" + ] } } ``` From 1b654c98071ba84e392a6ea4fb154a06779eb15e Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 23 Mar 2022 12:47:12 -0700 Subject: [PATCH 012/785] Clean up ent meta id usage in overview summary --- agent/consul/internal_endpoint_test.go | 11 ++--------- agent/consul/server_overview.go | 25 +++++++++++++++---------- agent/structs/structs_oss.go | 4 ---- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index 25c9c75f4..e639c003f 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -2485,13 +2485,10 @@ func TestInternal_CatalogOverview(t *testing.T) { } t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { + _, s1 := testServerWithConfig(t, func(c *Config) { c.MetricsReportingInterval = 100 * time.Millisecond }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() codec := rpcClient(t, s1) - defer codec.Close() testrpc.WaitForLeader(t, s1.RPC, "dc1") @@ -2540,17 +2537,13 @@ func TestInternal_CatalogOverview_ACLDeny(t *testing.T) { t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { + _, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLInitialManagementToken = TestDefaultInitialManagementToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() testrpc.WaitForLeader(t, s1.RPC, "dc1") diff --git a/agent/consul/server_overview.go b/agent/consul/server_overview.go index 149743d3f..7417f8032 100644 --- a/agent/consul/server_overview.go +++ b/agent/consul/server_overview.go @@ -69,11 +69,15 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar serviceInstanceChecks := make(map[string][]*structs.HealthCheck) checkSummaries := make(map[string]structs.HealthSummary) + entMetaIDString := func(id string, entMeta structs.EnterpriseMeta) string { + return fmt.Sprintf("%s/%s/%s", id, entMeta.PartitionOrEmpty(), entMeta.NamespaceOrEmpty()) + } + // Compute the health check summaries by taking the pass/warn/fail counts // of each unique part/ns/checkname combo and storing them. Also store the // per-node and per-service instance checks for their respective summaries below. 
for _, check := range catalog.Checks { - checkID := fmt.Sprintf("%s/%s", check.EnterpriseMeta.String(), check.Name) + checkID := entMetaIDString(check.Name, check.EnterpriseMeta) summary, ok := checkSummaries[checkID] if !ok { summary = structs.HealthSummary{ @@ -86,11 +90,10 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar checkSummaries[checkID] = summary if check.ServiceID != "" { - serviceInstanceID := fmt.Sprintf("%s/%s/%s", check.EnterpriseMeta.String(), check.Node, check.ServiceID) + serviceInstanceID := entMetaIDString(fmt.Sprintf("%s/%s", check.Node, check.ServiceID), check.EnterpriseMeta) serviceInstanceChecks[serviceInstanceID] = append(serviceInstanceChecks[serviceInstanceID], check) } else { - nodeMeta := check.NodeIdentity().EnterpriseMeta - nodeID := fmt.Sprintf("%s/%s", nodeMeta.String(), check.Node) + nodeID := structs.NodeNameString(check.Node, &check.EnterpriseMeta) nodeChecks[nodeID] = append(nodeChecks[nodeID], check) } } @@ -110,7 +113,7 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar } // Compute whether this service instance is healthy based on its associated checks. - serviceInstanceID := fmt.Sprintf("%s/%s/%s", svc.EnterpriseMeta.String(), svc.Node, svc.ServiceID) + serviceInstanceID := entMetaIDString(fmt.Sprintf("%s/%s", svc.Node, svc.ServiceID), svc.EnterpriseMeta) status := api.HealthPassing for _, checks := range serviceInstanceChecks[serviceInstanceID] { if checks.Status == api.HealthWarning && status == api.HealthPassing { @@ -130,8 +133,7 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar // each partition. nodeSummaries := make(map[string]structs.HealthSummary) for _, node := range catalog.Nodes { - nodeMeta := structs.NodeEnterpriseMetaInPartition(node.Partition) - summary, ok := nodeSummaries[nodeMeta.String()] + summary, ok := nodeSummaries[node.Partition] if !ok { summary = structs.HealthSummary{ EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(node.Partition), @@ -140,7 +142,7 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar // Compute whether this node is healthy based on its associated checks. status := api.HealthPassing - nodeID := fmt.Sprintf("%s/%s", nodeMeta.String(), node.Node) + nodeID := structs.NodeNameString(node.Node, structs.NodeEnterpriseMetaInPartition(node.Partition)) for _, checks := range nodeChecks[nodeID] { if checks.Status == api.HealthWarning && status == api.HealthPassing { status = api.HealthWarning @@ -151,7 +153,7 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar } summary.Add(status) - nodeSummaries[nodeMeta.String()] = summary + nodeSummaries[node.Partition] = summary } // Construct the summary. 
@@ -171,7 +173,10 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar if slice[i].Name < slice[j].Name { return true } - return slice[i].EnterpriseMeta.String() < slice[j].EnterpriseMeta.String() + if slice[i].NamespaceOrEmpty() < slice[j].NamespaceOrEmpty() { + return true + } + return slice[i].PartitionOrEmpty() < slice[j].PartitionOrEmpty() } } sort.Slice(summary.Nodes, summarySort(summary.Nodes)) diff --git a/agent/structs/structs_oss.go b/agent/structs/structs_oss.go index 7f56c4355..669361802 100644 --- a/agent/structs/structs_oss.go +++ b/agent/structs/structs_oss.go @@ -15,10 +15,6 @@ var emptyEnterpriseMeta = EnterpriseMeta{} // EnterpriseMeta stub type EnterpriseMeta struct{} -func (m *EnterpriseMeta) String() string { - return "" -} - func (m *EnterpriseMeta) ToEnterprisePolicyMeta() *acl.EnterprisePolicyMeta { return nil } From 8f98bbda752a434b541dd2524aa4c9bd76b858e6 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Wed, 23 Mar 2022 13:29:12 -0700 Subject: [PATCH 013/785] [metrics][rpc]: add basic prefix filter test for new rpc metric (#12598) Signed-off-by: FFMMM --- agent/metrics_test.go | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/agent/metrics_test.go b/agent/metrics_test.go index 9b5e84500..e0d088cfa 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -66,7 +66,7 @@ func assertMetricNotExists(t *testing.T, respRec *httptest.ResponseRecorder, met } } -// TestAgent_NewRPCMetrics test for the new RPC metrics presence. These are the labeled metrics coming from +// TestAgent_NewRPCMetrics test for the new RPC metrics. These are the labeled metrics coming from // agent.rpc.middleware.interceptors package. func TestAgent_NewRPCMetrics(t *testing.T) { skipIfShortTesting(t) @@ -94,6 +94,30 @@ func TestAgent_NewRPCMetrics(t *testing.T) { assertMetricExists(t, respRec, metricsPrefix+"_rpc_server_call") }) + + t.Run("Check that new rpc metrics can be filtered out", func(t *testing.T) { + metricsPrefix := "new_rpc_metrics_2" + hcl := fmt.Sprintf(` + telemetry = { + prometheus_retention_time = "5s" + disable_hostname = true + metrics_prefix = "%s" + prefix_filter = ["-%s.rpc.server.call"] + } + `, metricsPrefix, metricsPrefix) + + a := StartTestAgent(t, TestAgent{HCL: hcl}) + defer a.Shutdown() + + var out struct{} + err := a.RPC("Status.Ping", struct{}{}, &out) + require.NoError(t, err) + + respRec := httptest.NewRecorder() + recordPromMetrics(t, a, respRec) + + assertMetricNotExists(t, respRec, metricsPrefix+"_rpc_server_call") + }) } // TestHTTPHandlers_AgentMetrics_ConsulAutopilot_Prometheus adds testing around From 96e0d8fd0d8e987043707d8f556aff2203bf2ee6 Mon Sep 17 00:00:00 2001 From: Riddhi Shah Date: Thu, 24 Mar 2022 16:55:05 +0530 Subject: [PATCH 014/785] ACL pkg updates to support Agentless RPCs For many of the new RPCs that will be added in Consul servers for Agentless work, the ACL token will need to be authorized for service:write on any service in any namespace in any partition. The ACL package updates are to make ServiceWriteAny related helpers available on the different authorizers. 
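The new helpers are thin wrappers, so a short usage sketch may help place them: the function below (hypothetical name, package, and error message, not part of the patch) shows how a server-side RPC handler could gate an Agentless request on the `ServiceWriteAny` authorizer method added in this commit.

```go
package agentlessrpc // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

// authorizeAnyServiceWrite is an illustrative pre-flight check: the caller's
// token must grant service:write on at least one service, in any namespace or
// partition, without caring which one.
func authorizeAnyServiceWrite(authz acl.Authorizer, entCtx *acl.AuthorizerContext) error {
	// ServiceWriteAny returns acl.Allow only when some service rule in the
	// token's policies grants write access.
	if authz.ServiceWriteAny(entCtx) != acl.Allow {
		return fmt.Errorf("permission denied: token needs service:write on at least one service")
	}
	return nil
}
```

Handlers that already work with `acl.AllowAuthorizer` can call the equivalent `ServiceWriteAnyAllowed`, which returns a descriptive ACL error instead of an `EnforcementDecision`.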
--- acl/acl_test.go | 12 ++++++++++++ acl/authorizer.go | 11 +++++++++++ acl/authorizer_test.go | 6 ++++++ acl/chained_authorizer.go | 7 +++++++ acl/chained_authorizer_test.go | 3 +++ acl/policy_authorizer.go | 2 +- acl/policy_authorizer_test.go | 2 ++ acl/static_authorizer.go | 7 +++++++ 8 files changed, 49 insertions(+), 1 deletion(-) diff --git a/acl/acl_test.go b/acl/acl_test.go index 3bbfed25e..3ce0fa59b 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -145,6 +145,10 @@ func checkAllowServiceWrite(t *testing.T, authz Authorizer, prefix string, entCt require.Equal(t, Allow, authz.ServiceWrite(prefix, entCtx)) } +func checkAllowServiceWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.ServiceWriteAny(entCtx)) +} + func checkAllowSessionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Allow, authz.SessionRead(prefix, entCtx)) } @@ -265,6 +269,10 @@ func checkDenyServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx require.Equal(t, Deny, authz.ServiceWrite(prefix, entCtx)) } +func checkDenyServiceWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Deny, authz.ServiceWriteAny(entCtx)) +} + func checkDenySessionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Deny, authz.SessionRead(prefix, entCtx)) } @@ -385,6 +393,10 @@ func checkDefaultServiceWrite(t *testing.T, authz Authorizer, prefix string, ent require.Equal(t, Default, authz.ServiceWrite(prefix, entCtx)) } +func checkDefaultServiceWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.ServiceWriteAny(entCtx)) +} + func checkDefaultSessionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Default, authz.SessionRead(prefix, entCtx)) } diff --git a/acl/authorizer.go b/acl/authorizer.go index 7dc961c57..dfe2eda1d 100644 --- a/acl/authorizer.go +++ b/acl/authorizer.go @@ -149,6 +149,9 @@ type Authorizer interface { // service ServiceWrite(string, *AuthorizerContext) EnforcementDecision + // ServiceWriteAny checks for write permission on any service + ServiceWriteAny(*AuthorizerContext) EnforcementDecision + // SessionRead checks for permission to read sessions for a given node. SessionRead(string, *AuthorizerContext) EnforcementDecision @@ -411,6 +414,14 @@ func (a AllowAuthorizer) ServiceWriteAllowed(name string, ctx *AuthorizerContext return nil } +// ServiceWriteAnyAllowed checks for write permission on any service +func (a AllowAuthorizer) ServiceWriteAnyAllowed(ctx *AuthorizerContext) error { + if a.Authorizer.ServiceWriteAny(ctx) != Allow { + return PermissionDeniedByACL(a, ctx, ResourceService, AccessWrite, "any service") + } + return nil +} + // SessionReadAllowed checks for permission to read sessions for a given node. 
func (a AllowAuthorizer) SessionReadAllowed(name string, ctx *AuthorizerContext) error { if a.Authorizer.SessionRead(name, ctx) != Allow { diff --git a/acl/authorizer_test.go b/acl/authorizer_test.go index 63eb57fd7..b8f4d21c1 100644 --- a/acl/authorizer_test.go +++ b/acl/authorizer_test.go @@ -185,6 +185,12 @@ func (m *mockAuthorizer) ServiceWrite(segment string, ctx *AuthorizerContext) En return ret.Get(0).(EnforcementDecision) } +// ServiceWriteAny checks for service:write on any service +func (m *mockAuthorizer) ServiceWriteAny(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + // SessionRead checks for permission to read sessions for a given node. func (m *mockAuthorizer) SessionRead(segment string, ctx *AuthorizerContext) EnforcementDecision { ret := m.Called(segment, ctx) diff --git a/acl/chained_authorizer.go b/acl/chained_authorizer.go index f0d7fc329..77df69a3e 100644 --- a/acl/chained_authorizer.go +++ b/acl/chained_authorizer.go @@ -235,6 +235,13 @@ func (c *ChainedAuthorizer) ServiceWrite(name string, entCtx *AuthorizerContext) }) } +// ServiceWriteAny checks for write permission on any service +func (c *ChainedAuthorizer) ServiceWriteAny(entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.ServiceWriteAny(entCtx) + }) +} + // SessionRead checks for permission to read sessions for a given node. func (c *ChainedAuthorizer) SessionRead(node string, entCtx *AuthorizerContext) EnforcementDecision { return c.executeChain(func(authz Authorizer) EnforcementDecision { diff --git a/acl/chained_authorizer_test.go b/acl/chained_authorizer_test.go index f6ca7184d..5f33d0166 100644 --- a/acl/chained_authorizer_test.go +++ b/acl/chained_authorizer_test.go @@ -89,6 +89,9 @@ func (authz testAuthorizer) ServiceReadAll(*AuthorizerContext) EnforcementDecisi func (authz testAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } +func (authz testAuthorizer) ServiceWriteAny(*AuthorizerContext) EnforcementDecision { + return EnforcementDecision(authz) +} func (authz testAuthorizer) SessionRead(string, *AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } diff --git a/acl/policy_authorizer.go b/acl/policy_authorizer.go index 1fdf44543..3b79a6316 100644 --- a/acl/policy_authorizer.go +++ b/acl/policy_authorizer.go @@ -767,7 +767,7 @@ func (p *policyAuthorizer) ServiceWrite(name string, _ *AuthorizerContext) Enfor return Default } -func (p *policyAuthorizer) serviceWriteAny(_ *AuthorizerContext) EnforcementDecision { +func (p *policyAuthorizer) ServiceWriteAny(_ *AuthorizerContext) EnforcementDecision { return p.anyAllowed(p.serviceRules, AccessWrite) } diff --git a/acl/policy_authorizer_test.go b/acl/policy_authorizer_test.go index f87326032..d2f69a4eb 100644 --- a/acl/policy_authorizer_test.go +++ b/acl/policy_authorizer_test.go @@ -56,6 +56,7 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "DefaultPreparedQueryWrite", prefix: "foo", check: checkDefaultPreparedQueryWrite}, {name: "DefaultServiceRead", prefix: "foo", check: checkDefaultServiceRead}, {name: "DefaultServiceWrite", prefix: "foo", check: checkDefaultServiceWrite}, + {name: "DefaultServiceWriteAny", prefix: "", check: checkDefaultServiceWriteAny}, {name: "DefaultSessionRead", prefix: "foo", check: checkDefaultSessionRead}, {name: "DefaultSessionWrite", prefix: "foo", check: checkDefaultSessionWrite}, {name: 
"DefaultSnapshot", prefix: "foo", check: checkDefaultSnapshot}, @@ -267,6 +268,7 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "ServiceWritePrefixDenied", prefix: "food", check: checkDenyServiceWrite}, {name: "ServiceReadDenied", prefix: "football", check: checkDenyServiceRead}, {name: "ServiceWriteDenied", prefix: "football", check: checkDenyServiceWrite}, + {name: "ServiceWriteAnyAllowed", prefix: "", check: checkAllowServiceWriteAny}, {name: "NodeReadPrefixAllowed", prefix: "fo", check: checkAllowNodeRead}, {name: "NodeWritePrefixDenied", prefix: "fo", check: checkDenyNodeWrite}, diff --git a/acl/static_authorizer.go b/acl/static_authorizer.go index 1807d0684..951b026f3 100644 --- a/acl/static_authorizer.go +++ b/acl/static_authorizer.go @@ -219,6 +219,13 @@ func (s *staticAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementD return Deny } +func (s *staticAuthorizer) ServiceWriteAny(*AuthorizerContext) EnforcementDecision { + if s.defaultAllow { + return Allow + } + return Deny +} + func (s *staticAuthorizer) SessionRead(string, *AuthorizerContext) EnforcementDecision { if s.defaultAllow { return Allow From 4d6229a3abe6cabd9c00ff0c9f7f8c274aff9d7c Mon Sep 17 00:00:00 2001 From: FFMMM Date: Thu, 24 Mar 2022 10:37:04 -0700 Subject: [PATCH 015/785] remove Telemetry.MergeDefaults (#12606) Signed-off-by: FFMMM --- lib/telemetry.go | 52 ----------------------- lib/telemetry_test.go | 99 ------------------------------------------- 2 files changed, 151 deletions(-) delete mode 100644 lib/telemetry_test.go diff --git a/lib/telemetry.go b/lib/telemetry.go index d85e51d45..d74edb37c 100644 --- a/lib/telemetry.go +++ b/lib/telemetry.go @@ -1,7 +1,6 @@ package lib import ( - "reflect" "time" "github.com/armon/go-metrics" @@ -200,57 +199,6 @@ type TelemetryConfig struct { PrometheusOpts prometheus.PrometheusOpts } -// MergeDefaults copies any non-zero field from defaults into the current -// config. -// TODO(kit): We no longer use this function and can probably delete it -func (c *TelemetryConfig) MergeDefaults(defaults *TelemetryConfig) { - if defaults == nil { - return - } - cfgPtrVal := reflect.ValueOf(c) - cfgVal := cfgPtrVal.Elem() - otherVal := reflect.ValueOf(*defaults) - for i := 0; i < cfgVal.NumField(); i++ { - f := cfgVal.Field(i) - if !f.IsValid() || !f.CanSet() { - continue - } - // See if the current value is a zero-value, if _not_ skip it - // - // No built in way to check for zero-values for all types so only - // implementing this for the types we actually have for now. Test failure - // should catch the case where we add new types later. - switch f.Kind() { - case reflect.Struct: - if f.Type() == reflect.TypeOf(prometheus.PrometheusOpts{}) { - continue - } - case reflect.Slice: - if !f.IsNil() { - continue - } - case reflect.Int, reflect.Int64: // time.Duration == int64 - if f.Int() != 0 { - continue - } - case reflect.String: - if f.String() != "" { - continue - } - case reflect.Bool: - if f.Bool() { - continue - } - default: - // Needs implementing, should be caught by tests. 
- continue - } - - // It's zero, copy it from defaults - f.Set(otherVal.Field(i)) - } -} - func statsiteSink(cfg TelemetryConfig, hostname string) (metrics.MetricSink, error) { addr := cfg.StatsiteAddr if addr == "" { diff --git a/lib/telemetry_test.go b/lib/telemetry_test.go deleted file mode 100644 index 4ee012f1e..000000000 --- a/lib/telemetry_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package lib - -import ( - "reflect" - "testing" - "time" - - "github.com/armon/go-metrics/prometheus" - - "github.com/stretchr/testify/require" -) - -func makeFullTelemetryConfig(t *testing.T) TelemetryConfig { - var ( - promOpts = prometheus.PrometheusOpts{} - strSliceVal = []string{"foo"} - strVal = "foo" - intVal = int64(1 * time.Second) - ) - - cfg := TelemetryConfig{} - cfgP := reflect.ValueOf(&cfg) - cfgV := cfgP.Elem() - for i := 0; i < cfgV.NumField(); i++ { - f := cfgV.Field(i) - if !f.IsValid() || !f.CanSet() { - continue - } - // Set non-zero values for all fields. We only implement kinds that exist - // now for brevity but will fail the test if a new field type is added since - // this is likely not implemented in MergeDefaults either. - switch f.Kind() { - case reflect.Struct: - if f.Type() != reflect.TypeOf(promOpts) { - t.Fatalf("unknown struct type in TelemetryConfig: actual %v, expected: %v", f.Type(), reflect.TypeOf(promOpts)) - } - // TODO(kit): This should delve into the fields and set them individually rather than using an empty struct - f.Set(reflect.ValueOf(promOpts)) - case reflect.Slice: - if f.Type() != reflect.TypeOf(strSliceVal) { - t.Fatalf("unknown slice type in TelemetryConfig." + - " You need to update MergeDefaults and this test code.") - } - f.Set(reflect.ValueOf(strSliceVal)) - case reflect.Int, reflect.Int64: // time.Duration == int64 - f.SetInt(intVal) - case reflect.String: - f.SetString(strVal) - case reflect.Bool: - f.SetBool(true) - default: - t.Fatalf("unknown field type in TelemetryConfig" + - " You need to update MergeDefaults and this test code.") - } - } - return cfg -} - -func TestTelemetryConfig_MergeDefaults(t *testing.T) { - tests := []struct { - name string - cfg TelemetryConfig - defaults TelemetryConfig - want TelemetryConfig - }{ - { - name: "basic merge", - cfg: TelemetryConfig{ - StatsiteAddr: "stats.it:4321", - }, - defaults: TelemetryConfig{ - StatsdAddr: "localhost:5678", - StatsiteAddr: "localhost:1234", - }, - want: TelemetryConfig{ - StatsdAddr: "localhost:5678", - StatsiteAddr: "stats.it:4321", - }, - }, - { - // This test uses reflect to build a TelemetryConfig with every value set - // to ensure that we exercise every possible field type. This means that - // if new fields are added that are not supported types in the code, this - // test should either ensure they work or fail to build the test case and - // fail the test. 
- name: "exhaustive", - cfg: TelemetryConfig{}, - defaults: makeFullTelemetryConfig(t), - want: makeFullTelemetryConfig(t), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := tt.cfg - c.MergeDefaults(&tt.defaults) - require.Equal(t, tt.want, c) - }) - } -} From 0d5cbf6f301ef73ed3d0be2089a138680f94b47d Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 24 Mar 2022 12:16:05 -0700 Subject: [PATCH 016/785] Sort by partition/ns/servicename instead of the reverse --- agent/consul/server_overview.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/agent/consul/server_overview.go b/agent/consul/server_overview.go index 7417f8032..b75ffed5d 100644 --- a/agent/consul/server_overview.go +++ b/agent/consul/server_overview.go @@ -170,13 +170,13 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar summarySort := func(slice []structs.HealthSummary) func(int, int) bool { return func(i, j int) bool { - if slice[i].Name < slice[j].Name { + if slice[i].PartitionOrEmpty() < slice[j].PartitionOrEmpty() { return true } if slice[i].NamespaceOrEmpty() < slice[j].NamespaceOrEmpty() { return true } - return slice[i].PartitionOrEmpty() < slice[j].PartitionOrEmpty() + return slice[i].Name < slice[j].Name } } sort.Slice(summary.Nodes, summarySort(summary.Nodes)) From 8020fb2098f7eeed58a84f2f8369ced2bf5396bb Mon Sep 17 00:00:00 2001 From: Mike Morris Date: Thu, 24 Mar 2022 15:32:25 -0400 Subject: [PATCH 017/785] agent: convert listener config to TLS types (#12522) * tlsutil: initial implementation of types/TLSVersion tlsutil: add test for parsing deprecated agent TLS version strings tlsutil: return TLSVersionInvalid with error tlsutil: start moving tlsutil cipher suite lookups over to types/tls tlsutil: rename tlsLookup to ParseTLSVersion, add cipherSuiteLookup agent: attempt to use types in runtime config agent: implement b.tlsVersion validation in config builder agent: fix tlsVersion nil check in builder tlsutil: update to renamed ParseTLSVersion and goTLSVersions tlsutil: fixup TestConfigurator_CommonTLSConfigTLSMinVersion tlsutil: disable invalid config parsing tests tlsutil: update tests auto_config: lookup old config strings from base.TLSMinVersion auto_config: update endpoint tests to use TLS types agent: update runtime_test to use TLS types agent: update TestRuntimeCinfig_Sanitize.golden agent: update config runtime tests to expect TLS types * website: update Consul agent tls_min_version values * agent: fixup TLS parsing and compilation errors * test: fixup lint issues in agent/config_runtime_test and tlsutil/config_test * tlsutil: add CHACHA20_POLY1305 cipher suites to goTLSCipherSuites * test: revert autoconfig tls min version fixtures to old format * types: add TLSVersions public function * agent: add warning for deprecated TLS version strings * agent: move agent config specific logic from tlsutil.ParseTLSVersion into agent config builder * tlsutil(BREAKING): change default TLS min version to TLS 1.2 * agent: move ParseCiphers logic from tlsutil into agent config builder * tlsutil: remove unused CipherString function * agent: fixup import for types package * Revert "tlsutil: remove unused CipherString function" This reverts commit 6ca7f6f58d268e617501b7db9500113c13bae70c. 
* agent: fixup config builder and runtime tests * tlsutil: fixup one remaining ListenerConfig -> ProtocolConfig * test: move TLS cipher suites parsing test from tlsutil into agent config builder tests * agent: remove parseCiphers helper from auto_config_endpoint_test * test: remove unused imports from tlsutil * agent: remove resolved FIXME comment * tlsutil: remove TODO and FIXME in cipher suite validation * agent: prevent setting inherited cipher suite config when TLS 1.3 is specified * changelog: add entry for converting agent config to TLS types * agent: remove FIXME in runtime test, this is covered in builder tests with invalid tls9 value now * tlsutil: remove config tests for values checked at agent config builder boundary * tlsutil: remove tls version check from loadProtocolConfig * tlsutil: remove tests and TODOs for logic checked in TestBuilder_tlsVersion and TestBuilder_tlsCipherSuites * website: update search link for supported Consul agent cipher suites * website: apply review suggestions for tls_min_version description * website: attempt to clean up markdown list formatting for tls_min_version * website: moar linebreaks to fix tls_min_version formatting * Revert "website: moar linebreaks to fix tls_min_version formatting" This reverts commit 38585927422f73ebf838a7663e566ac245f2a75c. * autoconfig: translate old values for TLSMinVersion * agent: rename var for translated value of deprecated TLS version value * Update agent/config/deprecated.go Co-authored-by: Dan Upton * agent: fix lint issue * agent: fixup deprecated config test assertions for updated warning Co-authored-by: Dan Upton --- .changelog/12522.txt | 15 ++ agent/auto-config/config_translate.go | 13 +- agent/auto-config/config_translate_test.go | 2 +- agent/config/builder.go | 76 ++++++++- agent/config/builder_test.go | 57 +++++++ agent/config/default.go | 2 +- agent/config/deprecated.go | 13 +- agent/config/deprecated_test.go | 12 +- agent/config/runtime_test.go | 35 ++-- .../TestRuntimeConfig_Sanitize.golden | 2 +- agent/config/testdata/full-config.hcl | 19 ++- agent/config/testdata/full-config.json | 19 ++- agent/consul/auto_config_endpoint.go | 1 + agent/consul/auto_config_endpoint_test.go | 26 ++- tlsutil/config.go | 151 +++++++----------- tlsutil/config_test.go | 79 ++------- types/tls.go | 13 +- website/content/docs/agent/options.mdx | 25 ++- 18 files changed, 318 insertions(+), 242 deletions(-) create mode 100644 .changelog/12522.txt diff --git a/.changelog/12522.txt b/.changelog/12522.txt new file mode 100644 index 000000000..a9d8ec5fc --- /dev/null +++ b/.changelog/12522.txt @@ -0,0 +1,15 @@ +```release-note:deprecation +agent: deprecate older syntax for specifying TLS min version values +``` +```release-note:deprecation +agent: remove support for specifying insecure TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 and TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suites +``` +```release-note:enhancement +agent: add additional validation to TLS config +``` +```release-note:enhancement +agent: bump default min version for connections to TLS 1.2 +``` +```release-note:enhancement +agent: add support for specifying TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 and TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 cipher suites +``` diff --git a/agent/auto-config/config_translate.go b/agent/auto-config/config_translate.go index 4b7f7e663..c5c3606a9 100644 --- a/agent/auto-config/config_translate.go +++ b/agent/auto-config/config_translate.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/consul/proto/pbautoconf" 
"github.com/hashicorp/consul/proto/pbconfig" "github.com/hashicorp/consul/proto/pbconnect" + "github.com/hashicorp/consul/types" ) // translateAgentConfig is meant to take in a proto/pbconfig.Config type @@ -83,9 +84,19 @@ func translateConfig(c *pbconfig.Config) config.Config { if t := c.TLS; t != nil { result.TLS.Defaults = config.TLSProtocolConfig{ VerifyOutgoing: &t.VerifyOutgoing, - TLSMinVersion: stringPtrOrNil(t.MinVersion), TLSCipherSuites: stringPtrOrNil(t.CipherSuites), } + + // NOTE: This inner check for deprecated values should eventually be + // removed, and possibly replaced with a versioning scheme for autoconfig + // or a proper integration with the deprecated config handling in + // agent/config/deprecated.go + if v, ok := types.DeprecatedConsulAgentTLSVersions[t.MinVersion]; ok { + result.TLS.Defaults.TLSMinVersion = stringPtrOrNil(v.String()) + } else { + result.TLS.Defaults.TLSMinVersion = stringPtrOrNil(t.MinVersion) + } + result.TLS.InternalRPC.VerifyServerHostname = &t.VerifyServerHostname } diff --git a/agent/auto-config/config_translate_test.go b/agent/auto-config/config_translate_test.go index ed605672d..e48eebf31 100644 --- a/agent/auto-config/config_translate_test.go +++ b/agent/auto-config/config_translate_test.go @@ -108,7 +108,7 @@ func TestTranslateConfig(t *testing.T) { Defaults: config.TLSProtocolConfig{ VerifyOutgoing: boolPointer(true), TLSCipherSuites: stringPointer("stuff"), - TLSMinVersion: stringPointer("tls13"), + TLSMinVersion: stringPointer("TLSv1_3"), }, InternalRPC: config.TLSProtocolConfig{ VerifyServerHostname: boolPointer(true), diff --git a/agent/config/builder.go b/agent/config/builder.go index 478540db8..d9686254b 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1971,15 +1971,63 @@ func (b *builder) cidrsVal(name string, v []string) (nets []*net.IPNet) { return } -func (b *builder) tlsCipherSuites(name string, v *string) []uint16 { +func (b *builder) tlsVersion(name string, v *string) types.TLSVersion { + // Handles unspecified config and empty string case. + // + // This check is not inside types.ValidateTLSVersionString because Envoy config + // distinguishes between an unset empty string which inherits parent config and + // an explicit TLS_AUTO which allows overriding parent config with the proxy + // defaults. 
+ if v == nil || *v == "" { + return types.TLSVersionAuto + } + + a := types.TLSVersion(*v) + + err := types.ValidateTLSVersion(a) + if err != nil { + b.err = multierror.Append(b.err, fmt.Errorf("%s: invalid TLS version: %s", name, err)) + return types.TLSVersionInvalid + } + return a +} + +// validateTLSVersionCipherSuitesCompat checks that the specified TLS version supports +// specifying cipher suites +func validateTLSVersionCipherSuitesCompat(tlsMinVersion types.TLSVersion) error { + if tlsMinVersion == types.TLSv1_3 { + return fmt.Errorf("TLS 1.3 cipher suites are not configurable") + } + return nil +} + +// tlsCipherSuites parses cipher suites from a comma-separated string into a +// recognized slice +func (b *builder) tlsCipherSuites(name string, v *string, tlsMinVersion types.TLSVersion) []types.TLSCipherSuite { if v == nil { return nil } - var a []uint16 - a, err := tlsutil.ParseCiphers(*v) + if err := validateTLSVersionCipherSuitesCompat(tlsMinVersion); err != nil { + b.err = multierror.Append(b.err, fmt.Errorf("%s: %s", name, err)) + return nil + } + + *v = strings.TrimSpace(*v) + if *v == "" { + return []types.TLSCipherSuite{} + } + ciphers := strings.Split(*v, ",") + + a := make([]types.TLSCipherSuite, len(ciphers)) + for i, cipher := range ciphers { + a[i] = types.TLSCipherSuite(cipher) + } + + err := types.ValidateConsulAgentCipherSuites(a) if err != nil { - b.err = multierror.Append(b.err, fmt.Errorf("%s: invalid tls cipher suites: %s", name, err)) + b.err = multierror.Append(b.err, fmt.Errorf("%s: invalid TLS cipher suites: %s", name, err)) + return []types.TLSCipherSuite{} } return a } @@ -2477,14 +2525,14 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)") } - defaultCipherSuites := b.tlsCipherSuites("tls.defaults.tls_cipher_suites", t.Defaults.TLSCipherSuites) + defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion) + defaultCipherSuites := b.tlsCipherSuites("tls.defaults.tls_cipher_suites", t.Defaults.TLSCipherSuites, defaultTLSMinVersion) mapCommon := func(name string, src TLSProtocolConfig, dst *tlsutil.ProtocolConfig) { dst.CAPath = stringValWithDefault(src.CAPath, stringVal(t.Defaults.CAPath)) dst.CAFile = stringValWithDefault(src.CAFile, stringVal(t.Defaults.CAFile)) dst.CertFile = stringValWithDefault(src.CertFile, stringVal(t.Defaults.CertFile)) dst.KeyFile = stringValWithDefault(src.KeyFile, stringVal(t.Defaults.KeyFile)) - dst.TLSMinVersion = stringValWithDefault(src.TLSMinVersion, stringVal(t.Defaults.TLSMinVersion)) dst.VerifyIncoming = boolValWithDefault(src.VerifyIncoming, boolVal(t.Defaults.VerifyIncoming)) // We prevent this from being set explicity in the tls.grpc stanza above, but @@ -2494,12 +2542,26 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error dst.VerifyOutgoing = boolValWithDefault(src.VerifyOutgoing, boolVal(t.Defaults.VerifyOutgoing)) } + if src.TLSMinVersion == nil { + dst.TLSMinVersion = defaultTLSMinVersion + } else { + dst.TLSMinVersion = b.tlsVersion( + fmt.Sprintf("tls.%s.tls_min_version", name), + src.TLSMinVersion, + ) + } + if src.TLSCipherSuites == nil { - dst.CipherSuites = defaultCipherSuites + // If cipher suite config incompatible with a specified TLS min version + // would be inherited, omit it but don't return an error in the builder. 
+ if validateTLSVersionCipherSuitesCompat(dst.TLSMinVersion) == nil { + dst.CipherSuites = defaultCipherSuites + } } else { dst.CipherSuites = b.tlsCipherSuites( fmt.Sprintf("tls.%s.tls_cipher_suites", name), src.TLSCipherSuites, + dst.TLSMinVersion, ) } } diff --git a/agent/config/builder_test.go b/agent/config/builder_test.go index 80ad7b368..58fd922fc 100644 --- a/agent/config/builder_test.go +++ b/agent/config/builder_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/types" ) func TestLoad(t *testing.T) { @@ -327,3 +329,58 @@ func TestBuilder_ServiceVal_MultiError(t *testing.T) { func intPtr(v int) *int { return &v } + +func TestBuilder_tlsVersion(t *testing.T) { + b := builder{} + + validTLSVersion := "TLSv1_3" + b.tlsVersion("tls.defaults.tls_min_version", &validTLSVersion) + + deprecatedTLSVersion := "tls11" + b.tlsVersion("tls.defaults.tls_min_version", &deprecatedTLSVersion) + + invalidTLSVersion := "tls9" + b.tlsVersion("tls.defaults.tls_min_version", &invalidTLSVersion) + + require.Error(t, b.err) + require.Contains(t, b.err.Error(), "2 errors") + require.Contains(t, b.err.Error(), deprecatedTLSVersion) + require.Contains(t, b.err.Error(), invalidTLSVersion) +} + +func TestBuilder_tlsCipherSuites(t *testing.T) { + b := builder{} + + validCipherSuites := strings.Join([]string{ + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + }, ",") + b.tlsCipherSuites("tls.defaults.tls_cipher_suites", &validCipherSuites, types.TLSv1_2) + require.NoError(t, b.err) + + unsupportedCipherSuites := strings.Join([]string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + }, ",") + b.tlsCipherSuites("tls.defaults.tls_cipher_suites", &unsupportedCipherSuites, types.TLSv1_2) + + invalidCipherSuites := strings.Join([]string{ + "cipherX", + }, ",") + b.tlsCipherSuites("tls.defaults.tls_cipher_suites", &invalidCipherSuites, types.TLSv1_2) + + b.tlsCipherSuites("tls.defaults.tls_cipher_suites", &validCipherSuites, types.TLSv1_3) + + require.Error(t, b.err) + require.Contains(t, b.err.Error(), "3 errors") + require.Contains(t, b.err.Error(), unsupportedCipherSuites) + require.Contains(t, b.err.Error(), invalidCipherSuites) + require.Contains(t, b.err.Error(), "cipher suites are not configurable") +} diff --git a/agent/config/default.go b/agent/config/default.go index 3f40766ff..c0c1bd9b9 100644 --- a/agent/config/default.go +++ b/agent/config/default.go @@ -58,7 +58,7 @@ func DefaultSource() Source { tls = { defaults = { - tls_min_version = "tls12" + tls_min_version = "TLSv1_2" } } diff --git a/agent/config/deprecated.go b/agent/config/deprecated.go index d2649d61d..b27150b51 100644 --- a/agent/config/deprecated.go +++ b/agent/config/deprecated.go @@ -2,6 +2,8 @@ package config import ( "fmt" + + "github.com/hashicorp/consul/types" ) type DeprecatedConfig struct { @@ -219,7 +221,16 @@ func applyDeprecatedTLSConfig(dep DeprecatedConfig, cfg *Config) []string { if v := dep.TLSMinVersion; v != nil { if defaults.TLSMinVersion == nil { - defaults.TLSMinVersion = v + // NOTE: This inner check for deprecated values should eventually be + // 
removed + if version, ok := types.DeprecatedConsulAgentTLSVersions[*v]; ok { + // Log warning about deprecated config values + warns = append(warns, fmt.Sprintf("'tls_min_version' value '%s' is deprecated, please specify '%s' instead", *v, version)) + versionString := version.String() + defaults.TLSMinVersion = &versionString + } else { + defaults.TLSMinVersion = v + } } warns = append(warns, deprecationWarning("tls_min_version", "tls.defaults.tls_min_version")) } diff --git a/agent/config/deprecated_test.go b/agent/config/deprecated_test.go index ce32a3ccd..36dd86907 100644 --- a/agent/config/deprecated_test.go +++ b/agent/config/deprecated_test.go @@ -1,7 +1,7 @@ package config import ( - "crypto/tls" + "fmt" "sort" "testing" "time" @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/consul/types" ) func TestLoad_DeprecatedConfig(t *testing.T) { @@ -33,8 +34,8 @@ ca_file = "some-ca-file" ca_path = "some-ca-path" cert_file = "some-cert-file" key_file = "some-key-file" -tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" -tls_min_version = "some-tls-version" +tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" +tls_min_version = "tls11" verify_incoming = true verify_incoming_https = false verify_incoming_rpc = false @@ -61,6 +62,7 @@ tls_prefer_server_cipher_suites = true deprecationWarning("cert_file", "tls.defaults.cert_file"), deprecationWarning("key_file", "tls.defaults.key_file"), deprecationWarning("tls_cipher_suites", "tls.defaults.tls_cipher_suites"), + fmt.Sprintf("'tls_min_version' value 'tls11' is deprecated, please specify 'TLSv1_1' instead"), deprecationWarning("tls_min_version", "tls.defaults.tls_min_version"), deprecationWarning("verify_incoming", "tls.defaults.verify_incoming"), deprecationWarning("verify_incoming_https", "tls.https.verify_incoming"), @@ -90,8 +92,8 @@ tls_prefer_server_cipher_suites = true require.Equal(t, "some-ca-path", l.CAPath) require.Equal(t, "some-cert-file", l.CertFile) require.Equal(t, "some-key-file", l.KeyFile) - require.Equal(t, "some-tls-version", l.TLSMinVersion) - require.Equal(t, []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256}, l.CipherSuites) + require.Equal(t, types.TLSVersion("TLSv1_1"), l.TLSMinVersion) + require.Equal(t, []types.TLSCipherSuite{types.TLSCipherSuite("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA")}, l.CipherSuites) } require.False(t, rt.TLS.InternalRPC.VerifyIncoming) diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 7c24e71d3..b239b1ed4 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2,7 +2,6 @@ package config import ( "bytes" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -5395,8 +5394,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { ca_file = "default_ca_file" ca_path = "default_ca_path" cert_file = "default_cert_file" - tls_min_version = "tls12" - tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" + tls_min_version = "TLSv1_2" + tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" verify_incoming = true } @@ -5406,7 +5405,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { https { cert_file = "https_cert_file" - tls_min_version = "tls13" + tls_min_version = "TLSv1_3" } grpc { @@ -5425,8 +5424,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { "ca_file": "default_ca_file", "ca_path": "default_ca_path", "cert_file": "default_cert_file", - "tls_min_version": "tls12", - "tls_cipher_suites": 
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "tls_min_version": "TLSv1_2", + "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "verify_incoming": true }, "internal_rpc": { @@ -5434,7 +5433,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { }, "https": { "cert_file": "https_cert_file", - "tls_min_version": "tls13" + "tls_min_version": "TLSv1_3" }, "grpc": { "verify_incoming": false, @@ -5455,22 +5454,21 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { rt.TLS.InternalRPC.CAFile = "internal_rpc_ca_file" rt.TLS.InternalRPC.CAPath = "default_ca_path" rt.TLS.InternalRPC.CertFile = "default_cert_file" - rt.TLS.InternalRPC.TLSMinVersion = "tls12" - rt.TLS.InternalRPC.CipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256} + rt.TLS.InternalRPC.TLSMinVersion = "TLSv1_2" + rt.TLS.InternalRPC.CipherSuites = []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256} rt.TLS.InternalRPC.VerifyIncoming = true rt.TLS.HTTPS.CAFile = "default_ca_file" rt.TLS.HTTPS.CAPath = "default_ca_path" rt.TLS.HTTPS.CertFile = "https_cert_file" - rt.TLS.HTTPS.TLSMinVersion = "tls13" - rt.TLS.HTTPS.CipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256} + rt.TLS.HTTPS.TLSMinVersion = "TLSv1_3" rt.TLS.HTTPS.VerifyIncoming = true rt.TLS.GRPC.CAFile = "default_ca_file" rt.TLS.GRPC.CAPath = "default_ca_path" rt.TLS.GRPC.CertFile = "default_cert_file" - rt.TLS.GRPC.TLSMinVersion = "tls12" - rt.TLS.GRPC.CipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA} + rt.TLS.GRPC.TLSMinVersion = "TLSv1_2" + rt.TLS.GRPC.CipherSuites = []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA} rt.TLS.GRPC.VerifyIncoming = false }, }) @@ -6310,8 +6308,8 @@ func TestLoad_FullConfig(t *testing.T) { CAPath: "lOp1nhPa", CertFile: "dfJ4oPln", KeyFile: "aL1Knkpo", - TLSMinVersion: "lPo1MklP", - CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256}, + TLSMinVersion: types.TLSv1_1, + CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA}, VerifyOutgoing: true, VerifyServerHostname: true, }, @@ -6321,8 +6319,8 @@ func TestLoad_FullConfig(t *testing.T) { CAPath: "fLponKpl", CertFile: "a674klPn", KeyFile: "1y4prKjl", - TLSMinVersion: "lPo4fNkl", - CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256}, + TLSMinVersion: types.TLSv1_0, + CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA}, VerifyOutgoing: false, }, HTTPS: tlsutil.ProtocolConfig{ @@ -6331,8 +6329,7 @@ func TestLoad_FullConfig(t *testing.T) { CAPath: "nu4PlHzn", CertFile: "1yrhPlMk", KeyFile: "1bHapOkL", - TLSMinVersion: "mK14iOpz", - CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256}, + TLSMinVersion: types.TLSv1_3, VerifyOutgoing: true, }, NodeName: "otlLxGaI", diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 0f8d66b8b..a8e2f46ee 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -453,4 +453,4 @@ "Version": "", "VersionPrerelease": "", "Watches": [] -} \ No newline at end of file +} diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index bb68055cf..48b6e9a1a 100644 
--- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -653,8 +653,8 @@ tls { ca_path = "bN63LpXu" cert_file = "hB4PoxkL" key_file = "Po0hB1tY" - tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" - tls_min_version = "yU0uIp1A" + tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + tls_min_version = "TLSv1_2" verify_incoming = true verify_outgoing = true } @@ -663,8 +663,8 @@ tls { ca_path = "lOp1nhPa" cert_file = "dfJ4oPln" key_file = "aL1Knkpo" - tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" - tls_min_version = "lPo1MklP" + tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" + tls_min_version = "TLSv1_1" verify_incoming = true verify_outgoing = true verify_server_hostname = true @@ -674,8 +674,7 @@ tls { ca_path = "nu4PlHzn" cert_file = "1yrhPlMk" key_file = "1bHapOkL" - tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" - tls_min_version = "mK14iOpz" + tls_min_version = "TLSv1_3" verify_incoming = true verify_outgoing = true } @@ -684,13 +683,13 @@ tls { ca_path = "fLponKpl" cert_file = "a674klPn" key_file = "1y4prKjl" - tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" - tls_min_version = "lPo4fNkl" + tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" + tls_min_version = "TLSv1_0" verify_incoming = true } } -tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" -tls_min_version = "pAOWafkR" +tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" +tls_min_version = "tls11" tls_prefer_server_cipher_suites = true translate_wan_addrs = true ui_config { diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 574c715f4..1c92d6d02 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -650,8 +650,8 @@ "ca_path": "bN63LpXu", "cert_file": "hB4PoxkL", "key_file": "Po0hB1tY", - "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "tls_min_version": "yU0uIp1A", + "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "tls_min_version": "TLSv1_2", "verify_incoming": true, "verify_outgoing": true }, @@ -660,8 +660,8 @@ "ca_path": "lOp1nhPa", "cert_file": "dfJ4oPln", "key_file": "aL1Knkpo", - "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "tls_min_version": "lPo1MklP", + "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "tls_min_version": "TLSv1_1", "verify_incoming": true, "verify_outgoing": true }, @@ -670,8 +670,7 @@ "ca_path": "nu4PlHzn", "cert_file": "1yrhPlMk", "key_file": "1bHapOkL", - "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "tls_min_version": "mK14iOpz", + "tls_min_version": "TLSv1_3", "verify_incoming": true, "verify_outgoing": true }, @@ -680,13 +679,13 @@ "ca_path": "fLponKpl", "cert_file": "a674klPn", "key_file": "1y4prKjl", - "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "tls_min_version": "lPo4fNkl", + 
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "tls_min_version": "TLSv1_0", "verify_incoming": true } }, - "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "tls_min_version": "pAOWafkR", + "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "tls_min_version": "tls11", "tls_prefer_server_cipher_suites": true, "translate_wan_addrs": true, "ui_config": { diff --git a/agent/consul/auto_config_endpoint.go b/agent/consul/auto_config_endpoint.go index 781192d6e..5ca15f33b 100644 --- a/agent/consul/auto_config_endpoint.go +++ b/agent/consul/auto_config_endpoint.go @@ -281,6 +281,7 @@ func (ac *AutoConfig) updateTLSSettingsInConfig(_ AutoConfigOptions, resp *pbaut } var err error + resp.Config.TLS, err = ac.tlsConfigurator.AutoConfigTLSSettings() return err } diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index 00873b615..f81461bbb 100644 --- a/agent/consul/auto_config_endpoint_test.go +++ b/agent/consul/auto_config_endpoint_test.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/consul/proto/pbconnect" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/consul/types" "gopkg.in/square/go-jose.v2/jwt" ) @@ -173,7 +174,7 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { c.TLSConfig.InternalRPC.VerifyOutgoing = true c.TLSConfig.InternalRPC.VerifyIncoming = true c.TLSConfig.InternalRPC.VerifyServerHostname = true - c.TLSConfig.InternalRPC.TLSMinVersion = "tls12" + c.TLSConfig.InternalRPC.TLSMinVersion = types.TLSv1_2 c.ConnectEnabled = true c.AutoEncryptAllowTLS = true @@ -391,13 +392,6 @@ func TestAutoConfig_baseConfig(t *testing.T) { } } -func parseCiphers(t *testing.T, cipherStr string) []uint16 { - t.Helper() - ciphers, err := tlsutil.ParseCiphers(cipherStr) - require.NoError(t, err) - return ciphers -} - func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { _, _, cacert, err := testTLSCertificates("server.dc1.consul") require.NoError(t, err) @@ -418,9 +412,9 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { InternalRPC: tlsutil.ProtocolConfig{ VerifyServerHostname: true, VerifyOutgoing: true, - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, CAFile: cafile, - CipherSuites: parseCiphers(t, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"), + CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, expected: pbautoconf.AutoConfigResponse{ @@ -439,9 +433,9 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { InternalRPC: tlsutil.ProtocolConfig{ VerifyServerHostname: false, VerifyOutgoing: true, - TLSMinVersion: "tls10", + TLSMinVersion: types.TLSv1_0, CAFile: cafile, - CipherSuites: parseCiphers(t, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"), + CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, expected: pbautoconf.AutoConfigResponse{ @@ -635,9 +629,9 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { InternalRPC: tlsutil.ProtocolConfig{ VerifyServerHostname: true, VerifyOutgoing: true, - 
TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, CAFile: cafile, - CipherSuites: parseCiphers(t, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"), + CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, expected: pbautoconf.AutoConfigResponse{ @@ -654,9 +648,9 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { InternalRPC: tlsutil.ProtocolConfig{ VerifyServerHostname: true, VerifyOutgoing: true, - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, CAFile: cafile, - CipherSuites: parseCiphers(t, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"), + CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, opts: AutoConfigOptions{ diff --git a/tlsutil/config.go b/tlsutil/config.go index c6157da93..bf4e9f6c6 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -8,7 +8,6 @@ import ( "net" "os" "path/filepath" - "sort" "strings" "sync" "sync/atomic" @@ -19,6 +18,7 @@ import ( "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/proto/pbconfig" + "github.com/hashicorp/consul/types" ) // ALPNWrapper is a function that is used to wrap a non-TLS connection and @@ -36,13 +36,13 @@ type DCWrapper func(dc string, conn net.Conn) (net.Conn, error) // a constant value. This is usually done by currying DCWrapper. type Wrapper func(conn net.Conn) (net.Conn, error) -// tlsLookup maps the tls_min_version configuration to the internal value -var tlsLookup = map[string]uint16{ - "": tls.VersionTLS10, // default in golang - "tls10": tls.VersionTLS10, - "tls11": tls.VersionTLS11, - "tls12": tls.VersionTLS12, - "tls13": tls.VersionTLS13, +// goTLSVersions maps types.TLSVersion to the Go internal value +var goTLSVersions = map[types.TLSVersion]uint16{ + types.TLSVersionAuto: tls.VersionTLS12, + types.TLSv1_0: tls.VersionTLS10, + types.TLSv1_1: tls.VersionTLS11, + types.TLSv1_2: tls.VersionTLS12, + types.TLSv1_3: tls.VersionTLS13, } // ProtocolConfig contains configuration for a given protocol. @@ -71,27 +71,16 @@ type ProtocolConfig struct { KeyFile string // TLSMinVersion is the minimum accepted TLS version that can be used. - TLSMinVersion string + + TLSMinVersion types.TLSVersion // CipherSuites is the list of TLS cipher suites to use. // - // The values should be a list of the following values: - // - // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA - // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 - // TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA - // TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - // TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA - // TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - // TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA - // TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - // - // todo(fs): IMHO, we should also support the raw 0xNNNN values from - // todo(fs): https://golang.org/pkg/crypto/tls/#pkg-constants - // todo(fs): since they are standardized by IANA. 
- CipherSuites []uint16 + // We don't support the raw 0xNNNN values from + // https://golang.org/pkg/crypto/tls/#pkg-constants + // even though they are standardized by IANA because it would increase + // the likelihood of an operator inadvertently setting an insecure configuration + CipherSuites []types.TLSCipherSuite // VerifyOutgoing is used to verify the authenticity of outgoing // connections. This means that TLS requests are used, and TCP @@ -148,17 +137,6 @@ type Config struct { AutoTLS bool } -func tlsVersions() []string { - versions := []string{} - for v := range tlsLookup { - if v != "" { - versions = append(versions, v) - } - } - sort.Strings(versions) - return versions -} - // SpecificDC is used to invoke a static datacenter // and turns a DCWrapper into a Wrapper type. func SpecificDC(dc string, tlsWrap DCWrapper) Wrapper { @@ -289,19 +267,12 @@ func (c *Configurator) Update(config Config) error { // loadProtocolConfig loads the certificates etc. for a given ProtocolConfig // and performs validation. -func (c *Configurator) loadProtocolConfig(base Config, lc ProtocolConfig) (*protocolConfig, error) { - if min := lc.TLSMinVersion; min != "" { - if _, ok := tlsLookup[min]; !ok { - versions := strings.Join(tlsVersions(), ", ") - return nil, fmt.Errorf("TLSMinVersion: value %s not supported, please specify one of [%s]", min, versions) - } - } - - cert, err := loadKeyPair(lc.CertFile, lc.KeyFile) +func (c *Configurator) loadProtocolConfig(base Config, pc ProtocolConfig) (*protocolConfig, error) { + cert, err := loadKeyPair(pc.CertFile, pc.KeyFile) if err != nil { return nil, err } - pems, err := LoadCAs(lc.CAFile, lc.CAPath) + pems, err := LoadCAs(pc.CAFile, pc.CAPath) if err != nil { return nil, err } @@ -314,7 +285,7 @@ func (c *Configurator) loadProtocolConfig(base Config, lc ProtocolConfig) (*prot return nil, err } - if lc.VerifyIncoming { + if pc.VerifyIncoming { // Both auto-config and auto-encrypt require verifying the connection from the // client to the server for secure operation. In order to be able to verify the // server's certificate we must have some CA certs already provided. Therefore, @@ -336,7 +307,7 @@ func (c *Configurator) loadProtocolConfig(base Config, lc ProtocolConfig) (*prot } // Ensure we have a CA if VerifyOutgoing is set. - if lc.VerifyOutgoing && combinedPool == nil { + if pc.VerifyOutgoing && combinedPool == nil { return nil, fmt.Errorf("VerifyOutgoing set but no CA certificates were provided") } @@ -572,7 +543,11 @@ func (c *Configurator) commonTLSConfig(state protocolConfig, cfg ProtocolConfig, // Set the cipher suites if len(cfg.CipherSuites) != 0 { - tlsConfig.CipherSuites = cfg.CipherSuites + // TLS cipher suites are validated on input in agent config builder, + // so it's safe to ignore the error case here. + + cipherSuites, _ := cipherSuiteLookup(cfg.CipherSuites) + tlsConfig.CipherSuites = cipherSuites } // GetCertificate is used when acting as a server and responding to @@ -607,10 +582,11 @@ func (c *Configurator) commonTLSConfig(state protocolConfig, cfg ProtocolConfig, tlsConfig.ClientCAs = state.combinedCAPool tlsConfig.RootCAs = state.combinedCAPool - // This is possible because tlsLookup also contains "" with golang's - // default (tls10). And because the initial check makes sure the - // version correctly matches. 
- tlsConfig.MinVersion = tlsLookup[cfg.TLSMinVersion] + // Error handling is not needed here because agent config builder handles "" + // or a nil value as TLSVersionAuto with goTLSVersions mapping TLSVersionAuto + // to TLS 1.2 and because the initial check makes sure a specified version is + // not invalid. + tlsConfig.MinVersion = goTLSVersions[cfg.TLSMinVersion] // Set ClientAuth if necessary if verifyIncoming { @@ -736,7 +712,7 @@ func (c *Configurator) AutoConfigTLSSettings() (*pbconfig.TLS, error) { return &pbconfig.TLS{ VerifyOutgoing: cfg.VerifyOutgoing, VerifyServerHostname: cfg.VerifyServerHostname || c.autoTLS.verifyServerHostname, - MinVersion: cfg.TLSMinVersion, + MinVersion: types.ConsulAutoConfigTLSVersionStrings[cfg.TLSMinVersion], CipherSuites: cipherString, }, nil } @@ -1080,32 +1056,32 @@ func (c *Configurator) AuthorizeServerConn(dc string, conn TLSConn) error { } -// ParseCiphers parse ciphersuites from the comma-separated string into -// recognized slice -func ParseCiphers(cipherStr string) ([]uint16, error) { +// NOTE: any new cipher suites will also need to be added in types/tls.go +// TODO: should this be moved into types/tls.go? Would importing Go's tls +// package in there be acceptable? +var goTLSCipherSuites = map[types.TLSCipherSuite]uint16{ + types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + types.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + + types.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + types.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + types.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + types.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, +} + +func cipherSuiteLookup(ciphers []types.TLSCipherSuite) ([]uint16, error) { suites := []uint16{} - cipherStr = strings.TrimSpace(cipherStr) - if cipherStr == "" { + if len(ciphers) == 0 { return []uint16{}, nil } - ciphers := strings.Split(cipherStr, ",") - // Note: this needs to be kept up to date with the cipherMap in CipherString - cipherMap := map[string]uint16{ - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - } for _, cipher := range ciphers { - if v, ok := cipherMap[cipher]; ok { + if v, ok := 
goTLSCipherSuites[cipher]; ok { suites = append(suites, v) } else { return suites, fmt.Errorf("unsupported cipher %q", cipher) @@ -1115,29 +1091,16 @@ func ParseCiphers(cipherStr string) ([]uint16, error) { return suites, nil } -// CipherString performs the inverse operation of ParseCiphers -func CipherString(ciphers []uint16) (string, error) { - // Note: this needs to be kept up to date with the cipherMap in ParseCiphers - cipherMap := map[uint16]string{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +// CipherString performs the inverse operation of types.ParseCiphers +func CipherString(ciphers []types.TLSCipherSuite) (string, error) { + err := types.ValidateConsulAgentCipherSuites(ciphers) + if err != nil { + return "", err } cipherStrings := make([]string, len(ciphers)) for i, cipher := range ciphers { - if v, ok := cipherMap[cipher]; ok { - cipherStrings[i] = v - } else { - return "", fmt.Errorf("unsupported cipher %d", cipher) - } + cipherStrings[i] = string(cipher) } return strings.Join(cipherStrings, ","), nil diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index abcade402..b49bd66bc 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -8,8 +8,6 @@ import ( "io/ioutil" "net" "path/filepath" - "reflect" - "strings" "testing" "github.com/google/go-cmp/cmp" @@ -19,6 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/types" ) func TestConfigurator_IncomingConfig_Common(t *testing.T) { @@ -46,7 +45,7 @@ func TestConfigurator_IncomingConfig_Common(t *testing.T) { t.Run(desc, func(t *testing.T) { t.Run("MinTLSVersion", func(t *testing.T) { cfg := ProtocolConfig{ - TLSMinVersion: "tls13", + TLSMinVersion: "TLSv1_3", CertFile: "../test/hostname/Alice.crt", KeyFile: "../test/hostname/Alice.key", } @@ -69,7 +68,7 @@ func TestConfigurator_IncomingConfig_Common(t *testing.T) { t.Run("CipherSuites", func(t *testing.T) { cfg := ProtocolConfig{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, + CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, CertFile: "../test/hostname/Alice.crt", KeyFile: "../test/hostname/Alice.key", } @@ -760,45 +759,6 @@ func TestConfigurator_outgoingWrapperALPN_serverHasNoNodeNameInSAN(t *testing.T) <-errc } -func TestConfig_ParseCiphers(t *testing.T) { - testOk := strings.Join([]string{ - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - 
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - }, ",") - ciphers := []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - } - v, err := ParseCiphers(testOk) - require.NoError(t, err) - if got, want := v, ciphers; !reflect.DeepEqual(got, want) { - t.Fatalf("got ciphers %#v want %#v", got, want) - } - - _, err = ParseCiphers("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,cipherX") - require.Error(t, err) - - v, err = ParseCiphers("") - require.NoError(t, err) - require.Equal(t, []uint16{}, v) -} - func TestLoadKeyPair(t *testing.T) { type variant struct { cert, key string @@ -866,14 +826,6 @@ func TestConfigurator_Validation(t *testing.T) { } testCases := map[string]testCase{ - "invalid TLSMinVersion": { - ProtocolConfig{TLSMinVersion: "tls9"}, - false, - }, - "default TLSMinVersion": { - ProtocolConfig{TLSMinVersion: ""}, - true, - }, "invalid CAFile": { ProtocolConfig{CAFile: "bogus"}, false, @@ -986,13 +938,6 @@ func TestConfigurator_Validation(t *testing.T) { }, } - for _, v := range tlsVersions() { - testCases[fmt.Sprintf("MinTLSVersion(%s)", v)] = testCase{ - ProtocolConfig{TLSMinVersion: v}, - true, - } - } - for desc, tc := range testCases { for _, p := range []string{"internal", "grpc", "https"} { info := fmt.Sprintf("%s => %s", p, desc) @@ -1229,7 +1174,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, }, EnableAgentTLSForChecks: false, }, nil) @@ -1242,7 +1187,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, }, EnableAgentTLSForChecks: false, ServerName: "servername", @@ -1257,7 +1202,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, }, EnableAgentTLSForChecks: false, ServerName: "servername", @@ -1275,7 +1220,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, }, EnableAgentTLSForChecks: true, NodeName: "nodename", @@ -1292,7 +1237,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, }, EnableAgentTLSForChecks: true, NodeName: "nodename", @@ -1310,7 +1255,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ - TLSMinVersion: "tls12", + TLSMinVersion: types.TLSv1_2, }, EnableAgentTLSForChecks: true, ServerName: "servername", @@ -1517,12 +1462,6 @@ 
func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { }) } -func TestConfig_tlsVersions(t *testing.T) { - require.Equal(t, []string{"tls10", "tls11", "tls12", "tls13"}, tlsVersions()) - expected := "tls10, tls11, tls12, tls13" - require.Equal(t, expected, strings.Join(tlsVersions(), ", ")) -} - func TestConfigurator_GRPCTLSConfigured(t *testing.T) { t.Run("certificate manually configured", func(t *testing.T) { c := makeConfigurator(t, Config{ diff --git a/types/tls.go b/types/tls.go index 9c50f498b..198d4052d 100644 --- a/types/tls.go +++ b/types/tls.go @@ -2,6 +2,7 @@ package types import ( "fmt" + "sort" "strings" ) @@ -92,9 +93,19 @@ func (a TLSVersion) LessThan(b TLSVersion) (error, bool) { return nil, tlsVersionComparison[a] < tlsVersionComparison[b] } +func TLSVersions() string { + versions := []string{} + for v := range tlsVersions { + versions = append(versions, string(v)) + } + sort.Strings(versions) + + return strings.Join(versions, ", ") +} + func ValidateTLSVersion(v TLSVersion) error { if _, ok := tlsVersions[v]; !ok { - return fmt.Errorf("no matching TLS version found for %s", v.String()) + return fmt.Errorf("no matching TLS version found for %s, please specify one of [%s]", v.String(), TLSVersions()) } return nil diff --git a/website/content/docs/agent/options.mdx b/website/content/docs/agent/options.mdx index 11eae98ae..b35e964ff 100644 --- a/website/content/docs/agent/options.mdx +++ b/website/content/docs/agent/options.mdx @@ -2495,15 +2495,30 @@ specially crafted certificate signed by the CA can be used to gain full access t [`cert_file`](#tls_defaults_cert_file). - `tls_min_version` ((#tls_defaults_tls_min_version)) This specifies the - minimum supported version of TLS. Accepted values are "tls10", "tls11", - "tls12", or "tls13". This defaults to "tls12". **WARNING: TLS 1.1 and - lower are generally considered less secure; avoid using these if - possible.** + minimum supported version of TLS. The following values are accepted: + * `TLSv1_0` + * `TLSv1_1` + * `TLSv1_2` (default) + * `TLSv1_3` + + **WARNING: TLS 1.1 and lower are generally considered less secure and + should not be used if possible.** + + The following values are also valid, but only when using the + [deprecated top-level `tls_min_version` config](#tls_deprecated_options), + and will be removed in a future release: + + * `tls10` + * `tls11` + * `tls12` + * `tls13` + + A warning message will appear if a deprecated value is specified. - `tls_cipher_suites` ((#tls_defaults_tls_cipher_suites)) This specifies the list of supported ciphersuites as a comma-separated-list. Applicable to TLS 1.2 and below only. The list of all supported ciphersuites is - available through [this search](https://github.com/hashicorp/consul/search?q=cipherMap+%3A%3D+map&unscoped_q=cipherMap+%3A%3D+map). + available through [this search](https://github.com/hashicorp/consul/search?q=goTLSCipherSuites+%3D+map). ~> **Note:** The ordering of cipher suites will not be guaranteed from Consul 1.11 onwards. 
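  For example, both settings can be combined under the agent's
  `tls { defaults { ... } }` block; the cipher suite values below are
  illustrative, and any suites from the list referenced above may be used:

  ```hcl
  tls {
    defaults {
      tls_min_version   = "TLSv1_2"
      tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
    }
  }
  ```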
See this [post](https://go.dev/blog/tls-cipher-suites) From 560f8cbc8953ff42db7717e48e2747c80ca016f2 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Thu, 24 Mar 2022 14:41:30 -0700 Subject: [PATCH 018/785] fix bad oss sync, use gauges not counters (#12611) --- agent/metrics_test.go | 24 ------------------------ agent/rpc/middleware/interceptors.go | 2 +- agent/setup.go | 2 ++ 3 files changed, 3 insertions(+), 25 deletions(-) diff --git a/agent/metrics_test.go b/agent/metrics_test.go index e0d088cfa..b530eda25 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -94,30 +94,6 @@ func TestAgent_NewRPCMetrics(t *testing.T) { assertMetricExists(t, respRec, metricsPrefix+"_rpc_server_call") }) - - t.Run("Check that new rpc metrics can be filtered out", func(t *testing.T) { - metricsPrefix := "new_rpc_metrics_2" - hcl := fmt.Sprintf(` - telemetry = { - prometheus_retention_time = "5s" - disable_hostname = true - metrics_prefix = "%s" - prefix_filter = ["-%s.rpc.server.call"] - } - `, metricsPrefix, metricsPrefix) - - a := StartTestAgent(t, TestAgent{HCL: hcl}) - defer a.Shutdown() - - var out struct{} - err := a.RPC("Status.Ping", struct{}{}, &out) - require.NoError(t, err) - - respRec := httptest.NewRecorder() - recordPromMetrics(t, a, respRec) - - assertMetricNotExists(t, respRec, metricsPrefix+"_rpc_server_call") - }) } // TestHTTPHandlers_AgentMetrics_ConsulAutopilot_Prometheus adds testing around diff --git a/agent/rpc/middleware/interceptors.go b/agent/rpc/middleware/interceptors.go index d52999e76..a5ee26f4e 100644 --- a/agent/rpc/middleware/interceptors.go +++ b/agent/rpc/middleware/interceptors.go @@ -24,7 +24,7 @@ const RPCTypeNetRPC = "net/rpc" var metricRPCRequest = []string{"rpc", "server", "call"} var requestLogName = "rpc.server.request" -var NewRPCCounters = []prometheus.CounterDefinition{ +var NewRPCGauges = []prometheus.GaugeDefinition{ { Name: metricRPCRequest, Help: "Increments when a server makes an RPC service call. 
The labels on the metric have more information", diff --git a/agent/setup.go b/agent/setup.go index bf67c0360..4921a42d8 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/submatview" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/agent/xds" @@ -214,6 +215,7 @@ func getPrometheusDefs(cfg lib.TelemetryConfig, isServer bool) ([]prometheus.Gau CertExpirationGauges, Gauges, raftGauges, + middleware.NewRPCGauges, } // TODO(ffmmm): conditionally add only leader specific metrics to gauges, counters, summaries, etc From 523e054c81368634f95452589accb357b7f09cc4 Mon Sep 17 00:00:00 2001 From: Eric Date: Fri, 25 Mar 2022 09:30:30 -0400 Subject: [PATCH 019/785] assorted changes required to remove gogo --- .circleci/config.yml | 13 +++++---- agent/http.go | 6 ++-- agent/structs/protobuf_compat.go | 17 ----------- agent/structs/structs.go | 32 +++++++++++++++++++-- build-support/scripts/proto-gen-no-gogo.sh | 33 ++++++++++++++++------ proto/pbcommon/common.gen.go | 12 ++++++++ proto/pbcommon/common.go | 29 ------------------- proto/pbcommon/common.pb.go | 6 ++++ proto/pbcommon/common.proto | 6 ++++ 9 files changed, 90 insertions(+), 64 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 64395e67b..e637d0d08 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -232,6 +232,7 @@ jobs: wget https://github.com/protocolbuffers/protobuf/releases/download/v3.12.3/protoc-3.12.3-linux-x86_64.zip sudo unzip -d /usr/local protoc-*.zip sudo chmod +x /usr/local/bin/protoc + sudo chmod -R a+Xr /usr/local/include/google/ rm protoc-*.zip - run: name: Install gogo/protobuf @@ -239,6 +240,8 @@ jobs: gogo_version=$(go list -m github.com/gogo/protobuf | awk '{print $2}') go install -v github.com/hashicorp/protoc-gen-go-binary@master go install -v github.com/gogo/protobuf/protoc-gen-gofast@${gogo_version} + go install -v github.com/favadi/protoc-go-inject-tag@v1.3.0 + go install -v github.com/golang/protobuf/protoc-gen-go@v1.3.5 - run: command: make --always-make proto @@ -278,7 +281,7 @@ jobs: fi - run-go-test-full: go_test_flags: 'if ! [[ "$CIRCLE_BRANCH" =~ ^main$|^release/ ]]; then export GO_TEST_FLAGS="-short"; fi' - + go-test: docker: - image: *GOLANG_IMAGE @@ -330,9 +333,9 @@ jobs: path: /tmp/jsonfile - run: *notify-slack-failure - # go-test-32bit is to catch problems where 64-bit ints must be 64-bit aligned + # go-test-32bit is to catch problems where 64-bit ints must be 64-bit aligned # to use them with sync/atomic. See https://golang.org/pkg/sync/atomic/#pkg-note-BUG. - # Running tests with GOARCH=386 seems to be the best way to detect this + # Running tests with GOARCH=386 seems to be the best way to detect this # problem. Only runs tests that are -short to limit the time we spend checking # for these bugs. go-test-32bit: @@ -747,11 +750,11 @@ jobs: if ! git diff --quiet --exit-code HEAD^! ui/; then git config --local user.email "github-team-consul-core@hashicorp.com" git config --local user.name "hc-github-team-consul-core" - + # -B resets the CI branch to main which may diverge history # but we will force push anyways. 
git checkout -B ci/main-assetfs-build main - + short_sha=$(git rev-parse --short HEAD) git add agent/uiserver/bindata_assetfs.go git commit -m "auto-updated agent/uiserver/bindata_assetfs.go from commit ${short_sha}" diff --git a/agent/http.go b/agent/http.go index 90885bc3f..e039c2c7c 100644 --- a/agent/http.go +++ b/agent/http.go @@ -31,7 +31,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto/pbcommongogo" + "github.com/hashicorp/consul/proto/pbcommon" ) var HTTPSummaries = []prometheus.SummaryDefinition{ @@ -781,7 +781,7 @@ func setLastContact(resp http.ResponseWriter, last time.Duration) { } // setMeta is used to set the query response meta data -func setMeta(resp http.ResponseWriter, m structs.QueryMetaCompat) error { +func setMeta(resp http.ResponseWriter, m *structs.QueryMeta) error { lastContact, err := m.GetLastContact() if err != nil { return err @@ -981,7 +981,7 @@ func (s *HTTPHandlers) parseConsistency(resp http.ResponseWriter, req *http.Requ } // parseConsistencyReadRequest is used to parse the ?consistent query param. -func parseConsistencyReadRequest(resp http.ResponseWriter, req *http.Request, b *pbcommongogo.ReadRequest) { +func parseConsistencyReadRequest(resp http.ResponseWriter, req *http.Request, b *pbcommon.ReadRequest) { query := req.URL.Query() if _, ok := query["consistent"]; ok { b.RequireConsistent = true diff --git a/agent/structs/protobuf_compat.go b/agent/structs/protobuf_compat.go index 93358e8e3..143bd97e3 100644 --- a/agent/structs/protobuf_compat.go +++ b/agent/structs/protobuf_compat.go @@ -32,23 +32,6 @@ type QueryOptionsCompat interface { SetFilter(string) } -// QueryMetaCompat is the interface that both the structs.QueryMeta -// and the proto/pbcommongogo.QueryMeta structs need to implement so that they -// can be operated on interchangeably -type QueryMetaCompat interface { - GetLastContact() (time.Duration, error) - SetLastContact(time.Duration) - GetKnownLeader() bool - SetKnownLeader(bool) - GetIndex() uint64 - SetIndex(uint64) - GetConsistencyLevel() string - SetConsistencyLevel(string) - GetBackend() QueryBackend - GetResultsFilteredByACLs() bool - SetResultsFilteredByACLs(bool) -} - // GetToken helps implement the QueryOptionsCompat interface // Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetToken() string { diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 9efd02c9b..ca4a7c849 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "github.com/golang/protobuf/proto" "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" @@ -2575,13 +2576,17 @@ type ProtoMarshaller interface { func EncodeProtoInterface(t MessageType, message interface{}) ([]byte, error) { if marshaller, ok := message.(ProtoMarshaller); ok { + return EncodeProtoGogo(t, marshaller) + } + + if marshaller, ok := message.(proto.Message); ok { return EncodeProto(t, marshaller) } return nil, fmt.Errorf("message does not implement the ProtoMarshaller interface: %T", message) } -func EncodeProto(t MessageType, message ProtoMarshaller) ([]byte, error) { +func EncodeProtoGogo(t MessageType, message ProtoMarshaller) ([]byte, error) { data := make([]byte, message.Size()+1) data[0] = uint8(t) if _, err := message.MarshalTo(data[1:]); err != nil { @@ -2590,7 +2595,24 @@ func EncodeProto(t MessageType, message 
ProtoMarshaller) ([]byte, error) { return data, nil } -func DecodeProto(buf []byte, out ProtoMarshaller) error { +func EncodeProto(t MessageType, pb proto.Message) ([]byte, error) { + data := make([]byte, proto.Size(pb)+1) + data[0] = uint8(t) + + buf := proto.NewBuffer(data[1:1]) + if err := buf.Marshal(pb); err != nil { + return nil, err + } + + return data, nil +} + +func DecodeProto(buf []byte, pb proto.Message) error { + // Note that this assumes the leading byte indicating the type as already been stripped off. + return proto.Unmarshal(buf, pb) +} + +func DecodeProtoGogo(buf []byte, out ProtoMarshaller) error { // Note that this assumes the leading byte indicating the type as already been stripped off. return out.Unmarshal(buf) } @@ -2720,3 +2742,9 @@ func TimeToProto(s time.Time) *timestamp.Timestamp { ret, _ := ptypes.TimestampProto(s) return ret } + +// IsZeroProtoTime returns true if the time is the minimum protobuf timestamp +// (the Unix epoch). +func IsZeroProtoTime(t *timestamp.Timestamp) bool { + return t.Seconds == 0 && t.Nanos == 0 +} diff --git a/build-support/scripts/proto-gen-no-gogo.sh b/build-support/scripts/proto-gen-no-gogo.sh index c736d49de..0585a02d7 100755 --- a/build-support/scripts/proto-gen-no-gogo.sh +++ b/build-support/scripts/proto-gen-no-gogo.sh @@ -68,6 +68,8 @@ function main { return 1 fi + go mod download + local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") @@ -77,7 +79,7 @@ function main { local proto_go_path=${proto_path%%.proto}.pb.go local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go - + local go_proto_out="paths=source_relative" if is_set "${grpc}" then @@ -96,10 +98,17 @@ function main { # How we run protoc probably needs some documentation. # - # This is the path to where + # This is the path to where # -I="${golang_proto_path}/protobuf" \ local -i ret=0 status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} (NO GOGO)" + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" debug_run protoc \ -I="${golang_proto_path}" \ -I="${golang_proto_mod_path}" \ @@ -107,9 +116,22 @@ function main { --go_out="${go_proto_out}${SOURCE_DIR}" \ --go-binary_out="${SOURCE_DIR}" \ "${proto_path}" + + if test $? -ne 0 + then + err "Failed to run protoc for ${proto_path}" + return 1 + fi + debug_run protoc-go-inject-tag \ -input="${proto_go_path}" + if test $? -ne 0 + then + err "Failed to run protoc-go-inject-tag for ${proto_path}" + return 1 + fi + echo "debug_run protoc \ -I=\"${golang_proto_path}\" \ -I=\"${golang_proto_mod_path}\" \ @@ -117,11 +139,6 @@ function main { --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ --go-binary_out=\"${SOURCE_DIR}\" \ \"${proto_path}\"" - if test $? 
-ne 0 - then - err "Failed to generate outputs from ${proto_path}" - return 1 - fi BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') if test -n "${BUILD_TAGS}" @@ -129,7 +146,7 @@ function main { echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" cat "${proto_go_path}" >> "${proto_go_path}.new" mv "${proto_go_path}.new" "${proto_go_path}" - + echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" diff --git a/proto/pbcommon/common.gen.go b/proto/pbcommon/common.gen.go index 867e8089c..636931f81 100644 --- a/proto/pbcommon/common.gen.go +++ b/proto/pbcommon/common.gen.go @@ -68,3 +68,15 @@ func RaftIndexFromStructs(t *structs.RaftIndex, s *RaftIndex) { s.CreateIndex = t.CreateIndex s.ModifyIndex = t.ModifyIndex } +func WriteRequestToStructs(s *WriteRequest, t *structs.WriteRequest) { + if s == nil { + return + } + t.Token = s.Token +} +func WriteRequestFromStructs(t *structs.WriteRequest, s *WriteRequest) { + if s == nil { + return + } + s.Token = t.Token +} diff --git a/proto/pbcommon/common.go b/proto/pbcommon/common.go index fbff7e4ae..713089e48 100644 --- a/proto/pbcommon/common.go +++ b/proto/pbcommon/common.go @@ -88,35 +88,6 @@ func (q *QueryOptions) SetFilter(filter string) { q.Filter = filter } -// SetLastContact is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetLastContact(lastContact time.Duration) { - q.LastContact = structs.DurationToProto(lastContact) -} - -// SetKnownLeader is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetKnownLeader(knownLeader bool) { - q.KnownLeader = knownLeader -} - -// SetIndex is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetIndex(index uint64) { - q.Index = index -} - -// SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { - q.ConsistencyLevel = consistencyLevel -} - -func (q *QueryMeta) GetBackend() structs.QueryBackend { - return structs.QueryBackend(0) -} - -// SetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetResultsFilteredByACLs(v bool) { - q.ResultsFilteredByACLs = v -} - // WriteRequest only applies to writes, always false // // IsRead implements structs.RPCInfo diff --git a/proto/pbcommon/common.pb.go b/proto/pbcommon/common.pb.go index 88a2d55b6..a04042cac 100644 --- a/proto/pbcommon/common.pb.go +++ b/proto/pbcommon/common.pb.go @@ -120,6 +120,12 @@ func (m *TargetDatacenter) GetDatacenter() string { return "" } +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.WriteRequest +// output=common.gen.go +// name=Structs +// ignore-fields=state,sizeCache,unknownFields type WriteRequest struct { // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. diff --git a/proto/pbcommon/common.proto b/proto/pbcommon/common.proto index 19efd232b..e21b677f5 100644 --- a/proto/pbcommon/common.proto +++ b/proto/pbcommon/common.proto @@ -29,6 +29,12 @@ message TargetDatacenter { string Datacenter = 1; } +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.WriteRequest +// output=common.gen.go +// name=Structs +// ignore-fields=state,sizeCache,unknownFields message WriteRequest { // Token is the ACL token ID. 
If not provided, the 'anonymous' // token is assumed for backwards compatibility. From 906ac6576bf8ea01c84cd8b08cce46944ef90f74 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Fri, 25 Mar 2022 12:34:59 -0700 Subject: [PATCH 020/785] Fixups for error messages from ACL Errors (#12620) Fixups for error messages from ACL Errors Alter error messages to be more verbose and explanatory, something like: Permission denied: token with AccessorID '8a2d52a0-6b41-7077-8374-09d4fafa2d30 ' lacks permission 'service:read' on "foobar" on "foobar" in partition "foo" in namespace "bar" Signed-off-by: Mark Anderson --- acl/errors.go | 4 ++-- acl/errors_oss.go | 2 +- acl/errors_test.go | 4 ++-- acl/testing.go | 9 +++++++-- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/acl/errors.go b/acl/errors.go index c2363e2a1..7c88704b3 100644 --- a/acl/errors.go +++ b/acl/errors.go @@ -98,9 +98,9 @@ func (e PermissionDeniedError) Error() string { } if e.Accessor == "" { - message.WriteString(": provided accessor") + message.WriteString(": provided token") } else { - fmt.Fprintf(&message, ": accessor '%s'", e.Accessor) + fmt.Fprintf(&message, ": token with AccessorID '%s'", e.Accessor) } fmt.Fprintf(&message, " lacks permission '%s:%s'", e.Resource, e.AccessLevel.String()) diff --git a/acl/errors_oss.go b/acl/errors_oss.go index 9d605b34e..ef8dc993c 100644 --- a/acl/errors_oss.go +++ b/acl/errors_oss.go @@ -14,5 +14,5 @@ func NewResourceDescriptor(name string, _ *AuthorizerContext) ResourceDescriptor } func (od *ResourceDescriptor) ToString() string { - return od.Name + return "\"" + od.Name + "\"" } diff --git a/acl/errors_test.go b/acl/errors_test.go index 5b73a156e..7c651f1ec 100644 --- a/acl/errors_test.go +++ b/acl/errors_test.go @@ -29,11 +29,11 @@ func TestPermissionDeniedError(t *testing.T) { }, { err: PermissionDeniedByACL(&auth1, nil, ResourceService, AccessRead, "foobar"), - expected: "Permission denied: provided accessor lacks permission 'service:read' on foobar", + expected: "Permission denied: provided token lacks permission 'service:read' on \"foobar\"", }, { err: PermissionDeniedByACLUnnamed(&auth1, nil, ResourceService, AccessRead), - expected: "Permission denied: provided accessor lacks permission 'service:read'", + expected: "Permission denied: provided token lacks permission 'service:read'", }, } diff --git a/acl/testing.go b/acl/testing.go index 01399c630..303bd1de6 100644 --- a/acl/testing.go +++ b/acl/testing.go @@ -1,6 +1,7 @@ package acl import ( + "fmt" "github.com/stretchr/testify/require" "regexp" "testing" @@ -23,20 +24,24 @@ func RequirePermissionDeniedError(t testing.TB, err error, authz Authorizer, _ * func RequirePermissionDeniedMessage(t testing.TB, msg string, authz interface{}, _ *AuthorizerContext, resource Resource, accessLevel AccessLevel, resourceID string) { require.NotEmpty(t, msg, "expected non-empty error message") + baseRegex := ` lacks permission '(\S*):(\S*)' on \"([^\"]*)\"(?: in partition \"([^\"]*)\" in namespace \"([^\"]*)\")?\s*$` + var resourceIDFound string if authz == nil { - expr := "^Permission denied" + `: provided accessor lacks permission '(\S*):(\S*)' on (.*)\s*$` + expr := "^Permission denied" + `: provided token` + baseRegex re, _ := regexp.Compile(expr) matched := re.FindStringSubmatch(msg) + require.NotNil(t, matched, fmt.Sprintf("RE %q didn't match %q", expr, msg)) require.Equal(t, string(resource), matched[1], "resource") require.Equal(t, accessLevel.String(), matched[2], "access level") resourceIDFound = matched[3] } else { - expr := 
"^Permission denied" + `: accessor '(\S*)' lacks permission '(\S*):(\S*)' on (.*)\s*$` + expr := "^Permission denied" + `: token with AccessorID '(\S*)'` + baseRegex re, _ := regexp.Compile(expr) matched := re.FindStringSubmatch(msg) + require.NotNil(t, matched, fmt.Sprintf("RE %q didn't match %q", expr, msg)) require.Equal(t, extractAccessorID(authz), matched[1], "auth") require.Equal(t, string(resource), matched[2], "resource") require.Equal(t, accessLevel.String(), matched[3], "access level") From 9d3df6b08bd8b18e6a27a69e057c7cf87ad1f951 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Fri, 25 Mar 2022 13:00:14 -0700 Subject: [PATCH 021/785] Update consul-enterprise.mdx (#12622) --- .../deployment-configurations/consul-enterprise.mdx | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/content/docs/k8s/installation/deployment-configurations/consul-enterprise.mdx b/website/content/docs/k8s/installation/deployment-configurations/consul-enterprise.mdx index f27bf3baf..cd49cd238 100644 --- a/website/content/docs/k8s/installation/deployment-configurations/consul-enterprise.mdx +++ b/website/content/docs/k8s/installation/deployment-configurations/consul-enterprise.mdx @@ -39,7 +39,6 @@ Add the name and key of the secret you just created to `server.enterpriseLicense ```yaml global: image: 'hashicorp/consul-enterprise:1.10.0-ent' -server: enterpriseLicense: secretName: 'consul-ent-license' secretKey: 'key' @@ -57,7 +56,6 @@ If the version of Consul is < 1.10, use the following config with the name and k ```yaml global: image: 'hashicorp/consul-enterprise:1.8.3-ent' -server: enterpriseLicense: secretName: 'consul-ent-license' secretKey: 'key' From f531f1e87de4f72d85e9b21614046e41994285f7 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 25 Mar 2022 15:55:40 -0500 Subject: [PATCH 022/785] regenerate rpc glue stubs in protobuf files using comments (#12625) --- build-support/scripts/proto-gen-no-gogo.sh | 24 +- internal/tools/proto-gen-rpc-glue/go.mod | 3 + internal/tools/proto-gen-rpc-glue/go.sum | 0 internal/tools/proto-gen-rpc-glue/main.go | 375 +++++++++++++++++++++ 4 files changed, 395 insertions(+), 7 deletions(-) create mode 100644 internal/tools/proto-gen-rpc-glue/go.mod create mode 100644 internal/tools/proto-gen-rpc-glue/go.sum create mode 100644 internal/tools/proto-gen-rpc-glue/main.go diff --git a/build-support/scripts/proto-gen-no-gogo.sh b/build-support/scripts/proto-gen-no-gogo.sh index 0585a02d7..50d51be0d 100755 --- a/build-support/scripts/proto-gen-no-gogo.sh +++ b/build-support/scripts/proto-gen-no-gogo.sh @@ -79,6 +79,7 @@ function main { local proto_go_path=${proto_path%%.proto}.pb.go local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go + local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go local go_proto_out="paths=source_relative" if is_set "${grpc}" @@ -132,13 +133,13 @@ function main { return 1 fi - echo "debug_run protoc \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') if test -n "${BUILD_TAGS}" @@ -152,6 +153,15 @@ function main { mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" fi + # note: this has to run after we fix up the build tags above + rm -f "${proto_go_rpcglue_path}" + debug_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" + if test $? 
-ne 0 + then + err "Failed to generate consul rpc glue outputs from ${proto_path}" + return 1 + fi + return 0 } diff --git a/internal/tools/proto-gen-rpc-glue/go.mod b/internal/tools/proto-gen-rpc-glue/go.mod new file mode 100644 index 000000000..26535c929 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/consul/internal/tools/proto-gen-rpc-glue + +go 1.17 diff --git a/internal/tools/proto-gen-rpc-glue/go.sum b/internal/tools/proto-gen-rpc-glue/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/internal/tools/proto-gen-rpc-glue/main.go b/internal/tools/proto-gen-rpc-glue/main.go new file mode 100644 index 000000000..a666c9fa6 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/main.go @@ -0,0 +1,375 @@ +package main + +import ( + "bytes" + "errors" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/token" + "log" + "os" + "os/exec" + "strings" +) + +var ( + flagPath = flag.String("path", "", "path of file to load") + verbose = flag.Bool("v", false, "verbose output") +) + +const ( + annotationPrefix = "@consul-rpc-glue:" + outputFileSuffix = ".rpcglue.pb.go" +) + +func main() { + flag.Parse() + + log.SetFlags(0) + + if *flagPath == "" { + log.Fatal("missing required -path argument") + } + + if err := run(*flagPath); err != nil { + log.Fatal(err) + } +} + +func run(path string) error { + fi, err := os.Stat(path) + if err != nil { + return err + } + + if fi.IsDir() { + return fmt.Errorf("argument must be a file: %s", path) + } + + if !strings.HasSuffix(path, ".pb.go") { + return fmt.Errorf("file must end with .pb.go: %s", path) + } + + if err := processFile(path); err != nil { + return fmt.Errorf("error processing file %q: %v", path, err) + } + + return nil +} + +func processFile(path string) error { + if *verbose { + log.Printf("visiting file %q", path) + } + + fset := token.NewFileSet() + tree, err := parser.ParseFile(fset, path, nil, parser.ParseComments) + if err != nil { + return err + } + + v := visitor{} + ast.Walk(&v, tree) + if err := v.Err(); err != nil { + return err + } + + if len(v.Types) == 0 { + return nil + } + + if *verbose { + log.Printf("Package: %s", v.Package) + log.Printf("BuildTags: %v", v.BuildTags) + log.Println() + for _, typ := range v.Types { + log.Printf("Type: %s", typ.Name) + ann := typ.Annotation + if ann.ReadRequest != "" { + log.Printf(" ReadRequest from %s", ann.ReadRequest) + } + if ann.WriteRequest != "" { + log.Printf(" WriteRequest from %s", ann.WriteRequest) + } + if ann.TargetDatacenter != "" { + log.Printf(" TargetDatacenter from %s", ann.TargetDatacenter) + } + } + } + + // generate output + + var buf bytes.Buffer + + if len(v.BuildTags) > 0 { + for _, line := range v.BuildTags { + buf.WriteString(line + "\n") + } + buf.WriteString("\n") + } + buf.WriteString("// Code generated by proto-gen-rpc-glue. 
DO NOT EDIT.\n\n") + buf.WriteString("package " + v.Package + "\n") + buf.WriteString(` +import ( + "time" +) + +`) + for _, typ := range v.Types { + if typ.Annotation.WriteRequest != "" { + buf.WriteString(fmt.Sprintf(` +func (msg *%[1]s) AllowStaleRead() bool { + return false +} + +func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.%[2]s == nil { + return false, nil + } + return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +func (msg *%[1]s) IsRead() bool { + return false +} + +func (msg *%[1]s) SetTokenSecret(s string) { + msg.%[2]s.SetTokenSecret(s) +} + +func (msg *%[1]s) TokenSecret() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.TokenSecret() +} + +func (msg *%[1]s) Token() string { + if msg.%[2]s == nil { + return "" + } + return msg.%[2]s.Token +} +`, typ.Name, typ.Annotation.WriteRequest)) + } + if typ.Annotation.ReadRequest != "" { + buf.WriteString(fmt.Sprintf(` +func (msg *%[1]s) IsRead() bool { + return true +} + +func (msg *%[1]s) AllowStaleRead() bool { + return msg.%[2]s.AllowStaleRead() +} + +func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.%[2]s == nil { + return false, nil + } + return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +func (msg *%[1]s) SetTokenSecret(s string) { + msg.%[2]s.SetTokenSecret(s) +} + +func (msg *%[1]s) TokenSecret() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.TokenSecret() +} + +func (msg *%[1]s) Token() string { + if msg.%[2]s == nil { + return "" + } + return msg.%[2]s.Token +} +`, typ.Name, typ.Annotation.ReadRequest)) + } + if typ.Annotation.TargetDatacenter != "" { + buf.WriteString(fmt.Sprintf(` +func (msg *%[1]s) RequestDatacenter() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.GetDatacenter() +} +`, typ.Name, typ.Annotation.TargetDatacenter)) + } + } + + // write to disk + outFile := strings.TrimSuffix(path, ".pb.go") + outputFileSuffix + if err := os.WriteFile(outFile, buf.Bytes(), 0644); err != nil { + return err + } + + // clean up + cmd := exec.Command("gofmt", "-s", "-w", outFile) + cmd.Stdout = nil + cmd.Stderr = os.Stderr + cmd.Stdin = nil + if err := cmd.Run(); err != nil { + return fmt.Errorf("error running 'gofmt -s -w %q': %v", outFile, err) + } + + return nil +} + +type TypeInfo struct { + Name string + Annotation Annotation +} + +type visitor struct { + Package string + BuildTags []string + Types []TypeInfo + Errs []error +} + +func (v *visitor) Err() error { + switch len(v.Errs) { + case 0: + return nil + case 1: + return v.Errs[0] + default: + // + var s []string + for _, e := range v.Errs { + s = append(s, e.Error()) + } + return errors.New(strings.Join(s, "; ")) + } +} + +var _ ast.Visitor = (*visitor)(nil) + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + return v + } + + switch x := node.(type) { + case *ast.File: + v.Package = x.Name.Name + v.BuildTags = getRawBuildTags(x) + for _, d := range x.Decls { + gd, ok := d.(*ast.GenDecl) + if !ok { + continue + } + + if gd.Doc == nil { + continue + } else if len(gd.Specs) != 1 { + continue + } + spec := gd.Specs[0] + + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + + ann, err := getAnnotation(gd.Doc.List) + if err != nil { + v.Errs = append(v.Errs, err) + continue + } else if ann.IsZero() { + continue + 
} + + v.Types = append(v.Types, TypeInfo{ + Name: typeSpec.Name.Name, + Annotation: ann, + }) + + } + } + return v +} + +type Annotation struct { + ReadRequest string + WriteRequest string + TargetDatacenter string +} + +func (a Annotation) IsZero() bool { + return a == Annotation{} +} + +func getAnnotation(doc []*ast.Comment) (Annotation, error) { + raw, ok := getRawStructAnnotation(doc) + if !ok { + return Annotation{}, nil + } + + var ann Annotation + + parts := strings.Split(raw, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + switch { + case part == "ReadRequest": + ann.ReadRequest = "ReadRequest" + case strings.HasPrefix(part, "ReadRequest"): + ann.TargetDatacenter = strings.TrimPrefix(part, "ReadRequest") + + case part == "WriteRequest": + ann.WriteRequest = "WriteRequest" + case strings.HasPrefix(part, "WriteRequest"): + ann.TargetDatacenter = strings.TrimPrefix(part, "WriteRequest") + + case part == "TargetDatacenter": + ann.TargetDatacenter = "TargetDatacenter" + case strings.HasPrefix(part, "TargetDatacenter"): + ann.TargetDatacenter = strings.TrimPrefix(part, "TargetDatacenter") + + default: + return Annotation{}, fmt.Errorf("unexpected annotation part: %s", part) + } + } + + return ann, nil +} + +func getRawStructAnnotation(doc []*ast.Comment) (string, bool) { + for _, line := range doc { + text := strings.TrimSpace(strings.TrimLeft(line.Text, "/")) + + ann := strings.TrimSpace(strings.TrimPrefix(text, annotationPrefix)) + + if text != ann { + return ann, true + } + } + return "", false +} + +func getRawBuildTags(file *ast.File) []string { + // build tags are always the first group, at the very top + if len(file.Comments) == 0 { + return nil + } + cg := file.Comments[0] + + var out []string + for _, line := range cg.List { + text := strings.TrimSpace(strings.TrimLeft(line.Text, "/")) + + if !strings.HasPrefix(text, "go:build ") && !strings.HasPrefix(text, "+build") { + break // stop at first non-build-tag + } + + out = append(out, line.Text) + } + + return out +} From 8bd05b1fb0908fa9c61f31546b8ba132cebf02f7 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Fri, 25 Mar 2022 18:40:51 -0700 Subject: [PATCH 023/785] Fix logic for website checker (#12627) Workflow should run when no docs/cherry-pick label && no pr/docs-label --- .github/workflows/website-checker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/website-checker.yml b/.github/workflows/website-checker.yml index 8e4360b96..17d69dc8c 100644 --- a/.github/workflows/website-checker.yml +++ b/.github/workflows/website-checker.yml @@ -23,7 +23,7 @@ jobs: website-check: # If there's already a `type/docs-cherrypick` label or an explicit `pr/no-docs` label, we ignore this check if: >- - !contains(github.event.pull_request.labels.*.name, 'type/docs-cherrypick') || + !contains(github.event.pull_request.labels.*.name, 'type/docs-cherrypick') && !contains(github.event.pull_request.labels.*.name, 'pr/no-docs') runs-on: ubuntu-latest From 7010948f3111abf14988b291fceca2fcb32f90e1 Mon Sep 17 00:00:00 2001 From: driesgroblerw <52571246+driesgroblerw@users.noreply.github.com> Date: Mon, 28 Mar 2022 14:43:25 +0200 Subject: [PATCH 024/785] Updated the link to acl-policies https://www.consul.io/docs/security/acl was pointing to https://www.consul.io/docs/security/acl/policies (broken) and is now pointing to https://www.consul.io/docs/security/acl/acl-policies (working) --- website/content/docs/security/acl/index.mdx | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/security/acl/index.mdx b/website/content/docs/security/acl/index.mdx index 197d63250..2c05964a8 100644 --- a/website/content/docs/security/acl/index.mdx +++ b/website/content/docs/security/acl/index.mdx @@ -54,7 +54,7 @@ In addition to the rules that authenticate access to services, several attribute Refer to the following topics for details about policies: -- [Policies](/docs/security/acl/policies) +- [Policies](/docs/security/acl/acl-policies) - [ACL policy command line](/commands/acl/policy) - [ACL policy API](/api-docs/acl/policies) From 8b9387404d4ab157aef7fc66d5fce737484c4c3d Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Mon, 28 Mar 2022 10:10:52 -0400 Subject: [PATCH 025/785] Add example of goimports -local --- .github/CONTRIBUTING.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index a2e713e00..c2b2b9b05 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -85,6 +85,25 @@ To build Consul, run `make dev`. In a few moments, you'll have a working Go provides [tooling to apply consistent code formatting](https://golang.org/doc/effective_go#formatting). If you make any changes to the code, run `gofmt -s -w` to automatically format the code according to Go standards. +##### Organizing Imports + +Group imports using `goimports -local github.com/hashicorp/consul/` to keep [local packages](https://github.com/golang/tools/commit/ed69e84b1518b5857a9f4e01d1f9cefdcc45246e) in their own section. + +Example: +``` +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-cleanhttp" + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/lib" +) +``` + #### Updating Go Module Dependencies If a dependency is added or change, run `go mod tidy` to update `go.mod` and `go.sum`. @@ -148,4 +167,4 @@ When you're ready to submit a pull request: Some common changes that many PRs require are documented through checklists as `checklist-*.md` files in [docs/](../docs/), including: -- [Adding config fields](../docs/config/checklist-adding-config-fields.md) \ No newline at end of file +- [Adding config fields](../docs/config/checklist-adding-config-fields.md) From c7f4c48be57ab8e8deeda3635e5a9b5b4944c13f Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 28 Mar 2022 09:40:56 -0500 Subject: [PATCH 026/785] proto-gen-rpc-glue: fix behavior of renamed fields (#12633) --- internal/tools/proto-gen-rpc-glue/.gitignore | 1 + internal/tools/proto-gen-rpc-glue/e2e/go.mod | 11 + internal/tools/proto-gen-rpc-glue/e2e/go.sum | 657 ++++++++++++++++++ .../tools/proto-gen-rpc-glue/e2e/source.pb.go | 32 + .../e2e/source.rpcglue.pb.go.golden | 156 +++++ internal/tools/proto-gen-rpc-glue/go.mod | 8 + internal/tools/proto-gen-rpc-glue/go.sum | 11 + internal/tools/proto-gen-rpc-glue/main.go | 12 +- .../tools/proto-gen-rpc-glue/main_test.go | 49 ++ 9 files changed, 931 insertions(+), 6 deletions(-) create mode 100644 internal/tools/proto-gen-rpc-glue/.gitignore create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/go.mod create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/go.sum create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/source.pb.go create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden create mode 100644 internal/tools/proto-gen-rpc-glue/main_test.go diff --git a/internal/tools/proto-gen-rpc-glue/.gitignore b/internal/tools/proto-gen-rpc-glue/.gitignore new file mode 100644 index 000000000..14130d475 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/.gitignore @@ -0,0 +1 @@ +./e2e/source.rpcglue.pb.go diff --git a/internal/tools/proto-gen-rpc-glue/e2e/go.mod b/internal/tools/proto-gen-rpc-glue/e2e/go.mod new file mode 100644 index 000000000..b4b31db76 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/go.mod @@ -0,0 +1,11 @@ +module github.com/hashicorp/consul/internal/tools/proto-gen-rpc-glue/e2e + +go 1.13 + +replace github.com/hashicorp/consul => ../../../.. + +replace github.com/hashicorp/consul/api => ../../../../api + +replace github.com/hashicorp/consul/sdk => ../../../../sdk + +require github.com/hashicorp/consul v1.11.4 diff --git a/internal/tools/proto-gen-rpc-glue/e2e/go.sum b/internal/tools/proto-gen-rpc-glue/e2e/go.sum new file mode 100644 index 000000000..a0f384377 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/go.sum @@ -0,0 +1,657 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix 
v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.42.34/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod 
h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 h1:Com/5n/omNSBusX11zdyIYtidiqewLIanchbm//McZA= +github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4/go.mod h1:vWEAHAeAqfOwB3pSgHMQpIu8VH1jL+Ltg54Tw0wt/NI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs= +github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= +github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.9.1/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-raftchunking v0.6.2/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1JF+7lWqIwCFSb08fX0= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo= +github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 
h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto= +github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft-autopilot v0.1.5 h1:onEfMH5uHVdXQqtas36zXUHEZxLdsJVu/nXHLcLdL1I= +github.com/hashicorp/raft-autopilot v0.1.5/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= +github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= +github.com/hashicorp/raft-boltdb/v2 v2.2.0/go.mod h1:SgPUD5TP20z/bswEr210SnkUFvQP/YjKV95aaiTbeMQ= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493 h1:brI5vBRUlAlM34VFmnLPwjnCL/FxAJp9XvOdX6Zt+XE= +github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 
+github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI= +github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU= +github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/reflectwalk v1.0.0/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod 
h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod 
h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= +github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools 
v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog 
v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go b/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go new file mode 100644 index 000000000..ed47e1d09 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go @@ -0,0 +1,32 @@ +//go:build example +// +build example + +package e2e + +import "github.com/hashicorp/consul/proto/pbcommon" + +// @consul-rpc-glue: WriteRequest,TargetDatacenter +type ExampleWriteRequest struct { + Value string + WriteRequest *pbcommon.WriteRequest + TargetDatacenter *pbcommon.TargetDatacenter +} + +// @consul-rpc-glue: ReadRequest,TargetDatacenter +type ExampleReadRequest struct { + Value string + ReadRequest *pbcommon.ReadRequest + TargetDatacenter *pbcommon.TargetDatacenter +} + +// @consul-rpc-glue: WriteRequest=AltWriteRequest +type AltExampleWriteRequest struct { + Value int + AltWriteRequest *pbcommon.WriteRequest +} + +// @consul-rpc-glue: ReadRequest=AltReadRequest +type AltExampleReadRequest struct { + Value int + AltReadRequest *pbcommon.ReadRequest +} diff --git a/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden b/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden new file mode 100644 index 000000000..7f87f5477 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden @@ -0,0 +1,156 @@ +//go:build example +// +build example + +// Code generated by proto-gen-rpc-glue. DO NOT EDIT. 
+ +package e2e + +import ( + "time" +) + +func (msg *ExampleWriteRequest) AllowStaleRead() bool { + return false +} + +func (msg *ExampleWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.WriteRequest == nil { + return false, nil + } + return msg.WriteRequest.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +func (msg *ExampleWriteRequest) IsRead() bool { + return false +} + +func (msg *ExampleWriteRequest) SetTokenSecret(s string) { + msg.WriteRequest.SetTokenSecret(s) +} + +func (msg *ExampleWriteRequest) TokenSecret() string { + if msg == nil || msg.WriteRequest == nil { + return "" + } + return msg.WriteRequest.TokenSecret() +} + +func (msg *ExampleWriteRequest) Token() string { + if msg.WriteRequest == nil { + return "" + } + return msg.WriteRequest.Token +} + +func (msg *ExampleWriteRequest) RequestDatacenter() string { + if msg == nil || msg.TargetDatacenter == nil { + return "" + } + return msg.TargetDatacenter.GetDatacenter() +} + +func (msg *ExampleReadRequest) IsRead() bool { + return true +} + +func (msg *ExampleReadRequest) AllowStaleRead() bool { + return msg.ReadRequest.AllowStaleRead() +} + +func (msg *ExampleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.ReadRequest == nil { + return false, nil + } + return msg.ReadRequest.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +func (msg *ExampleReadRequest) SetTokenSecret(s string) { + msg.ReadRequest.SetTokenSecret(s) +} + +func (msg *ExampleReadRequest) TokenSecret() string { + if msg == nil || msg.ReadRequest == nil { + return "" + } + return msg.ReadRequest.TokenSecret() +} + +func (msg *ExampleReadRequest) Token() string { + if msg.ReadRequest == nil { + return "" + } + return msg.ReadRequest.Token +} + +func (msg *ExampleReadRequest) RequestDatacenter() string { + if msg == nil || msg.TargetDatacenter == nil { + return "" + } + return msg.TargetDatacenter.GetDatacenter() +} + +func (msg *AltExampleWriteRequest) AllowStaleRead() bool { + return false +} + +func (msg *AltExampleWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.AltWriteRequest == nil { + return false, nil + } + return msg.AltWriteRequest.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +func (msg *AltExampleWriteRequest) IsRead() bool { + return false +} + +func (msg *AltExampleWriteRequest) SetTokenSecret(s string) { + msg.AltWriteRequest.SetTokenSecret(s) +} + +func (msg *AltExampleWriteRequest) TokenSecret() string { + if msg == nil || msg.AltWriteRequest == nil { + return "" + } + return msg.AltWriteRequest.TokenSecret() +} + +func (msg *AltExampleWriteRequest) Token() string { + if msg.AltWriteRequest == nil { + return "" + } + return msg.AltWriteRequest.Token +} + +func (msg *AltExampleReadRequest) IsRead() bool { + return true +} + +func (msg *AltExampleReadRequest) AllowStaleRead() bool { + return msg.AltReadRequest.AllowStaleRead() +} + +func (msg *AltExampleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.AltReadRequest == nil { + return false, nil + } + return msg.AltReadRequest.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +func (msg *AltExampleReadRequest) SetTokenSecret(s string) { + msg.AltReadRequest.SetTokenSecret(s) +} + +func (msg *AltExampleReadRequest) TokenSecret() 
string { + if msg == nil || msg.AltReadRequest == nil { + return "" + } + return msg.AltReadRequest.TokenSecret() +} + +func (msg *AltExampleReadRequest) Token() string { + if msg.AltReadRequest == nil { + return "" + } + return msg.AltReadRequest.Token +} diff --git a/internal/tools/proto-gen-rpc-glue/go.mod b/internal/tools/proto-gen-rpc-glue/go.mod index 26535c929..1ae6e1d99 100644 --- a/internal/tools/proto-gen-rpc-glue/go.mod +++ b/internal/tools/proto-gen-rpc-glue/go.mod @@ -1,3 +1,11 @@ module github.com/hashicorp/consul/internal/tools/proto-gen-rpc-glue go 1.17 + +require github.com/stretchr/testify v1.7.1 + +require ( + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect +) diff --git a/internal/tools/proto-gen-rpc-glue/go.sum b/internal/tools/proto-gen-rpc-glue/go.sum index e69de29bb..2dca7c9c6 100644 --- a/internal/tools/proto-gen-rpc-glue/go.sum +++ b/internal/tools/proto-gen-rpc-glue/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/tools/proto-gen-rpc-glue/main.go b/internal/tools/proto-gen-rpc-glue/main.go index a666c9fa6..dd4956912 100644 --- a/internal/tools/proto-gen-rpc-glue/main.go +++ b/internal/tools/proto-gen-rpc-glue/main.go @@ -319,18 +319,18 @@ func getAnnotation(doc []*ast.Comment) (Annotation, error) { switch { case part == "ReadRequest": ann.ReadRequest = "ReadRequest" - case strings.HasPrefix(part, "ReadRequest"): - ann.TargetDatacenter = strings.TrimPrefix(part, "ReadRequest") + case strings.HasPrefix(part, "ReadRequest="): + ann.ReadRequest = strings.TrimPrefix(part, "ReadRequest=") case part == "WriteRequest": ann.WriteRequest = "WriteRequest" - case strings.HasPrefix(part, "WriteRequest"): - ann.TargetDatacenter = strings.TrimPrefix(part, "WriteRequest") + case strings.HasPrefix(part, "WriteRequest="): + ann.WriteRequest = strings.TrimPrefix(part, "WriteRequest=") case part == "TargetDatacenter": ann.TargetDatacenter = "TargetDatacenter" - case strings.HasPrefix(part, "TargetDatacenter"): - ann.TargetDatacenter = strings.TrimPrefix(part, "TargetDatacenter") + case strings.HasPrefix(part, "TargetDatacenter="): + ann.TargetDatacenter = strings.TrimPrefix(part, "TargetDatacenter=") default: return Annotation{}, fmt.Errorf("unexpected annotation part: %s", part) diff --git a/internal/tools/proto-gen-rpc-glue/main_test.go b/internal/tools/proto-gen-rpc-glue/main_test.go new file mode 100644 index 000000000..77ded532a --- /dev/null +++ 
b/internal/tools/proto-gen-rpc-glue/main_test.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+	"flag"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// update allows golden files to be updated based on the current output.
+var update = flag.Bool("update", false, "update golden files")
+
+func TestE2E(t *testing.T) {
+	// Generate new output
+	*flagPath = "./e2e/source.pb.go"
+	require.NoError(t, run(*flagPath))
+
+	raw, err := os.ReadFile("./e2e/source.rpcglue.pb.go")
+	require.NoError(t, err)
+
+	got := string(raw)
+
+	golden(t, got, "./e2e/source.rpcglue.pb.go")
+}
+
+// golden reads the expected value from the file at path and returns the
+// value.
+//
+// If the `-update` flag is used with `go test`, the golden file will be
+// updated to the value of actual.
+func golden(t *testing.T, actual, path string) string {
+	t.Helper()
+
+	path += ".golden"
+	if *update {
+		if dir := filepath.Dir(path); dir != "." {
+			require.NoError(t, os.MkdirAll(dir, 0755))
+		}
+		err := ioutil.WriteFile(path, []byte(actual), 0644)
+		require.NoError(t, err)
+	}
+
+	expected, err := ioutil.ReadFile(path)
+	require.NoError(t, err)
+	return string(expected)
+}

From f8fc317731c339c6d9400a0c2d0da744dad1d857 Mon Sep 17 00:00:00 2001
From: Connor
Date: Mon, 28 Mar 2022 09:58:16 -0500
Subject: [PATCH 027/785] Fix leaked Vault LifetimeRenewers (#12607)

* Fix leaked Vault LifetimeRenewers

When the Vault CA Provider is reconfigured we do not stop the LifetimeRenewers, which can cause them to leak until the Consul process recycles. On Configure, execute stopWatcher if it exists and is not nil before starting a new renewal.

* Add jitter before restarting the LifetimeWatcher

If we fail to log in to Vault or our token is no longer valid we can overwhelm a Vault instance with many requests very quickly by restarting the LifetimeWatcher. Before restarting the LifetimeWatcher, provide a backoff time of 1 second or less.

* Use a retry.Waiter instead of RandomStagger

* changelog

* gofmt'd

* Swap out bool for atomic.Uint32 in test

* Provide some extra clarification in comment and changelog
---
 .changelog/12607.txt                    |  3 +
 agent/connect/ca/provider_vault.go      | 42 +++++++++---
 agent/connect/ca/provider_vault_test.go | 89 ++++++++++++++++++++-----
 3 files changed, 108 insertions(+), 26 deletions(-)
 create mode 100644 .changelog/12607.txt

diff --git a/.changelog/12607.txt b/.changelog/12607.txt
new file mode 100644
index 000000000..65577d1a1
--- /dev/null
+++ b/.changelog/12607.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault.
+```
\ No newline at end of file
diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go
index 91b92528c..beec649c3 100644
--- a/agent/connect/ca/provider_vault.go
+++ b/agent/connect/ca/provider_vault.go
@@ -12,13 +12,14 @@ import (
 	"strings"
 	"time"
 
+	"github.com/hashicorp/consul/lib/decode"
+	"github.com/hashicorp/consul/lib/retry"
 	"github.com/hashicorp/go-hclog"
 	vaultapi "github.com/hashicorp/vault/api"
 	"github.com/mitchellh/mapstructure"
 
 	"github.com/hashicorp/consul/agent/connect"
 	"github.com/hashicorp/consul/agent/structs"
-	"github.com/hashicorp/consul/lib/decode"
 )
 
 const (
@@ -43,6 +44,10 @@ const (
 	VaultAuthMethodTypeUserpass = "userpass"
 
 	defaultK8SServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+
+	retryMin    = 1 * time.Second
+	retryMax    = 5 * time.Second
+	retryJitter = 20
 )
 
 var ErrBackendNotMounted = fmt.Errorf("backend not mounted")
@@ -52,7 +57,7 @@ type VaultProvider struct {
 	config *structs.VaultCAProviderConfig
 	client *vaultapi.Client
 
-	shutdown func()
+	stopWatcher func()
 
 	isPrimary bool
 	clusterID string
@@ -63,8 +68,8 @@ type VaultProvider struct {
 
 func NewVaultProvider(logger hclog.Logger) *VaultProvider {
 	return &VaultProvider{
-		shutdown: func() {},
-		logger:   logger,
+		stopWatcher: func() {},
+		logger:      logger,
 	}
 }
 
@@ -153,7 +158,10 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {
 		}
 
 		ctx, cancel := context.WithCancel(context.Background())
-		v.shutdown = cancel
+		if v.stopWatcher != nil {
+			v.stopWatcher()
+		}
+		v.stopWatcher = cancel
 		go v.renewToken(ctx, lifetimeWatcher)
 	}
 
@@ -195,16 +203,33 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti
 	go watcher.Start()
 	defer watcher.Stop()
 
+	// TODO: Once we've upgraded to a later version of protobuf we can upgrade to github.com/hashicorp/vault/api@1.1.1
+	// or later and rip this out.
+	retrier := retry.Waiter{
+		MinFailures: 5,
+		MinWait:     retryMin,
+		MaxWait:     retryMax,
+		Jitter:      retry.NewJitter(retryJitter),
+	}
+
 	for {
 		select {
 		case <-ctx.Done():
 			return
 
 		case err := <-watcher.DoneCh():
+			// In the event we fail to log in to Vault or our token is no longer valid we can overwhelm a Vault instance
+			// with a rate limit configured. We would make these requests to Vault as fast as we possibly could and start
+			// causing all clients to receive 429 response codes. To mitigate that we're sleeping 1 second or less
+			// before moving on to login again and restart the lifetime watcher. Once we can upgrade to
+			// github.com/hashicorp/vault/api@v1.1.1 or later the LifetimeWatcher _should_ perform that backoff for us.
 			if err != nil {
 				v.logger.Error("Error renewing token for Vault provider", "error", err)
 			}
 
+			// wait at least 1 second after returning from the lifetime watcher
+			retrier.Wait(ctx)
+
 			// If the watcher has exited and auth method is enabled,
 			// re-authenticate using the auth method and set up a new watcher.
 			if v.config.AuthMethod != nil {
@@ -212,7 +237,7 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti
 				loginResp, err := vaultLogin(v.client, v.config.AuthMethod)
 				if err != nil {
 					v.logger.Error("Error login in to Vault with %q auth method", v.config.AuthMethod.Type)
-					// Restart the watcher.
+					// Restart the watcher
 					go watcher.Start()
 					continue
 				}
@@ -232,11 +257,12 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti
 					continue
 				}
 			}
-			// Restart the watcher.
+ go watcher.Start() case <-watcher.RenewCh(): + retrier.Reset() v.logger.Info("Successfully renewed token for Vault provider") } } @@ -677,7 +703,7 @@ func (v *VaultProvider) Cleanup(providerTypeChange bool, otherConfig map[string] // Stop shuts down the token renew goroutine. func (v *VaultProvider) Stop() { - v.shutdown() + v.stopWatcher() } func (v *VaultProvider) PrimaryUsesIntermediate() {} diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index 460507383..11689ae69 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "sync/atomic" "testing" "time" @@ -212,6 +213,52 @@ func TestVaultCAProvider_RenewToken(t *testing.T) { }) } +func TestVaultCAProvider_RenewTokenStopWatcherOnConfigure(t *testing.T) { + + SkipIfVaultNotPresent(t) + + testVault, err := runTestVault(t) + require.NoError(t, err) + testVault.WaitUntilReady(t) + + // Create a token with a short TTL to be renewed by the provider. + ttl := 1 * time.Second + tcr := &vaultapi.TokenCreateRequest{ + TTL: ttl.String(), + } + secret, err := testVault.client.Auth().Token().Create(tcr) + require.NoError(t, err) + providerToken := secret.Auth.ClientToken + + provider, err := createVaultProvider(t, true, testVault.Addr, providerToken, nil) + require.NoError(t, err) + + var gotStopped = uint32(0) + provider.stopWatcher = func() { + atomic.StoreUint32(&gotStopped, 1) + } + + // Check the last renewal time. + secret, err = testVault.client.Auth().Token().Lookup(providerToken) + require.NoError(t, err) + firstRenewal, err := secret.Data["last_renewal_time"].(json.Number).Int64() + require.NoError(t, err) + + // Wait past the TTL and make sure the token has been renewed. + retry.Run(t, func(r *retry.R) { + secret, err = testVault.client.Auth().Token().Lookup(providerToken) + require.NoError(r, err) + lastRenewal, err := secret.Data["last_renewal_time"].(json.Number).Int64() + require.NoError(r, err) + require.Greater(r, lastRenewal, firstRenewal) + }) + + providerConfig := vaultProviderConfig(t, testVault.Addr, providerToken, nil) + + require.NoError(t, provider.Configure(providerConfig)) + require.Equal(t, uint32(1), atomic.LoadUint32(&gotStopped)) +} + func TestVaultCAProvider_Bootstrap(t *testing.T) { SkipIfVaultNotPresent(t) @@ -762,27 +809,10 @@ func testVaultProviderWithConfig(t *testing.T, isPrimary bool, rawConf map[strin } func createVaultProvider(t *testing.T, isPrimary bool, addr, token string, rawConf map[string]interface{}) (*VaultProvider, error) { - conf := map[string]interface{}{ - "Address": addr, - "Token": token, - "RootPKIPath": "pki-root/", - "IntermediatePKIPath": "pki-intermediate/", - // Tests duration parsing after msgpack type mangling during raft apply. 
- "LeafCertTTL": []uint8("72h"), - } - for k, v := range rawConf { - conf[k] = v - } + cfg := vaultProviderConfig(t, addr, token, rawConf) provider := NewVaultProvider(hclog.New(nil)) - cfg := ProviderConfig{ - ClusterID: connect.TestClusterID, - Datacenter: "dc1", - IsPrimary: true, - RawConfig: conf, - } - if !isPrimary { cfg.IsPrimary = false cfg.Datacenter = "dc2" @@ -799,3 +829,26 @@ func createVaultProvider(t *testing.T, isPrimary bool, addr, token string, rawCo return provider, nil } + +func vaultProviderConfig(t *testing.T, addr, token string, rawConf map[string]interface{}) ProviderConfig { + conf := map[string]interface{}{ + "Address": addr, + "Token": token, + "RootPKIPath": "pki-root/", + "IntermediatePKIPath": "pki-intermediate/", + // Tests duration parsing after msgpack type mangling during raft apply. + "LeafCertTTL": []uint8("72h"), + } + for k, v := range rawConf { + conf[k] = v + } + + cfg := ProviderConfig{ + ClusterID: connect.TestClusterID, + Datacenter: "dc1", + IsPrimary: true, + RawConfig: conf, + } + + return cfg +} From 7ddeab2e50649125adf841a6e187f066652314e2 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 28 Mar 2022 10:08:41 -0500 Subject: [PATCH 028/785] proto-gen-rpc-glue: use a shallow copy of proto/pbcommon instead of a consul dependency (#12634) --- .../proto-gen-rpc-glue/e2e/consul/go.mod | 5 + .../proto-gen-rpc-glue/e2e/consul/go.sum | 2 + .../e2e/consul/proto/pbcommon/common.pb.go | 588 ++++++++++++++++ internal/tools/proto-gen-rpc-glue/e2e/go.mod | 6 +- internal/tools/proto-gen-rpc-glue/e2e/go.sum | 655 ------------------ 5 files changed, 596 insertions(+), 660 deletions(-) create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/consul/go.mod create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/consul/go.sum create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.pb.go diff --git a/internal/tools/proto-gen-rpc-glue/e2e/consul/go.mod b/internal/tools/proto-gen-rpc-glue/e2e/consul/go.mod new file mode 100644 index 000000000..78852ba84 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/consul/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/consul + +go 1.13 + +require github.com/golang/protobuf v1.3.5 diff --git a/internal/tools/proto-gen-rpc-glue/e2e/consul/go.sum b/internal/tools/proto-gen-rpc-glue/e2e/consul/go.sum new file mode 100644 index 000000000..6124ed3e4 --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/consul/go.sum @@ -0,0 +1,2 @@ +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= diff --git a/internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.pb.go b/internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.pb.go new file mode 100644 index 000000000..a04042cac --- /dev/null +++ b/internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.pb.go @@ -0,0 +1,588 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: proto/pbcommon/common.proto + +package pbcommon + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// RaftIndex is used to track the index used while creating +// or modifying a given struct type. +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.RaftIndex +// output=common.gen.go +// name=Structs +// ignore-fields=state,sizeCache,unknownFields +type RaftIndex struct { + // @gotags: bexpr:"-" + CreateIndex uint64 `protobuf:"varint,1,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"` + // @gotags: bexpr:"-" + ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftIndex) Reset() { *m = RaftIndex{} } +func (m *RaftIndex) String() string { return proto.CompactTextString(m) } +func (*RaftIndex) ProtoMessage() {} +func (*RaftIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{0} +} + +func (m *RaftIndex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RaftIndex.Unmarshal(m, b) +} +func (m *RaftIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RaftIndex.Marshal(b, m, deterministic) +} +func (m *RaftIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftIndex.Merge(m, src) +} +func (m *RaftIndex) XXX_Size() int { + return xxx_messageInfo_RaftIndex.Size(m) +} +func (m *RaftIndex) XXX_DiscardUnknown() { + xxx_messageInfo_RaftIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftIndex proto.InternalMessageInfo + +func (m *RaftIndex) GetCreateIndex() uint64 { + if m != nil { + return m.CreateIndex + } + return 0 +} + +func (m *RaftIndex) GetModifyIndex() uint64 { + if m != nil { + return m.ModifyIndex + } + return 0 +} + +// TargetDatacenter is intended to be used within other messages used for RPC routing +// amongst the various Consul datacenters +type TargetDatacenter struct { + Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TargetDatacenter) Reset() { *m = TargetDatacenter{} } +func (m *TargetDatacenter) String() string { return proto.CompactTextString(m) } +func (*TargetDatacenter) ProtoMessage() {} +func (*TargetDatacenter) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{1} +} + +func (m *TargetDatacenter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TargetDatacenter.Unmarshal(m, b) +} +func (m *TargetDatacenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TargetDatacenter.Marshal(b, m, deterministic) +} +func (m *TargetDatacenter) XXX_Merge(src proto.Message) { + xxx_messageInfo_TargetDatacenter.Merge(m, src) +} +func (m *TargetDatacenter) XXX_Size() int { + return xxx_messageInfo_TargetDatacenter.Size(m) +} +func (m *TargetDatacenter) XXX_DiscardUnknown() { + xxx_messageInfo_TargetDatacenter.DiscardUnknown(m) +} + +var xxx_messageInfo_TargetDatacenter proto.InternalMessageInfo + +func (m *TargetDatacenter) GetDatacenter() string { + if m != nil { + return m.Datacenter + } + return "" +} + +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.WriteRequest +// output=common.gen.go +// name=Structs +// 
ignore-fields=state,sizeCache,unknownFields +type WriteRequest struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{2} +} + +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteRequest.Unmarshal(m, b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return xxx_messageInfo_WriteRequest.Size(m) +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +// ReadRequest is a type that may be embedded into any requests for read +// operations. +// It is a replacement for QueryOptions now that we no longer need any of those +// fields because we are moving away from using blocking queries. +// It is also similar to WriteRequest. It is a separate type so that in the +// future we can introduce fields that may only be relevant for reads. +type ReadRequest struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + // RequireConsistent indicates that the request must be sent to the leader. 
+ RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{3} +} + +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadRequest.Unmarshal(m, b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return xxx_messageInfo_ReadRequest.Size(m) +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *ReadRequest) GetRequireConsistent() bool { + if m != nil { + return m.RequireConsistent + } + return false +} + +// QueryOptions is used to specify various flags for read queries +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryOptions +// output=common.gen.go +// name=Structs +// ignore-fields=StaleIfError,AllowNotModifiedResponse,state,sizeCache,unknownFields +type QueryOptions struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + // If set, wait until query exceeds given index. Must be provided + // with MaxQueryTime. + MinQueryIndex uint64 `protobuf:"varint,2,opt,name=MinQueryIndex,proto3" json:"MinQueryIndex,omitempty"` + // Provided with MinQueryIndex to wait for change. + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + MaxQueryTime *duration.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime,omitempty"` + // If set, any follower can service the request. Results + // may be arbitrarily stale. + AllowStale bool `protobuf:"varint,4,opt,name=AllowStale,proto3" json:"AllowStale,omitempty"` + // If set, the leader must verify leadership prior to + // servicing the request. Prevents a stale read. + RequireConsistent bool `protobuf:"varint,5,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` + // If set, the local agent may respond with an arbitrarily stale locally + // cached response. The semantics differ from AllowStale since the agent may + // be entirely partitioned from the servers and still considered "healthy" by + // operators. Stale responses from Servers are also arbitrarily stale, but can + // provide additional bounds on the last contact time from the leader. It's + // expected that servers that are partitioned are noticed and replaced in a + // timely way by operators while the same may not be true for client agents. + UseCache bool `protobuf:"varint,6,opt,name=UseCache,proto3" json:"UseCache,omitempty"` + // If set and AllowStale is true, will try first a stale + // read, and then will perform a consistent read if stale + // read is older than value. 
+ // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + MaxStaleDuration *duration.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration,omitempty"` + // MaxAge limits how old a cached value will be returned if UseCache is true. + // If there is a cached response that is older than the MaxAge, it is treated + // as a cache miss and a new fetch invoked. If the fetch fails, the error is + // returned. Clients that wish to allow for stale results on error can set + // StaleIfError to a longer duration to change this behavior. It is ignored + // if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + MaxAge *duration.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge,omitempty"` + // MustRevalidate forces the agent to fetch a fresh version of a cached + // resource or at least validate that the cached version is still fresh. It is + // implied by either max-age=0 or must-revalidate Cache-Control headers. It + // only makes sense when UseCache is true. We store it since MaxAge = 0 is the + // default unset value. + MustRevalidate bool `protobuf:"varint,9,opt,name=MustRevalidate,proto3" json:"MustRevalidate,omitempty"` + // StaleIfError specifies how stale the client will accept a cached response + // if the servers are unavailable to fetch a fresh one. Only makes sense when + // UseCache is true and MaxAge is set to a lower, non-zero value. It is + // ignored if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + StaleIfError *duration.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError,omitempty"` + // Filter specifies the go-bexpr filter expression to be used for + // filtering the data prior to returning a response + Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryOptions) Reset() { *m = QueryOptions{} } +func (m *QueryOptions) String() string { return proto.CompactTextString(m) } +func (*QueryOptions) ProtoMessage() {} +func (*QueryOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{4} +} + +func (m *QueryOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryOptions.Unmarshal(m, b) +} +func (m *QueryOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryOptions.Marshal(b, m, deterministic) +} +func (m *QueryOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOptions.Merge(m, src) +} +func (m *QueryOptions) XXX_Size() int { + return xxx_messageInfo_QueryOptions.Size(m) +} +func (m *QueryOptions) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOptions proto.InternalMessageInfo + +func (m *QueryOptions) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *QueryOptions) GetMinQueryIndex() uint64 { + if m != nil { + return m.MinQueryIndex + } + return 0 +} + +func (m *QueryOptions) GetMaxQueryTime() *duration.Duration { + if m != nil { + return m.MaxQueryTime + } + return nil +} + +func (m *QueryOptions) GetAllowStale() bool { + if m != nil { + return m.AllowStale + } + return false +} + +func (m *QueryOptions) 
GetRequireConsistent() bool { + if m != nil { + return m.RequireConsistent + } + return false +} + +func (m *QueryOptions) GetUseCache() bool { + if m != nil { + return m.UseCache + } + return false +} + +func (m *QueryOptions) GetMaxStaleDuration() *duration.Duration { + if m != nil { + return m.MaxStaleDuration + } + return nil +} + +func (m *QueryOptions) GetMaxAge() *duration.Duration { + if m != nil { + return m.MaxAge + } + return nil +} + +func (m *QueryOptions) GetMustRevalidate() bool { + if m != nil { + return m.MustRevalidate + } + return false +} + +func (m *QueryOptions) GetStaleIfError() *duration.Duration { + if m != nil { + return m.StaleIfError + } + return nil +} + +func (m *QueryOptions) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// QueryMeta allows a query response to include potentially +// useful metadata about a query +// +// mog annotation: +// +// target=github.com/hashicorp/consul/agent/structs.QueryMeta +// output=common.gen.go +// name=Structs +// ignore-fields=NotModified,Backend,state,sizeCache,unknownFields +type QueryMeta struct { + // This is the index associated with the read + Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` + // If AllowStale is used, this is time elapsed since + // last contact between the follower and leader. This + // can be used to gauge staleness. + // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto + LastContact *duration.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact,omitempty"` + // Used to indicate if there is a known leader node + KnownLeader bool `protobuf:"varint,3,opt,name=KnownLeader,proto3" json:"KnownLeader,omitempty"` + // Consistencylevel returns the consistency used to serve the query + // Having `discovery_max_stale` on the agent can affect whether + // the request was served by a leader. + ConsistencyLevel string `protobuf:"bytes,4,opt,name=ConsistencyLevel,proto3" json:"ConsistencyLevel,omitempty"` + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. 
+ ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryMeta) Reset() { *m = QueryMeta{} } +func (m *QueryMeta) String() string { return proto.CompactTextString(m) } +func (*QueryMeta) ProtoMessage() {} +func (*QueryMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{5} +} + +func (m *QueryMeta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryMeta.Unmarshal(m, b) +} +func (m *QueryMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryMeta.Marshal(b, m, deterministic) +} +func (m *QueryMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryMeta.Merge(m, src) +} +func (m *QueryMeta) XXX_Size() int { + return xxx_messageInfo_QueryMeta.Size(m) +} +func (m *QueryMeta) XXX_DiscardUnknown() { + xxx_messageInfo_QueryMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryMeta proto.InternalMessageInfo + +func (m *QueryMeta) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *QueryMeta) GetLastContact() *duration.Duration { + if m != nil { + return m.LastContact + } + return nil +} + +func (m *QueryMeta) GetKnownLeader() bool { + if m != nil { + return m.KnownLeader + } + return false +} + +func (m *QueryMeta) GetConsistencyLevel() string { + if m != nil { + return m.ConsistencyLevel + } + return "" +} + +func (m *QueryMeta) GetResultsFilteredByACLs() bool { + if m != nil { + return m.ResultsFilteredByACLs + } + return false +} + +// EnterpriseMeta contains metadata that is only used by the Enterprise version +// of Consul. +type EnterpriseMeta struct { + // Namespace in which the entity exists. + Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + // Partition in which the entity exists. 
+ Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnterpriseMeta) Reset() { *m = EnterpriseMeta{} } +func (m *EnterpriseMeta) String() string { return proto.CompactTextString(m) } +func (*EnterpriseMeta) ProtoMessage() {} +func (*EnterpriseMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_a6f5ac44994d718c, []int{6} +} + +func (m *EnterpriseMeta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnterpriseMeta.Unmarshal(m, b) +} +func (m *EnterpriseMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnterpriseMeta.Marshal(b, m, deterministic) +} +func (m *EnterpriseMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnterpriseMeta.Merge(m, src) +} +func (m *EnterpriseMeta) XXX_Size() int { + return xxx_messageInfo_EnterpriseMeta.Size(m) +} +func (m *EnterpriseMeta) XXX_DiscardUnknown() { + xxx_messageInfo_EnterpriseMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_EnterpriseMeta proto.InternalMessageInfo + +func (m *EnterpriseMeta) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *EnterpriseMeta) GetPartition() string { + if m != nil { + return m.Partition + } + return "" +} + +func init() { + proto.RegisterType((*RaftIndex)(nil), "common.RaftIndex") + proto.RegisterType((*TargetDatacenter)(nil), "common.TargetDatacenter") + proto.RegisterType((*WriteRequest)(nil), "common.WriteRequest") + proto.RegisterType((*ReadRequest)(nil), "common.ReadRequest") + proto.RegisterType((*QueryOptions)(nil), "common.QueryOptions") + proto.RegisterType((*QueryMeta)(nil), "common.QueryMeta") + proto.RegisterType((*EnterpriseMeta)(nil), "common.EnterpriseMeta") +} + +func init() { + proto.RegisterFile("proto/pbcommon/common.proto", fileDescriptor_a6f5ac44994d718c) +} + +var fileDescriptor_a6f5ac44994d718c = []byte{ + // 558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x51, 0x6f, 0xd3, 0x30, + 0x10, 0x56, 0xb7, 0x2e, 0x4b, 0xae, 0x65, 0x2a, 0x16, 0xa0, 0x30, 0xd0, 0x54, 0x45, 0x13, 0x9a, + 0xa6, 0xa9, 0x11, 0x83, 0x37, 0xc4, 0x43, 0xd7, 0x15, 0x69, 0xa3, 0x61, 0xcc, 0x14, 0x21, 0xf1, + 0xe6, 0x26, 0xd7, 0xd6, 0x22, 0x8d, 0x83, 0xed, 0x6c, 0xed, 0x7f, 0x46, 0xfc, 0x06, 0x14, 0xa7, + 0xed, 0x52, 0xba, 0xad, 0x4f, 0xd1, 0xf7, 0xdd, 0xe7, 0xf3, 0xdd, 0x7d, 0xe7, 0xc0, 0xab, 0x54, + 0x0a, 0x2d, 0xfc, 0x74, 0x10, 0x8a, 0xc9, 0x44, 0x24, 0x7e, 0xf1, 0x69, 0x19, 0x96, 0x58, 0x05, + 0xda, 0x3f, 0x18, 0x09, 0x31, 0x8a, 0xd1, 0x37, 0xec, 0x20, 0x1b, 0xfa, 0x51, 0x26, 0x99, 0xe6, + 0x0b, 0x9d, 0x77, 0x05, 0x0e, 0x65, 0x43, 0x7d, 0x91, 0x44, 0x38, 0x25, 0x4d, 0xa8, 0x75, 0x24, + 0x32, 0x8d, 0x06, 0xba, 0x95, 0x66, 0xe5, 0xa8, 0x4a, 0xcb, 0x54, 0xae, 0x08, 0x44, 0xc4, 0x87, + 0xb3, 0x42, 0xb1, 0x55, 0x28, 0x4a, 0x94, 0x77, 0x0a, 0x8d, 0x3e, 0x93, 0x23, 0xd4, 0xe7, 0x4c, + 0xb3, 0x10, 0x13, 0x8d, 0x92, 0x1c, 0x00, 0xdc, 0x21, 0x93, 0xd6, 0xa1, 0x25, 0xc6, 0x3b, 0x84, + 0xfa, 0x0f, 0xc9, 0x35, 0x52, 0xfc, 0x9d, 0xa1, 0xd2, 0xe4, 0x19, 0xec, 0xf4, 0xc5, 0x2f, 0x4c, + 0xe6, 0xd2, 0x02, 0x78, 0xd7, 0x50, 0xa3, 0xc8, 0xa2, 0x47, 0x45, 0xe4, 0x04, 0x9e, 0xe6, 0x02, + 0x2e, 0xb1, 0x23, 0x12, 0xc5, 0x95, 0xc6, 0x44, 0x9b, 0x32, 0x6d, 0xba, 0x1e, 0xf0, 0xfe, 0x6c, + 0x43, 0xfd, 0x3a, 0x43, 0x39, 0xbb, 0x4a, 0xf3, 0x99, 0xa8, 0x07, 0x92, 0x1e, 0xc2, 0x93, 0x80, + 0x27, 0x46, 0x58, 0xee, 0x7b, 0x95, 
0x24, 0x1f, 0xa1, 0x1e, 0xb0, 0xa9, 0x21, 0xfa, 0x7c, 0x82, + 0xee, 0x76, 0xb3, 0x72, 0x54, 0x3b, 0x7d, 0xd9, 0x2a, 0x1c, 0x68, 0x2d, 0x1c, 0x68, 0x9d, 0xcf, + 0x1d, 0xa0, 0x2b, 0xf2, 0x7c, 0x48, 0xed, 0x38, 0x16, 0xb7, 0xdf, 0x34, 0x8b, 0xd1, 0xad, 0x9a, + 0x92, 0x4b, 0xcc, 0xfd, 0x9d, 0xed, 0x3c, 0xd0, 0x19, 0xd9, 0x07, 0xfb, 0xbb, 0xc2, 0x0e, 0x0b, + 0xc7, 0xe8, 0x5a, 0x46, 0xb4, 0xc4, 0xa4, 0x0b, 0x8d, 0x80, 0x4d, 0x4d, 0xd6, 0x45, 0x2d, 0xee, + 0xee, 0xa6, 0x62, 0xd7, 0x8e, 0x90, 0xb7, 0x60, 0x05, 0x6c, 0xda, 0x1e, 0xa1, 0x6b, 0x6f, 0x3a, + 0x3c, 0x17, 0x92, 0x37, 0xb0, 0x17, 0x64, 0x4a, 0x53, 0xbc, 0x61, 0x31, 0x8f, 0x98, 0x46, 0xd7, + 0x31, 0xb5, 0xfd, 0xc7, 0xe6, 0xa3, 0x34, 0x77, 0x5d, 0x0c, 0xbb, 0x52, 0x0a, 0xe9, 0xc2, 0xc6, + 0x51, 0x96, 0xe5, 0xe4, 0x05, 0x58, 0x9f, 0x78, 0x9c, 0xef, 0x5a, 0xcd, 0xd8, 0x38, 0x47, 0xde, + 0xdf, 0x0a, 0x38, 0x66, 0xe0, 0x01, 0x6a, 0x96, 0x7b, 0x5d, 0xde, 0xf3, 0x02, 0x90, 0x0f, 0x50, + 0xeb, 0x31, 0xa5, 0x3b, 0x22, 0xd1, 0x2c, 0x2c, 0x56, 0xe7, 0xd1, 0x9b, 0xcb, 0xea, 0xfc, 0x79, + 0x7c, 0x4e, 0xc4, 0x6d, 0xd2, 0x43, 0x16, 0xa1, 0x34, 0x1b, 0x60, 0xd3, 0x32, 0x45, 0x8e, 0xa1, + 0xb1, 0x74, 0x29, 0x9c, 0xf5, 0xf0, 0x06, 0x63, 0xe3, 0xb5, 0x43, 0xd7, 0x78, 0xf2, 0x1e, 0x9e, + 0x53, 0x54, 0x59, 0xac, 0x55, 0x51, 0x3f, 0x46, 0x67, 0xb3, 0x76, 0xa7, 0xa7, 0x8c, 0x59, 0x36, + 0xbd, 0x3f, 0x78, 0x59, 0xb5, 0x77, 0x1a, 0xd6, 0x65, 0xd5, 0xb6, 0x1a, 0xbb, 0x5e, 0x0f, 0xf6, + 0xba, 0xf9, 0x0b, 0x4b, 0x25, 0x57, 0x68, 0x9a, 0x7e, 0x0d, 0xce, 0x17, 0x36, 0x41, 0x95, 0xb2, + 0x10, 0xe7, 0x4b, 0x7e, 0x47, 0xe4, 0xd1, 0xaf, 0x4c, 0x6a, 0x6e, 0x56, 0x62, 0xab, 0x88, 0x2e, + 0x89, 0xb3, 0x93, 0x9f, 0xc7, 0x23, 0xae, 0xc7, 0xd9, 0xa0, 0x15, 0x8a, 0x89, 0x3f, 0x66, 0x6a, + 0xcc, 0x43, 0x21, 0x53, 0x3f, 0x14, 0x89, 0xca, 0x62, 0x7f, 0xf5, 0x77, 0x34, 0xb0, 0x0c, 0x7e, + 0xf7, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x35, 0xe5, 0x62, 0x05, 0xa7, 0x04, 0x00, 0x00, +} diff --git a/internal/tools/proto-gen-rpc-glue/e2e/go.mod b/internal/tools/proto-gen-rpc-glue/e2e/go.mod index b4b31db76..222f9f9c1 100644 --- a/internal/tools/proto-gen-rpc-glue/e2e/go.mod +++ b/internal/tools/proto-gen-rpc-glue/e2e/go.mod @@ -2,10 +2,6 @@ module github.com/hashicorp/consul/internal/tools/proto-gen-rpc-glue/e2e go 1.13 -replace github.com/hashicorp/consul => ../../../.. 
- -replace github.com/hashicorp/consul/api => ../../../../api - -replace github.com/hashicorp/consul/sdk => ../../../../sdk +replace github.com/hashicorp/consul => ./consul require github.com/hashicorp/consul v1.11.4 diff --git a/internal/tools/proto-gen-rpc-glue/e2e/go.sum b/internal/tools/proto-gen-rpc-glue/e2e/go.sum index a0f384377..6124ed3e4 100644 --- a/internal/tools/proto-gen-rpc-glue/e2e/go.sum +++ b/internal/tools/proto-gen-rpc-glue/e2e/go.sum @@ -1,657 +1,2 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.42.34/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test 
v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache 
v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 h1:Com/5n/omNSBusX11zdyIYtidiqewLIanchbm//McZA= -github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4/go.mod h1:vWEAHAeAqfOwB3pSgHMQpIu8VH1jL+Ltg54Tw0wt/NI= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs= -github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= -github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= -github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= -github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= 
-github.com/hashicorp/go-raftchunking v0.6.2/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1JF+7lWqIwCFSb08fX0= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo= -github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= -github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto= -github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft-autopilot v0.1.5 h1:onEfMH5uHVdXQqtas36zXUHEZxLdsJVu/nXHLcLdL1I= -github.com/hashicorp/raft-autopilot v0.1.5/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= -github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod 
h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= -github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= -github.com/hashicorp/raft-boltdb/v2 v2.2.0/go.mod h1:SgPUD5TP20z/bswEr210SnkUFvQP/YjKV95aaiTbeMQ= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= -github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= -github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493 h1:brI5vBRUlAlM34VFmnLPwjnCL/FxAJp9XvOdX6Zt+XE= -github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= -github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= 
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI= -github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= -github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= -github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= -github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a 
h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/resty.v1 
v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From 484e1da6da8e4cb69aa332618cceba4aff65d701 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com>
Date: Mon, 28 Mar 2022 13:12:51 -0500
Subject: [PATCH 029/785] proto-gen-rpc-glue: support QueryMeta and QueryOptions (#12637)

---
 internal/tools/proto-gen-rpc-glue/.gitignore | 2 +-
 .../e2e/consul/agent/structs/structs.go | 57 ++++
 .../e2e/consul/proto/pbcommon/common.go | 178 ++++++++++
 .../tools/proto-gen-rpc-glue/e2e/source.pb.go | 24 ++
 .../e2e/source.rpcglue.pb.go.golden | 275 +++++++++++++++
 internal/tools/proto-gen-rpc-glue/main.go | 318 +++++++++++++-----
 6 files changed, 777 insertions(+), 77 deletions(-)
 create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/consul/agent/structs/structs.go
 create mode 100644 internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.go

diff --git a/internal/tools/proto-gen-rpc-glue/.gitignore b/internal/tools/proto-gen-rpc-glue/.gitignore
index 14130d475..343d68c1c 100644
--- a/internal/tools/proto-gen-rpc-glue/.gitignore
+++ b/internal/tools/proto-gen-rpc-glue/.gitignore
@@ -1 +1 @@
-./e2e/source.rpcglue.pb.go
+e2e/*.rpcglue.pb.go
diff --git a/internal/tools/proto-gen-rpc-glue/e2e/consul/agent/structs/structs.go b/internal/tools/proto-gen-rpc-glue/e2e/consul/agent/structs/structs.go
new file mode 100644
index 000000000..4d7741398
--- /dev/null
+++ b/internal/tools/proto-gen-rpc-glue/e2e/consul/agent/structs/structs.go
@@ -0,0 +1,57 @@
+package structs
+
+import (
+ "time"
+
+ "github.com/golang/protobuf/ptypes"
+ "github.com/golang/protobuf/ptypes/duration"
+ "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+type QueryOptions struct {
+ // NOTE: fields omitted from upstream if not necessary for compilation check
+ MinQueryIndex uint64
+ MaxQueryTime time.Duration
+}
+
+func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) {
+ // NOTE: body was omitted from upstream; we only need the signature to verify it compiles
+ return false, nil
+}
+
+type RPCInfo interface {
+ // NOTE: methods omitted from upstream if not necessary for compilation check
+}
+
+type QueryBackend int
+
+const (
+ QueryBackendBlocking QueryBackend = iota
+ QueryBackendStreaming
+)
+
+func DurationToProto(d time.Duration) *duration.Duration {
+ return ptypes.DurationProto(d)
+}
+
+func DurationFromProto(d *duration.Duration) time.Duration {
+ ret, _ := ptypes.Duration(d)
+ return ret
+
+}
+
+func TimeFromProto(s *timestamp.Timestamp) time.Time {
+ ret, _ := ptypes.Timestamp(s)
+ return ret
+}
+
+func TimeToProto(s time.Time) *timestamp.Timestamp {
+ ret, _ := ptypes.TimestampProto(s)
+ return ret
+}
+
+// IsZeroProtoTime returns true if the time is the minimum protobuf timestamp
+// (the Unix epoch).
+func IsZeroProtoTime(t *timestamp.Timestamp) bool {
+ return t.Seconds == 0 && t.Nanos == 0
+}
diff --git a/internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.go b/internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.go
new file mode 100644
index 000000000..f8211604d
--- /dev/null
+++ b/internal/tools/proto-gen-rpc-glue/e2e/consul/proto/pbcommon/common.go
@@ -0,0 +1,178 @@
+package pbcommon
+
+import (
+ "time"
+
+ "github.com/hashicorp/consul/agent/structs"
+)
+
+// IsRead is always true for QueryOption
+func (q *QueryOptions) IsRead() bool {
+ return true
+}
+
+// AllowStaleRead returns whether a stale read should be allowed
+func (q *QueryOptions) AllowStaleRead() bool {
+ return q.AllowStale
+}
+
+func (q *QueryOptions) TokenSecret() string {
+ return q.Token
+}
+
+func (q *QueryOptions) SetTokenSecret(s string) {
+ q.Token = s
+}
+
+// SetToken is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetToken(token string) {
+ q.Token = token
+}
+
+// SetMinQueryIndex is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetMinQueryIndex(minQueryIndex uint64) {
+ q.MinQueryIndex = minQueryIndex
+}
+
+// SetMaxQueryTime is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetMaxQueryTime(maxQueryTime time.Duration) {
+ q.MaxQueryTime = structs.DurationToProto(maxQueryTime)
+}
+
+// SetAllowStale is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetAllowStale(allowStale bool) {
+ q.AllowStale = allowStale
+}
+
+// SetRequireConsistent is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetRequireConsistent(requireConsistent bool) {
+ q.RequireConsistent = requireConsistent
+}
+
+// SetUseCache is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetUseCache(useCache bool) {
+ q.UseCache = useCache
+}
+
+// SetMaxStaleDuration is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetMaxStaleDuration(maxStaleDuration time.Duration) {
+ q.MaxStaleDuration = structs.DurationToProto(maxStaleDuration)
+}
+
+// SetMaxAge is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetMaxAge(maxAge time.Duration) {
+ q.MaxAge = structs.DurationToProto(maxAge)
+}
+
+// SetMustRevalidate is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetMustRevalidate(mustRevalidate bool) {
+ q.MustRevalidate = mustRevalidate
+}
+
+// SetStaleIfError is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) {
+ q.StaleIfError = structs.DurationToProto(staleIfError)
+}
+
+func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) {
+ maxTime := structs.DurationFromProto(q.MaxQueryTime)
+ o := structs.QueryOptions{
+ MaxQueryTime: maxTime,
+ MinQueryIndex: q.MinQueryIndex,
+ }
+ return o.HasTimedOut(start, rpcHoldTimeout, maxQueryTime, defaultQueryTime)
+}
+
+// SetFilter is needed to implement the structs.QueryOptionsCompat interface
+func (q *QueryOptions) SetFilter(filter string) {
+ q.Filter = filter
+}
+
+// WriteRequest only applies to writes, always false
+//
+// IsRead implements structs.RPCInfo
+func (w WriteRequest) IsRead() bool {
+ return false
+}
+
+// SetTokenSecret implements structs.RPCInfo
+func (w WriteRequest) TokenSecret() string {
+ return w.Token
+}
+
+// SetTokenSecret implements structs.RPCInfo
+func (w *WriteRequest) SetTokenSecret(s string) {
+ w.Token = s
+}
+
+// AllowStaleRead returns whether a stale read should be allowed
+//
+// AllowStaleRead implements structs.RPCInfo
+func (w WriteRequest) AllowStaleRead() bool {
+ return false
+}
+
+// HasTimedOut implements structs.RPCInfo
+func (w WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) {
+ return time.Since(start) > rpcHoldTimeout, nil
+}
+
+// IsRead implements structs.RPCInfo
+func (r *ReadRequest) IsRead() bool {
+ return true
+}
+
+// AllowStaleRead implements structs.RPCInfo
+func (r *ReadRequest) AllowStaleRead() bool {
+ // TODO(partitions): plumb this?
+ return false
+}
+
+// TokenSecret implements structs.RPCInfo
+func (r *ReadRequest) TokenSecret() string {
+ return r.Token
+}
+
+// SetTokenSecret implements structs.RPCInfo
+func (r *ReadRequest) SetTokenSecret(token string) {
+ r.Token = token
+}
+
+// HasTimedOut implements structs.RPCInfo
+func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) {
+ return time.Since(start) > rpcHoldTimeout, nil
+}
+
+// RequestDatacenter implements structs.RPCInfo
+func (td TargetDatacenter) RequestDatacenter() string {
+ return td.Datacenter
+}
+
+// SetLastContact is needed to implement the structs.QueryMetaCompat interface
+func (q *QueryMeta) SetLastContact(lastContact time.Duration) {
+ q.LastContact = structs.DurationToProto(lastContact)
+}
+
+// SetKnownLeader is needed to implement the structs.QueryMetaCompat interface
+func (q *QueryMeta) SetKnownLeader(knownLeader bool) {
+ q.KnownLeader = knownLeader
+}
+
+// SetIndex is needed to implement the structs.QueryMetaCompat interface
+func (q *QueryMeta) SetIndex(index uint64) {
+ q.Index = index
+}
+
+// SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface
+func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) {
+ q.ConsistencyLevel = consistencyLevel
+}
+
+func (q *QueryMeta) GetBackend() structs.QueryBackend {
+ return structs.QueryBackend(0)
+}
+
+// SetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat interface
+func (q *QueryMeta) SetResultsFilteredByACLs(v bool) {
+ q.ResultsFilteredByACLs = v
+}
diff --git a/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go b/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go
index ed47e1d09..43ace7a84 100644
--- a/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go
+++ b/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go
@@ -19,6 +19,19 @@ type ExampleReadRequest struct {
 TargetDatacenter *pbcommon.TargetDatacenter
 }

+// @consul-rpc-glue: QueryOptions,TargetDatacenter
+type ExampleQueryOptions struct {
+ Value string
+ QueryOptions *pbcommon.QueryOptions
+ TargetDatacenter *pbcommon.TargetDatacenter
+}
+
+// @consul-rpc-glue: QueryMeta
+type ExampleQueryMeta struct {
+ Value string
+ QueryMeta *pbcommon.QueryMeta
+}
+
 // @consul-rpc-glue: WriteRequest=AltWriteRequest
 type AltExampleWriteRequest struct {
 Value int
@@ -30,3 +43,14 @@ type AltExampleReadRequest struct {
 Value int
 AltReadRequest *pbcommon.ReadRequest
 }
+
+// @consul-rpc-glue: QueryOptions=AltQueryOptions
+type AltExampleQueryOptions struct {
+ Value string
+ AltQueryOptions *pbcommon.QueryOptions
+}
+
+// @consul-rpc-glue: QueryMeta=AltQueryMeta
+type AltExampleQueryMeta struct {
+ AltQueryMeta *pbcommon.QueryMeta
+}
diff --git
a/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden b/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden index 7f87f5477..a035aab7a 100644 --- a/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden +++ b/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden @@ -7,12 +7,19 @@ package e2e import ( "time" + + "github.com/hashicorp/consul/agent/structs" ) +// Reference imports to suppress errors if they are not otherwise used. +var _ structs.RPCInfo + +// AllowStaleRead implements structs.RPCInfo func (msg *ExampleWriteRequest) AllowStaleRead() bool { return false } +// HasTimedOut implements structs.RPCInfo func (msg *ExampleWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { if msg == nil || msg.WriteRequest == nil { return false, nil @@ -20,14 +27,18 @@ func (msg *ExampleWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time return msg.WriteRequest.HasTimedOut(start, rpcHoldTimeout, a, b) } +// IsRead implements structs.RPCInfo func (msg *ExampleWriteRequest) IsRead() bool { return false } +// SetTokenSecret implements structs.RPCInfo func (msg *ExampleWriteRequest) SetTokenSecret(s string) { + // TODO: initialize if nil msg.WriteRequest.SetTokenSecret(s) } +// TokenSecret implements structs.RPCInfo func (msg *ExampleWriteRequest) TokenSecret() string { if msg == nil || msg.WriteRequest == nil { return "" @@ -35,6 +46,7 @@ func (msg *ExampleWriteRequest) TokenSecret() string { return msg.WriteRequest.TokenSecret() } +// Token implements structs.RPCInfo func (msg *ExampleWriteRequest) Token() string { if msg.WriteRequest == nil { return "" @@ -42,6 +54,7 @@ func (msg *ExampleWriteRequest) Token() string { return msg.WriteRequest.Token } +// RequestDatacenter implements structs.RPCInfo func (msg *ExampleWriteRequest) RequestDatacenter() string { if msg == nil || msg.TargetDatacenter == nil { return "" @@ -49,14 +62,18 @@ func (msg *ExampleWriteRequest) RequestDatacenter() string { return msg.TargetDatacenter.GetDatacenter() } +// IsRead implements structs.RPCInfo func (msg *ExampleReadRequest) IsRead() bool { return true } +// AllowStaleRead implements structs.RPCInfo func (msg *ExampleReadRequest) AllowStaleRead() bool { + // TODO: initialize if nil return msg.ReadRequest.AllowStaleRead() } +// HasTimedOut implements structs.RPCInfo func (msg *ExampleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { if msg == nil || msg.ReadRequest == nil { return false, nil @@ -64,10 +81,13 @@ func (msg *ExampleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout time. 
return msg.ReadRequest.HasTimedOut(start, rpcHoldTimeout, a, b) } +// SetTokenSecret implements structs.RPCInfo func (msg *ExampleReadRequest) SetTokenSecret(s string) { + // TODO: initialize if nil msg.ReadRequest.SetTokenSecret(s) } +// TokenSecret implements structs.RPCInfo func (msg *ExampleReadRequest) TokenSecret() string { if msg == nil || msg.ReadRequest == nil { return "" @@ -75,6 +95,7 @@ func (msg *ExampleReadRequest) TokenSecret() string { return msg.ReadRequest.TokenSecret() } +// Token implements structs.RPCInfo func (msg *ExampleReadRequest) Token() string { if msg.ReadRequest == nil { return "" @@ -82,6 +103,7 @@ func (msg *ExampleReadRequest) Token() string { return msg.ReadRequest.Token } +// RequestDatacenter implements structs.RPCInfo func (msg *ExampleReadRequest) RequestDatacenter() string { if msg == nil || msg.TargetDatacenter == nil { return "" @@ -89,10 +111,135 @@ func (msg *ExampleReadRequest) RequestDatacenter() string { return msg.TargetDatacenter.GetDatacenter() } +// RequestDatacenter implements structs.RPCInfo +func (msg *ExampleQueryOptions) RequestDatacenter() string { + if msg == nil || msg.TargetDatacenter == nil { + return "" + } + return msg.TargetDatacenter.GetDatacenter() +} + +// IsRead implements structs.RPCInfo +func (msg *ExampleQueryOptions) IsRead() bool { + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (msg *ExampleQueryOptions) AllowStaleRead() bool { + return msg.QueryOptions.AllowStaleRead() +} + +// HasTimedOut implements structs.RPCInfo +func (msg *ExampleQueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.QueryOptions == nil { + return false, nil + } + return msg.QueryOptions.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +// SetTokenSecret implements structs.RPCInfo +func (msg *ExampleQueryOptions) SetTokenSecret(s string) { + // TODO: initialize if nil + msg.QueryOptions.SetTokenSecret(s) +} + +// TokenSecret implements structs.RPCInfo +func (msg *ExampleQueryOptions) TokenSecret() string { + if msg == nil || msg.QueryOptions == nil { + return "" + } + return msg.QueryOptions.TokenSecret() +} + +// Token implements structs.RPCInfo +func (msg *ExampleQueryOptions) Token() string { + if msg.QueryOptions == nil { + return "" + } + return msg.QueryOptions.Token +} + +// GetToken is required to implement blockingQueryOptions +func (msg *ExampleQueryOptions) GetToken() string { + if msg == nil || msg.QueryOptions == nil { + return "" + } + + return msg.QueryOptions.GetToken() +} + +// GetMinQueryIndex is required to implement blockingQueryOptions +func (msg *ExampleQueryOptions) GetMinQueryIndex() uint64 { + if msg == nil || msg.QueryOptions == nil { + return 0 + } + + return msg.QueryOptions.GetMinQueryIndex() +} + +// GetMaxQueryTime is required to implement blockingQueryOptions +func (msg *ExampleQueryOptions) GetMaxQueryTime() (time.Duration, error) { + if msg == nil || msg.QueryOptions == nil { + return 0, nil + } + + return structs.DurationFromProto(msg.QueryOptions.GetMaxQueryTime()), nil +} + +// GetRequireConsistent is required to implement blockingQueryOptions +func (msg *ExampleQueryOptions) GetRequireConsistent() bool { + if msg == nil || msg.QueryOptions == nil { + return false + } + return msg.QueryOptions.RequireConsistent +} + +// SetLastContact is required to implement blockingQueryResponseMeta +func (msg *ExampleQueryMeta) SetLastContact(d time.Duration) { + if msg == nil || msg.QueryMeta == nil { + return + 
} + msg.QueryMeta.SetLastContact(d) +} + +// SetKnownLeader is required to implement blockingQueryResponseMeta +func (msg *ExampleQueryMeta) SetKnownLeader(b bool) { + if msg == nil || msg.QueryMeta == nil { + return + } + msg.QueryMeta.SetKnownLeader(b) +} + +// GetIndex is required to implement blockingQueryResponseMeta +func (msg *ExampleQueryMeta) GetIndex() uint64 { + if msg == nil || msg.QueryMeta == nil { + return 0 + } + return msg.QueryMeta.GetIndex() +} + +// SetIndex is required to implement blockingQueryResponseMeta +func (msg *ExampleQueryMeta) SetIndex(i uint64) { + if msg == nil || msg.QueryMeta == nil { + return + } + msg.QueryMeta.SetIndex(i) +} + +// SetResultsFilteredByACLs is required to implement blockingQueryResponseMeta +func (msg *ExampleQueryMeta) SetResultsFilteredByACLs(b bool) { + if msg == nil || msg.QueryMeta == nil { + return + } + msg.QueryMeta.SetResultsFilteredByACLs(b) +} + +// AllowStaleRead implements structs.RPCInfo func (msg *AltExampleWriteRequest) AllowStaleRead() bool { return false } +// HasTimedOut implements structs.RPCInfo func (msg *AltExampleWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { if msg == nil || msg.AltWriteRequest == nil { return false, nil @@ -100,14 +247,18 @@ func (msg *AltExampleWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout t return msg.AltWriteRequest.HasTimedOut(start, rpcHoldTimeout, a, b) } +// IsRead implements structs.RPCInfo func (msg *AltExampleWriteRequest) IsRead() bool { return false } +// SetTokenSecret implements structs.RPCInfo func (msg *AltExampleWriteRequest) SetTokenSecret(s string) { + // TODO: initialize if nil msg.AltWriteRequest.SetTokenSecret(s) } +// TokenSecret implements structs.RPCInfo func (msg *AltExampleWriteRequest) TokenSecret() string { if msg == nil || msg.AltWriteRequest == nil { return "" @@ -115,6 +266,7 @@ func (msg *AltExampleWriteRequest) TokenSecret() string { return msg.AltWriteRequest.TokenSecret() } +// Token implements structs.RPCInfo func (msg *AltExampleWriteRequest) Token() string { if msg.AltWriteRequest == nil { return "" @@ -122,14 +274,18 @@ func (msg *AltExampleWriteRequest) Token() string { return msg.AltWriteRequest.Token } +// IsRead implements structs.RPCInfo func (msg *AltExampleReadRequest) IsRead() bool { return true } +// AllowStaleRead implements structs.RPCInfo func (msg *AltExampleReadRequest) AllowStaleRead() bool { + // TODO: initialize if nil return msg.AltReadRequest.AllowStaleRead() } +// HasTimedOut implements structs.RPCInfo func (msg *AltExampleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { if msg == nil || msg.AltReadRequest == nil { return false, nil @@ -137,10 +293,13 @@ func (msg *AltExampleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout ti return msg.AltReadRequest.HasTimedOut(start, rpcHoldTimeout, a, b) } +// SetTokenSecret implements structs.RPCInfo func (msg *AltExampleReadRequest) SetTokenSecret(s string) { + // TODO: initialize if nil msg.AltReadRequest.SetTokenSecret(s) } +// TokenSecret implements structs.RPCInfo func (msg *AltExampleReadRequest) TokenSecret() string { if msg == nil || msg.AltReadRequest == nil { return "" @@ -148,9 +307,125 @@ func (msg *AltExampleReadRequest) TokenSecret() string { return msg.AltReadRequest.TokenSecret() } +// Token implements structs.RPCInfo func (msg *AltExampleReadRequest) Token() string { if msg.AltReadRequest == nil { return "" } return 
msg.AltReadRequest.Token } + +// IsRead implements structs.RPCInfo +func (msg *AltExampleQueryOptions) IsRead() bool { + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (msg *AltExampleQueryOptions) AllowStaleRead() bool { + return msg.AltQueryOptions.AllowStaleRead() +} + +// HasTimedOut implements structs.RPCInfo +func (msg *AltExampleQueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.AltQueryOptions == nil { + return false, nil + } + return msg.AltQueryOptions.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +// SetTokenSecret implements structs.RPCInfo +func (msg *AltExampleQueryOptions) SetTokenSecret(s string) { + // TODO: initialize if nil + msg.AltQueryOptions.SetTokenSecret(s) +} + +// TokenSecret implements structs.RPCInfo +func (msg *AltExampleQueryOptions) TokenSecret() string { + if msg == nil || msg.AltQueryOptions == nil { + return "" + } + return msg.AltQueryOptions.TokenSecret() +} + +// Token implements structs.RPCInfo +func (msg *AltExampleQueryOptions) Token() string { + if msg.AltQueryOptions == nil { + return "" + } + return msg.AltQueryOptions.Token +} + +// GetToken is required to implement blockingQueryOptions +func (msg *AltExampleQueryOptions) GetToken() string { + if msg == nil || msg.AltQueryOptions == nil { + return "" + } + + return msg.AltQueryOptions.GetToken() +} + +// GetMinQueryIndex is required to implement blockingQueryOptions +func (msg *AltExampleQueryOptions) GetMinQueryIndex() uint64 { + if msg == nil || msg.AltQueryOptions == nil { + return 0 + } + + return msg.AltQueryOptions.GetMinQueryIndex() +} + +// GetMaxQueryTime is required to implement blockingQueryOptions +func (msg *AltExampleQueryOptions) GetMaxQueryTime() (time.Duration, error) { + if msg == nil || msg.AltQueryOptions == nil { + return 0, nil + } + + return structs.DurationFromProto(msg.AltQueryOptions.GetMaxQueryTime()), nil +} + +// GetRequireConsistent is required to implement blockingQueryOptions +func (msg *AltExampleQueryOptions) GetRequireConsistent() bool { + if msg == nil || msg.AltQueryOptions == nil { + return false + } + return msg.AltQueryOptions.RequireConsistent +} + +// SetLastContact is required to implement blockingQueryResponseMeta +func (msg *AltExampleQueryMeta) SetLastContact(d time.Duration) { + if msg == nil || msg.AltQueryMeta == nil { + return + } + msg.AltQueryMeta.SetLastContact(d) +} + +// SetKnownLeader is required to implement blockingQueryResponseMeta +func (msg *AltExampleQueryMeta) SetKnownLeader(b bool) { + if msg == nil || msg.AltQueryMeta == nil { + return + } + msg.AltQueryMeta.SetKnownLeader(b) +} + +// GetIndex is required to implement blockingQueryResponseMeta +func (msg *AltExampleQueryMeta) GetIndex() uint64 { + if msg == nil || msg.AltQueryMeta == nil { + return 0 + } + return msg.AltQueryMeta.GetIndex() +} + +// SetIndex is required to implement blockingQueryResponseMeta +func (msg *AltExampleQueryMeta) SetIndex(i uint64) { + if msg == nil || msg.AltQueryMeta == nil { + return + } + msg.AltQueryMeta.SetIndex(i) +} + +// SetResultsFilteredByACLs is required to implement blockingQueryResponseMeta +func (msg *AltExampleQueryMeta) SetResultsFilteredByACLs(b bool) { + if msg == nil || msg.AltQueryMeta == nil { + return + } + msg.AltQueryMeta.SetResultsFilteredByACLs(b) +} diff --git a/internal/tools/proto-gen-rpc-glue/main.go b/internal/tools/proto-gen-rpc-glue/main.go index dd4956912..0618b35b2 100644 --- 
a/internal/tools/proto-gen-rpc-glue/main.go +++ b/internal/tools/proto-gen-rpc-glue/main.go @@ -96,6 +96,12 @@ func processFile(path string) error { if ann.TargetDatacenter != "" { log.Printf(" TargetDatacenter from %s", ann.TargetDatacenter) } + if ann.QueryOptions != "" { + log.Printf(" QueryOptions from %s", ann.QueryOptions) + } + if ann.QueryMeta != "" { + log.Printf(" QueryMeta from %s", ann.QueryMeta) + } } } @@ -114,91 +120,29 @@ func processFile(path string) error { buf.WriteString(` import ( "time" + + "github.com/hashicorp/consul/agent/structs" ) +// Reference imports to suppress errors if they are not otherwise used. +var _ structs.RPCInfo + `) for _, typ := range v.Types { if typ.Annotation.WriteRequest != "" { - buf.WriteString(fmt.Sprintf(` -func (msg *%[1]s) AllowStaleRead() bool { - return false -} - -func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { - if msg == nil || msg.%[2]s == nil { - return false, nil - } - return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) -} - -func (msg *%[1]s) IsRead() bool { - return false -} - -func (msg *%[1]s) SetTokenSecret(s string) { - msg.%[2]s.SetTokenSecret(s) -} - -func (msg *%[1]s) TokenSecret() string { - if msg == nil || msg.%[2]s == nil { - return "" - } - return msg.%[2]s.TokenSecret() -} - -func (msg *%[1]s) Token() string { - if msg.%[2]s == nil { - return "" - } - return msg.%[2]s.Token -} -`, typ.Name, typ.Annotation.WriteRequest)) + buf.WriteString(fmt.Sprintf(tmplWriteRequest, typ.Name, typ.Annotation.WriteRequest)) } if typ.Annotation.ReadRequest != "" { - buf.WriteString(fmt.Sprintf(` -func (msg *%[1]s) IsRead() bool { - return true -} - -func (msg *%[1]s) AllowStaleRead() bool { - return msg.%[2]s.AllowStaleRead() -} - -func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { - if msg == nil || msg.%[2]s == nil { - return false, nil - } - return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) -} - -func (msg *%[1]s) SetTokenSecret(s string) { - msg.%[2]s.SetTokenSecret(s) -} - -func (msg *%[1]s) TokenSecret() string { - if msg == nil || msg.%[2]s == nil { - return "" - } - return msg.%[2]s.TokenSecret() -} - -func (msg *%[1]s) Token() string { - if msg.%[2]s == nil { - return "" - } - return msg.%[2]s.Token -} -`, typ.Name, typ.Annotation.ReadRequest)) + buf.WriteString(fmt.Sprintf(tmplReadRequest, typ.Name, typ.Annotation.ReadRequest)) } if typ.Annotation.TargetDatacenter != "" { - buf.WriteString(fmt.Sprintf(` -func (msg *%[1]s) RequestDatacenter() string { - if msg == nil || msg.%[2]s == nil { - return "" - } - return msg.%[2]s.GetDatacenter() -} -`, typ.Name, typ.Annotation.TargetDatacenter)) + buf.WriteString(fmt.Sprintf(tmplTargetDatacenter, typ.Name, typ.Annotation.TargetDatacenter)) + } + if typ.Annotation.QueryOptions != "" { + buf.WriteString(fmt.Sprintf(tmplQueryOptions, typ.Name, typ.Annotation.QueryOptions)) + } + if typ.Annotation.QueryMeta != "" { + buf.WriteString(fmt.Sprintf(tmplQueryMeta, typ.Name, typ.Annotation.QueryMeta)) } } @@ -296,6 +240,8 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { } type Annotation struct { + QueryMeta string + QueryOptions string ReadRequest string WriteRequest string TargetDatacenter string @@ -332,6 +278,16 @@ func getAnnotation(doc []*ast.Comment) (Annotation, error) { case strings.HasPrefix(part, "TargetDatacenter="): ann.TargetDatacenter = strings.TrimPrefix(part, "TargetDatacenter=") + case part == 
"QueryOptions": + ann.QueryOptions = "QueryOptions" + case strings.HasPrefix(part, "QueryOptions="): + ann.QueryOptions = strings.TrimPrefix(part, "QueryOptions=") + + case part == "QueryMeta": + ann.QueryMeta = "QueryMeta" + case strings.HasPrefix(part, "QueryMeta="): + ann.QueryMeta = strings.TrimPrefix(part, "QueryMeta=") + default: return Annotation{}, fmt.Errorf("unexpected annotation part: %s", part) } @@ -373,3 +329,213 @@ func getRawBuildTags(file *ast.File) []string { return out } + +const tmplWriteRequest = ` +// AllowStaleRead implements structs.RPCInfo +func (msg *%[1]s) AllowStaleRead() bool { + return false +} + +// HasTimedOut implements structs.RPCInfo +func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.%[2]s == nil { + return false, nil + } + return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +// IsRead implements structs.RPCInfo +func (msg *%[1]s) IsRead() bool { + return false +} + +// SetTokenSecret implements structs.RPCInfo +func (msg *%[1]s) SetTokenSecret(s string) { + // TODO: initialize if nil + msg.%[2]s.SetTokenSecret(s) +} + +// TokenSecret implements structs.RPCInfo +func (msg *%[1]s) TokenSecret() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.TokenSecret() +} + +// Token implements structs.RPCInfo +func (msg *%[1]s) Token() string { + if msg.%[2]s == nil { + return "" + } + return msg.%[2]s.Token +} +` + +const tmplReadRequest = ` +// IsRead implements structs.RPCInfo +func (msg *%[1]s) IsRead() bool { + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (msg *%[1]s) AllowStaleRead() bool { + // TODO: initialize if nil + return msg.%[2]s.AllowStaleRead() +} + +// HasTimedOut implements structs.RPCInfo +func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.%[2]s == nil { + return false, nil + } + return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) +} + +// SetTokenSecret implements structs.RPCInfo +func (msg *%[1]s) SetTokenSecret(s string) { + // TODO: initialize if nil + msg.%[2]s.SetTokenSecret(s) +} + +// TokenSecret implements structs.RPCInfo +func (msg *%[1]s) TokenSecret() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.TokenSecret() +} + +// Token implements structs.RPCInfo +func (msg *%[1]s) Token() string { + if msg.%[2]s == nil { + return "" + } + return msg.%[2]s.Token +} +` + +const tmplTargetDatacenter = ` +// RequestDatacenter implements structs.RPCInfo +func (msg *%[1]s) RequestDatacenter() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.GetDatacenter() +} +` + +const tmplQueryOptions = ` +// IsRead implements structs.RPCInfo +func (msg *%[1]s) IsRead() bool { + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (msg *%[1]s) AllowStaleRead() bool { + return msg.%[2]s.AllowStaleRead() +} + +// HasTimedOut implements structs.RPCInfo +func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + if msg == nil || msg.%[2]s == nil { + return false, nil + } + return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) +} +// SetTokenSecret implements structs.RPCInfo +func (msg *%[1]s) SetTokenSecret(s string) { + // TODO: initialize if nil + msg.%[2]s.SetTokenSecret(s) +} + +// TokenSecret implements structs.RPCInfo +func (msg *%[1]s) 
TokenSecret() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + return msg.%[2]s.TokenSecret() +} + +// Token implements structs.RPCInfo +func (msg *%[1]s) Token() string { + if msg.%[2]s == nil { + return "" + } + return msg.%[2]s.Token +} +// GetToken is required to implement blockingQueryOptions +func (msg *%[1]s) GetToken() string { + if msg == nil || msg.%[2]s == nil { + return "" + } + + return msg.%[2]s.GetToken() +} +// GetMinQueryIndex is required to implement blockingQueryOptions +func (msg *%[1]s) GetMinQueryIndex() uint64 { + if msg == nil || msg.%[2]s == nil { + return 0 + } + + return msg.%[2]s.GetMinQueryIndex() +} +// GetMaxQueryTime is required to implement blockingQueryOptions +func (msg *%[1]s) GetMaxQueryTime() (time.Duration, error) { + if msg == nil || msg.%[2]s == nil { + return 0, nil + } + + return structs.DurationFromProto(msg.%[2]s.GetMaxQueryTime()), nil +} + +// GetRequireConsistent is required to implement blockingQueryOptions +func (msg *%[1]s) GetRequireConsistent() bool { + if msg == nil || msg.%[2]s == nil { + return false + } + return msg.%[2]s.RequireConsistent +} +` + +const tmplQueryMeta = ` +// SetLastContact is required to implement blockingQueryResponseMeta +func (msg *%[1]s) SetLastContact(d time.Duration) { + if msg == nil || msg.%[2]s == nil { + return + } + msg.%[2]s.SetLastContact(d) +} + +// SetKnownLeader is required to implement blockingQueryResponseMeta +func (msg *%[1]s) SetKnownLeader(b bool) { + if msg == nil || msg.%[2]s == nil { + return + } + msg.%[2]s.SetKnownLeader(b) +} + +// GetIndex is required to implement blockingQueryResponseMeta +func (msg *%[1]s) GetIndex() uint64 { + if msg == nil || msg.%[2]s == nil { + return 0 + } + return msg.%[2]s.GetIndex() +} + +// SetIndex is required to implement blockingQueryResponseMeta +func (msg *%[1]s) SetIndex(i uint64) { + if msg == nil || msg.%[2]s == nil { + return + } + msg.%[2]s.SetIndex(i) +} + +// SetResultsFilteredByACLs is required to implement blockingQueryResponseMeta +func (msg *%[1]s) SetResultsFilteredByACLs(b bool) { + if msg == nil || msg.%[2]s == nil { + return + } + msg.%[2]s.SetResultsFilteredByACLs(b) +} +` From 8fd73ede3ec92b5cc3add5fb19f616991309c6bc Mon Sep 17 00:00:00 2001 From: Eric Date: Mon, 28 Mar 2022 13:56:44 -0400 Subject: [PATCH 030/785] remove gogo from acl protobufs --- agent/consul/state/acl.go | 10 +++---- agent/consul/state/acl_test.go | 8 ++--- agent/http.go | 36 ++++++++++++++++++++--- agent/structs/protobuf_compat.go | 28 ------------------ agent/structs/structs.go | 18 ------------ api/namespace.go | 46 +++++++++++++++++++++++++++++ proto/pbacl/acl.go | 19 ++++++++++++ proto/pbacl/acl.pb.go | 50 +++++++++++++++++++++++--------- proto/pbacl/acl.proto | 19 ++---------- proto/pbcommon/common.go | 29 ++++++++++++++++++ proto/prototest/testing.go | 14 +++++++++ 11 files changed, 188 insertions(+), 89 deletions(-) create mode 100644 proto/pbacl/acl.go create mode 100644 proto/prototest/testing.go diff --git a/agent/consul/state/acl.go b/agent/consul/state/acl.go index 9b84c6c16..877037fe2 100644 --- a/agent/consul/state/acl.go +++ b/agent/consul/state/acl.go @@ -126,7 +126,7 @@ func (s *Store) CanBootstrapACLToken() (bool, uint64, error) { // to update the name. Unlike the older functions to operate specifically on role or policy links // this function does not itself handle the case where the id cannot be found. 
Instead the // getName function should handle that and return an error if necessary -func resolveACLLinks(tx ReadTxn, links []pbacl.ACLLink, getName func(ReadTxn, string) (string, error)) (int, error) { +func resolveACLLinks(tx ReadTxn, links []*pbacl.ACLLink, getName func(ReadTxn, string) (string, error)) (int, error) { var numValid int for linkIndex, link := range links { if link.ID != "" { @@ -152,12 +152,12 @@ func resolveACLLinks(tx ReadTxn, links []pbacl.ACLLink, getName func(ReadTxn, st // associated with the ID of the link. Ideally this will be a no-op if the names are already correct // however if a linked resource was renamed it might be stale. This function will treat the incoming // links with copy-on-write semantics and its output will indicate whether any modifications were made. -func fixupACLLinks(tx ReadTxn, original []pbacl.ACLLink, getName func(ReadTxn, string) (string, error)) ([]pbacl.ACLLink, bool, error) { +func fixupACLLinks(tx ReadTxn, original []*pbacl.ACLLink, getName func(ReadTxn, string) (string, error)) ([]*pbacl.ACLLink, bool, error) { owned := false links := original - cloneLinks := func(l []pbacl.ACLLink, copyNumLinks int) []pbacl.ACLLink { - clone := make([]pbacl.ACLLink, copyNumLinks) + cloneLinks := func(l []*pbacl.ACLLink, copyNumLinks int) []*pbacl.ACLLink { + clone := make([]*pbacl.ACLLink, copyNumLinks) copy(clone, l[:copyNumLinks]) return clone } @@ -183,7 +183,7 @@ func fixupACLLinks(tx ReadTxn, original []pbacl.ACLLink, getName func(ReadTxn, s } // append the corrected link - links = append(links, pbacl.ACLLink{ID: link.ID, Name: name}) + links = append(links, &pbacl.ACLLink{ID: link.ID, Name: name}) } else if owned { links = append(links, link) } diff --git a/agent/consul/state/acl_test.go b/agent/consul/state/acl_test.go index c86527cd1..358b1dea8 100644 --- a/agent/consul/state/acl_test.go +++ b/agent/consul/state/acl_test.go @@ -4110,7 +4110,7 @@ func TestStateStore_resolveACLLinks(t *testing.T) { tx := s.db.Txn(false) defer tx.Abort() - links := []pbacl.ACLLink{ + links := []*pbacl.ACLLink{ { Name: "foo", }, @@ -4133,7 +4133,7 @@ func TestStateStore_resolveACLLinks(t *testing.T) { tx := s.db.Txn(false) defer tx.Abort() - links := []pbacl.ACLLink{ + links := []*pbacl.ACLLink{ { ID: "b985e082-25d3-45a9-9dd8-fd1a41b83b0d", }, @@ -4166,7 +4166,7 @@ func TestStateStore_resolveACLLinks(t *testing.T) { tx := s.db.Txn(false) defer tx.Abort() - links := []pbacl.ACLLink{ + links := []*pbacl.ACLLink{ { ID: "b985e082-25d3-45a9-9dd8-fd1a41b83b0d", }, @@ -4186,7 +4186,7 @@ func TestStateStore_resolveACLLinks(t *testing.T) { func TestStateStore_fixupACLLinks(t *testing.T) { t.Parallel() - links := []pbacl.ACLLink{ + links := []*pbacl.ACLLink{ { ID: "40b57f86-97ea-40e4-a99a-c399cc81f4dd", Name: "foo", diff --git a/agent/http.go b/agent/http.go index e039c2c7c..ba0067b62 100644 --- a/agent/http.go +++ b/agent/http.go @@ -843,7 +843,7 @@ func serveHandlerWithHeaders(h http.Handler, headers map[string]string) http.Han // parseWait is used to parse the ?wait and ?index query params // Returns true on error -func parseWait(resp http.ResponseWriter, req *http.Request, b structs.QueryOptionsCompat) bool { +func parseWait(resp http.ResponseWriter, req *http.Request, b QueryOptionsCompat) bool { query := req.URL.Query() if wait := query.Get("wait"); wait != "" { dur, err := time.ParseDuration(wait) @@ -868,7 +868,7 @@ func parseWait(resp http.ResponseWriter, req *http.Request, b structs.QueryOptio // parseCacheControl parses the CacheControl HTTP header value. 
So far we only // support maxage directive. -func parseCacheControl(resp http.ResponseWriter, req *http.Request, b structs.QueryOptionsCompat) bool { +func parseCacheControl(resp http.ResponseWriter, req *http.Request, b QueryOptionsCompat) bool { raw := strings.ToLower(req.Header.Get("Cache-Control")) if raw == "" { @@ -926,7 +926,7 @@ func parseCacheControl(resp http.ResponseWriter, req *http.Request, b structs.Qu // parseConsistency is used to parse the ?stale and ?consistent query params. // Returns true on error -func (s *HTTPHandlers) parseConsistency(resp http.ResponseWriter, req *http.Request, b structs.QueryOptionsCompat) bool { +func (s *HTTPHandlers) parseConsistency(resp http.ResponseWriter, req *http.Request, b QueryOptionsCompat) bool { query := req.URL.Query() defaults := true if _, ok := query["stale"]; ok { @@ -1130,7 +1130,7 @@ func parseMetaPair(raw string) (string, string) { // parse is a convenience method for endpoints that need to use both parseWait // and parseDC. -func (s *HTTPHandlers) parse(resp http.ResponseWriter, req *http.Request, dc *string, b structs.QueryOptionsCompat) bool { +func (s *HTTPHandlers) parse(resp http.ResponseWriter, req *http.Request, dc *string, b QueryOptionsCompat) bool { s.parseDC(req, dc) var token string s.parseTokenWithDefault(req, &token) @@ -1190,3 +1190,31 @@ func getPathSuffixUnescaped(path string, prefixToTrim string) (string, error) { return suffixUnescaped, nil } + +func setMetaProtobuf(resp http.ResponseWriter, queryMeta *pbcommon.QueryMeta) { + qm := new(structs.QueryMeta) + pbcommon.QueryMetaToStructs(queryMeta, qm) + setMeta(resp, qm) +} + +type QueryOptionsCompat interface { + GetAllowStale() bool + SetAllowStale(bool) + + GetRequireConsistent() bool + SetRequireConsistent(bool) + + GetUseCache() bool + SetUseCache(bool) + + SetFilter(string) + SetToken(string) + + SetMustRevalidate(bool) + SetMaxAge(time.Duration) + SetMaxStaleDuration(time.Duration) + SetStaleIfError(time.Duration) + + SetMaxQueryTime(time.Duration) + SetMinQueryIndex(uint64) +} diff --git a/agent/structs/protobuf_compat.go b/agent/structs/protobuf_compat.go index 143bd97e3..860d971c3 100644 --- a/agent/structs/protobuf_compat.go +++ b/agent/structs/protobuf_compat.go @@ -4,34 +4,6 @@ import ( "time" ) -// QueryOptionsCompat is the interface that both the structs.QueryOptions -// and the proto/pbcommongogo.QueryOptions structs need to implement so that they -// can be operated on interchangeably -type QueryOptionsCompat interface { - GetToken() string - SetToken(string) - GetMinQueryIndex() uint64 - SetMinQueryIndex(uint64) - GetMaxQueryTime() (time.Duration, error) - SetMaxQueryTime(time.Duration) - GetAllowStale() bool - SetAllowStale(bool) - GetRequireConsistent() bool - SetRequireConsistent(bool) - GetUseCache() bool - SetUseCache(bool) - GetMaxStaleDuration() (time.Duration, error) - SetMaxStaleDuration(time.Duration) - GetMaxAge() (time.Duration, error) - SetMaxAge(time.Duration) - GetMustRevalidate() bool - SetMustRevalidate(bool) - GetStaleIfError() (time.Duration, error) - SetStaleIfError(time.Duration) - GetFilter() string - SetFilter(string) -} - // GetToken helps implement the QueryOptionsCompat interface // Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetToken() string { diff --git a/agent/structs/structs.go b/agent/structs/structs.go index ca4a7c849..96af7c471 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -2575,10 +2575,6 @@ type ProtoMarshaller interface { } func EncodeProtoInterface(t 
MessageType, message interface{}) ([]byte, error) { - if marshaller, ok := message.(ProtoMarshaller); ok { - return EncodeProtoGogo(t, marshaller) - } - if marshaller, ok := message.(proto.Message); ok { return EncodeProto(t, marshaller) } @@ -2586,15 +2582,6 @@ func EncodeProtoInterface(t MessageType, message interface{}) ([]byte, error) { return nil, fmt.Errorf("message does not implement the ProtoMarshaller interface: %T", message) } -func EncodeProtoGogo(t MessageType, message ProtoMarshaller) ([]byte, error) { - data := make([]byte, message.Size()+1) - data[0] = uint8(t) - if _, err := message.MarshalTo(data[1:]); err != nil { - return nil, err - } - return data, nil -} - func EncodeProto(t MessageType, pb proto.Message) ([]byte, error) { data := make([]byte, proto.Size(pb)+1) data[0] = uint8(t) @@ -2612,11 +2599,6 @@ func DecodeProto(buf []byte, pb proto.Message) error { return proto.Unmarshal(buf, pb) } -func DecodeProtoGogo(buf []byte, out ProtoMarshaller) error { - // Note that this assumes the leading byte indicating the type as already been stripped off. - return out.Unmarshal(buf) -} - // CompoundResponse is an interface for gathering multiple responses. It is // used in cross-datacenter RPC calls where more than 1 datacenter is // expected to reply. diff --git a/api/namespace.go b/api/namespace.go index bfc5aff17..65cc6f3f3 100644 --- a/api/namespace.go +++ b/api/namespace.go @@ -1,6 +1,7 @@ package api import ( + "encoding/json" "fmt" "time" ) @@ -38,6 +39,25 @@ type Namespace struct { ModifyIndex uint64 `json:"ModifyIndex,omitempty"` } +func (n *Namespace) UnmarshalJSON(data []byte) error { + type Alias Namespace + aux := struct { + DeletedAtSnake *time.Time `json:"deleted_at"` + *Alias + }{ + Alias: (*Alias)(n), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if n.DeletedAt == nil && aux.DeletedAtSnake != nil { + n.DeletedAt = aux.DeletedAtSnake + } + + return nil +} + // NamespaceACLConfig is the Namespace specific ACL configuration container type NamespaceACLConfig struct { // PolicyDefaults is the list of policies that should be used for the parent authorizer @@ -48,6 +68,32 @@ type NamespaceACLConfig struct { RoleDefaults []ACLLink `json:"RoleDefaults" alias:"role_defaults"` } +func (n *NamespaceACLConfig) UnmarshalJSON(data []byte) error { + type Alias NamespaceACLConfig + aux := struct { + PolicyDefaultsSnake []ACLLink `json:"policy_defaults"` + RoleDefaultsSnake []ACLLink `json:"role_defaults"` + *Alias + }{ + Alias: (*Alias)(n), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if n.PolicyDefaults == nil { + for _, pd := range aux.PolicyDefaultsSnake { + n.PolicyDefaults = append(n.PolicyDefaults, pd) + } + } + if n.RoleDefaults == nil { + for _, pd := range aux.RoleDefaultsSnake { + n.RoleDefaults = append(n.RoleDefaults, pd) + } + } + return nil +} + // Namespaces can be used to manage Namespaces in Consul Enterprise.. 
type Namespaces struct { c *Client diff --git a/proto/pbacl/acl.go b/proto/pbacl/acl.go new file mode 100644 index 000000000..ec64d5929 --- /dev/null +++ b/proto/pbacl/acl.go @@ -0,0 +1,19 @@ +package pbacl + +import ( + "github.com/hashicorp/consul/api" +) + +func (a *ACLLink) ToAPI() api.ACLLink { + return api.ACLLink{ + ID: a.ID, + Name: a.Name, + } +} + +func ACLLinkFromAPI(a api.ACLLink) *ACLLink { + return &ACLLink{ + ID: a.ID, + Name: a.Name, + } +} diff --git a/proto/pbacl/acl.pb.go b/proto/pbacl/acl.pb.go index 6c52c8615..d91f00ed5 100644 --- a/proto/pbacl/acl.pb.go +++ b/proto/pbacl/acl.pb.go @@ -5,7 +5,6 @@ package pbacl import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" io "io" math "math" @@ -24,8 +23,12 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ACLLink struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty" hash:"ignore"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // @gotags: hash:ignore-" + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ACLLink) Reset() { *m = ACLLink{} } @@ -61,6 +64,20 @@ func (m *ACLLink) XXX_DiscardUnknown() { var xxx_messageInfo_ACLLink proto.InternalMessageInfo +func (m *ACLLink) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *ACLLink) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func init() { proto.RegisterType((*ACLLink)(nil), "acl.ACLLink") } @@ -68,19 +85,16 @@ func init() { func init() { proto.RegisterFile("proto/pbacl/acl.proto", fileDescriptor_ad2d2c73a6a0d8b5) } var fileDescriptor_ad2d2c73a6a0d8b5 = []byte{ - // 193 bytes of a gzipped FileDescriptorProto + // 145 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x48, 0x4a, 0x4c, 0xce, 0xd1, 0x4f, 0x4c, 0xce, 0xd1, 0x03, 0xf3, 0x85, 0x98, - 0x13, 0x93, 0x73, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0x21, 0xf2, 0x20, 0x16, 0x44, 0x4a, 0xc9, - 0x81, 0x8b, 0xdd, 0xd1, 0xd9, 0xc7, 0x27, 0x33, 0x2f, 0x5b, 0x88, 0x8f, 0x8b, 0xc9, 0xd3, 0x45, - 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x88, 0xc9, 0xd3, 0x45, 0x48, 0x95, 0x8b, 0xc5, 0x2f, 0x31, - 0x37, 0x55, 0x82, 0x09, 0x24, 0xe2, 0x24, 0xf8, 0xe9, 0x9e, 0x3c, 0x6f, 0x46, 0x62, 0x71, 0x86, - 0x95, 0x52, 0x66, 0x7a, 0x5e, 0x7e, 0x51, 0xaa, 0x52, 0x10, 0x58, 0xda, 0xc9, 0xf3, 0xc4, 0x43, - 0x39, 0x86, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, - 0x63, 0x39, 0x86, 0x19, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, - 0x4a, 0x3d, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0x64, 0x42, 0x66, - 0x72, 0x7e, 0x51, 0x81, 0x7e, 0x72, 0x7e, 0x5e, 0x71, 0x69, 0x8e, 0x3e, 0x92, 0x8b, 0x93, 0xd8, - 0xc0, 0x1c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x29, 0x0f, 0xd4, 0xf1, 0xc7, 0x00, 0x00, + 0x13, 0x93, 0x73, 0x94, 0x74, 0xb9, 0xd8, 0x1d, 0x9d, 0x7d, 0x7c, 0x32, 0xf3, 0xb2, 0x85, 0xf8, + 0xb8, 0x98, 0x3c, 0x5d, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x84, 0x84, + 0xb8, 0x58, 0xfc, 0x12, 0x73, 0x53, 0x25, 0x98, 0xc0, 0x22, 0x60, 0xb6, 0x93, 0xe5, 0x89, 0x47, + 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 
0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x94, + 0x7a, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x46, 0x62, 0x71, 0x46, + 0x66, 0x72, 0x7e, 0x51, 0x81, 0x7e, 0x72, 0x7e, 0x5e, 0x71, 0x69, 0x8e, 0x3e, 0x92, 0xc5, 0x49, + 0x6c, 0x60, 0x8e, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xe6, 0xfd, 0xff, 0x8e, 0x00, 0x00, 0x00, } @@ -104,6 +118,10 @@ func (m *ACLLink) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) @@ -146,6 +164,9 @@ func (m *ACLLink) Size() (n int) { if l > 0 { n += 1 + l + sovAcl(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -260,6 +281,7 @@ func (m *ACLLink) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } diff --git a/proto/pbacl/acl.proto b/proto/pbacl/acl.proto index 60cfefb3d..58fa65ae1 100644 --- a/proto/pbacl/acl.proto +++ b/proto/pbacl/acl.proto @@ -4,21 +4,8 @@ package acl; option go_package = "github.com/hashicorp/consul/proto/pbacl"; - -// Go Modules now includes the version in the filepath for packages within GOPATH/pkg/mode -// Therefore unless we want to hardcode a version here like -// github.com/gogo/protobuf@v1.3.0/gogoproto/gogo.proto then the only other choice is to -// have a more relative import and pass the right import path to protoc. I don't like it -// but its necessary. -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_unkeyed_all) = false; -option (gogoproto.goproto_unrecognized_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_sizecache_all) = false; - message ACLLink { string ID = 1; - string Name = 2 - [(gogoproto.moretags) = "hash:\"ignore\""]; -} \ No newline at end of file + // @gotags: hash:ignore-" + string Name = 2; +} diff --git a/proto/pbcommon/common.go b/proto/pbcommon/common.go index 713089e48..f8211604d 100644 --- a/proto/pbcommon/common.go +++ b/proto/pbcommon/common.go @@ -147,3 +147,32 @@ func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, func (td TargetDatacenter) RequestDatacenter() string { return td.Datacenter } + +// SetLastContact is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetLastContact(lastContact time.Duration) { + q.LastContact = structs.DurationToProto(lastContact) +} + +// SetKnownLeader is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetKnownLeader(knownLeader bool) { + q.KnownLeader = knownLeader +} + +// SetIndex is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetIndex(index uint64) { + q.Index = index +} + +// SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { + q.ConsistencyLevel = consistencyLevel +} + +func (q *QueryMeta) GetBackend() structs.QueryBackend { + return structs.QueryBackend(0) +} + +// SetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat interface +func (q *QueryMeta) SetResultsFilteredByACLs(v bool) { + q.ResultsFilteredByACLs = v +} diff --git a/proto/prototest/testing.go b/proto/prototest/testing.go new file mode 100644 index 000000000..5b5468ea6 --- /dev/null +++ b/proto/prototest/testing.go @@ -0,0 +1,14 @@ 
+package prototest + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func AssertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { + t.Helper() + if diff := cmp.Diff(x, y, opts...); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} From 2f84aabffed9757789ee3e92dbd01fc283dba3a5 Mon Sep 17 00:00:00 2001 From: Michele Degges Date: Mon, 28 Mar 2022 13:45:53 -0700 Subject: [PATCH 031/785] [RelAPI Onboarding] Add release API metadata file (#12591) --- .release/release-metadata.hcl | 6 ++++ .../content/docs/nia/release-notes/index.mdx | 12 +++++++ website/content/docs/releases/index.mdx | 10 ++++++ .../docs/releases/release-notes/index.mdx | 12 +++++++ .../release-notes/v1_10_0.mdx} | 0 .../release-notes/v1_11_0.mdx} | 0 .../release-notes/v1_9_0.mdx} | 0 website/data/docs-nav-data.json | 35 ++++++++++++++----- website/redirects.next.js | 15 ++++++++ 9 files changed, 81 insertions(+), 9 deletions(-) create mode 100644 .release/release-metadata.hcl create mode 100644 website/content/docs/nia/release-notes/index.mdx create mode 100644 website/content/docs/releases/index.mdx create mode 100644 website/content/docs/releases/release-notes/index.mdx rename website/content/docs/{release-notes/1-10-0.mdx => releases/release-notes/v1_10_0.mdx} (100%) rename website/content/docs/{release-notes/1-11-0.mdx => releases/release-notes/v1_11_0.mdx} (100%) rename website/content/docs/{release-notes/1-9-0.mdx => releases/release-notes/v1_9_0.mdx} (100%) diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl new file mode 100644 index 000000000..fedd5c53f --- /dev/null +++ b/.release/release-metadata.hcl @@ -0,0 +1,6 @@ +url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/consul" +url_docker_registry_ecr = "https://gallery.ecr.aws/hashicorp/consul" +url_license = "https://github.com/hashicorp/consul/blob/main/LICENSE" +url_project_website = "https://www.consul.io" +url_release_notes = "https://www.consul.io/docs/release-notes" +url_source_repository = "https://github.com/hashicorp/consul" \ No newline at end of file diff --git a/website/content/docs/nia/release-notes/index.mdx b/website/content/docs/nia/release-notes/index.mdx new file mode 100644 index 000000000..f6936d891 --- /dev/null +++ b/website/content/docs/nia/release-notes/index.mdx @@ -0,0 +1,12 @@ +--- +layout: docs +page_title: Release Notes +description: |- + Consul-Terraform-Sync release notes +--- + +# Release Notes + +The side bar to the left has release notes for all major releases of CTS. + +Documentation for patch releases is available at the [Consul-Terraform-Sync changelog](https://github.com/hashicorp/consul-terraform-sync/blob/main/CHANGELOG.md). diff --git a/website/content/docs/releases/index.mdx b/website/content/docs/releases/index.mdx new file mode 100644 index 000000000..0dae70591 --- /dev/null +++ b/website/content/docs/releases/index.mdx @@ -0,0 +1,10 @@ +--- +layout: docs +page_title: Releases +description: |- + Consul releases +--- + +# Downloads + +Downloads of Consul can be found on the [HashiCorp Release Page](https://github.com/hashicorp/consul/releases/). 
diff --git a/website/content/docs/releases/release-notes/index.mdx b/website/content/docs/releases/release-notes/index.mdx new file mode 100644 index 000000000..839fe0bd2 --- /dev/null +++ b/website/content/docs/releases/release-notes/index.mdx @@ -0,0 +1,12 @@ +--- +layout: docs +page_title: Release Notes +description: |- + Consul release notes +--- + +# Release Notes + +The side bar to the left has release notes for all major releases of Consul. + +Documentation for patch releases (0.1.x) is available at the [Consul changelog](https://github.com/hashicorp/consul/blob/main/CHANGELOG.md). diff --git a/website/content/docs/release-notes/1-10-0.mdx b/website/content/docs/releases/release-notes/v1_10_0.mdx similarity index 100% rename from website/content/docs/release-notes/1-10-0.mdx rename to website/content/docs/releases/release-notes/v1_10_0.mdx diff --git a/website/content/docs/release-notes/1-11-0.mdx b/website/content/docs/releases/release-notes/v1_11_0.mdx similarity index 100% rename from website/content/docs/release-notes/1-11-0.mdx rename to website/content/docs/releases/release-notes/v1_11_0.mdx diff --git a/website/content/docs/release-notes/1-9-0.mdx b/website/content/docs/releases/release-notes/v1_9_0.mdx similarity index 100% rename from website/content/docs/release-notes/1-9-0.mdx rename to website/content/docs/releases/release-notes/v1_9_0.mdx diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index d0097e387..2f5934a12 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -818,6 +818,10 @@ { "title": "Release Notes", "routes": [ + { + "title": "Overview", + "path": "nia/release-notes" + }, { "title": "v0.5.0", "path": "nia/release-notes/0-5-0" @@ -1145,19 +1149,32 @@ ] }, { - "title": "Release Notes", + "title": "Releases", "routes": [ { - "title": "1.11.0", - "path": "release-notes/1-11-0" + "title": "Overview", + "path": "releases" }, { - "title": "1.10.0", - "path": "release-notes/1-10-0" - }, - { - "title": "1.9.0", - "path": "release-notes/1-9-0" + "title": "Release Notes", + "routes": [ + { + "title": "Overview", + "path": "releases/release-notes" + }, + { + "title": "v1.11.0", + "path": "releases/release-notes/v1_11_0" + }, + { + "title": "v1.10.0", + "path": "releases/release-notes/v1_10_0" + }, + { + "title": "v1.9.0", + "path": "releases/release-notes/v1_9_0" + } + ] } ] }, diff --git a/website/redirects.next.js b/website/redirects.next.js index 00c51dab3..484d912a1 100644 --- a/website/redirects.next.js +++ b/website/redirects.next.js @@ -1239,4 +1239,19 @@ module.exports = [ destination: '/docs/k8s/operations/tls-on-existing-cluster', permanent: true, }, + { + source: '/docs/release-notes/1-11-0', + destination: '/docs/releases/release-notes/v1_11_0', + permanent: true, + }, + { + source: '/docs/release-notes/1-10-0', + destination: '/docs/releases/release-notes/v1_10_0', + permanent: true, + }, + { + source: '/docs/release-notes/1-9-0', + destination: '/docs/releases/release-notes/v1_9_0', + permanent: true, + }, ] From ab5b5e85f50da64faee174f0d714d10909473325 Mon Sep 17 00:00:00 2001 From: Eric Date: Mon, 28 Mar 2022 17:17:50 -0400 Subject: [PATCH 032/785] remove the rest of gogo --- .circleci/config.yml | 4 +- GNUmakefile | 4 +- .../private/internal/testservice/simple.pb.go | 483 +--- .../services/subscribe/subscribe_test.go | 7 +- agent/rpcclient/health/view_test.go | 5 +- agent/structs/protobuf_compat.go | 29 - agent/structs/structs.go | 20 - agent/submatview/store_test.go | 12 +- 
build-support/scripts/proto-gen-entry.sh | 191 +- build-support/scripts/proto-gen-no-gogo.sh | 169 -- build-support/scripts/proto-gen.sh | 74 +- go.mod | 2 +- proto/pbacl/acl.pb.go | 314 +-- proto/pbcommongogo/common.gen.go | 70 - proto/pbcommongogo/common.go | 303 --- proto/pbcommongogo/common.pb.binary.go | 78 - proto/pbcommongogo/common.pb.go | 2036 ----------------- proto/pbcommongogo/common.proto | 182 -- proto/pbcommongogo/common_oss.go | 25 - proto/pbservice/convert_pbstruct.go | 1 - proto/pbutil/pbutil.go | 23 - proto/translate.go | 68 - proto/translate_test.go | 86 - 23 files changed, 273 insertions(+), 3913 deletions(-) mode change 100644 => 100755 build-support/scripts/proto-gen-entry.sh delete mode 100755 build-support/scripts/proto-gen-no-gogo.sh delete mode 100644 proto/pbcommongogo/common.gen.go delete mode 100644 proto/pbcommongogo/common.go delete mode 100644 proto/pbcommongogo/common.pb.binary.go delete mode 100644 proto/pbcommongogo/common.pb.go delete mode 100644 proto/pbcommongogo/common.proto delete mode 100644 proto/pbcommongogo/common_oss.go delete mode 100644 proto/pbutil/pbutil.go delete mode 100644 proto/translate.go delete mode 100644 proto/translate_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index e637d0d08..db57fb786 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -235,11 +235,9 @@ jobs: sudo chmod -R a+Xr /usr/local/include/google/ rm protoc-*.zip - run: - name: Install gogo/protobuf + name: Install protobuf command: | - gogo_version=$(go list -m github.com/gogo/protobuf | awk '{print $2}') go install -v github.com/hashicorp/protoc-gen-go-binary@master - go install -v github.com/gogo/protobuf/protoc-gen-gofast@${gogo_version} go install -v github.com/favadi/protoc-go-inject-tag@v1.3.0 go install -v github.com/golang/protobuf/protoc-gen-go@v1.3.5 diff --git a/GNUmakefile b/GNUmakefile index 578e826c5..fa9fcb600 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -2,13 +2,11 @@ # https://www.consul.io/docs/install#compiling-from-source SHELL = bash -GOGOVERSION?=$(shell grep github.com/gogo/protobuf go.mod | awk '{print $$2}') GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ github.com/hashicorp/go-bindata/go-bindata@master \ golang.org/x/tools/cmd/cover@master \ golang.org/x/tools/cmd/stringer@master \ - github.com/gogo/protobuf/protoc-gen-gofast@$(GOGOVERSION) \ github.com/hashicorp/protoc-gen-go-binary@master \ github.com/vektra/mockery/cmd/mockery@master \ github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ @@ -347,7 +345,7 @@ proto: $(PROTOGOFILES) $(PROTOGOBINFILES) %.pb.go %.pb.binary.go: %.proto - @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen-entry.sh --grpc --import-replace "$<" + @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc --import-replace "$<" .PHONY: module-versions # Print a list of modules which can be updated. diff --git a/agent/grpc/private/internal/testservice/simple.pb.go b/agent/grpc/private/internal/testservice/simple.pb.go index dc2835664..bfd847a28 100644 --- a/agent/grpc/private/internal/testservice/simple.pb.go +++ b/agent/grpc/private/internal/testservice/simple.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. 
// source: agent/grpc/private/internal/testservice/simple.proto package testservice @@ -10,9 +10,7 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - io "io" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -39,26 +37,18 @@ func (*Req) ProtoMessage() {} func (*Req) Descriptor() ([]byte, []int) { return fileDescriptor_98af0751f806f450, []int{0} } + func (m *Req) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Req.Unmarshal(m, b) } func (m *Req) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Req.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Req.Marshal(b, m, deterministic) } func (m *Req) XXX_Merge(src proto.Message) { xxx_messageInfo_Req.Merge(m, src) } func (m *Req) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Req.Size(m) } func (m *Req) XXX_DiscardUnknown() { xxx_messageInfo_Req.DiscardUnknown(m) @@ -87,26 +77,18 @@ func (*Resp) ProtoMessage() {} func (*Resp) Descriptor() ([]byte, []int) { return fileDescriptor_98af0751f806f450, []int{1} } + func (m *Resp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_Resp.Unmarshal(m, b) } func (m *Resp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_Resp.Marshal(b, m, deterministic) } func (m *Resp) XXX_Merge(src proto.Message) { xxx_messageInfo_Resp.Merge(m, src) } func (m *Resp) XXX_Size() int { - return m.Size() + return xxx_messageInfo_Resp.Size(m) } func (m *Resp) XXX_DiscardUnknown() { xxx_messageInfo_Resp.DiscardUnknown(m) @@ -138,30 +120,28 @@ func init() { } var fileDescriptor_98af0751f806f450 = []byte{ - // 214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x49, 0x4c, 0x4f, 0xcd, - 0x2b, 0xd1, 0x4f, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x28, 0xca, 0x2c, 0x4b, 0x2c, 0x49, 0xd5, 0xcf, - 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x2f, 0x49, 0x2d, 0x2e, 0x29, 0x4e, 0x2d, 0x2a, - 0xcb, 0x4c, 0x4e, 0xd5, 0x2f, 0xce, 0xcc, 0x2d, 0xc8, 0x49, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0xe2, 0x46, 0x92, 0x51, 0x52, 0xe5, 0x62, 0x0e, 0x4a, 0x2d, 0x14, 0x92, 0xe3, 0xe2, 0x72, - 0x49, 0x2c, 0x49, 0x4c, 0x4e, 0x05, 0xe9, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x42, 0x12, - 0x51, 0x72, 0xe3, 0x62, 0x09, 0x4a, 0x2d, 0x2e, 0x00, 0xa9, 0x0b, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, - 0xf2, 0x4b, 0xcc, 0x4d, 0x85, 0xa9, 0x43, 0x88, 0xa0, 0x99, 0xc3, 0x84, 0x6e, 0x8e, 0x51, 0x2e, - 0x17, 0x5b, 0x30, 0xd8, 0x2d, 0x42, 0x46, 0x5c, 0x9c, 0xc1, 0xf9, 0xb9, 0xa9, 0x25, 0x19, 0x99, - 0x79, 0xe9, 0x42, 0x02, 0x7a, 0x48, 0x6e, 0xd2, 0x0b, 0x4a, 0x2d, 0x94, 0x12, 0x44, 0x13, 0x29, - 0x2e, 0x50, 0x62, 0x10, 0xd2, 0xe7, 0x62, 0x71, 0xcb, 0xc9, 0x2f, 0x27, 0x52, 0xb9, 0x01, 0xa3, - 0x93, 0xc0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe3, - 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0x38, 0x0c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x76, 0xce, - 0x88, 0x7d, 0x3b, 0x01, 0x00, 0x00, + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xcd, 0x0a, 0x82, 0x40, + 0x14, 0x85, 0xb3, 0x44, 0xf0, 0xb6, 0xa9, 0x59, 0x45, 0x8b, 0x08, 0x21, 0x68, 0xe5, 0x84, 0xf5, + 0x08, 0xe1, 0xb2, 0xc5, 0xf8, 0x04, 0x93, 0x5c, 0x6c, 0xc0, 0xf9, 0x71, 0xe6, 0x62, 0xaf, 0x1f, + 0x0a, 0x91, 0xb8, 0x6a, 0xfb, 0xdd, 0xef, 0x1e, 0xce, 0x81, 0x9b, 0x6c, 0xd0, 0x10, 0x6f, 0xbc, + 0xab, 0xb9, 0xf3, 0xaa, 0x97, 0x84, 0x5c, 0x19, 0x42, 0x6f, 0x64, 0xcb, 0x09, 0x03, 0x05, 0xf4, + 0xbd, 0xaa, 0x91, 0x07, 0xa5, 0x5d, 0x8b, 0xb9, 0xf3, 0x96, 0x2c, 0x5b, 0x4f, 0x2e, 0xd9, 0x09, + 0x56, 0x02, 0x3b, 0x76, 0x00, 0xb8, 0x4b, 0x92, 0x35, 0x0e, 0xdf, 0xbb, 0xe8, 0x18, 0x9d, 0x53, + 0x31, 0x21, 0x59, 0x09, 0xb1, 0xc0, 0xe0, 0x06, 0xaf, 0x42, 0xdf, 0xa3, 0x7f, 0x48, 0x8d, 0x5f, + 0xef, 0x47, 0x66, 0x39, 0xcb, 0x79, 0x4e, 0xa1, 0x21, 0xa9, 0xc6, 0x2e, 0xac, 0x80, 0xb4, 0xb2, + 0x1a, 0xe9, 0xa5, 0x4c, 0xc3, 0x36, 0xf9, 0xa4, 0x53, 0x2e, 0xb0, 0xdb, 0x6f, 0x67, 0x24, 0xb8, + 0x6c, 0xc1, 0x38, 0xc4, 0x65, 0x6b, 0xdf, 0x7f, 0xea, 0x97, 0xe8, 0x99, 0x8c, 0x8b, 0xaf, 0x9f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x5c, 0xf5, 0xcb, 0x29, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // SimpleClient is the client API for Simple service. // @@ -172,10 +152,10 @@ type SimpleClient interface { } type simpleClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSimpleClient(cc *grpc.ClientConn) SimpleClient { +func NewSimpleClient(cc grpc.ClientConnInterface) SimpleClient { return &simpleClient{cc} } @@ -298,414 +278,3 @@ var _Simple_serviceDesc = grpc.ServiceDesc{ }, Metadata: "agent/grpc/private/internal/testservice/simple.proto", } - -func (m *Req) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Req) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Req) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintSimple(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Resp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Resp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Resp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintSimple(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0x12 - } - if len(m.ServerName) > 0 { - i -= len(m.ServerName) - 
copy(dAtA[i:], m.ServerName) - i = encodeVarintSimple(dAtA, i, uint64(len(m.ServerName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSimple(dAtA []byte, offset int, v uint64) int { - offset -= sovSimple(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Req) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovSimple(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Resp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ServerName) - if l > 0 { - n += 1 + l + sovSimple(uint64(l)) - } - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovSimple(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSimple(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSimple(x uint64) (n int) { - return sovSimple(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Req) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSimple - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Req: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Req: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSimple - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSimple - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSimple - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSimple(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSimple - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Resp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSimple - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSimple - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSimple - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSimple - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSimple - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSimple - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSimple - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSimple(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSimple - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSimple(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSimple - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSimple - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSimple - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSimple - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSimple - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSimple - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSimple = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSimple = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSimple = fmt.Errorf("proto: unexpected end of group") -) diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index 0a52c0f49..aea7669c9 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -26,7 +26,6 @@ import ( grpc "github.com/hashicorp/consul/agent/grpc/private" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/proto/pbcommongogo" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" @@ -124,7 +123,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { streamHandle, err := streamClient.Subscribe(ctx, &pbsubscribe.SubscribeRequest{ Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis", - Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, }) require.NoError(t, err) @@ -489,7 +488,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis", Datacenter: "dc2", - Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, }) require.NoError(t, err) go recvEvents(chEvents, streamHandle) @@ -746,7 +745,7 @@ node "node1" { Topic: pbsubscribe.Topic_ServiceHealth, Key: "foo", Token: token, - Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, }) require.NoError(t, err) diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index 9dc00150f..96bae37a1 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - 
"github.com/hashicorp/consul/proto/pbcommon" "strings" "testing" "time" @@ -18,7 +17,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/submatview" - "github.com/hashicorp/consul/proto/pbcommongogo" + "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" @@ -75,7 +74,7 @@ func TestHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T) { t.Skip("too slow for testing.Short") } - namespace := getNamespace(pbcommongogo.DefaultEnterpriseMeta.Namespace) + namespace := getNamespace(pbcommon.DefaultEnterpriseMeta.Namespace) streamClient := newStreamClient(validateNamespace(namespace)) ctx, cancel := context.WithCancel(context.Background()) diff --git a/agent/structs/protobuf_compat.go b/agent/structs/protobuf_compat.go index 860d971c3..e3a01cadf 100644 --- a/agent/structs/protobuf_compat.go +++ b/agent/structs/protobuf_compat.go @@ -5,7 +5,6 @@ import ( ) // GetToken helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetToken() string { if m != nil { return m.Token @@ -14,7 +13,6 @@ func (m *QueryOptions) GetToken() string { } // GetMinQueryIndex helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMinQueryIndex() uint64 { if m != nil { return m.MinQueryIndex @@ -23,7 +21,6 @@ func (m *QueryOptions) GetMinQueryIndex() uint64 { } // GetMaxQueryTime helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMaxQueryTime() (time.Duration, error) { if m != nil { return m.MaxQueryTime, nil @@ -32,7 +29,6 @@ func (m *QueryOptions) GetMaxQueryTime() (time.Duration, error) { } // GetAllowStale helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetAllowStale() bool { if m != nil { return m.AllowStale @@ -41,7 +37,6 @@ func (m *QueryOptions) GetAllowStale() bool { } // GetRequireConsistent helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetRequireConsistent() bool { if m != nil { return m.RequireConsistent @@ -50,7 +45,6 @@ func (m *QueryOptions) GetRequireConsistent() bool { } // GetUseCache helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetUseCache() bool { if m != nil { return m.UseCache @@ -59,7 +53,6 @@ func (m *QueryOptions) GetUseCache() bool { } // GetMaxStaleDuration helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { if m != nil { return m.MaxStaleDuration, nil @@ -68,7 +61,6 @@ func (m *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { } // GetMaxAge helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMaxAge() (time.Duration, error) { if m != nil { return m.MaxAge, nil @@ -77,7 +69,6 @@ func (m *QueryOptions) GetMaxAge() (time.Duration, error) { } // GetMustRevalidate helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetMustRevalidate() bool { if m != nil { return m.MustRevalidate @@ -86,7 +77,6 @@ func (m *QueryOptions) GetMustRevalidate() bool { } // 
GetStaleIfError helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetStaleIfError() (time.Duration, error) { if m != nil { return m.StaleIfError, nil @@ -95,7 +85,6 @@ func (m *QueryOptions) GetStaleIfError() (time.Duration, error) { } // GetFilter helps implement the QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryOptions) GetFilter() string { if m != nil { return m.Filter @@ -104,67 +93,56 @@ func (m *QueryOptions) GetFilter() string { } // SetToken is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetToken(token string) { q.Token = token } // SetMinQueryIndex is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMinQueryIndex(minQueryIndex uint64) { q.MinQueryIndex = minQueryIndex } // SetMaxQueryTime is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMaxQueryTime(maxQueryTime time.Duration) { q.MaxQueryTime = maxQueryTime } // SetAllowStale is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetAllowStale(allowStale bool) { q.AllowStale = allowStale } // SetRequireConsistent is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetRequireConsistent(requireConsistent bool) { q.RequireConsistent = requireConsistent } // SetUseCache is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetUseCache(useCache bool) { q.UseCache = useCache } // SetMaxStaleDuration is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMaxStaleDuration(maxStaleDuration time.Duration) { q.MaxStaleDuration = maxStaleDuration } // SetMaxAge is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMaxAge(maxAge time.Duration) { q.MaxAge = maxAge } // SetMustRevalidate is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetMustRevalidate(mustRevalidate bool) { q.MustRevalidate = mustRevalidate } // SetStaleIfError is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) { q.StaleIfError = staleIfError } // SetFilter is needed to implement the structs.QueryOptionsCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryOptions) SetFilter(filter string) { q.Filter = filter } @@ -178,7 +156,6 @@ func (m *QueryMeta) GetIndex() uint64 { } // GetLastContact helps implement the QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryMeta) GetLastContact() (time.Duration, error) { if m != nil { return m.LastContact, nil @@ -187,7 +164,6 @@ func (m *QueryMeta) GetLastContact() (time.Duration, error) { } // GetKnownLeader helps implement the QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryMeta) GetKnownLeader() bool { if m != nil { return m.KnownLeader @@ -196,7 +172,6 @@ func (m *QueryMeta) 
GetKnownLeader() bool { } // GetConsistencyLevel helps implement the QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.pb.go func (m *QueryMeta) GetConsistencyLevel() string { if m != nil { return m.ConsistencyLevel @@ -205,25 +180,21 @@ func (m *QueryMeta) GetConsistencyLevel() string { } // SetLastContact is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetLastContact(lastContact time.Duration) { q.LastContact = lastContact } // SetKnownLeader is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetKnownLeader(knownLeader bool) { q.KnownLeader = knownLeader } // SetIndex is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetIndex(index uint64) { q.Index = index } // SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface -// Copied from proto/pbcommongogo/common.go func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { q.ConsistencyLevel = consistencyLevel } diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 96af7c471..80f27dd95 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -22,7 +22,6 @@ import ( "github.com/hashicorp/serf/coordinate" "github.com/mitchellh/hashstructure" - gtype "github.com/gogo/protobuf/types" ptypes "github.com/golang/protobuf/ptypes" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" @@ -2686,25 +2685,6 @@ func (m MessageType) String() string { } -func DurationToProtoGogo(d time.Duration) gtype.Duration { - return *gtype.DurationProto(d) -} - -func DurationFromProtoGogo(d gtype.Duration) time.Duration { - duration, _ := gtype.DurationFromProto(&d) - return duration -} - -func TimeFromProtoGogo(s *gtype.Timestamp) time.Time { - time, _ := gtype.TimestampFromProto(s) - return time -} - -func TimeToProtoGogo(s time.Time) *gtype.Timestamp { - proto, _ := gtype.TimestampProto(s) - return proto -} - func DurationToProto(d time.Duration) *duration.Duration { return ptypes.DurationProto(d) } diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index 2055cf911..93b04d1e8 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/lib/ttlcache" - "github.com/hashicorp/consul/proto/pbcommongogo" + "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -25,7 +25,7 @@ func TestStore_Get(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( newEndOfSnapshotEvent(2), @@ -232,7 +232,7 @@ func (r *fakeRequest) NewMaterializer() (*Materializer, error) { Token: "abcd", Datacenter: "dc1", Index: index, - Namespace: pbcommongogo.DefaultEnterpriseMeta.Namespace, + Namespace: pbcommon.DefaultEnterpriseMeta.Namespace, } return req }, @@ -292,7 +292,7 @@ func TestStore_Notify(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } 
req.client.QueueEvents( newEndOfSnapshotEvent(2), @@ -361,7 +361,7 @@ func TestStore_Notify_ManyRequests(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) @@ -473,7 +473,7 @@ func TestStore_Run_ExpiresEntries(t *testing.T) { go store.Run(ctx) req := &fakeRequest{ - client: NewTestStreamingClient(pbcommongogo.DefaultEnterpriseMeta.Namespace), + client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) diff --git a/build-support/scripts/proto-gen-entry.sh b/build-support/scripts/proto-gen-entry.sh old mode 100644 new mode 100755 index 639f725cc..50d51be0d --- a/build-support/scripts/proto-gen-entry.sh +++ b/build-support/scripts/proto-gen-entry.sh @@ -1,26 +1,169 @@ #!/usr/bin/env bash -FILENAME=$3 -echo $PWD -if [[ "$FILENAME" =~ .*pbcommon/.* ]]; then - echo "$FILENAME no gogo" - ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 -elif [[ "$FILENAME" =~ .*pbconnect/.* ]]; then - echo "$FILENAME no gogo" - ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 -elif [[ "$FILENAME" =~ .*pbconfig/.* ]]; then - echo "$FILENAME no gogo" - ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 -elif [[ "$FILENAME" =~ .*pbautoconf/.* ]]; then - echo "$FILENAME no gogo" - ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 -elif [[ "$FILENAME" =~ .*pbservice/.* ]]; then - echo "$FILENAME no gogo" - ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 -elif [[ "$FILENAME" =~ .*pbsubscribe/.* ]]; then - echo "$FILENAME no gogo" - ./build-support/scripts/proto-gen-no-gogo.sh $1 $2 $3 -else - echo "$FILENAME gogo" - ./build-support/scripts/proto-gen.sh $1 $2 $3 -fi \ No newline at end of file +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. > /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Description: + Generate the Go files from protobuf definitions. In addition to + running the protoc generator it will also fixup build tags in the + generated code. + +Options: + --import-replace Replace imports of google types with those from the protobuf repo. + --grpc Enable the gRPC plugin + -h | --help Print this help text. 
+EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + local -i grpc=0 + local -i imp_replace=0 + local proto_path= + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + --grpc ) + grpc=1 + shift + ;; + --import-replace ) + imp_replace=1 + shift + ;; + * ) + proto_path="$1" + shift + ;; + esac + done + + if test -z "${proto_path}" + then + err_usage "ERROR: No proto file specified" + return 1 + fi + + go mod download + + local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) + local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") + + + local golang_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp" + golang_proto_imp_replace="${golang_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration" + + local proto_go_path=${proto_path%%.proto}.pb.go + local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go + local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go + + local go_proto_out="paths=source_relative" + if is_set "${grpc}" + then + go_proto_out="${go_proto_out},plugins=grpc" + fi + + if is_set "${imp_replace}" + then + go_proto_out="${go_proto_out},${golang_proto_imp_replace}" + fi + + if test -n "${go_proto_out}" + then + go_proto_out="${go_proto_out}:" + fi + + # How we run protoc probably needs some documentation. + # + # This is the path to where + # -I="${golang_proto_path}/protobuf" \ + local -i ret=0 + status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} (NO GOGO)" + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" + debug_run protoc \ + -I="${golang_proto_path}" \ + -I="${golang_proto_mod_path}" \ + -I="${SOURCE_DIR}" \ + --go_out="${go_proto_out}${SOURCE_DIR}" \ + --go-binary_out="${SOURCE_DIR}" \ + "${proto_path}" + + if test $? -ne 0 + then + err "Failed to run protoc for ${proto_path}" + return 1 + fi + + debug_run protoc-go-inject-tag \ + -input="${proto_go_path}" + + if test $? -ne 0 + then + err "Failed to run protoc-go-inject-tag for ${proto_path}" + return 1 + fi + + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" + + BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') + if test -n "${BUILD_TAGS}" + then + echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" + cat "${proto_go_path}" >> "${proto_go_path}.new" + mv "${proto_go_path}.new" "${proto_go_path}" + + echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" + cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" + mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" + fi + + # note: this has to run after we fix up the build tags above + rm -f "${proto_go_rpcglue_path}" + debug_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" + if test $? -ne 0 + then + err "Failed to generate consul rpc glue outputs from ${proto_path}" + return 1 + fi + + return 0 +} + +main "$@" +exit $? 
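
With the dispatch logic above removed, proto-gen-entry.sh is now the (non-gogo) generator itself rather than a thin wrapper around proto-gen-no-gogo.sh, so a single definition can be regenerated by invoking it directly. A minimal sketch of such an invocation follows, assuming protoc plus the protoc-gen-go, protoc-gen-go-binary and protoc-go-inject-tag plugins are installed, and using proto/pbcommon/common.proto only as a stand-in for whichever definition is being rebuilt:

    # Run from the repository root. Per the script's usage text, --grpc enables
    # the gRPC plugin output and --import-replace maps the google timestamp and
    # duration types onto github.com/golang/protobuf/ptypes.
    ./build-support/scripts/proto-gen-entry.sh --grpc --import-replace \
        proto/pbcommon/common.proto

    # Output paths are derived by the script from the input path:
    #   proto/pbcommon/common.pb.go          (protoc-gen-go, then protoc-go-inject-tag)
    #   proto/pbcommon/common.pb.binary.go   (protoc-gen-go-binary)
    #   proto/pbcommon/common.rpcglue.pb.go  (proto-gen-rpc-glue, where the definition needs it)

This is a sketch of a one-off local run; the build normally drives the same script once per .proto file.
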
diff --git a/build-support/scripts/proto-gen-no-gogo.sh b/build-support/scripts/proto-gen-no-gogo.sh deleted file mode 100755 index 50d51be0d..000000000 --- a/build-support/scripts/proto-gen-no-gogo.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env bash - -SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -SCRIPT_DIR=$(pwd) -pushd ../.. > /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null - -source "${SCRIPT_DIR}/functions.sh" - -function usage { -cat <<-EOF -Usage: ${SCRIPT_NAME} [] - -Description: - Generate the Go files from protobuf definitions. In addition to - running the protoc generator it will also fixup build tags in the - generated code. - -Options: - --import-replace Replace imports of google types with those from the protobuf repo. - --grpc Enable the gRPC plugin - -h | --help Print this help text. -EOF -} - -function err_usage { - err "$1" - err "" - err "$(usage)" -} - -function main { - local -i grpc=0 - local -i imp_replace=0 - local proto_path= - - while test $# -gt 0 - do - case "$1" in - -h | --help ) - usage - return 0 - ;; - --grpc ) - grpc=1 - shift - ;; - --import-replace ) - imp_replace=1 - shift - ;; - * ) - proto_path="$1" - shift - ;; - esac - done - - if test -z "${proto_path}" - then - err_usage "ERROR: No proto file specified" - return 1 - fi - - go mod download - - local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) - local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") - - - local golang_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp" - golang_proto_imp_replace="${golang_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration" - - local proto_go_path=${proto_path%%.proto}.pb.go - local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go - local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go - - local go_proto_out="paths=source_relative" - if is_set "${grpc}" - then - go_proto_out="${go_proto_out},plugins=grpc" - fi - - if is_set "${imp_replace}" - then - go_proto_out="${go_proto_out},${golang_proto_imp_replace}" - fi - - if test -n "${go_proto_out}" - then - go_proto_out="${go_proto_out}:" - fi - - # How we run protoc probably needs some documentation. - # - # This is the path to where - # -I="${golang_proto_path}/protobuf" \ - local -i ret=0 - status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} (NO GOGO)" - echo "debug_run protoc \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" - debug_run protoc \ - -I="${golang_proto_path}" \ - -I="${golang_proto_mod_path}" \ - -I="${SOURCE_DIR}" \ - --go_out="${go_proto_out}${SOURCE_DIR}" \ - --go-binary_out="${SOURCE_DIR}" \ - "${proto_path}" - - if test $? -ne 0 - then - err "Failed to run protoc for ${proto_path}" - return 1 - fi - - debug_run protoc-go-inject-tag \ - -input="${proto_go_path}" - - if test $? 
-ne 0 - then - err "Failed to run protoc-go-inject-tag for ${proto_path}" - return 1 - fi - - echo "debug_run protoc \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" - - BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') - if test -n "${BUILD_TAGS}" - then - echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" - cat "${proto_go_path}" >> "${proto_go_path}.new" - mv "${proto_go_path}.new" "${proto_go_path}" - - echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" - cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" - mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" - fi - - # note: this has to run after we fix up the build tags above - rm -f "${proto_go_rpcglue_path}" - debug_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" - if test $? -ne 0 - then - err "Failed to generate consul rpc glue outputs from ${proto_path}" - return 1 - fi - - return 0 -} - -main "$@" -exit $? diff --git a/build-support/scripts/proto-gen.sh b/build-support/scripts/proto-gen.sh index 82230bb74..4ecf9acd8 100755 --- a/build-support/scripts/proto-gen.sh +++ b/build-support/scripts/proto-gen.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash + SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null SCRIPT_DIR=$(pwd) @@ -22,7 +23,7 @@ Description: generated code. Options: - --import-replace Replace imports of google types with those from the gogo/protobuf repo. + --import-replace Replace imports of google types with those from the protobuf repo. --grpc Enable the gRPC plugin -h | --help Print this help text. EOF @@ -67,21 +68,19 @@ function main { return 1 fi - local gogo_proto_path=$(go list -f '{{ .Dir }}' -m github.com/gogo/protobuf) - local gogo_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${gogo_proto_path}") + go mod download - local gogo_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/api/annotations.proto=github.com/gogo/googleapis/google/api" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/field_mask.proto=github.com/gogo/protobuf/types" - gogo_proto_imp_replace="${gogo_proto_imp_replace},Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types" + local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) + local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") + + + local golang_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp" + golang_proto_imp_replace="${golang_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration" local proto_go_path=${proto_path%%.proto}.pb.go local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go - + local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go + local go_proto_out="paths=source_relative" if is_set 
"${grpc}" then @@ -90,7 +89,7 @@ function main { if is_set "${imp_replace}" then - go_proto_out="${go_proto_out},${gogo_proto_imp_replace}" + go_proto_out="${go_proto_out},${golang_proto_imp_replace}" fi if test -n "${go_proto_out}" @@ -100,36 +99,69 @@ function main { # How we run protoc probably needs some documentation. # - # This is the path to where - # -I="${gogo_proto_path}/protobuf" \ + # This is the path to where + # -I="${golang_proto_path}/protobuf" \ local -i ret=0 status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path}" + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" debug_run protoc \ - -I="${gogo_proto_path}/protobuf" \ - -I="${gogo_proto_path}" \ - -I="${gogo_proto_mod_path}" \ + -I="${golang_proto_path}" \ + -I="${golang_proto_mod_path}" \ -I="${SOURCE_DIR}" \ - --gofast_out="${go_proto_out}${SOURCE_DIR}" \ + --go_out="${go_proto_out}${SOURCE_DIR}" \ --go-binary_out="${SOURCE_DIR}" \ "${proto_path}" + if test $? -ne 0 then - err "Failed to generate outputs from ${proto_path}" + err "Failed to run protoc for ${proto_path}" return 1 fi + debug_run protoc-go-inject-tag \ + -input="${proto_go_path}" + + if test $? -ne 0 + then + err "Failed to run protoc-go-inject-tag for ${proto_path}" + return 1 + fi + + echo "debug_run protoc \ + -I=\"${golang_proto_path}\" \ + -I=\"${golang_proto_mod_path}\" \ + -I=\"${SOURCE_DIR}\" \ + --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ + --go-binary_out=\"${SOURCE_DIR}\" \ + \"${proto_path}\"" + BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') if test -n "${BUILD_TAGS}" then echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" cat "${proto_go_path}" >> "${proto_go_path}.new" mv "${proto_go_path}.new" "${proto_go_path}" - + echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" fi + # note: this has to run after we fix up the build tags above + rm -f "${proto_go_rpcglue_path}" + debug_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" + if test $? -ne 0 + then + err "Failed to generate consul rpc glue outputs from ${proto_path}" + return 1 + fi + return 0 } diff --git a/go.mod b/go.mod index 628a30202..e438f1bf2 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/envoyproxy/go-control-plane v0.9.5 github.com/frankban/quicktest v1.11.0 // indirect github.com/fsnotify/fsnotify v1.5.1 - github.com/gogo/protobuf v1.3.2 + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.3.5 github.com/google/go-cmp v0.5.6 github.com/google/go-querystring v1.0.0 // indirect diff --git a/proto/pbacl/acl.pb.go b/proto/pbacl/acl.pb.go index d91f00ed5..baaa994e9 100644 --- a/proto/pbacl/acl.pb.go +++ b/proto/pbacl/acl.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pbacl/acl.proto package pbacl @@ -6,9 +6,7 @@ package pbacl import ( fmt "fmt" proto "github.com/golang/protobuf/proto" - io "io" math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -37,26 +35,18 @@ func (*ACLLink) ProtoMessage() {} func (*ACLLink) Descriptor() ([]byte, []int) { return fileDescriptor_ad2d2c73a6a0d8b5, []int{0} } + func (m *ACLLink) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return xxx_messageInfo_ACLLink.Unmarshal(m, b) } func (m *ACLLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ACLLink.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return xxx_messageInfo_ACLLink.Marshal(b, m, deterministic) } func (m *ACLLink) XXX_Merge(src proto.Message) { xxx_messageInfo_ACLLink.Merge(m, src) } func (m *ACLLink) XXX_Size() int { - return m.Size() + return xxx_messageInfo_ACLLink.Size(m) } func (m *ACLLink) XXX_DiscardUnknown() { xxx_messageInfo_ACLLink.DiscardUnknown(m) @@ -82,296 +72,18 @@ func init() { proto.RegisterType((*ACLLink)(nil), "acl.ACLLink") } -func init() { proto.RegisterFile("proto/pbacl/acl.proto", fileDescriptor_ad2d2c73a6a0d8b5) } +func init() { + proto.RegisterFile("proto/pbacl/acl.proto", fileDescriptor_ad2d2c73a6a0d8b5) +} var fileDescriptor_ad2d2c73a6a0d8b5 = []byte{ - // 145 bytes of a gzipped FileDescriptorProto + // 128 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x48, 0x4a, 0x4c, 0xce, 0xd1, 0x4f, 0x4c, 0xce, 0xd1, 0x03, 0xf3, 0x85, 0x98, 0x13, 0x93, 0x73, 0x94, 0x74, 0xb9, 0xd8, 0x1d, 0x9d, 0x7d, 0x7c, 0x32, 0xf3, 0xb2, 0x85, 0xf8, 0xb8, 0x98, 0x3c, 0x5d, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x84, 0x84, - 0xb8, 0x58, 0xfc, 0x12, 0x73, 0x53, 0x25, 0x98, 0xc0, 0x22, 0x60, 0xb6, 0x93, 0xe5, 0x89, 0x47, - 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x94, - 0x7a, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x46, 0x62, 0x71, 0x46, - 0x66, 0x72, 0x7e, 0x51, 0x81, 0x7e, 0x72, 0x7e, 0x5e, 0x71, 0x69, 0x8e, 0x3e, 0x92, 0xc5, 0x49, - 0x6c, 0x60, 0x8e, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xe6, 0xfd, 0xff, 0x8e, 0x00, 0x00, - 0x00, + 0xb8, 0x58, 0xfc, 0x12, 0x73, 0x53, 0x25, 0x98, 0xc0, 0x22, 0x60, 0xb6, 0x93, 0x66, 0x94, 0x7a, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x46, 0x62, 0x71, 0x46, 0x66, + 0x72, 0x7e, 0x51, 0x81, 0x7e, 0x72, 0x7e, 0x5e, 0x71, 0x69, 0x8e, 0x3e, 0x92, 0x45, 0x49, 0x6c, + 0x60, 0x8e, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x25, 0x54, 0x7f, 0x7e, 0x00, 0x00, 0x00, } - -func (m *ACLLink) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ACLLink) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ACLLink) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAcl(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintAcl(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintAcl(dAtA []byte, offset int, v uint64) int { - offset -= 
sovAcl(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ACLLink) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovAcl(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAcl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovAcl(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAcl(x uint64) (n int) { - return sovAcl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ACLLink) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAcl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ACLLink: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ACLLink: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAcl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAcl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAcl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAcl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAcl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAcl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAcl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAcl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAcl(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAcl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAcl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAcl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAcl - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAcl - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAcl - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAcl = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAcl = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAcl = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbcommongogo/common.gen.go b/proto/pbcommongogo/common.gen.go deleted file mode 100644 index 925ebb70d..000000000 --- a/proto/pbcommongogo/common.gen.go +++ /dev/null @@ -1,70 +0,0 @@ -// Code generated by mog. DO NOT EDIT. 
- -package pbcommongogo - -import "github.com/hashicorp/consul/agent/structs" - -func QueryMetaToStructs(s *QueryMeta, t *structs.QueryMeta) { - if s == nil { - return - } - t.Index = s.Index - t.LastContact = structs.DurationFromProtoGogo(s.LastContact) - t.KnownLeader = s.KnownLeader - t.ConsistencyLevel = s.ConsistencyLevel - t.ResultsFilteredByACLs = s.ResultsFilteredByACLs -} -func QueryMetaFromStructs(t *structs.QueryMeta, s *QueryMeta) { - if s == nil { - return - } - s.Index = t.Index - s.LastContact = structs.DurationToProtoGogo(t.LastContact) - s.KnownLeader = t.KnownLeader - s.ConsistencyLevel = t.ConsistencyLevel - s.ResultsFilteredByACLs = t.ResultsFilteredByACLs -} -func QueryOptionsToStructs(s *QueryOptions, t *structs.QueryOptions) { - if s == nil { - return - } - t.Token = s.Token - t.MinQueryIndex = s.MinQueryIndex - t.MaxQueryTime = structs.DurationFromProtoGogo(s.MaxQueryTime) - t.AllowStale = s.AllowStale - t.RequireConsistent = s.RequireConsistent - t.UseCache = s.UseCache - t.MaxStaleDuration = structs.DurationFromProtoGogo(s.MaxStaleDuration) - t.MaxAge = structs.DurationFromProtoGogo(s.MaxAge) - t.MustRevalidate = s.MustRevalidate - t.Filter = s.Filter -} -func QueryOptionsFromStructs(t *structs.QueryOptions, s *QueryOptions) { - if s == nil { - return - } - s.Token = t.Token - s.MinQueryIndex = t.MinQueryIndex - s.MaxQueryTime = structs.DurationToProtoGogo(t.MaxQueryTime) - s.AllowStale = t.AllowStale - s.RequireConsistent = t.RequireConsistent - s.UseCache = t.UseCache - s.MaxStaleDuration = structs.DurationToProtoGogo(t.MaxStaleDuration) - s.MaxAge = structs.DurationToProtoGogo(t.MaxAge) - s.MustRevalidate = t.MustRevalidate - s.Filter = t.Filter -} -func RaftIndexToStructs(s *RaftIndex, t *structs.RaftIndex) { - if s == nil { - return - } - t.CreateIndex = s.CreateIndex - t.ModifyIndex = s.ModifyIndex -} -func RaftIndexFromStructs(t *structs.RaftIndex, s *RaftIndex) { - if s == nil { - return - } - s.CreateIndex = t.CreateIndex - s.ModifyIndex = t.ModifyIndex -} diff --git a/proto/pbcommongogo/common.go b/proto/pbcommongogo/common.go deleted file mode 100644 index 5cd6d4d64..000000000 --- a/proto/pbcommongogo/common.go +++ /dev/null @@ -1,303 +0,0 @@ -package pbcommongogo - -import ( - "time" - - "github.com/hashicorp/consul/agent/structs" -) - -// IsRead is always true for QueryOption -func (q *QueryOptions) IsRead() bool { - return true -} - -// AllowStaleRead returns whether a stale read should be allowed -func (q *QueryOptions) AllowStaleRead() bool { - return q.AllowStale -} - -func (q *QueryOptions) TokenSecret() string { - return q.Token -} - -func (q *QueryOptions) SetTokenSecret(s string) { - q.Token = s -} - -// SetToken is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetToken(token string) { - q.Token = token -} - -// SetMinQueryIndex is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetMinQueryIndex(minQueryIndex uint64) { - q.MinQueryIndex = minQueryIndex -} - -// SetMaxQueryTime is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetMaxQueryTime(maxQueryTime time.Duration) { - q.MaxQueryTime = structs.DurationToProtoGogo(maxQueryTime) -} - -// SetAllowStale is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetAllowStale(allowStale bool) { - q.AllowStale = allowStale -} - -// SetRequireConsistent is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) 
SetRequireConsistent(requireConsistent bool) { - q.RequireConsistent = requireConsistent -} - -// SetUseCache is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetUseCache(useCache bool) { - q.UseCache = useCache -} - -// SetMaxStaleDuration is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetMaxStaleDuration(maxStaleDuration time.Duration) { - q.MaxStaleDuration = structs.DurationToProtoGogo(maxStaleDuration) -} - -// SetMaxAge is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetMaxAge(maxAge time.Duration) { - q.MaxAge = structs.DurationToProtoGogo(maxAge) -} - -// SetMustRevalidate is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetMustRevalidate(mustRevalidate bool) { - q.MustRevalidate = mustRevalidate -} - -// SetStaleIfError is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) { - q.StaleIfError = structs.DurationToProtoGogo(staleIfError) -} - -func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - maxTime := structs.DurationFromProtoGogo(q.MaxQueryTime) - o := structs.QueryOptions{ - MaxQueryTime: maxTime, - MinQueryIndex: q.MinQueryIndex, - } - return o.HasTimedOut(start, rpcHoldTimeout, maxQueryTime, defaultQueryTime) -} - -// SetFilter is needed to implement the structs.QueryOptionsCompat interface -func (q *QueryOptions) SetFilter(filter string) { - q.Filter = filter -} - -// GetMaxQueryTime is required to implement blockingQueryOptions -func (q *QueryOptions) GetMaxQueryTime() (time.Duration, error) { - return structs.DurationFromProtoGogo(q.MaxQueryTime), nil -} - -// GetMinQueryIndex is required to implement blockingQueryOptions -func (q *QueryOptions) GetMinQueryIndex() uint64 { - if q != nil { - return q.MinQueryIndex - } - return 0 -} - -// GetRequireConsistent is required to implement blockingQueryOptions -func (q *QueryOptions) GetRequireConsistent() bool { - if q != nil { - return q.RequireConsistent - } - return false -} - -// GetToken is required to implement blockingQueryOptions -func (q *QueryOptions) GetToken() string { - if q != nil { - return q.Token - } - return "" -} - -// GetAllowStale is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetAllowStale() bool { - if q != nil { - return q.AllowStale - } - return false -} - -// GetFilter is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetFilter() string { - if q != nil { - return q.Filter - } - return "" -} - -// GetMaxAge is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetMaxAge() (time.Duration, error) { - if q != nil { - return structs.DurationFromProtoGogo(q.MaxAge), nil - } - return 0, nil -} - -// GetMaxStaleDuration is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetMaxStaleDuration() (time.Duration, error) { - if q != nil { - return structs.DurationFromProtoGogo(q.MaxStaleDuration), nil - } - return 0, nil -} - -// GetMustRevalidate is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetMustRevalidate() bool { - if q != nil { - return q.MustRevalidate - } - return false -} - -// GetStaleIfError is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetStaleIfError() (time.Duration, error) { - if q != nil { - return 
structs.DurationFromProtoGogo(q.StaleIfError), nil - } - return 0, nil -} - -// GetUseCache is required to implement structs.QueryOptionsCompat -func (q *QueryOptions) GetUseCache() bool { - if q != nil { - return q.UseCache - } - return false -} - -// SetLastContact is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetLastContact(lastContact time.Duration) { - q.LastContact = structs.DurationToProtoGogo(lastContact) -} - -// SetKnownLeader is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetKnownLeader(knownLeader bool) { - q.KnownLeader = knownLeader -} - -// SetIndex is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetIndex(index uint64) { - q.Index = index -} - -// SetConsistencyLevel is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { - q.ConsistencyLevel = consistencyLevel -} - -func (q *QueryMeta) GetBackend() structs.QueryBackend { - return structs.QueryBackend(0) -} - -// SetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat interface -func (q *QueryMeta) SetResultsFilteredByACLs(v bool) { - q.ResultsFilteredByACLs = v -} - -// GetIndex is required to implement blockingQueryResponseMeta -func (q *QueryMeta) GetIndex() uint64 { - if q != nil { - return q.Index - } - return 0 -} - -// GetConsistencyLevel is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetConsistencyLevel() string { - if q != nil { - return q.ConsistencyLevel - } - return "" -} - -// GetKnownLeader is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetKnownLeader() bool { - if q != nil { - return q.KnownLeader - } - return false -} - -// GetLastContact is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetLastContact() (time.Duration, error) { - if q != nil { - return structs.DurationFromProtoGogo(q.LastContact), nil - } - return 0, nil -} - -// GetResultsFilteredByACLs is required to implement structs.QueryMetaCompat -func (q *QueryMeta) GetResultsFilteredByACLs() bool { - if q != nil { - return q.ResultsFilteredByACLs - } - return false -} - -// WriteRequest only applies to writes, always false -// -// IsRead implements structs.RPCInfo -func (w WriteRequest) IsRead() bool { - return false -} - -// SetTokenSecret implements structs.RPCInfo -func (w WriteRequest) TokenSecret() string { - return w.Token -} - -// SetTokenSecret implements structs.RPCInfo -func (w *WriteRequest) SetTokenSecret(s string) { - w.Token = s -} - -// AllowStaleRead returns whether a stale read should be allowed -// -// AllowStaleRead implements structs.RPCInfo -func (w WriteRequest) AllowStaleRead() bool { - return false -} - -// HasTimedOut implements structs.RPCInfo -func (w WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { - return time.Since(start) > rpcHoldTimeout, nil -} - -// IsRead implements structs.RPCInfo -func (r *ReadRequest) IsRead() bool { - return true -} - -// AllowStaleRead implements structs.RPCInfo -func (r *ReadRequest) AllowStaleRead() bool { - // TODO(partitions): plumb this? 
- return false -} - -// TokenSecret implements structs.RPCInfo -func (r *ReadRequest) TokenSecret() string { - return r.Token -} - -// SetTokenSecret implements structs.RPCInfo -func (r *ReadRequest) SetTokenSecret(token string) { - r.Token = token -} - -// HasTimedOut implements structs.RPCInfo -func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - return time.Since(start) > rpcHoldTimeout, nil -} - -// RequestDatacenter implements structs.RPCInfo -func (td TargetDatacenter) RequestDatacenter() string { - return td.Datacenter -} diff --git a/proto/pbcommongogo/common.pb.binary.go b/proto/pbcommongogo/common.pb.binary.go deleted file mode 100644 index 2e6b57496..000000000 --- a/proto/pbcommongogo/common.pb.binary.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by protoc-gen-go-binary. DO NOT EDIT. -// source: proto/pbcommongogo/common.proto - -package pbcommongogo - -import ( - "github.com/golang/protobuf/proto" -) - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *RaftIndex) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *RaftIndex) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *TargetDatacenter) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *TargetDatacenter) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *WriteRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *WriteRequest) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *ReadRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *ReadRequest) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *QueryOptions) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *QueryOptions) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *QueryMeta) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *QueryMeta) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *EnterpriseMeta) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *EnterpriseMeta) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} diff --git a/proto/pbcommongogo/common.pb.go b/proto/pbcommongogo/common.pb.go deleted file mode 100644 index 07db0feaa..000000000 --- a/proto/pbcommongogo/common.pb.go +++ /dev/null @@ -1,2036 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: proto/pbcommongogo/common.proto - -package pbcommongogo - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - types "github.com/gogo/protobuf/types" - proto "github.com/golang/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// RaftIndex is used to track the index used while creating -// or modifying a given struct type. -// -// mog annotation: -// -// target=github.com/hashicorp/consul/agent/structs.RaftIndex -// output=common.gen.go -// name=Structs -type RaftIndex struct { - CreateIndex uint64 `protobuf:"varint,1,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"` - ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` -} - -func (m *RaftIndex) Reset() { *m = RaftIndex{} } -func (m *RaftIndex) String() string { return proto.CompactTextString(m) } -func (*RaftIndex) ProtoMessage() {} -func (*RaftIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{0} -} -func (m *RaftIndex) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RaftIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RaftIndex.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RaftIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_RaftIndex.Merge(m, src) -} -func (m *RaftIndex) XXX_Size() int { - return m.Size() -} -func (m *RaftIndex) XXX_DiscardUnknown() { - xxx_messageInfo_RaftIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_RaftIndex proto.InternalMessageInfo - -// TargetDatacenter is intended to be used within other messages used for RPC routing -// amongst the various Consul datacenters -type TargetDatacenter struct { - Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` -} - -func (m *TargetDatacenter) Reset() { *m = TargetDatacenter{} } -func (m *TargetDatacenter) String() string { return proto.CompactTextString(m) } -func (*TargetDatacenter) ProtoMessage() {} -func (*TargetDatacenter) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{1} -} -func (m *TargetDatacenter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TargetDatacenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TargetDatacenter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TargetDatacenter) XXX_Merge(src proto.Message) { - xxx_messageInfo_TargetDatacenter.Merge(m, src) -} -func (m *TargetDatacenter) XXX_Size() int { - return m.Size() -} -func (m *TargetDatacenter) XXX_DiscardUnknown() { - xxx_messageInfo_TargetDatacenter.DiscardUnknown(m) -} - -var xxx_messageInfo_TargetDatacenter proto.InternalMessageInfo - -type WriteRequest struct { - // Token is the ACL token ID. 
If not provided, the 'anonymous' - // token is assumed for backwards compatibility. - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` -} - -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (m *WriteRequest) String() string { return proto.CompactTextString(m) } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{2} -} -func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(m, src) -} -func (m *WriteRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteRequest proto.InternalMessageInfo - -// ReadRequest is a type that may be embedded into any requests for read -// operations. -// It is a replacement for QueryOptions now that we no longer need any of those -// fields because we are moving away from using blocking queries. -// It is also similar to WriteRequest. It is a separate type so that in the -// future we can introduce fields that may only be relevant for reads. -type ReadRequest struct { - // Token is the ACL token ID. If not provided, the 'anonymous' - // token is assumed for backwards compatibility. - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` - // RequireConsistent indicates that the request must be sent to the leader. - RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` -} - -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (m *ReadRequest) String() string { return proto.CompactTextString(m) } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{3} -} -func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(m, src) -} -func (m *ReadRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadRequest proto.InternalMessageInfo - -// QueryOptions is used to specify various flags for read queries -// -// mog annotation: -// -// target=github.com/hashicorp/consul/agent/structs.QueryOptions -// output=common.gen.go -// name=Structs -// ignore-fields=StaleIfError,AllowNotModifiedResponse -type QueryOptions struct { - // Token is the ACL token ID. If not provided, the 'anonymous' - // token is assumed for backwards compatibility. - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` - // If set, wait until query exceeds given index. Must be provided - // with MaxQueryTime. 
- MinQueryIndex uint64 `protobuf:"varint,2,opt,name=MinQueryIndex,proto3" json:"MinQueryIndex,omitempty"` - // Provided with MinQueryIndex to wait for change. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - MaxQueryTime types.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime"` - // If set, any follower can service the request. Results - // may be arbitrarily stale. - AllowStale bool `protobuf:"varint,4,opt,name=AllowStale,proto3" json:"AllowStale,omitempty"` - // If set, the leader must verify leadership prior to - // servicing the request. Prevents a stale read. - RequireConsistent bool `protobuf:"varint,5,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` - // If set, the local agent may respond with an arbitrarily stale locally - // cached response. The semantics differ from AllowStale since the agent may - // be entirely partitioned from the servers and still considered "healthy" by - // operators. Stale responses from Servers are also arbitrarily stale, but can - // provide additional bounds on the last contact time from the leader. It's - // expected that servers that are partitioned are noticed and replaced in a - // timely way by operators while the same may not be true for client agents. - UseCache bool `protobuf:"varint,6,opt,name=UseCache,proto3" json:"UseCache,omitempty"` - // If set and AllowStale is true, will try first a stale - // read, and then will perform a consistent read if stale - // read is older than value. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - MaxStaleDuration types.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration"` - // MaxAge limits how old a cached value will be returned if UseCache is true. - // If there is a cached response that is older than the MaxAge, it is treated - // as a cache miss and a new fetch invoked. If the fetch fails, the error is - // returned. Clients that wish to allow for stale results on error can set - // StaleIfError to a longer duration to change this behavior. It is ignored - // if the endpoint supports background refresh caching. See - // https://www.consul.io/api/index.html#agent-caching for more details. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - MaxAge types.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge"` - // MustRevalidate forces the agent to fetch a fresh version of a cached - // resource or at least validate that the cached version is still fresh. It is - // implied by either max-age=0 or must-revalidate Cache-Control headers. It - // only makes sense when UseCache is true. We store it since MaxAge = 0 is the - // default unset value. - MustRevalidate bool `protobuf:"varint,9,opt,name=MustRevalidate,proto3" json:"MustRevalidate,omitempty"` - // StaleIfError specifies how stale the client will accept a cached response - // if the servers are unavailable to fetch a fresh one. Only makes sense when - // UseCache is true and MaxAge is set to a lower, non-zero value. It is - // ignored if the endpoint supports background refresh caching. See - // https://www.consul.io/api/index.html#agent-caching for more details. 
- StaleIfError types.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError"` - // Filter specifies the go-bexpr filter expression to be used for - // filtering the data prior to returning a response - Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` -} - -func (m *QueryOptions) Reset() { *m = QueryOptions{} } -func (m *QueryOptions) String() string { return proto.CompactTextString(m) } -func (*QueryOptions) ProtoMessage() {} -func (*QueryOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{4} -} -func (m *QueryOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOptions.Merge(m, src) -} -func (m *QueryOptions) XXX_Size() int { - return m.Size() -} -func (m *QueryOptions) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOptions proto.InternalMessageInfo - -// QueryMeta allows a query response to include potentially -// useful metadata about a query -// -// mog annotation: -// -// target=github.com/hashicorp/consul/agent/structs.QueryMeta -// output=common.gen.go -// name=Structs -// ignore-fields=NotModified,Backend -type QueryMeta struct { - // This is the index associated with the read - Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` - // If AllowStale is used, this is time elapsed since - // last contact between the follower and leader. This - // can be used to gauge staleness. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - LastContact types.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact"` - // Used to indicate if there is a known leader node - KnownLeader bool `protobuf:"varint,3,opt,name=KnownLeader,proto3" json:"KnownLeader,omitempty"` - // Consistencylevel returns the consistency used to serve the query - // Having `discovery_max_stale` on the agent can affect whether - // the request was served by a leader. - ConsistencyLevel string `protobuf:"bytes,4,opt,name=ConsistencyLevel,proto3" json:"ConsistencyLevel,omitempty"` - // ResultsFilteredByACLs is true when some of the query's results were - // filtered out by enforcing ACLs. It may be false because nothing was - // removed, or because the endpoint does not yet support this flag. 
- ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` -} - -func (m *QueryMeta) Reset() { *m = QueryMeta{} } -func (m *QueryMeta) String() string { return proto.CompactTextString(m) } -func (*QueryMeta) ProtoMessage() {} -func (*QueryMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{5} -} -func (m *QueryMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryMeta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryMeta.Merge(m, src) -} -func (m *QueryMeta) XXX_Size() int { - return m.Size() -} -func (m *QueryMeta) XXX_DiscardUnknown() { - xxx_messageInfo_QueryMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryMeta proto.InternalMessageInfo - -// EnterpriseMeta contains metadata that is only used by the Enterprise version -// of Consul. -type EnterpriseMeta struct { - // Namespace in which the entity exists. - Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - // Partition in which the entity exists. - Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` -} - -func (m *EnterpriseMeta) Reset() { *m = EnterpriseMeta{} } -func (m *EnterpriseMeta) String() string { return proto.CompactTextString(m) } -func (*EnterpriseMeta) ProtoMessage() {} -func (*EnterpriseMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a834024536145257, []int{6} -} -func (m *EnterpriseMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EnterpriseMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EnterpriseMeta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EnterpriseMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnterpriseMeta.Merge(m, src) -} -func (m *EnterpriseMeta) XXX_Size() int { - return m.Size() -} -func (m *EnterpriseMeta) XXX_DiscardUnknown() { - xxx_messageInfo_EnterpriseMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_EnterpriseMeta proto.InternalMessageInfo - -func init() { - proto.RegisterType((*RaftIndex)(nil), "commongogo.RaftIndex") - proto.RegisterType((*TargetDatacenter)(nil), "commongogo.TargetDatacenter") - proto.RegisterType((*WriteRequest)(nil), "commongogo.WriteRequest") - proto.RegisterType((*ReadRequest)(nil), "commongogo.ReadRequest") - proto.RegisterType((*QueryOptions)(nil), "commongogo.QueryOptions") - proto.RegisterType((*QueryMeta)(nil), "commongogo.QueryMeta") - proto.RegisterType((*EnterpriseMeta)(nil), "commongogo.EnterpriseMeta") -} - -func init() { proto.RegisterFile("proto/pbcommongogo/common.proto", fileDescriptor_a834024536145257) } - -var fileDescriptor_a834024536145257 = []byte{ - // 639 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x4d, 0x4a, 0x9a, 0x26, 0x93, 0xb6, 0x0a, 0xab, 0x82, 0x4c, 0x85, 0xdc, 0xca, 0xaa, 0x50, - 0x85, 0x20, 0x96, 0x0a, 0x12, 0x12, 0xb7, 0x24, 0x2d, 0x52, 0xdb, 0x18, 0xda, 0xa5, 0x08, 0x89, - 0xdb, 0xc6, 0x9e, 0x38, 0x16, 
0x8e, 0xd7, 0xec, 0xae, 0xdb, 0xe4, 0xce, 0x07, 0x70, 0xe4, 0x93, - 0x7a, 0xec, 0x91, 0x53, 0x05, 0xcd, 0x1f, 0x20, 0x3e, 0x00, 0x79, 0x9d, 0xb6, 0x2e, 0x69, 0x51, - 0x6e, 0x9e, 0x37, 0xef, 0xed, 0xce, 0xcc, 0x9b, 0x35, 0xac, 0xc5, 0x82, 0x2b, 0x6e, 0xc7, 0x5d, - 0x97, 0x0f, 0x06, 0x3c, 0xf2, 0xb9, 0xcf, 0xed, 0xec, 0xb3, 0xa1, 0x33, 0x04, 0xae, 0x13, 0xab, - 0xa6, 0xcf, 0xb9, 0x1f, 0xa2, 0xad, 0x33, 0xdd, 0xa4, 0x67, 0x7b, 0x89, 0x60, 0x2a, 0xb8, 0xe4, - 0xae, 0xae, 0xa4, 0xac, 0xec, 0xc0, 0xf4, 0x2b, 0x43, 0xad, 0x01, 0x54, 0x29, 0xeb, 0xa9, 0xdd, - 0xc8, 0xc3, 0x21, 0xb1, 0xa1, 0xd6, 0x16, 0xc8, 0x14, 0xea, 0xd0, 0x28, 0xae, 0x17, 0x37, 0x4b, - 0xad, 0xa5, 0xdf, 0xe7, 0x6b, 0xd5, 0x2e, 0x0e, 0x63, 0xf1, 0xda, 0x7a, 0x6e, 0xd1, 0x3c, 0x23, - 0x15, 0x38, 0xdc, 0x0b, 0x7a, 0xa3, 0x4c, 0x30, 0x77, 0xab, 0x20, 0xc7, 0xb0, 0xb6, 0xa0, 0x7e, - 0xc4, 0x84, 0x8f, 0x6a, 0x9b, 0x29, 0xe6, 0x62, 0xa4, 0x50, 0x10, 0x13, 0xe0, 0x3a, 0xd2, 0x97, - 0x56, 0x69, 0x0e, 0xb1, 0x36, 0x60, 0xf1, 0xa3, 0x08, 0x14, 0x52, 0xfc, 0x92, 0xa0, 0x54, 0x64, - 0x05, 0xe6, 0x8f, 0xf8, 0x67, 0x8c, 0x26, 0xd4, 0x2c, 0xb0, 0x0e, 0xa1, 0x46, 0x91, 0x79, 0xff, - 0x25, 0x91, 0x67, 0x70, 0x3f, 0x25, 0x04, 0x02, 0xdb, 0x3c, 0x92, 0x81, 0x54, 0x18, 0x29, 0x5d, - 0x75, 0x85, 0x4e, 0x27, 0xac, 0xaf, 0x25, 0x58, 0x3c, 0x4c, 0x50, 0x8c, 0xde, 0xc5, 0xe9, 0x1c, - 0xe5, 0x1d, 0x87, 0x6e, 0xc0, 0x92, 0x13, 0x44, 0x9a, 0x98, 0x1b, 0x03, 0xbd, 0x09, 0x92, 0x36, - 0x2c, 0x3a, 0x6c, 0xa8, 0x81, 0xa3, 0x60, 0x80, 0xc6, 0xbd, 0xf5, 0xe2, 0x66, 0x6d, 0xeb, 0x51, - 0x23, 0x73, 0xad, 0x71, 0xe9, 0x5a, 0x63, 0x7b, 0xe2, 0x5a, 0xab, 0x74, 0x7a, 0xbe, 0x56, 0xa0, - 0x37, 0x44, 0xe9, 0xa8, 0x9a, 0x61, 0xc8, 0x4f, 0xde, 0x2b, 0x16, 0xa2, 0x51, 0xd2, 0x85, 0xe7, - 0x90, 0xdb, 0xfb, 0x9b, 0xbf, 0xa3, 0x3f, 0xb2, 0x0a, 0x95, 0x0f, 0x12, 0xdb, 0xcc, 0xed, 0xa3, - 0x51, 0xd6, 0xa4, 0xab, 0x98, 0xec, 0x43, 0xdd, 0x61, 0x43, 0x7d, 0xea, 0x65, 0x45, 0xc6, 0xc2, - 0x6c, 0x25, 0x4f, 0x09, 0xc9, 0x2b, 0x28, 0x3b, 0x6c, 0xd8, 0xf4, 0xd1, 0xa8, 0xcc, 0x76, 0xc4, - 0x84, 0x4e, 0x9e, 0xc0, 0xb2, 0x93, 0x48, 0x45, 0xf1, 0x98, 0x85, 0x81, 0xc7, 0x14, 0x1a, 0x55, - 0x5d, 0xe7, 0x3f, 0x68, 0x3a, 0x5c, 0x7d, 0xe3, 0x6e, 0x6f, 0x47, 0x08, 0x2e, 0x0c, 0x98, 0x71, - 0xb8, 0x79, 0x11, 0x79, 0x08, 0xe5, 0x37, 0x41, 0x98, 0xee, 0x60, 0x4d, 0xdb, 0x3b, 0x89, 0xac, - 0x3f, 0x45, 0xa8, 0x6a, 0x0b, 0x1c, 0x54, 0x2c, 0xdd, 0x81, 0xdc, 0xeb, 0xa0, 0x59, 0x40, 0x9a, - 0x50, 0xeb, 0x30, 0xa9, 0xda, 0x3c, 0x52, 0xcc, 0xcd, 0x56, 0x6a, 0x86, 0xfb, 0xf3, 0x1a, 0xb2, - 0x0e, 0xb5, 0xfd, 0x88, 0x9f, 0x44, 0x1d, 0x64, 0x1e, 0x0a, 0xbd, 0x1f, 0x15, 0x9a, 0x87, 0xc8, - 0x53, 0xa8, 0x5f, 0xb9, 0xe7, 0x8e, 0x3a, 0x78, 0x8c, 0xa1, 0xde, 0x81, 0x2a, 0x9d, 0xc2, 0xc9, - 0x4b, 0x78, 0x40, 0x51, 0x26, 0xa1, 0x92, 0x59, 0x17, 0xe8, 0xb5, 0x46, 0xcd, 0x76, 0x47, 0x6a, - 0x13, 0x2b, 0xf4, 0xf6, 0xe4, 0x5e, 0xa9, 0x32, 0x5f, 0x2f, 0xef, 0x95, 0x2a, 0xe5, 0xfa, 0x82, - 0xd5, 0x81, 0xe5, 0x9d, 0xf4, 0xfd, 0xc5, 0x22, 0x90, 0xa8, 0x5b, 0x7f, 0x0c, 0xd5, 0xb7, 0x6c, - 0x80, 0x32, 0x66, 0x2e, 0x4e, 0x9e, 0xc0, 0x35, 0x90, 0x66, 0x0f, 0x98, 0x50, 0x81, 0x5e, 0x95, - 0xb9, 0x2c, 0x7b, 0x05, 0xb4, 0x0e, 0x4e, 0x7f, 0x99, 0x85, 0xd3, 0x0b, 0xb3, 0x78, 0x76, 0x61, - 0x16, 0x7f, 0x5e, 0x98, 0xc5, 0x6f, 0x63, 0xb3, 0xf0, 0x7d, 0x6c, 0x16, 0xce, 0xc6, 0x66, 0xe1, - 0xc7, 0xd8, 0x2c, 0x7c, 0x6a, 0xf8, 0x81, 0xea, 0x27, 0xdd, 0x86, 0xcb, 0x07, 0x76, 0x9f, 0xc9, - 0x7e, 0xe0, 0x72, 0x11, 0xdb, 0x2e, 0x8f, 0x64, 0x12, 0xda, 0xd3, 0x3f, 0xc2, 0x6e, 0x59, 0x63, - 0x2f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x17, 0x8b, 
0xc5, 0x82, 0x25, 0x05, 0x00, 0x00, -} - -func (m *RaftIndex) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RaftIndex) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RaftIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ModifyIndex != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.ModifyIndex)) - i-- - dAtA[i] = 0x10 - } - if m.CreateIndex != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.CreateIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TargetDatacenter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TargetDatacenter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TargetDatacenter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Datacenter) > 0 { - i -= len(m.Datacenter) - copy(dAtA[i:], m.Datacenter) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Datacenter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RequireConsistent { - i-- - if m.RequireConsistent { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0x5a - } - { - size, err := 
m.StaleIfError.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - if m.MustRevalidate { - i-- - if m.MustRevalidate { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - { - size, err := m.MaxAge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - { - size, err := m.MaxStaleDuration.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if m.UseCache { - i-- - if m.UseCache { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.RequireConsistent { - i-- - if m.RequireConsistent { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.AllowStale { - i-- - if m.AllowStale { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - { - size, err := m.MaxQueryTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.MinQueryIndex != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.MinQueryIndex)) - i-- - dAtA[i] = 0x10 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResultsFilteredByACLs { - i-- - if m.ResultsFilteredByACLs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.ConsistencyLevel) > 0 { - i -= len(m.ConsistencyLevel) - copy(dAtA[i:], m.ConsistencyLevel) - i = encodeVarintCommon(dAtA, i, uint64(len(m.ConsistencyLevel))) - i-- - dAtA[i] = 0x22 - } - if m.KnownLeader { - i-- - if m.KnownLeader { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - { - size, err := m.LastContact.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if m.Index != 0 { - i = encodeVarintCommon(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *EnterpriseMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EnterpriseMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EnterpriseMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Partition) > 0 { - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Partition))) - i-- - dAtA[i] = 0x12 - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Namespace))) - i-- - 
dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { - offset -= sovCommon(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RaftIndex) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CreateIndex != 0 { - n += 1 + sovCommon(uint64(m.CreateIndex)) - } - if m.ModifyIndex != 0 { - n += 1 + sovCommon(uint64(m.ModifyIndex)) - } - return n -} - -func (m *TargetDatacenter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Datacenter) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *WriteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *ReadRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if m.RequireConsistent { - n += 2 - } - return n -} - -func (m *QueryOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if m.MinQueryIndex != 0 { - n += 1 + sovCommon(uint64(m.MinQueryIndex)) - } - l = m.MaxQueryTime.Size() - n += 1 + l + sovCommon(uint64(l)) - if m.AllowStale { - n += 2 - } - if m.RequireConsistent { - n += 2 - } - if m.UseCache { - n += 2 - } - l = m.MaxStaleDuration.Size() - n += 1 + l + sovCommon(uint64(l)) - l = m.MaxAge.Size() - n += 1 + l + sovCommon(uint64(l)) - if m.MustRevalidate { - n += 2 - } - l = m.StaleIfError.Size() - n += 1 + l + sovCommon(uint64(l)) - l = len(m.Filter) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *QueryMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Index != 0 { - n += 1 + sovCommon(uint64(m.Index)) - } - l = m.LastContact.Size() - n += 1 + l + sovCommon(uint64(l)) - if m.KnownLeader { - n += 2 - } - l = len(m.ConsistencyLevel) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - if m.ResultsFilteredByACLs { - n += 2 - } - return n -} - -func (m *EnterpriseMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = len(m.Partition) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func sovCommon(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCommon(x uint64) (n int) { - return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RaftIndex) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RaftIndex: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RaftIndex: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateIndex", wireType) - } - m.CreateIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreateIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModifyIndex", wireType) - } - m.ModifyIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModifyIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TargetDatacenter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TargetDatacenter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TargetDatacenter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datacenter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Datacenter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequireConsistent", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RequireConsistent = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 
1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinQueryIndex", wireType) - } - m.MinQueryIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinQueryIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxQueryTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MaxQueryTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowStale", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowStale = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequireConsistent", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RequireConsistent = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseCache", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UseCache = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxStaleDuration", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.MaxStaleDuration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MaxAge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MustRevalidate", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MustRevalidate = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StaleIfError", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.StaleIfError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastContact", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastContact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KnownLeader", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.KnownLeader = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsistencyLevel", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConsistencyLevel = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsFilteredByACLs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ResultsFilteredByACLs = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnterpriseMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnterpriseMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnterpriseMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { 
- case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Partition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCommon(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCommon - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCommon - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCommon - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/pbcommongogo/common.proto b/proto/pbcommongogo/common.proto deleted file mode 100644 index 30cd847f3..000000000 --- a/proto/pbcommongogo/common.proto +++ /dev/null @@ -1,182 +0,0 @@ -syntax = "proto3"; - -package 
commongogo; - -option go_package = "github.com/hashicorp/consul/proto/pbcommongogo"; - -import "google/protobuf/duration.proto"; -// Go Modules now includes the version in the filepath for packages within GOPATH/pkg/mode -// Therefore unless we want to hardcode a version here like -// github.com/gogo/protobuf@v1.3.0/gogoproto/gogo.proto then the only other choice is to -// have a more relative import and pass the right import path to protoc. I don't like it -// but its necessary. -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_unkeyed_all) = false; -option (gogoproto.goproto_unrecognized_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_sizecache_all) = false; - -// RaftIndex is used to track the index used while creating -// or modifying a given struct type. -// -// mog annotation: -// -// target=github.com/hashicorp/consul/agent/structs.RaftIndex -// output=common.gen.go -// name=Structs -message RaftIndex { - uint64 CreateIndex = 1 [(gogoproto.moretags) = "bexpr:\"-\""]; - uint64 ModifyIndex = 2 [(gogoproto.moretags) = "bexpr:\"-\""]; -} - -// TargetDatacenter is intended to be used within other messages used for RPC routing -// amongst the various Consul datacenters -message TargetDatacenter { - string Datacenter = 1; -} - -message WriteRequest { - // Token is the ACL token ID. If not provided, the 'anonymous' - // token is assumed for backwards compatibility. - string Token = 1; -} - -// ReadRequest is a type that may be embedded into any requests for read -// operations. -// It is a replacement for QueryOptions now that we no longer need any of those -// fields because we are moving away from using blocking queries. -// It is also similar to WriteRequest. It is a separate type so that in the -// future we can introduce fields that may only be relevant for reads. -message ReadRequest { - // Token is the ACL token ID. If not provided, the 'anonymous' - // token is assumed for backwards compatibility. - string Token = 1; - - // RequireConsistent indicates that the request must be sent to the leader. - bool RequireConsistent = 2; -} - - -// QueryOptions is used to specify various flags for read queries -// -// mog annotation: -// -// target=github.com/hashicorp/consul/agent/structs.QueryOptions -// output=common.gen.go -// name=Structs -// ignore-fields=StaleIfError,AllowNotModifiedResponse -message QueryOptions { - // Token is the ACL token ID. If not provided, the 'anonymous' - // token is assumed for backwards compatibility. - string Token = 1; - - // If set, wait until query exceeds given index. Must be provided - // with MaxQueryTime. - uint64 MinQueryIndex = 2; - - // Provided with MinQueryIndex to wait for change. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - google.protobuf.Duration MaxQueryTime = 3 - [(gogoproto.nullable) = false]; - - // If set, any follower can service the request. Results - // may be arbitrarily stale. - bool AllowStale = 4; - - // If set, the leader must verify leadership prior to - // servicing the request. Prevents a stale read. - bool RequireConsistent = 5; - - // If set, the local agent may respond with an arbitrarily stale locally - // cached response. The semantics differ from AllowStale since the agent may - // be entirely partitioned from the servers and still considered "healthy" by - // operators. Stale responses from Servers are also arbitrarily stale, but can - // provide additional bounds on the last contact time from the leader. 
It's - // expected that servers that are partitioned are noticed and replaced in a - // timely way by operators while the same may not be true for client agents. - bool UseCache = 6; - - // If set and AllowStale is true, will try first a stale - // read, and then will perform a consistent read if stale - // read is older than value. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - google.protobuf.Duration MaxStaleDuration = 7 - [(gogoproto.nullable) = false]; - - // MaxAge limits how old a cached value will be returned if UseCache is true. - // If there is a cached response that is older than the MaxAge, it is treated - // as a cache miss and a new fetch invoked. If the fetch fails, the error is - // returned. Clients that wish to allow for stale results on error can set - // StaleIfError to a longer duration to change this behavior. It is ignored - // if the endpoint supports background refresh caching. See - // https://www.consul.io/api/index.html#agent-caching for more details. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - google.protobuf.Duration MaxAge = 8 - [(gogoproto.nullable) = false]; - - // MustRevalidate forces the agent to fetch a fresh version of a cached - // resource or at least validate that the cached version is still fresh. It is - // implied by either max-age=0 or must-revalidate Cache-Control headers. It - // only makes sense when UseCache is true. We store it since MaxAge = 0 is the - // default unset value. - bool MustRevalidate = 9; - - // StaleIfError specifies how stale the client will accept a cached response - // if the servers are unavailable to fetch a fresh one. Only makes sense when - // UseCache is true and MaxAge is set to a lower, non-zero value. It is - // ignored if the endpoint supports background refresh caching. See - // https://www.consul.io/api/index.html#agent-caching for more details. - google.protobuf.Duration StaleIfError = 10 - [(gogoproto.nullable) = false]; - - // Filter specifies the go-bexpr filter expression to be used for - // filtering the data prior to returning a response - string Filter = 11; -} - -// QueryMeta allows a query response to include potentially -// useful metadata about a query -// -// mog annotation: -// -// target=github.com/hashicorp/consul/agent/structs.QueryMeta -// output=common.gen.go -// name=Structs -// ignore-fields=NotModified,Backend -message QueryMeta { - // This is the index associated with the read - uint64 Index = 1; - - // If AllowStale is used, this is time elapsed since - // last contact between the follower and leader. This - // can be used to gauge staleness. - // mog: func-to=structs.DurationFromProtoGogo func-from=structs.DurationToProtoGogo - google.protobuf.Duration LastContact = 2 - [(gogoproto.nullable) = false]; - - // Used to indicate if there is a known leader node - bool KnownLeader = 3; - - // Consistencylevel returns the consistency used to serve the query - // Having `discovery_max_stale` on the agent can affect whether - // the request was served by a leader. - string ConsistencyLevel = 4; - - // Reserved for NotModified and Backend. - reserved 5, 6; - - // ResultsFilteredByACLs is true when some of the query's results were - // filtered out by enforcing ACLs. It may be false because nothing was - // removed, or because the endpoint does not yet support this flag. - bool ResultsFilteredByACLs = 7; -} - -// EnterpriseMeta contains metadata that is only used by the Enterprise version -// of Consul. 
-message EnterpriseMeta { - // Namespace in which the entity exists. - string Namespace = 1; - // Partition in which the entity exists. - string Partition = 2; -} diff --git a/proto/pbcommongogo/common_oss.go b/proto/pbcommongogo/common_oss.go deleted file mode 100644 index d24b27b69..000000000 --- a/proto/pbcommongogo/common_oss.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !consulent -// +build !consulent - -package pbcommongogo - -import ( - "github.com/hashicorp/consul/agent/structs" -) - -var DefaultEnterpriseMeta = EnterpriseMeta{} - -func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *EnterpriseMeta { - return &EnterpriseMeta{} -} - -func EnterpriseMetaToStructs(s *EnterpriseMeta, t *structs.EnterpriseMeta) { - if s == nil { - return - } -} -func EnterpriseMetaFromStructs(t *structs.EnterpriseMeta, s *EnterpriseMeta) { - if s == nil { - return - } -} diff --git a/proto/pbservice/convert_pbstruct.go b/proto/pbservice/convert_pbstruct.go index 1a09d81ef..8b1902942 100644 --- a/proto/pbservice/convert_pbstruct.go +++ b/proto/pbservice/convert_pbstruct.go @@ -4,7 +4,6 @@ import ( fmt "fmt" "reflect" - //TODO(gogo-remove): remove the types alias types "github.com/golang/protobuf/ptypes/struct" ) diff --git a/proto/pbutil/pbutil.go b/proto/pbutil/pbutil.go deleted file mode 100644 index 91736c061..000000000 --- a/proto/pbutil/pbutil.go +++ /dev/null @@ -1,23 +0,0 @@ -package pbutil - -import ( - "time" - - "github.com/gogo/protobuf/types" -) - -func DurationToProto(d time.Duration) *types.Duration { - return types.DurationProto(d) -} - -func DurationFromProto(d *types.Duration) (time.Duration, error) { - return types.DurationFromProto(d) -} - -func TimeFromProto(s *types.Timestamp) (time.Time, error) { - return types.TimestampFromProto(s) -} - -func TimeToProto(s time.Time) (*types.Timestamp, error) { - return types.TimestampProto(s) -} diff --git a/proto/translate.go b/proto/translate.go deleted file mode 100644 index 6ee90c084..000000000 --- a/proto/translate.go +++ /dev/null @@ -1,68 +0,0 @@ -package proto - -import ( - "reflect" - "time" - - "github.com/gogo/protobuf/types" -) - -var ( - tsType = reflect.TypeOf((*types.Timestamp)(nil)) - timePtrType = reflect.TypeOf((*time.Time)(nil)) - timeType = timePtrType.Elem() - mapStrInf = reflect.TypeOf((map[string]interface{})(nil)) - - epoch1970 = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) -) - -// HookPBTimestampToTime is a mapstructure decode hook to translate a protobuf timestamp -// to a time.Time value -func HookPBTimestampToTime(from, to reflect.Type, data interface{}) (interface{}, error) { - if to == timeType && from == tsType { - ts := data.(*types.Timestamp) - if ts.Seconds == 0 && ts.Nanos == 0 { - return time.Time{}, nil - } - return time.Unix(ts.Seconds, int64(ts.Nanos)).UTC(), nil - } - - return data, nil -} - -// HookTimeToPBtimestamp is a mapstructure decode hook to translate a time.Time value to -// a protobuf Timestamp value. -func HookTimeToPBTimestamp(from, to reflect.Type, data interface{}) (interface{}, error) { - // Note that mapstructure doesn't do direct struct to struct conversion in this case. I - // still don't completely understand why converting the PB TS to time.Time does but - // I suspect it has something to do with the struct containing a concrete time.Time - // as opposed to a pointer to a time.Time. 
Regardless this path through mapstructure - // first will decode the concrete time.Time into a map[string]interface{} before - // eventually decoding that map[string]interface{} into the *types.Timestamp. One - // other note is that mapstructure ends up creating a new Value and sets it it to - // the time.Time value and thats what gets passed to us. That is why we end up - // seeing a *time.Time instead of a time.Time. - if from == timePtrType && to == mapStrInf { - ts := data.(*time.Time) - - // protobuf only supports times from Jan 1 1970 onward but the time.Time type - // can represent values back to year 1. Basically - if ts.Before(epoch1970) { - return map[string]interface{}{}, nil - } - - nanos := ts.UnixNano() - if nanos < 0 { - return map[string]interface{}{}, nil - } - - seconds := nanos / 1000000000 - nanos = nanos % 1000000000 - - return map[string]interface{}{ - "Seconds": seconds, - "Nanos": int32(nanos), - }, nil - } - return data, nil -} diff --git a/proto/translate_test.go b/proto/translate_test.go deleted file mode 100644 index 0fbfa2b9b..000000000 --- a/proto/translate_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package proto - -import ( - "testing" - "time" - - "github.com/gogo/protobuf/types" - "github.com/mitchellh/mapstructure" - - "github.com/stretchr/testify/require" -) - -type pbTSWrapper struct { - Timestamp *types.Timestamp -} - -type timeTSWrapper struct { - Timestamp time.Time -} - -func TestHookPBTimestampToTime(t *testing.T) { - in := pbTSWrapper{ - Timestamp: &types.Timestamp{ - Seconds: 1000, - Nanos: 42, - }, - } - - expected := timeTSWrapper{ - Timestamp: time.Unix(1000, 42).UTC(), - } - - var actual timeTSWrapper - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: HookPBTimestampToTime, - Result: &actual, - }) - require.NoError(t, err) - require.NoError(t, decoder.Decode(in)) - - require.Equal(t, expected, actual) -} - -func TestHookTimeToPBTimestamp(t *testing.T) { - in := timeTSWrapper{ - Timestamp: time.Unix(999999, 123456).UTC(), - } - - expected := pbTSWrapper{ - Timestamp: &types.Timestamp{ - Seconds: 999999, - Nanos: 123456, - }, - } - - var actual pbTSWrapper - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: HookTimeToPBTimestamp, - Result: &actual, - }) - require.NoError(t, err) - require.NoError(t, decoder.Decode(in)) - - require.Equal(t, expected, actual) -} - -func TestHookTimeToPBTimestamp_ZeroTime(t *testing.T) { - in := timeTSWrapper{} - - expected := pbTSWrapper{ - Timestamp: &types.Timestamp{ - Seconds: 0, - Nanos: 0, - }, - } - - var actual pbTSWrapper - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: HookTimeToPBTimestamp, - Result: &actual, - }) - require.NoError(t, err) - require.NoError(t, decoder.Decode(in)) - - require.Equal(t, expected, actual) -} From 14c9389fa946e69b18e9c988873da02f43245f34 Mon Sep 17 00:00:00 2001 From: Eric Date: Tue, 29 Mar 2022 09:34:39 -0400 Subject: [PATCH 033/785] code review changes --- .circleci/config.yml | 3 +- GNUmakefile | 2 + build-support/scripts/proto-gen-entry.sh | 169 ----------------------- 3 files changed, 4 insertions(+), 170 deletions(-) delete mode 100755 build-support/scripts/proto-gen-entry.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index db57fb786..164c5ddb2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -237,9 +237,10 @@ jobs: - run: name: Install protobuf command: | + goproto_version=$(go list -m github.com/golang/protobuf | awk '{print $2}') go install 
-v github.com/hashicorp/protoc-gen-go-binary@master go install -v github.com/favadi/protoc-go-inject-tag@v1.3.0 - go install -v github.com/golang/protobuf/protoc-gen-go@v1.3.5 + go install -v github.com/golang/protobuf/protoc-gen-go@${goproto_version} - run: command: make --always-make proto diff --git a/GNUmakefile b/GNUmakefile index fa9fcb600..4710fbbaa 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -2,11 +2,13 @@ # https://www.consul.io/docs/install#compiling-from-source SHELL = bash +GOPROTOVERSION?=$(shell grep github.com/golang/protobuf go.mod | awk '{print $$2}') GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ github.com/hashicorp/go-bindata/go-bindata@master \ golang.org/x/tools/cmd/cover@master \ golang.org/x/tools/cmd/stringer@master \ + github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ github.com/hashicorp/protoc-gen-go-binary@master \ github.com/vektra/mockery/cmd/mockery@master \ github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ diff --git a/build-support/scripts/proto-gen-entry.sh b/build-support/scripts/proto-gen-entry.sh deleted file mode 100755 index 50d51be0d..000000000 --- a/build-support/scripts/proto-gen-entry.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env bash - -SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -SCRIPT_DIR=$(pwd) -pushd ../.. > /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null - -source "${SCRIPT_DIR}/functions.sh" - -function usage { -cat <<-EOF -Usage: ${SCRIPT_NAME} [] - -Description: - Generate the Go files from protobuf definitions. In addition to - running the protoc generator it will also fixup build tags in the - generated code. - -Options: - --import-replace Replace imports of google types with those from the protobuf repo. - --grpc Enable the gRPC plugin - -h | --help Print this help text. -EOF -} - -function err_usage { - err "$1" - err "" - err "$(usage)" -} - -function main { - local -i grpc=0 - local -i imp_replace=0 - local proto_path= - - while test $# -gt 0 - do - case "$1" in - -h | --help ) - usage - return 0 - ;; - --grpc ) - grpc=1 - shift - ;; - --import-replace ) - imp_replace=1 - shift - ;; - * ) - proto_path="$1" - shift - ;; - esac - done - - if test -z "${proto_path}" - then - err_usage "ERROR: No proto file specified" - return 1 - fi - - go mod download - - local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) - local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") - - - local golang_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp" - golang_proto_imp_replace="${golang_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration" - - local proto_go_path=${proto_path%%.proto}.pb.go - local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go - local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go - - local go_proto_out="paths=source_relative" - if is_set "${grpc}" - then - go_proto_out="${go_proto_out},plugins=grpc" - fi - - if is_set "${imp_replace}" - then - go_proto_out="${go_proto_out},${golang_proto_imp_replace}" - fi - - if test -n "${go_proto_out}" - then - go_proto_out="${go_proto_out}:" - fi - - # How we run protoc probably needs some documentation. 
- # - # This is the path to where - # -I="${golang_proto_path}/protobuf" \ - local -i ret=0 - status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} (NO GOGO)" - echo "debug_run protoc \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" - debug_run protoc \ - -I="${golang_proto_path}" \ - -I="${golang_proto_mod_path}" \ - -I="${SOURCE_DIR}" \ - --go_out="${go_proto_out}${SOURCE_DIR}" \ - --go-binary_out="${SOURCE_DIR}" \ - "${proto_path}" - - if test $? -ne 0 - then - err "Failed to run protoc for ${proto_path}" - return 1 - fi - - debug_run protoc-go-inject-tag \ - -input="${proto_go_path}" - - if test $? -ne 0 - then - err "Failed to run protoc-go-inject-tag for ${proto_path}" - return 1 - fi - - echo "debug_run protoc \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" - - BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') - if test -n "${BUILD_TAGS}" - then - echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" - cat "${proto_go_path}" >> "${proto_go_path}.new" - mv "${proto_go_path}.new" "${proto_go_path}" - - echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" - cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" - mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" - fi - - # note: this has to run after we fix up the build tags above - rm -f "${proto_go_rpcglue_path}" - debug_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" - if test $? -ne 0 - then - err "Failed to generate consul rpc glue outputs from ${proto_path}" - return 1 - fi - - return 0 -} - -main "$@" -exit $? From dcfcac433d0089c93489eec81d1a50a2e6ca3eec Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 29 Mar 2022 12:02:43 -0500 Subject: [PATCH 034/785] build: enforce protoc binary is the expected version (#12641) --- .circleci/config.yml | 3 ++- GNUmakefile | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 164c5ddb2..f4c7567d5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -229,7 +229,8 @@ jobs: - run: name: Install protobuf command: | - wget https://github.com/protocolbuffers/protobuf/releases/download/v3.12.3/protoc-3.12.3-linux-x86_64.zip + protoc_version="$(make print-PROTOC_VERSION)" + wget https://github.com/protocolbuffers/protobuf/releases/download/v${protoc_version}/protoc-${protoc_version}-linux-x86_64.zip sudo unzip -d /usr/local protoc-*.zip sudo chmod +x /usr/local/bin/protoc sudo chmod -R a+Xr /usr/local/include/google/ diff --git a/GNUmakefile b/GNUmakefile index 4710fbbaa..d844cb6c3 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -2,6 +2,7 @@ # https://www.consul.io/docs/install#compiling-from-source SHELL = bash +PROTOC_VERSION=3.12.3 GOPROTOVERSION?=$(shell grep github.com/golang/protobuf go.mod | awk '{print $$2}') GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ @@ -342,13 +343,26 @@ else @go test -v ./agent -run Vault endif -proto: $(PROTOGOFILES) $(PROTOGOBINFILES) - @echo "Generated all protobuf Go files" +.PHONY: protoc-check +protoc-check: + $(info checking protocol buffer compiler version (expect: $(PROTOC_VERSION))) + @if ! 
command -v protoc &>/dev/null; then \ + echo "ERROR: protoc is not installed; please install version $(PROTOC_VERSION)" >&2 ; \ + exit 1 ; \ + fi + @if [[ "$$(protoc --version | cut -d' ' -f2)" != "$(PROTOC_VERSION)" ]]; then \ + echo "ERROR: protoc version $(PROTOC_VERSION) is required" >&2 ; \ + fi +proto: protoc-check $(PROTOGOFILES) $(PROTOGOBINFILES) + @echo "Generated all protobuf Go files" %.pb.go %.pb.binary.go: %.proto @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc --import-replace "$<" +# utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION') +print-% : ; @echo $($*) + .PHONY: module-versions # Print a list of modules which can be updated. # Columns are: module current_version date_of_current_version latest_version From 609a83db5e8eae03bfb6737e53b3dab1f64aba32 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 29 Mar 2022 13:17:41 -0500 Subject: [PATCH 035/785] add missing line from prototest.AssertDeepEqual (#12645) --- proto/prototest/testing.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/proto/prototest/testing.go b/proto/prototest/testing.go index 5b5468ea6..1dbe03618 100644 --- a/proto/prototest/testing.go +++ b/proto/prototest/testing.go @@ -3,11 +3,15 @@ package prototest import ( "testing" + "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" ) func AssertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { t.Helper() + + opts = append(opts, cmp.Comparer(proto.Equal)) + if diff := cmp.Diff(x, y, opts...); diff != "" { t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) } From d1938482e7a64e62eb2888ac66cf3a6cd753ee4d Mon Sep 17 00:00:00 2001 From: Fulvio Date: Tue, 29 Mar 2022 20:56:01 +0200 Subject: [PATCH 036/785] remove DualStack field from check TCP #12629 (#12630) --- agent/checks/check.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/agent/checks/check.go b/agent/checks/check.go index 5bc00bc18..8f910901f 100644 --- a/agent/checks/check.go +++ b/agent/checks/check.go @@ -644,8 +644,7 @@ func (c *CheckTCP) Start() { if c.dialer == nil { // Create the socket dialer c.dialer = &net.Dialer{ - Timeout: 10 * time.Second, - DualStack: true, + Timeout: 10 * time.Second, } if c.Timeout > 0 { c.dialer.Timeout = c.Timeout From 356d068a4f130ccf84ca30a124f0bd542c6d493b Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 29 Mar 2022 15:18:05 -0500 Subject: [PATCH 037/785] build: install mog and execute it during protobuf compilation (#12647) - also import replace isn't needed anymore --- .circleci/config.yml | 6 +---- GNUmakefile | 28 ++++++++++++++++-------- build-support/scripts/proto-gen.sh | 35 ++++++++++-------------------- 3 files changed, 32 insertions(+), 37 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f4c7567d5..80b19a03e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -237,11 +237,7 @@ jobs: rm protoc-*.zip - run: name: Install protobuf - command: | - goproto_version=$(go list -m github.com/golang/protobuf | awk '{print $2}') - go install -v github.com/hashicorp/protoc-gen-go-binary@master - go install -v github.com/favadi/protoc-go-inject-tag@v1.3.0 - go install -v github.com/golang/protobuf/protoc-gen-go@${goproto_version} + command: make proto-tools - run: command: make --always-make proto diff --git a/GNUmakefile b/GNUmakefile index d844cb6c3..097e22539 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -2,19 +2,22 @@ # https://www.consul.io/docs/install#compiling-from-source SHELL = bash -PROTOC_VERSION=3.12.3 -GOPROTOVERSION?=$(shell grep github.com/golang/protobuf go.mod | awk '{print $$2}') GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ github.com/hashicorp/go-bindata/go-bindata@master \ golang.org/x/tools/cmd/cover@master \ golang.org/x/tools/cmd/stringer@master \ - github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ - github.com/hashicorp/protoc-gen-go-binary@master \ github.com/vektra/mockery/cmd/mockery@master \ github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ - github.com/hashicorp/lint-consul-retry@master \ - github.com/favadi/protoc-go-inject-tag@v1.3.0 + github.com/hashicorp/lint-consul-retry@master + +PROTOC_VERSION=3.12.3 +GOPROTOVERSION?=$(shell grep github.com/golang/protobuf go.mod | awk '{print $$2}') +GOPROTOTOOLS = \ + github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ + github.com/hashicorp/protoc-gen-go-binary@master \ + github.com/favadi/protoc-go-inject-tag@v1.3.0 \ + github.com/hashicorp/mog@v0.1.1 GOTAGS ?= GOPATH=$(shell go env GOPATH) @@ -287,13 +290,20 @@ static-assets: # Build the static web ui and build static assets inside a Docker container ui: ui-docker static-assets-docker -tools: +tools: proto-tools @if [[ -d .gotools ]]; then rm -rf .gotools ; fi @for TOOL in $(GOTOOLS); do \ echo "=== TOOL: $$TOOL" ; \ go install -v $$TOOL ; \ done +proto-tools: + @if [[ -d .gotools ]]; then rm -rf .gotools ; fi + @for TOOL in $(GOPROTOTOOLS); do \ + echo "=== TOOL: $$TOOL" ; \ + go install -v $$TOOL ; \ + done + version: @echo -n "Version: " @$(SHELL) $(CURDIR)/build-support/scripts/version.sh @@ -358,7 +368,7 @@ proto: protoc-check $(PROTOGOFILES) $(PROTOGOBINFILES) @echo "Generated all protobuf Go files" %.pb.go %.pb.binary.go: %.proto - @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc --import-replace "$<" + @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc "$<" # utility to echo a makefile variable (i.e. 
'make print-PROTOC_VERSION') print-% : ; @echo $($*) @@ -385,6 +395,6 @@ envoy-regen: @find "command/connect/envoy/testdata" -name '*.golden' -delete @go test -tags '$(GOTAGS)' ./command/connect/envoy -update -.PHONY: all bin dev dist cov test test-internal cover lint ui static-assets tools +.PHONY: all bin dev dist cov test test-internal cover lint ui static-assets tools proto-tools protoc-check .PHONY: docker-images go-build-image ui-build-image static-assets-docker consul-docker ui-docker .PHONY: version proto test-envoy-integ diff --git a/build-support/scripts/proto-gen.sh b/build-support/scripts/proto-gen.sh index 4ecf9acd8..c1dedc52e 100755 --- a/build-support/scripts/proto-gen.sh +++ b/build-support/scripts/proto-gen.sh @@ -37,7 +37,6 @@ function err_usage { function main { local -i grpc=0 - local -i imp_replace=0 local proto_path= while test $# -gt 0 @@ -51,10 +50,6 @@ function main { grpc=1 shift ;; - --import-replace ) - imp_replace=1 - shift - ;; * ) proto_path="$1" shift @@ -73,13 +68,10 @@ function main { local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") - - local golang_proto_imp_replace="Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp" - golang_proto_imp_replace="${golang_proto_imp_replace},Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration" - local proto_go_path=${proto_path%%.proto}.pb.go local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go + local mog_input_path="$(dirname "${proto_path}")" local go_proto_out="paths=source_relative" if is_set "${grpc}" @@ -87,23 +79,20 @@ function main { go_proto_out="${go_proto_out},plugins=grpc" fi - if is_set "${imp_replace}" - then - go_proto_out="${go_proto_out},${golang_proto_imp_replace}" - fi - if test -n "${go_proto_out}" then go_proto_out="${go_proto_out}:" fi + rm -f "${proto_go_path}" ${proto_go_bin_path}" ${proto_go_rpcglue_path}" "${mog_input_path}/*.gen.go" + # How we run protoc probably needs some documentation. # # This is the path to where # -I="${golang_proto_path}/protobuf" \ local -i ret=0 - status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path}" - echo "debug_run protoc \ + status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} ${mog_input_path}/*.gen.go" + echo "debug_run protoc \ -I=\"${golang_proto_path}\" \ -I=\"${golang_proto_mod_path}\" \ -I=\"${SOURCE_DIR}\" \ @@ -133,13 +122,13 @@ function main { return 1 fi - echo "debug_run protoc \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" + debug_run mog -source ./${mog_input_path} -tags ${GOTAGS} -ignore-package-load-errors + + if test $? 
-ne 0 + then + err "Failed to generate mog outputs from ${mog_input_path}" + return 1 + fi BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') if test -n "${BUILD_TAGS}" From 0ae60adff99194957aad13b6905997e8eee7a2e0 Mon Sep 17 00:00:00 2001 From: Mike Morris Date: Tue, 29 Mar 2022 17:36:21 -0400 Subject: [PATCH 038/785] website(api-gateway): add consul namespace to helm install (#12644) * website: api-gateway helm install consul namespace To mirror instructions at https://learn.hashicorp.com/tutorials/consul/kubernetes-api-gateway * website(api-gateway): add notes on where to find available versions * website(api-gateway): fixup link to more clearly indicate Consul Helm chart releases * Update website/content/docs/api-gateway/api-gateway-usage.mdx --- website/content/docs/api-gateway/api-gateway-usage.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/content/docs/api-gateway/api-gateway-usage.mdx b/website/content/docs/api-gateway/api-gateway-usage.mdx index 04f8daf5a..ac8474892 100644 --- a/website/content/docs/api-gateway/api-gateway-usage.mdx +++ b/website/content/docs/api-gateway/api-gateway-usage.mdx @@ -7,7 +7,7 @@ description: >- # Consul API Gateway Usage -This topic describes how to use the Consul API Gateway add-on module. It includes instructions for installation and configuration. +This topic describes how to use the Consul API Gateway add-on module. It includes instructions for installation and configuration. ## Requirements @@ -25,7 +25,7 @@ Refer to [Technical Specifications](/docs/api-gateway/tech-specs) for minimum so -1. Create a `values.yaml` file for your Consul API Gateway deployment. Copy the content below into the `values.yaml` file. The `values.yaml` will be used by the Consul Helm chart. See [Helm Chart Configuration - apiGateway](https://www.consul.io/docs/k8s/helm#apigateway) for more available options on how to configure your Consul API Gateway deployment via the Helm chart. +1. Create a `values.yaml` file for your Consul API Gateway deployment. Copy the content below into the `values.yaml` file. The `values.yaml` will be used by the Consul Helm chart. Available versions of the [Consul](https://hub.docker.com/r/hashicorp/consul/tags) and [Consul API Gateway](https://hub.docker.com/r/hashicorp/consul-api-gateway/tags) Docker images can be found on DockerHub, with additional context on version compatibility published in [GitHub releases](https://github.com/hashicorp/consul-api-gateway/releases). See [Helm Chart Configuration - apiGateway](https://www.consul.io/docs/k8s/helm#apigateway) for more available options on how to configure your Consul API Gateway deployment via the Helm chart. @@ -44,12 +44,12 @@ Refer to [Technical Specifications](/docs/api-gateway/tech-specs) for minimum so -1. Install Consul API Gateway using the standard Consul Helm chart and specify the custom values file. +1. Install Consul API Gateway using the standard Consul Helm chart and specify the custom values file. Available versions of the [Consul Helm chart](https://github.com/hashicorp/consul-k8s/releases) can be found in GitHub releases. ```shell-session - $ helm install consul hashicorp/consul --version 0.41.1 --values values.yaml + $ helm install consul hashicorp/consul --version 0.41.1 --values values.yaml --create-namespace --namespace consul ``` @@ -58,7 +58,7 @@ Refer to [Technical Specifications](/docs/api-gateway/tech-specs) for minimum so 1. Verify that the [requirements](#requirements) have been met. 1. 
Verify that the Consul API Gateway CRDs and controller have been installed and applied (see [Installation](#installation)). -1. Configure the artifacts described below in [Configuration](#configuration). +1. Configure the artifacts described below in [Configuration](#configuration). From 0fd6cdc900205ac36bdafeeee8571223e5790304 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Tue, 29 Mar 2022 18:05:45 -0700 Subject: [PATCH 039/785] introduce EmptyReadRequest for status_endpoint (#12653) Co-authored-by: Daniel Nephin --- agent/consul/stats_fetcher.go | 7 ++++--- agent/consul/status_endpoint.go | 14 +++++++++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/agent/consul/stats_fetcher.go b/agent/consul/stats_fetcher.go index d486ae504..334472be5 100644 --- a/agent/consul/stats_fetcher.go +++ b/agent/consul/stats_fetcher.go @@ -5,11 +5,12 @@ import ( "net" "sync" - "github.com/hashicorp/consul/agent/pool" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" + + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/agent/structs" ) // StatsFetcher has two functions for autopilot. First, lets us fetch all the @@ -42,7 +43,7 @@ func NewStatsFetcher(logger hclog.Logger, pool *pool.ConnPool, datacenter string // RPC to each server, so we let it finish and then clean up the in-flight // tracking. func (f *StatsFetcher) fetch(server *autopilot.Server, replyCh chan *autopilot.ServerStats) { - var args struct{} + var args EmptyReadRequest var reply structs.RaftStats // defer some cleanup to notify everything else that the fetching is no longer occurring diff --git a/agent/consul/status_endpoint.go b/agent/consul/status_endpoint.go index ac0dc0314..b82baf842 100644 --- a/agent/consul/status_endpoint.go +++ b/agent/consul/status_endpoint.go @@ -13,7 +13,7 @@ type Status struct { } // Ping is used to just check for connectivity -func (s *Status) Ping(args struct{}, reply *struct{}) error { +func (s *Status) Ping(args EmptyReadRequest, reply *struct{}) error { return nil } @@ -55,8 +55,16 @@ func (s *Status) Peers(args *structs.DCSpecificRequest, reply *[]string) error { return nil } -// Used by Autopilot to query the raft stats of the local server. -func (s *Status) RaftStats(args struct{}, reply *structs.RaftStats) error { +// EmptyReadRequest implements the interface used by middleware.RequestRecorder +// to communicate properties of requests. +type EmptyReadRequest struct{} + +func (e EmptyReadRequest) IsRead() bool { + return true +} + +// RaftStats is used by Autopilot to query the raft stats of the local server. +func (s *Status) RaftStats(args EmptyReadRequest, reply *structs.RaftStats) error { stats := s.server.raft.Stats() var err error From d4e80b880023516e1a35fdece003b6b5b2eba01f Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Wed, 30 Mar 2022 10:04:18 -0500 Subject: [PATCH 040/785] server: ensure that service-defaults meta is incorporated into the discovery chain response (#12511) Also add a new "Default" field to the discovery chain response to clients --- .changelog/12511.txt | 7 + agent/consul/discovery_chain_endpoint_test.go | 8 +- agent/consul/discoverychain/compile.go | 44 ++++++ agent/consul/discoverychain/compile_test.go | 139 +++++++++++++++--- agent/discovery_chain_endpoint_test.go | 6 +- agent/proxycfg/testing_connect_proxy.go | 2 +- agent/proxycfg/testing_ingress_gateway.go | 8 +- agent/structs/discovery_chain.go | 35 +---- agent/xds/clusters.go | 2 +- agent/xds/endpoints.go | 2 +- agent/xds/listeners.go | 4 +- agent/xds/listeners_ingress.go | 2 +- agent/xds/routes.go | 2 +- api/discovery_chain.go | 8 + api/discovery_chain_test.go | 2 + .../connect/l7-traffic/discovery-chain.mdx | 6 + 16 files changed, 214 insertions(+), 63 deletions(-) create mode 100644 .changelog/12511.txt diff --git a/.changelog/12511.txt b/.changelog/12511.txt new file mode 100644 index 000000000..a7ba31633 --- /dev/null +++ b/.changelog/12511.txt @@ -0,0 +1,7 @@ +```release-note:feature +server: ensure that service-defaults meta is incorporated into the discovery chain response +``` + +```release-note:feature +server: discovery chains now include a response field named "Default" to indicate if they were not constructed from any service-resolver, service-splitter, or service-router config entries +``` diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 174cab742..e875ec25d 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -98,6 +98,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { Datacenter: "dc1", Protocol: "tcp", StartNode: "resolver:web.default.default.dc1", + Default: true, Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:web.default.default.dc1": { Type: structs.DiscoveryGraphNodeTypeResolver, @@ -286,12 +287,7 @@ func TestDiscoveryChainEndpoint_Get_BlockOnNoChange(t *testing.T) { args.QueryOptions.MinQueryIndex = minQueryIndex var out structs.DiscoveryChainResponse - errCh := channelCallRPC(s1, "DiscoveryChain.Get", &args, &out, func() error { - if !out.Chain.IsDefault() { - return fmt.Errorf("expected default chain") - } - return nil - }) + errCh := channelCallRPC(s1, "DiscoveryChain.Get", &args, &out, nil) return &out.QueryMeta, errCh }, func(i int) <-chan error { diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index 4f78eb8bf..0567b8b90 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -161,6 +161,12 @@ type compiler struct { // This is an OUTPUT field. protocol string + // serviceMeta is the Meta field from the service-defaults entry that + // shares a name with this discovery chain. + // + // This is an OUTPUT field. + serviceMeta map[string]string + // startNode is computed inside of assembleChain() // // This is an OUTPUT field. 
@@ -327,14 +333,47 @@ func (c *compiler) compile() (*structs.CompiledDiscoveryChain, error) { Namespace: c.evaluateInNamespace, Partition: c.evaluateInPartition, Datacenter: c.evaluateInDatacenter, + Default: c.determineIfDefaultChain(), CustomizationHash: customizationHash, Protocol: c.protocol, + ServiceMeta: c.serviceMeta, StartNode: c.startNode, Nodes: c.nodes, Targets: c.loadedTargets, }, nil } +// determineIfDefaultChain returns true if the compiled chain represents no +// routing, no splitting, and only the default resolution. We have to be +// careful here to avoid returning "yep this is default" when the only +// resolver action being applied is redirection to another resolver that is +// default, so we double check the resolver matches the requested resolver. +// +// NOTE: "default chain" mostly means that this is compatible with how things +// worked (roughly) in consul 1.5 pre-discovery chain, not that there are zero +// config entries in play (like service-defaults). +func (c *compiler) determineIfDefaultChain() bool { + if c.startNode == "" || len(c.nodes) == 0 { + return true + } + + node := c.nodes[c.startNode] + if node == nil { + panic("not possible: missing node named '" + c.startNode + "' in chain '" + c.serviceName + "'") + } + + if node.Type != structs.DiscoveryGraphNodeTypeResolver { + return false + } + if !node.Resolver.Default { + return false + } + + target := c.loadedTargets[node.Resolver.Target] + + return target.Service == c.serviceName && target.Namespace == c.evaluateInNamespace && target.Partition == c.evaluateInPartition +} + func (c *compiler) detectCircularReferences() error { var ( todo stringStack @@ -515,6 +554,11 @@ func (c *compiler) assembleChain() error { sid := structs.NewServiceID(c.serviceName, c.GetEnterpriseMeta()) + // Extract the service meta for the service named by this discovery chain. + if serviceDefault := c.entries.GetService(sid); serviceDefault != nil { + c.serviceMeta = serviceDefault.GetMeta() + } + // Check for short circuit path. if len(c.resolvers) == 0 && c.entries.IsChainEmpty() { // Materialize defaults and cache. 
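For orientation only (not part of this patch): a minimal consumer-side sketch of how the new `Default` and `ServiceMeta` response fields introduced above could be read through the Go API client. It assumes the matching fields added to `api.CompiledDiscoveryChain` later in this same patch, a locally reachable agent using the default client configuration, and a hypothetical registered service named "web".

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a local Consul agent reachable via the default configuration.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Compile the discovery chain for a hypothetical "web" service.
	resp, _, err := client.DiscoveryChain().Get("web", nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	chain := resp.Chain
	// Default is true when no service-resolver, service-splitter, or
	// service-router config entry shaped this chain.
	fmt.Println("default chain:", chain.Default)
	// ServiceMeta carries the Meta field of the service-defaults entry
	// that shares a name with this chain.
	fmt.Println("service meta:", chain.ServiceMeta)
}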
diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 94029465e..9a3dde647 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -12,14 +12,12 @@ import ( ) type compileTestCase struct { - entries *configentry.DiscoveryChainSet - setup func(req *CompileRequest) - expect *structs.CompiledDiscoveryChain - // expectIsDefault tests behavior of CompiledDiscoveryChain.IsDefault() - expectIsDefault bool - expectCustom bool - expectErr string - expectGraphErr bool + entries *configentry.DiscoveryChainSet + setup func(req *CompileRequest) + expect *structs.CompiledDiscoveryChain + expectCustom bool + expectErr string + expectGraphErr bool } func TestCompile(t *testing.T) { @@ -56,6 +54,8 @@ func TestCompile(t *testing.T) { "loadbalancer splitter and resolver": testcase_LBSplitterAndResolver(), "loadbalancer resolver": testcase_LBResolver(), "service redirect to service with default resolver is not a default chain": testcase_RedirectToDefaultResolverIsNotDefaultChain(), + "service meta projection": testcase_ServiceMetaProjection(), + "service meta projection with redirect": testcase_ServiceMetaProjectionWithRedirect(), "all the bells and whistles": testcase_AllBellsAndWhistles(), "multi dc canary": testcase_MultiDatacenterCanary(), @@ -141,7 +141,6 @@ func TestCompile(t *testing.T) { } require.Equal(t, tc.expect, res) - require.Equal(t, tc.expectIsDefault, res.IsDefault()) } }) } @@ -1429,6 +1428,7 @@ func testcase_DefaultResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "tcp", + Default: true, StartNode: "resolver:main.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:main.default.default.dc1": { @@ -1446,7 +1446,7 @@ func testcase_DefaultResolver() compileTestCase { "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: true} + return compileTestCase{entries: entries, expect: expect} } func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { @@ -1465,6 +1465,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "grpc", + Default: true, StartNode: "resolver:main.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:main.default.default.dc1": { @@ -1485,11 +1486,69 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { }), }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: true} + return compileTestCase{entries: entries, expect: expect} } -func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase { +func testcase_ServiceMetaProjection() compileTestCase { entries := newEntries() + entries.AddServices( + &structs.ServiceConfigEntry{ + Kind: structs.ServiceDefaults, + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "abc": "123", + }, + }, + ) + expect := &structs.CompiledDiscoveryChain{ + Protocol: "tcp", + Default: true, + ServiceMeta: map[string]string{ + "foo": "bar", + "abc": "123", + }, + StartNode: "resolver:main.default.default.dc1", + Nodes: map[string]*structs.DiscoveryGraphNode{ + "resolver:main.default.default.dc1": { + Type: structs.DiscoveryGraphNodeTypeResolver, + Name: "main.default.default.dc1", + Resolver: &structs.DiscoveryResolver{ + Default: true, + ConnectTimeout: 5 * time.Second, + Target: "main.default.default.dc1", + }, + 
}, + }, + Targets: map[string]*structs.DiscoveryTarget{ + "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + }, + } + + return compileTestCase{entries: entries, expect: expect} +} + +func testcase_ServiceMetaProjectionWithRedirect() compileTestCase { + entries := newEntries() + entries.AddServices( + &structs.ServiceConfigEntry{ + Kind: structs.ServiceDefaults, + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "abc": "123", + }, + }, + &structs.ServiceConfigEntry{ + Kind: structs.ServiceDefaults, + Name: "other", + Meta: map[string]string{ + "zim": "gir", + "abc": "456", + "xyz": "999", + }, + }, + ) entries.AddResolvers( &structs.ServiceResolverConfigEntry{ Kind: structs.ServiceResolver, @@ -1501,7 +1560,12 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase { ) expect := &structs.CompiledDiscoveryChain{ - Protocol: "tcp", + Protocol: "tcp", + ServiceMeta: map[string]string{ + // Note this is main's meta, not other's. + "foo": "bar", + "abc": "123", + }, StartNode: "resolver:other.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:other.default.default.dc1": { @@ -1519,7 +1583,42 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase { }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: false /*being explicit here because this is the whole point of this test*/} + return compileTestCase{entries: entries, expect: expect} +} + +func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase { + entries := newEntries() + entries.AddResolvers( + &structs.ServiceResolverConfigEntry{ + Kind: structs.ServiceResolver, + Name: "main", + Redirect: &structs.ServiceResolverRedirect{ + Service: "other", + }, + }, + ) + + expect := &structs.CompiledDiscoveryChain{ + Protocol: "tcp", + StartNode: "resolver:other.default.default.dc1", + Default: false, /*being explicit here because this is the whole point of this test*/ + Nodes: map[string]*structs.DiscoveryGraphNode{ + "resolver:other.default.default.dc1": { + Type: structs.DiscoveryGraphNodeTypeResolver, + Name: "other.default.default.dc1", + Resolver: &structs.DiscoveryResolver{ + Default: true, + ConnectTimeout: 5 * time.Second, + Target: "other.default.default.dc1", + }, + }, + }, + Targets: map[string]*structs.DiscoveryTarget{ + "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + }, + } + + return compileTestCase{entries: entries, expect: expect} } func testcase_Resolve_WithDefaultSubset() compileTestCase { @@ -1570,6 +1669,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "tcp", + Default: true, StartNode: "resolver:main.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:main.default.default.dc1": { @@ -1589,7 +1689,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase { }), }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: true} + return compileTestCase{entries: entries, expect: expect} } func testcase_Resolver_ExternalSNI_FailoverNotAllowed() compileTestCase { @@ -2249,6 +2349,7 @@ func testcase_ResolverProtocolOverride() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http2", + Default: true, StartNode: "resolver:main.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:main.default.default.dc1": { @@ -2266,7 +2367,7 @@ func testcase_ResolverProtocolOverride() 
compileTestCase { "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: true, + return compileTestCase{entries: entries, expect: expect, expectCustom: true, setup: func(req *CompileRequest) { req.OverrideProtocol = "http2" @@ -2282,6 +2383,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http2", + Default: true, StartNode: "resolver:main.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:main.default.default.dc1": { @@ -2299,7 +2401,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase { "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: true, + return compileTestCase{entries: entries, expect: expect, setup: func(req *CompileRequest) { req.OverrideProtocol = "http2" }, @@ -2320,6 +2422,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "tcp", StartNode: "resolver:main.default.default.dc1", + Default: true, Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:main.default.default.dc1": { Type: structs.DiscoveryGraphNodeTypeResolver, @@ -2336,7 +2439,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase { "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), }, } - return compileTestCase{entries: entries, expect: expect, expectIsDefault: true, + return compileTestCase{entries: entries, expect: expect, expectCustom: true, setup: func(req *CompileRequest) { req.OverrideProtocol = "tcp" diff --git a/agent/discovery_chain_endpoint_test.go b/agent/discovery_chain_endpoint_test.go index 78fcfe303..3db87ba52 100644 --- a/agent/discovery_chain_endpoint_test.go +++ b/agent/discovery_chain_endpoint_test.go @@ -8,11 +8,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" - "github.com/stretchr/testify/require" ) func TestDiscoveryChainRead(t *testing.T) { @@ -79,6 +80,7 @@ func TestDiscoveryChainRead(t *testing.T) { Partition: "default", Datacenter: "dc1", Protocol: "tcp", + Default: true, StartNode: "resolver:web.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ "resolver:web.default.default.dc1": { @@ -122,6 +124,7 @@ func TestDiscoveryChainRead(t *testing.T) { Namespace: "default", Partition: "default", Datacenter: "dc2", + Default: true, Protocol: "tcp", StartNode: "resolver:web.default.default.dc2", Nodes: map[string]*structs.DiscoveryGraphNode{ @@ -175,6 +178,7 @@ func TestDiscoveryChainRead(t *testing.T) { Namespace: "default", Partition: "default", Datacenter: "dc1", + Default: true, Protocol: "tcp", StartNode: "resolver:web.default.default.dc1", Nodes: map[string]*structs.DiscoveryGraphNode{ diff --git a/agent/proxycfg/testing_connect_proxy.go b/agent/proxycfg/testing_connect_proxy.go index b23f02585..35a8e6011 100644 --- a/agent/proxycfg/testing_connect_proxy.go +++ b/agent/proxycfg/testing_connect_proxy.go @@ -16,7 +16,7 @@ func TestConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService), extraUp // no entries implies we'll get a default chain dbChain := discoverychain.TestCompileConfigEntries(t, 
"db", "default", "default", "dc1", connect.TestClusterID+".consul", nil) - assert.True(t, dbChain.IsDefault()) + assert.True(t, dbChain.Default) var ( upstreams = structs.TestUpstreams(t) diff --git a/agent/proxycfg/testing_ingress_gateway.go b/agent/proxycfg/testing_ingress_gateway.go index c3360fd96..7686993ba 100644 --- a/agent/proxycfg/testing_ingress_gateway.go +++ b/agent/proxycfg/testing_ingress_gateway.go @@ -685,10 +685,10 @@ func TestConfigSnapshotIngress_HTTPMultipleServices(t testing.T) *ConfigSnapshot quxChain = discoverychain.TestCompileConfigEntries(t, "qux", "default", "default", "dc1", connect.TestClusterID+".consul", nil, entries...) ) - require.False(t, fooChain.IsDefault()) - require.False(t, barChain.IsDefault()) - require.True(t, bazChain.IsDefault()) - require.True(t, quxChain.IsDefault()) + require.False(t, fooChain.Default) + require.False(t, barChain.Default) + require.True(t, bazChain.Default) + require.True(t, quxChain.Default) return TestConfigSnapshotIngressGateway(t, false, "http", "default", nil, func(entry *structs.IngressGatewayConfigEntry) { entry.Listeners = []structs.IngressListener{ diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index 86c24515d..0dd3010e7 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -26,9 +26,17 @@ type CompiledDiscoveryChain struct { // non-customized versions. CustomizationHash string `json:",omitempty"` + // Default indicates if this discovery chain is based on no + // service-resolver, service-splitter, or service-router config entries. + Default bool `json:",omitempty"` + // Protocol is the overall protocol shared by everything in the chain. Protocol string `json:",omitempty"` + // ServiceMeta is the metadata from the underlying service-defaults config + // entry for the service named ServiceName. + ServiceMeta map[string]string `json:",omitempty"` + // StartNode is the first key into the Nodes map that should be followed // when walking the discovery chain. StartNode string `json:",omitempty"` @@ -62,33 +70,6 @@ func (c *CompiledDiscoveryChain) WillFailoverThroughMeshGateway(node *DiscoveryG return false } -// IsDefault returns true if the compiled chain represents no routing, no -// splitting, and only the default resolution. We have to be careful here to -// avoid returning "yep this is default" when the only resolver action being -// applied is redirection to another resolver that is default, so we double -// check the resolver matches the requested resolver. -func (c *CompiledDiscoveryChain) IsDefault() bool { - if c.StartNode == "" || len(c.Nodes) == 0 { - return true - } - - node := c.Nodes[c.StartNode] - if node == nil { - panic("not possible: missing node named '" + c.StartNode + "' in chain '" + c.ServiceName + "'") - } - - if node.Type != DiscoveryGraphNodeTypeResolver { - return false - } - if !node.Resolver.Default { - return false - } - - target := c.Targets[node.Resolver.Target] - - return target.Service == c.ServiceName && target.Namespace == c.Namespace && target.Partition == c.Partition -} - // ID returns an ID that encodes the service, namespace, partition, and datacenter. // This ID allows us to compare a discovery chain target to the chain upstream itself. 
func (c *CompiledDiscoveryChain) ID() string { diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 283df4125..716c320d3 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -612,7 +612,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( var escapeHatchCluster *envoy_cluster_v3.Cluster if cfg.EnvoyClusterJSON != "" { - if chain.IsDefault() { + if chain.Default { // If you haven't done anything to setup the discovery chain, then // you can use the envoy_cluster_json escape hatch. escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON) diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index 9981dc940..b1a38f0cd 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -393,7 +393,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain( var escapeHatchCluster *envoy_cluster_v3.Cluster if cfg.EnvoyClusterJSON != "" { - if chain.IsDefault() { + if chain.Default { // If you haven't done anything to setup the discovery chain, then // you can use the envoy_cluster_json escape hatch. escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON) diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 68ab9c1bf..2a671c40d 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -115,7 +115,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. } // RDS, Envoy's Route Discovery Service, is only used for HTTP services with a customized discovery chain. - useRDS := chain.Protocol != "tcp" && !chain.IsDefault() + useRDS := chain.Protocol != "tcp" && !chain.Default var clusterName string if !useRDS { @@ -1303,7 +1303,7 @@ func (s *ResourceGenerator) getAndModifyUpstreamConfigForListener( if u != nil { configMap = u.Config } - if chain == nil || chain.IsDefault() { + if chain == nil || chain.Default { cfg, err = structs.ParseUpstreamConfigNoDefaults(configMap) if err != nil { // Don't hard fail on a config typo, just warn. The parse func returns diff --git a/agent/xds/listeners_ingress.go b/agent/xds/listeners_ingress.go index 3ab7de3c0..cd2023d2b 100644 --- a/agent/xds/listeners_ingress.go +++ b/agent/xds/listeners_ingress.go @@ -48,7 +48,7 @@ func (s *ResourceGenerator) makeIngressGatewayListeners(address string, cfgSnap // RDS, Envoy's Route Discovery Service, is only used for HTTP services with a customized discovery chain. // TODO(freddy): Why can the protocol of the listener be overridden here? - useRDS := cfg.Protocol != "tcp" && !chain.IsDefault() + useRDS := cfg.Protocol != "tcp" && !chain.Default var clusterName string if !useRDS { diff --git a/agent/xds/routes.go b/agent/xds/routes.go index e6ed55df8..0a772d3f5 100644 --- a/agent/xds/routes.go +++ b/agent/xds/routes.go @@ -45,7 +45,7 @@ func (s *ResourceGenerator) routesFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) func (s *ResourceGenerator) routesForConnectProxy(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) { var resources []proto.Message for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { - if chain.IsDefault() { + if chain.Default { continue } diff --git a/api/discovery_chain.go b/api/discovery_chain.go index 29bda8591..d198b2bb5 100644 --- a/api/discovery_chain.go +++ b/api/discovery_chain.go @@ -109,9 +109,17 @@ type CompiledDiscoveryChain struct { // non-customized versions. CustomizationHash string + // Default indicates if this discovery chain is based on no + // service-resolver, service-splitter, or service-router config entries. 
+ Default bool + // Protocol is the overall protocol shared by everything in the chain. Protocol string + // ServiceMeta is the metadata from the underlying service-defaults config + // entry for the service named ServiceName. + ServiceMeta map[string]string + // StartNode is the first key into the Nodes map that should be followed // when walking the discovery chain. StartNode string diff --git a/api/discovery_chain_test.go b/api/discovery_chain_test.go index 99f97fa9d..049ce3963 100644 --- a/api/discovery_chain_test.go +++ b/api/discovery_chain_test.go @@ -32,6 +32,7 @@ func TestAPI_DiscoveryChain_Get(t *testing.T) { Namespace: "default", Datacenter: "dc1", Protocol: "tcp", + Default: true, StartNode: "resolver:web.default.default.dc1", Nodes: map[string]*DiscoveryGraphNode{ "resolver:web.default.default.dc1": { @@ -72,6 +73,7 @@ func TestAPI_DiscoveryChain_Get(t *testing.T) { Namespace: "default", Datacenter: "dc2", Protocol: "tcp", + Default: true, StartNode: "resolver:web.default.default.dc2", Nodes: map[string]*DiscoveryGraphNode{ "resolver:web.default.default.dc2": { diff --git a/website/content/docs/connect/l7-traffic/discovery-chain.mdx b/website/content/docs/connect/l7-traffic/discovery-chain.mdx index 807c92b98..41a32a9b5 100644 --- a/website/content/docs/connect/l7-traffic/discovery-chain.mdx +++ b/website/content/docs/connect/l7-traffic/discovery-chain.mdx @@ -121,9 +121,15 @@ resolved by name using the [`Targets`](#targets) field. balancer data plane objects to avoid sharing customized and non-customized versions. +- `Default` `(bool: )` - Indicates if this discovery chain is based on no + `service-resolver`, `service-splitter`, or `service-router` config entries. + - `Protocol` `(string)` - The overall protocol shared by everything in the chain. +- `ServiceMeta` `(map)` - The metadata from the underlying `service-defaults` config + entry for the service named `ServiceName`. + - `StartNode` `(string)` - The first key into the `Nodes` map that should be followed when traversing the discovery chain. From 232da6e8f42b9805385f1bbccc2fc00c9db8e6d6 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Wed, 30 Mar 2022 10:08:17 -0500 Subject: [PATCH 041/785] build: auto install correct version of protoc locally (#12651) --- .circleci/config.yml | 9 --------- .gitignore | 3 ++- GNUmakefile | 32 +++++++++++++++++++----------- build-support/scripts/proto-gen.sh | 21 ++++++++++++++++++-- 4 files changed, 41 insertions(+), 24 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 80b19a03e..c39652d7b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -226,15 +226,6 @@ jobs: <<: *ENVIRONMENT steps: - checkout - - run: - name: Install protobuf - command: | - protoc_version="$(make print-PROTOC_VERSION)" - wget https://github.com/protocolbuffers/protobuf/releases/download/v${protoc_version}/protoc-${protoc_version}-linux-x86_64.zip - sudo unzip -d /usr/local protoc-*.zip - sudo chmod +x /usr/local/bin/protoc - sudo chmod -R a+Xr /usr/local/include/google/ - rm protoc-*.zip - run: name: Install protobuf command: make proto-tools diff --git a/.gitignore b/.gitignore index 204a06653..e67312cbe 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ *.test .envrc .gotools +.protobuf .vagrant/ /pkg bin/ @@ -59,4 +60,4 @@ override.tf.json # Ignore CLI configuration files .terraformrc -terraform.rc \ No newline at end of file +terraform.rc diff --git a/GNUmakefile b/GNUmakefile index 097e22539..120a2dfd8 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -12,6 +12,11 @@ GOTOOLS = \ github.com/hashicorp/lint-consul-retry@master PROTOC_VERSION=3.12.3 +PROTOC_OS := $(shell if test "$(uname)" == "Darwin"; then echo osx; else echo linux; fi) +PROTOC_ZIP := protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-x86_64.zip +PROTOC_URL := https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/$(PROTOC_ZIP) +PROTOC_ROOT := .protobuf/protoc-$(PROTOC_OS)-$(PROTOC_VERSION) +PROTOC_BIN := $(PROTOC_ROOT)/bin/protoc GOPROTOVERSION?=$(shell grep github.com/golang/protobuf go.mod | awk '{print $$2}') GOPROTOTOOLS = \ github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ @@ -33,7 +38,7 @@ GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true GIT_IMPORT=github.com/hashicorp/consul/version GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -PROTOFILES?=$(shell find . -name '*.proto' | grep -v 'vendor/') +PROTOFILES?=$(shell find . -name '*.proto' | grep -v 'vendor/' | grep -v '.protobuf' ) PROTOGOFILES=$(PROTOFILES:.proto=.pb.go) PROTOGOBINFILES=$(PROTOFILES:.proto=.pb.binary.go) @@ -353,22 +358,25 @@ else @go test -v ./agent -run Vault endif -.PHONY: protoc-check -protoc-check: - $(info checking protocol buffer compiler version (expect: $(PROTOC_VERSION))) - @if ! command -v protoc &>/dev/null; then \ - echo "ERROR: protoc is not installed; please install version $(PROTOC_VERSION)" >&2 ; \ - exit 1 ; \ - fi - @if [[ "$$(protoc --version | cut -d' ' -f2)" != "$(PROTOC_VERSION)" ]]; then \ - echo "ERROR: protoc version $(PROTOC_VERSION) is required" >&2 ; \ +.PHONY: protoc-install +protoc-install: + $(info locally installing protocol buffer compiler version if needed (expect: $(PROTOC_VERSION))) + @if [[ ! -x $(PROTOC_ROOT)/bin/protoc ]]; then \ + mkdir -p .protobuf/tmp ; \ + if [[ ! 
-f .protobuf/tmp/$(PROTOC_ZIP) ]]; then \ + ( cd .protobuf/tmp && curl -sSL "$(PROTOC_URL)" -o "$(PROTOC_ZIP)" ) ; \ + fi ; \ + mkdir -p $(PROTOC_ROOT) ; \ + unzip -d $(PROTOC_ROOT) .protobuf/tmp/$(PROTOC_ZIP) ; \ + chmod -R a+Xr $(PROTOC_ROOT) ; \ + chmod +x $(PROTOC_ROOT)/bin/protoc ; \ fi -proto: protoc-check $(PROTOGOFILES) $(PROTOGOBINFILES) +proto: protoc-install $(PROTOGOFILES) $(PROTOGOBINFILES) @echo "Generated all protobuf Go files" %.pb.go %.pb.binary.go: %.proto - @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc "$<" + @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc --protoc-bin "$(PROTOC_BIN)" "$<" # utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION') print-% : ; @echo $($*) diff --git a/build-support/scripts/proto-gen.sh b/build-support/scripts/proto-gen.sh index c1dedc52e..022b2ff44 100755 --- a/build-support/scripts/proto-gen.sh +++ b/build-support/scripts/proto-gen.sh @@ -23,6 +23,7 @@ Description: generated code. Options: + --protoc-bin Path to protoc. --import-replace Replace imports of google types with those from the protobuf repo. --grpc Enable the gRPC plugin -h | --help Print this help text. @@ -38,6 +39,7 @@ function err_usage { function main { local -i grpc=0 local proto_path= + local protoc_bin= while test $# -gt 0 do @@ -50,6 +52,10 @@ function main { grpc=1 shift ;; + --protoc-bin ) + protoc_bin="$2" + shift 2 + ;; * ) proto_path="$1" shift @@ -63,6 +69,17 @@ function main { return 1 fi + if test -z "${protoc_bin}" + then + protoc_bin="$(command -v protoc)" + if test -z "${protoc_bin}" + then + err_usage "ERROR: no proto-bin specified and protoc could not be discovered" + return 1 + fi + fi + + go mod download local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) @@ -92,14 +109,14 @@ function main { # -I="${golang_proto_path}/protobuf" \ local -i ret=0 status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} ${mog_input_path}/*.gen.go" - echo "debug_run protoc \ + echo "debug_run ${protoc_bin} \ -I=\"${golang_proto_path}\" \ -I=\"${golang_proto_mod_path}\" \ -I=\"${SOURCE_DIR}\" \ --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ --go-binary_out=\"${SOURCE_DIR}\" \ \"${proto_path}\"" - debug_run protoc \ + debug_run ${protoc_bin} \ -I="${golang_proto_path}" \ -I="${golang_proto_mod_path}" \ -I="${SOURCE_DIR}" \ From c5a2de0c9b9b594efc3d78cceb85998fc1cb91b2 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Wed, 30 Mar 2022 10:37:44 -0500 Subject: [PATCH 042/785] need two dollar signs for a shell variable in makefiles (#12657) --- GNUmakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GNUmakefile b/GNUmakefile index 120a2dfd8..0649580b3 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -12,7 +12,7 @@ GOTOOLS = \ github.com/hashicorp/lint-consul-retry@master PROTOC_VERSION=3.12.3 -PROTOC_OS := $(shell if test "$(uname)" == "Darwin"; then echo osx; else echo linux; fi) +PROTOC_OS := $(shell if test "$$(uname)" == "Darwin"; then echo osx; else echo linux; fi) PROTOC_ZIP := protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-x86_64.zip PROTOC_URL := https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/$(PROTOC_ZIP) PROTOC_ROOT := .protobuf/protoc-$(PROTOC_OS)-$(PROTOC_VERSION) From 8d51e22d269535ca12136ae494bfcdb870607e31 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 30 Mar 2022 11:38:44 -0400 Subject: [PATCH 043/785] Update raft-boltdb to pull in new writeCapacity metric (#12646) --- .changelog/12646.txt | 3 +++ go.mod | 2 +- go.sum | 4 ++-- website/content/docs/agent/telemetry.mdx | 24 ++++++++++++------------ 4 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 .changelog/12646.txt diff --git a/.changelog/12646.txt b/.changelog/12646.txt new file mode 100644 index 000000000..1dddfc115 --- /dev/null +++ b/.changelog/12646.txt @@ -0,0 +1,3 @@ +```release-note:improvement +metrics: The `consul.raft.boltdb.writeCapacity` metric was added and indicates a theoretical number of writes/second that can be performed to Consul. +``` diff --git a/go.mod b/go.mod index e438f1bf2..9f43af2bb 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/hashicorp/raft v1.3.6 github.com/hashicorp/raft-autopilot v0.1.5 github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 // indirect - github.com/hashicorp/raft-boltdb/v2 v2.2.0 + github.com/hashicorp/raft-boltdb/v2 v2.2.2 github.com/hashicorp/serf v0.9.7 github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 diff --git a/go.sum b/go.sum index 688dd2b8f..972a1634c 100644 --- a/go.sum +++ b/go.sum @@ -304,8 +304,8 @@ github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pN github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 h1:Ye8SofeDHJzu9xvvaMmpMkqHELWW7rTcXwdUR0CWW48= github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= -github.com/hashicorp/raft-boltdb/v2 v2.2.0 h1:/CVN9LSAcH50L3yp2TsPFIpeyHn1m3VF6kiutlDE3Nw= -github.com/hashicorp/raft-boltdb/v2 v2.2.0/go.mod h1:SgPUD5TP20z/bswEr210SnkUFvQP/YjKV95aaiTbeMQ= +github.com/hashicorp/raft-boltdb/v2 v2.2.2 h1:rlkPtOllgIcKLxVT4nutqlTH2NRFn+tO1wwZk/4Dxqw= +github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 029ff9bcb..1c02cebf0 100644 --- 
a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -273,9 +273,10 @@ This metric should be monitored to ensure that the license doesn't expire to pre | Metric Name | Description | Unit | Type | | :-------------------------------- | :--------------------------------------------------------------- | :---- | :---- | -| `consul.raft.boltdb.freelistBytes` | Represents the number of bytes necessary to encode the freelist metadata. When [`raft_boltdb.NoFreelistSync`](/docs/agent/options#NoFreelistSync) is set to `false` these metadata bytes must also be written to disk for each committed log. | bytes | gauge | -| `consul.raft.boltdb.logsPerBatch` | Measures the number of logs being written per batch to the db. | logs | sample | -| `consul.raft.boltdb.storeLogs` | Measures the amount of time spent writing logs to the db. | ms | timer | +| `consul.raft.boltdb.freelistBytes` | Represents the number of bytes necessary to encode the freelist metadata. When [`raft_boltdb.NoFreelistSync`](/docs/agent/options#NoFreelistSync) is set to `false` these metadata bytes must also be written to disk for each committed log. | bytes | gauge | +| `consul.raft.boltdb.logsPerBatch` | Measures the number of logs being written per batch to the db. | logs | sample | +| `consul.raft.boltdb.storeLogs` | Measures the amount of time spent writing logs to the db. | ms | timer | +| `consul.raft.boltdb.writeCapacity` | Theoretical write capacity in terms of the number of logs that can be written per second. Each sample outputs what the capacity would be if future batched log write operations were similar to this one. This similarity encompasses 4 things: batch size, byte size, disk performance and boltdb performance. While none of these will be static and its highly likely individual samples of this metric will vary, aggregating this metric over a larger time window should provide a decent picture into how this BoltDB store can perform | logs/second | sample | ** Requirements: ** @@ -293,15 +294,13 @@ upper limit to the throughput of write operations within Consul. In Consul each write operation will turn into a single Raft log to be committed. Raft will process these logs and store them within Bolt DB in batches. Each call to store logs within Bolt DB is measured to record how long -it took as well as how many logs were contained in the batch. Writing logs is this fashion is serialized so that -a subsequent log storage operation can only be started after the previous one completed. Therefore the maximum number -of log storage operations that can be performed each second can be calculated with the following equation: -`(1000 ms) / (consul.raft.boltdb.storeLogs ms/op)`. From there we can extrapolate the maximum number of Consul writes -per second by multiplying that value by the `consul.raft.boltdb.logsPerBatch` metric's value. When log storage -operations are becoming slower you may not see an immediate decrease in write throughput to Consul due to increased -batch sizes of the each operation. However, the max batch size allowed is 64 logs. Therefore if the `logsPerBatch` -metric is near 64 and the `storeLogs` metric is seeing increased time to write each batch to disk, then it is likely -that increased write latencies and other errors may occur. +it took as well as how many logs were contained in the batch. Writing logs in this fashion is serialized so that +a subsequent log storage operation can only be started after the previous one completed. 
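As a worked illustration of this relationship (the numbers here are hypothetical, not measured values): if `consul.raft.boltdb.storeLogs` averages 5 ms per batch, the store can complete roughly 1000 / 5 = 200 batch writes per second, and if `consul.raft.boltdb.logsPerBatch` averages 40 logs over the same window, `consul.raft.boltdb.writeCapacity` would sample at roughly 200 * 40 = 8,000 logs per second.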
The maximum number +of log storage operations that can be performed each second is represented with the `consul.raft.boltdb.writeCapacity` +metric. When log storage operations are becoming slower you may not see an immediate decrease in write capacity +due to increased batch sizes of the each operation. However, the max batch size allowed is 64 logs. Therefore if +the `logsPerBatch` metric is near 64 and the `storeLogs` metric is seeing increased time to write each batch to disk, +then it is likely that increased write latencies and other errors may occur. There can be a number of potential issues that can cause this. Often times it could be performance of the underlying disks that is the issue. Other times it may be caused by Bolt DB behavior. Bolt DB keeps track of free space within @@ -421,6 +420,7 @@ These metrics are used to monitor the health of the Consul servers. | `consul.raft.boltdb.txstats.split` | Counts the number of nodes split in the db since Consul was started. | splits | counter | | `consul.raft.boltdb.txstats.write` | Counts the number of writes to the db since Consul was started. | writes | counter | | `consul.raft.boltdb.txstats.writeTime` | Measures the amount of time spent performing writes to the db. | ms | timer | +| `consul.raft.boltdb.writeCapacity` | Theoretical write capacity in terms of the number of logs that can be written per second. Each sample outputs what the capacity would be if future batched log write operations were similar to this one. This similarity encompasses 4 things: batch size, byte size, disk performance and boltdb performance. While none of these will be static and its highly likely individual samples of this metric will vary, aggregating this metric over a larger time window should provide a decent picture into how this BoltDB store can perform | logs/second | sample | | `consul.raft.commitNumLogs` | Measures the count of logs processed for application to the FSM in a single batch. | logs | gauge | | `consul.raft.commitTime` | Measures the time it takes to commit a new entry to the Raft log on the leader. | ms | timer | | `consul.raft.fsm.lastRestoreDuration` | Measures the time taken to restore the FSM from a snapshot on an agent restart or from the leader calling installSnapshot. This is a gauge that holds it's value since most servers only restore during restarts which are typically infrequent. 
| ms | gauge | From 91a493efe9cc3a95850dce1512ce498be756759f Mon Sep 17 00:00:00 2001 From: Eric Date: Wed, 30 Mar 2022 12:51:56 -0400 Subject: [PATCH 044/785] Bump go-control-plane * `go get cloud.google.com/go@v0.59.0` * `go get github.com/envoyproxy/go-control-plane@v0.9.9` * `make envoy-library` * Bumpprotoc to 3.15.8 --- GNUmakefile | 4 +- agent/auto-config/mock_test.go | 5 + agent/config/runtime_test.go | 13 +- agent/consul/auto_config_endpoint_test.go | 77 +- agent/consul/state/catalog_events_test.go | 14 +- agent/consul/state/connect_ca_test.go | 3 +- agent/grpc/private/handler_test.go | 2 +- .../private/internal/testservice/simple.pb.go | 288 ++-- .../services/subscribe/subscribe_test.go | 29 +- agent/grpc/private/stats_test.go | 12 +- agent/rpcclient/health/view.go | 6 +- agent/rpcclient/health/view_test.go | 24 +- agent/structs/structs.go | 9 +- agent/submatview/materializer.go | 6 +- agent/submatview/store_test.go | 4 +- agent/xds/clusters.go | 26 +- ...th-chain-and-overrides.envoy-1-20-x.golden | 9 +- ...grpc-new-cluster-http1.envoy-1-20-x.golden | 9 +- ...aths-new-cluster-http2.envoy-1-20-x.golden | 9 +- ...ss-gateway-no-services.envoy-1-20-x.golden | 2 - ...sh-gateway-no-services.envoy-1-20-x.golden | 2 - ...ng-gateway-no-services.envoy-1-20-x.golden | 2 - ...ss-gateway-no-services.envoy-1-20-x.golden | 2 - ...sh-gateway-no-services.envoy-1-20-x.golden | 2 - ...ng-gateway-no-services.envoy-1-20-x.golden | 2 - ...ss-gateway-no-services.envoy-1-20-x.golden | 2 - .../routes/defaults.envoy-1-20-x.golden | 2 - ...ress-defaults-no-chain.envoy-1-20-x.golden | 2 - ...ith-chain-external-sni.envoy-1-20-x.golden | 2 - .../ingress-with-chain.envoy-1-20-x.golden | 2 - agent/xds/xds_protocol_helpers_test.go | 23 +- agent/xds/z_xds_packages.go | 129 +- .../scripts/envoy-library-references.sh | 4 +- build-support/scripts/proto-gen.sh | 6 +- connect/tls_test.go | 10 +- go.mod | 15 +- go.sum | 250 ++- proto/pbacl/acl.pb.go | 183 ++- proto/pbautoconf/auto_config.pb.go | 359 ++-- proto/pbcommon/common.go | 12 +- proto/pbcommon/common.pb.go | 871 ++++++---- proto/pbconfig/config.pb.go | 1133 ++++++++----- proto/pbconnect/connect.pb.go | 645 +++++--- proto/pbservice/healthcheck.pb.go | 1120 ++++++++----- proto/pbservice/ids_test.go | 14 +- proto/pbservice/node.pb.go | 643 +++++--- proto/pbservice/service.pb.go | 1441 +++++++++++------ proto/pbsubscribe/subscribe.pb.go | 744 ++++++--- 48 files changed, 5269 insertions(+), 2904 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 0649580b3..873d1ad60 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -11,7 +11,7 @@ GOTOOLS = \ github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ github.com/hashicorp/lint-consul-retry@master -PROTOC_VERSION=3.12.3 +PROTOC_VERSION=3.15.8 PROTOC_OS := $(shell if test "$$(uname)" == "Darwin"; then echo osx; else echo linux; fi) PROTOC_ZIP := protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-x86_64.zip PROTOC_URL := https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/$(PROTOC_ZIP) @@ -22,7 +22,7 @@ GOPROTOTOOLS = \ github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ github.com/hashicorp/protoc-gen-go-binary@master \ github.com/favadi/protoc-go-inject-tag@v1.3.0 \ - github.com/hashicorp/mog@v0.1.1 + github.com/hashicorp/mog@v0.1.2 GOTAGS ?= GOPATH=$(shell go env GOPATH) diff --git a/agent/auto-config/mock_test.go b/agent/auto-config/mock_test.go index 49d3ed29e..45fd42ef4 100644 --- a/agent/auto-config/mock_test.go +++ b/agent/auto-config/mock_test.go @@ -276,6 +276,11 @@ func (m 
*mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok for _, root := range indexedRoots.Roots { pems = append(pems, root.RootCert) } + for _, root := range indexedRoots.Roots { + if len(root.IntermediateCerts) == 0 { + root.IntermediateCerts = nil + } + } // we should update the TLS configurator with the proper certs m.tlsCfg.On("UpdateAutoTLS", diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index b239b1ed4..d74650c07 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -18,7 +18,6 @@ import ( "time" "github.com/armon/go-metrics/prometheus" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" @@ -30,6 +29,7 @@ import ( "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" @@ -5604,7 +5604,7 @@ func (tc testCase) run(format string, dataDir string) func(t *testing.T) { expected.ACLResolverSettings.NodeName = expected.NodeName expected.ACLResolverSettings.EnterpriseMeta = *structs.NodeEnterpriseMetaInPartition(expected.PartitionOrDefault()) - assertDeepEqual(t, expected, actual, cmpopts.EquateEmpty()) + prototest.AssertDeepEqual(t, expected, actual, cmpopts.EquateEmpty()) } } @@ -5617,13 +5617,6 @@ func runCase(t *testing.T, name string, fn func(t *testing.T)) { }) } -func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { - t.Helper() - if diff := cmp.Diff(x, y, opts...); diff != "" { - t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) - } -} - func TestLoad_InvalidConfigFormat(t *testing.T) { _, err := Load(LoadOpts{ConfigFormat: "yaml"}) require.Error(t, err) @@ -6432,7 +6425,7 @@ func TestLoad_FullConfig(t *testing.T) { opts.Overrides = append(opts.Overrides, versionSource("JNtPSav3", "R909Hblt", "ZT1JOQLn")) r, err := Load(opts) require.NoError(t, err) - assertDeepEqual(t, expected, r.RuntimeConfig) + prototest.AssertDeepEqual(t, expected, r.RuntimeConfig) require.ElementsMatch(t, expectedWarns, r.Warnings, "Warnings: %#v", r.Warnings) }) } diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index f81461bbb..676b126fd 100644 --- a/agent/consul/auto_config_endpoint_test.go +++ b/agent/consul/auto_config_endpoint_test.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/consul/proto/pbautoconf" "github.com/hashicorp/consul/proto/pbconfig" "github.com/hashicorp/consul/proto/pbconnect" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" @@ -213,8 +214,8 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { // ------------------------------------------------------------------------- type testCase struct { - request pbautoconf.AutoConfigRequest - expected pbautoconf.AutoConfigResponse + request *pbautoconf.AutoConfigRequest + expected *pbautoconf.AutoConfigResponse patchResponse func(t *testing.T, srv *Server, resp *pbautoconf.AutoConfigResponse) err string } @@ -223,13 +224,13 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { cases := map[string]testCase{ "wrong-datacenter": { - request: pbautoconf.AutoConfigRequest{ + request: &pbautoconf.AutoConfigRequest{ Datacenter: "no-such-dc", }, err: `invalid datacenter 
"no-such-dc" - agent auto configuration cannot target a remote datacenter`, }, "unverifiable": { - request: pbautoconf.AutoConfigRequest{ + request: &pbautoconf.AutoConfigRequest{ Node: "test-node", // this is signed using an incorrect private key JWT: signJWTWithStandardClaims(t, altpriv, map[string]interface{}{"consul_node_name": "test-node"}), @@ -237,14 +238,14 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { err: "Permission denied: Failed JWT authorization: no known key successfully validated the token signature", }, "claim-assertion-failed": { - request: pbautoconf.AutoConfigRequest{ + request: &pbautoconf.AutoConfigRequest{ Node: "test-node", JWT: signJWTWithStandardClaims(t, priv, map[string]interface{}{"wrong_claim": "test-node"}), }, err: "Permission denied: Failed JWT claim assertion", }, "bad-csr-id": { - request: pbautoconf.AutoConfigRequest{ + request: &pbautoconf.AutoConfigRequest{ Node: "test-node", JWT: signJWTWithStandardClaims(t, priv, map[string]interface{}{"consul_node_name": "test-node"}), CSR: altCSR, @@ -252,12 +253,12 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { err: "Spiffe ID agent name (alt) of the certificate signing request is not for the correct node (test-node)", }, "good": { - request: pbautoconf.AutoConfigRequest{ + request: &pbautoconf.AutoConfigRequest{ Node: "test-node", JWT: signJWTWithStandardClaims(t, priv, map[string]interface{}{"consul_node_name": "test-node"}), CSR: csr, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ CARoots: pbroots, ExtraCACertificates: []string{cacert}, Config: &pbconfig.Config{ @@ -323,16 +324,16 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { for testName, tcase := range cases { t.Run(testName, func(t *testing.T) { - var reply pbautoconf.AutoConfigResponse - err := msgpackrpc.CallWithCodec(codec, "AutoConfig.InitialConfiguration", &tcase.request, &reply) + reply := &pbautoconf.AutoConfigResponse{} + err := msgpackrpc.CallWithCodec(codec, "AutoConfig.InitialConfiguration", &tcase.request, reply) if tcase.err != "" { testutil.RequireErrorContains(t, err, tcase.err) } else { require.NoError(t, err) if tcase.patchResponse != nil { - tcase.patchResponse(t, s, &reply) + tcase.patchResponse(t, s, reply) } - require.Equal(t, tcase.expected, reply) + prototest.AssertDeepEqual(t, tcase.expected, reply) } }) } @@ -342,7 +343,7 @@ func TestAutoConfig_baseConfig(t *testing.T) { type testCase struct { serverConfig Config opts AutoConfigOptions - expected pbautoconf.AutoConfigResponse + expected *pbautoconf.AutoConfigResponse err string } @@ -356,7 +357,7 @@ func TestAutoConfig_baseConfig(t *testing.T) { NodeName: "lBdc0lsH", SegmentName: "HZiwlWpi", }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ Datacenter: "oSWzfhnU", PrimaryDatacenter: "53XO9mx4", @@ -380,8 +381,8 @@ func TestAutoConfig_baseConfig(t *testing.T) { config: &tcase.serverConfig, } - actual := pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} - err := ac.baseConfig(tcase.opts, &actual) + actual := &pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} + err := ac.baseConfig(tcase.opts, actual) if tcase.err == "" { require.NoError(t, err) require.Equal(t, tcase.expected, actual) @@ -403,7 +404,7 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { type testCase struct { tlsConfig tlsutil.Config - expected pbautoconf.AutoConfigResponse + expected *pbautoconf.AutoConfigResponse } cases := map[string]testCase{ 
@@ -417,7 +418,7 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ TLS: &pbconfig.TLS{ VerifyOutgoing: true, @@ -438,7 +439,7 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ TLS: &pbconfig.TLS{ VerifyOutgoing: true, @@ -461,8 +462,8 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { tlsConfigurator: configurator, } - actual := pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} - err = ac.updateTLSSettingsInConfig(AutoConfigOptions{}, &actual) + actual := &pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} + err = ac.updateTLSSettingsInConfig(AutoConfigOptions{}, actual) require.NoError(t, err) require.Equal(t, tcase.expected, actual) }) @@ -472,7 +473,7 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { func TestAutoConfig_updateGossipEncryptionInConfig(t *testing.T) { type testCase struct { conf memberlist.Config - expected pbautoconf.AutoConfigResponse + expected *pbautoconf.AutoConfigResponse } gossipKey := make([]byte, 32) @@ -492,7 +493,7 @@ func TestAutoConfig_updateGossipEncryptionInConfig(t *testing.T) { GossipVerifyIncoming: true, GossipVerifyOutgoing: true, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ Gossip: &pbconfig.Gossip{ Encryption: &pbconfig.GossipEncryption{ @@ -510,7 +511,7 @@ func TestAutoConfig_updateGossipEncryptionInConfig(t *testing.T) { GossipVerifyIncoming: false, GossipVerifyOutgoing: false, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ Gossip: &pbconfig.Gossip{ Encryption: &pbconfig.GossipEncryption{ @@ -525,7 +526,7 @@ func TestAutoConfig_updateGossipEncryptionInConfig(t *testing.T) { "encryption-disabled": { // zero values all around - if no keyring is configured then the gossip // encryption settings should not be set. 
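The surrounding changes from `pbautoconf` value literals to pointers follow from the regenerated protobuf code in this patch: the newer protoc-gen-go output embeds internal bookkeeping fields in each message, and only the pointer type implements `proto.Message`. A minimal sketch of the intended construction and marshalling, assuming only the `Node` and `Datacenter` fields that appear in this diff:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"github.com/hashicorp/consul/proto/pbautoconf"
)

func main() {
	// Build the request as a pointer; copying the struct by value would copy
	// the embedded protoimpl state, which go vet flags and the new runtime is
	// not meant to support.
	req := &pbautoconf.AutoConfigRequest{
		Datacenter: "dc1",
		Node:       "test-node",
	}

	// Only *AutoConfigRequest satisfies proto.Message, so marshalling (and the
	// RPC plumbing exercised by these tests) always takes the pointer.
	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(data))
}
```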
- expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{}, }, }, @@ -540,8 +541,8 @@ func TestAutoConfig_updateGossipEncryptionInConfig(t *testing.T) { config: cfg, } - actual := pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} - err := ac.updateGossipEncryptionInConfig(AutoConfigOptions{}, &actual) + actual := &pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} + err := ac.updateGossipEncryptionInConfig(AutoConfigOptions{}, actual) require.NoError(t, err) require.Equal(t, tcase.expected, actual) }) @@ -617,7 +618,7 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { tlsConfig tlsutil.Config opts AutoConfigOptions - expected pbautoconf.AutoConfigResponse + expected *pbautoconf.AutoConfigResponse } cases := map[string]testCase{ @@ -634,7 +635,7 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { CipherSuites: []types.TLSCipherSuite{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, }, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ CARoots: pbroots, ExtraCACertificates: []string{cacert}, Config: &pbconfig.Config{}, @@ -658,7 +659,7 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { CSR: csr, SpiffeID: &csrID, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{}, CARoots: pbroots, ExtraCACertificates: []string{cacert}, @@ -669,7 +670,7 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { serverConfig: Config{ ConnectEnabled: false, }, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{}, }, }, @@ -690,8 +691,8 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { backend: backend, } - actual := pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} - err = ac.updateTLSCertificatesInConfig(tcase.opts, &actual) + actual := &pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} + err = ac.updateTLSCertificatesInConfig(tcase.opts, actual) require.NoError(t, err) require.Equal(t, tcase.expected, actual) }) @@ -701,7 +702,7 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { func TestAutoConfig_updateACLsInConfig(t *testing.T) { type testCase struct { config Config - expected pbautoconf.AutoConfigResponse + expected *pbautoconf.AutoConfigResponse expectACLToken bool err error } @@ -729,7 +730,7 @@ func TestAutoConfig_updateACLsInConfig(t *testing.T) { ACLEnableKeyListPolicy: true, }, expectACLToken: true, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ ACL: &pbconfig.ACL{ Enabled: true, @@ -761,7 +762,7 @@ func TestAutoConfig_updateACLsInConfig(t *testing.T) { ACLEnableKeyListPolicy: true, }, expectACLToken: false, - expected: pbautoconf.AutoConfigResponse{ + expected: &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ ACL: &pbconfig.ACL{ Enabled: false, @@ -820,8 +821,8 @@ func TestAutoConfig_updateACLsInConfig(t *testing.T) { ac := AutoConfig{config: &tcase.config, backend: backend} - actual := pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} - err := ac.updateACLsInConfig(AutoConfigOptions{NodeName: "something"}, &actual) + actual := &pbautoconf.AutoConfigResponse{Config: &pbconfig.Config{}} + err := ac.updateACLsInConfig(AutoConfigOptions{NodeName: 
"something"}, actual) if tcase.err != nil { testutil.RequireErrorContains(t, err, tcase.err.Error()) } else { diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index 16269242e..bb17dae10 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/types" ) @@ -162,7 +163,7 @@ func TestServiceHealthSnapshot(t *testing.T) { }), }, } - assertDeepEqual(t, expected, buf.events, cmpEvents) + prototest.AssertDeepEqual(t, expected, buf.events, cmpEvents) } func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) { @@ -263,7 +264,7 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) { }), }, } - assertDeepEqual(t, expected, buf.events, cmpEvents) + prototest.AssertDeepEqual(t, expected, buf.events, cmpEvents) } type snapshotAppender struct { @@ -1762,7 +1763,7 @@ func (tc eventsTestCase) run(t *testing.T) { } require.NoError(t, err) - assertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents, cmpopts.EquateEmpty()) + prototest.AssertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents, cmpopts.EquateEmpty()) } func runCase(t *testing.T, name string, fn func(t *testing.T)) bool { @@ -1855,13 +1856,6 @@ func evServiceIndex(idx uint64) func(e *stream.Event) error { } } -func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { - t.Helper() - if diff := cmp.Diff(x, y, opts...); diff != "" { - t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) - } -} - // cmpPartialOrderEvents returns a compare option which sorts events so that // all events for a particular topic are grouped together. The sort is // stable so events with the same key retain their relative order. diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index 2b2349c2d..0a39e7632 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/go-memdb" @@ -214,7 +215,7 @@ func TestStore_CARootSetList(t *testing.T) { assert.Nil(t, err) assert.Len(t, roots, 1) actual := roots[0] - assertDeepEqual(t, expected, *actual) + prototest.AssertDeepEqual(t, expected, *actual) } func TestStore_CARootSet_emptyID(t *testing.T) { diff --git a/agent/grpc/private/handler_test.go b/agent/grpc/private/handler_test.go index bb1a7f414..6edf82195 100644 --- a/agent/grpc/private/handler_test.go +++ b/agent/grpc/private/handler_test.go @@ -49,7 +49,7 @@ func TestHandler_PanicRecoveryInterceptor(t *testing.T) { resp, err := client.Something(ctx, &testservice.Req{}) expectedErr := status.Errorf(codes.Internal, "grpc: panic serving request") - require.Equal(t, expectedErr, err) + require.Equal(t, expectedErr.Error(), err.Error()) require.Nil(t, resp) // Read the log diff --git a/agent/grpc/private/internal/testservice/simple.pb.go b/agent/grpc/private/internal/testservice/simple.pb.go index bfd847a28..4f12adeb9 100644 --- a/agent/grpc/private/internal/testservice/simple.pb.go +++ b/agent/grpc/private/internal/testservice/simple.pb.go @@ -1,138 +1,238 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: agent/grpc/private/internal/testservice/simple.proto package testservice import ( context "context" - fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type Req struct { - Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } -func (m *Req) Reset() { *m = Req{} } -func (m *Req) String() string { return proto.CompactTextString(m) } -func (*Req) ProtoMessage() {} +func (x *Req) Reset() { + *x = Req{} + if protoimpl.UnsafeEnabled { + mi := &file_agent_grpc_private_internal_testservice_simple_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Req) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Req) ProtoMessage() {} + +func (x *Req) ProtoReflect() protoreflect.Message { + mi := &file_agent_grpc_private_internal_testservice_simple_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Req.ProtoReflect.Descriptor instead. 
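Related to the same runtime change, the handler_test assertion earlier in this patch compares `expectedErr.Error()` against `err.Error()`, which sidesteps comparing the status values directly now that their payloads are regenerated messages. A hypothetical helper (package and function names are illustrative, not part of this patch) that asserts on the status parts instead, using the code and message shown in that diff:

```go
package grpctest

import (
	"testing"

	"github.com/stretchr/testify/require"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// requireGRPCError unpacks a gRPC error and asserts on its code and message,
// avoiding a direct comparison of the error values themselves.
func requireGRPCError(t *testing.T, err error, code codes.Code, msg string) {
	t.Helper()
	s, ok := status.FromError(err)
	require.True(t, ok)
	require.Equal(t, code, s.Code())
	require.Equal(t, msg, s.Message())
}
```

In the test above it would be called as `requireGRPCError(t, err, codes.Internal, "grpc: panic serving request")`.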
func (*Req) Descriptor() ([]byte, []int) { - return fileDescriptor_98af0751f806f450, []int{0} + return file_agent_grpc_private_internal_testservice_simple_proto_rawDescGZIP(), []int{0} } -func (m *Req) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Req.Unmarshal(m, b) -} -func (m *Req) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Req.Marshal(b, m, deterministic) -} -func (m *Req) XXX_Merge(src proto.Message) { - xxx_messageInfo_Req.Merge(m, src) -} -func (m *Req) XXX_Size() int { - return xxx_messageInfo_Req.Size(m) -} -func (m *Req) XXX_DiscardUnknown() { - xxx_messageInfo_Req.DiscardUnknown(m) -} - -var xxx_messageInfo_Req proto.InternalMessageInfo - -func (m *Req) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *Req) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } type Resp struct { - ServerName string `protobuf:"bytes,1,opt,name=ServerName,proto3" json:"ServerName,omitempty"` - Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerName string `protobuf:"bytes,1,opt,name=ServerName,proto3" json:"ServerName,omitempty"` + Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } -func (m *Resp) Reset() { *m = Resp{} } -func (m *Resp) String() string { return proto.CompactTextString(m) } -func (*Resp) ProtoMessage() {} +func (x *Resp) Reset() { + *x = Resp{} + if protoimpl.UnsafeEnabled { + mi := &file_agent_grpc_private_internal_testservice_simple_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resp) ProtoMessage() {} + +func (x *Resp) ProtoReflect() protoreflect.Message { + mi := &file_agent_grpc_private_internal_testservice_simple_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resp.ProtoReflect.Descriptor instead. 
func (*Resp) Descriptor() ([]byte, []int) { - return fileDescriptor_98af0751f806f450, []int{1} + return file_agent_grpc_private_internal_testservice_simple_proto_rawDescGZIP(), []int{1} } -func (m *Resp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Resp.Unmarshal(m, b) -} -func (m *Resp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Resp.Marshal(b, m, deterministic) -} -func (m *Resp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resp.Merge(m, src) -} -func (m *Resp) XXX_Size() int { - return xxx_messageInfo_Resp.Size(m) -} -func (m *Resp) XXX_DiscardUnknown() { - xxx_messageInfo_Resp.DiscardUnknown(m) -} - -var xxx_messageInfo_Resp proto.InternalMessageInfo - -func (m *Resp) GetServerName() string { - if m != nil { - return m.ServerName +func (x *Resp) GetServerName() string { + if x != nil { + return x.ServerName } return "" } -func (m *Resp) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *Resp) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } -func init() { - proto.RegisterType((*Req)(nil), "testservice.Req") - proto.RegisterType((*Resp)(nil), "testservice.Resp") +var File_agent_grpc_private_internal_testservice_simple_proto protoreflect.FileDescriptor + +var file_agent_grpc_private_internal_testservice_simple_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, + 0x73, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x74, 0x65, 0x73, 0x74, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x03, 0x52, 0x65, 0x71, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, + 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x46, 0x0a, 0x04, 0x52, 0x65, + 0x73, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x32, 0x6d, 0x0a, 0x06, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x32, 0x0a, 0x09, + 0x53, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x12, 0x2f, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x30, + 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("agent/grpc/private/internal/testservice/simple.proto", fileDescriptor_98af0751f806f450) +var ( + file_agent_grpc_private_internal_testservice_simple_proto_rawDescOnce sync.Once + file_agent_grpc_private_internal_testservice_simple_proto_rawDescData = file_agent_grpc_private_internal_testservice_simple_proto_rawDesc +) + +func 
file_agent_grpc_private_internal_testservice_simple_proto_rawDescGZIP() []byte { + file_agent_grpc_private_internal_testservice_simple_proto_rawDescOnce.Do(func() { + file_agent_grpc_private_internal_testservice_simple_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_grpc_private_internal_testservice_simple_proto_rawDescData) + }) + return file_agent_grpc_private_internal_testservice_simple_proto_rawDescData } -var fileDescriptor_98af0751f806f450 = []byte{ - // 189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xcd, 0x0a, 0x82, 0x40, - 0x14, 0x85, 0xb3, 0x44, 0xf0, 0xb6, 0xa9, 0x59, 0x45, 0x8b, 0x08, 0x21, 0x68, 0xe5, 0x84, 0xf5, - 0x08, 0xe1, 0xb2, 0xc5, 0xf8, 0x04, 0x93, 0x5c, 0x6c, 0xc0, 0xf9, 0x71, 0xe6, 0x62, 0xaf, 0x1f, - 0x0a, 0x91, 0xb8, 0x6a, 0xfb, 0xdd, 0xef, 0x1e, 0xce, 0x81, 0x9b, 0x6c, 0xd0, 0x10, 0x6f, 0xbc, - 0xab, 0xb9, 0xf3, 0xaa, 0x97, 0x84, 0x5c, 0x19, 0x42, 0x6f, 0x64, 0xcb, 0x09, 0x03, 0x05, 0xf4, - 0xbd, 0xaa, 0x91, 0x07, 0xa5, 0x5d, 0x8b, 0xb9, 0xf3, 0x96, 0x2c, 0x5b, 0x4f, 0x2e, 0xd9, 0x09, - 0x56, 0x02, 0x3b, 0x76, 0x00, 0xb8, 0x4b, 0x92, 0x35, 0x0e, 0xdf, 0xbb, 0xe8, 0x18, 0x9d, 0x53, - 0x31, 0x21, 0x59, 0x09, 0xb1, 0xc0, 0xe0, 0x06, 0xaf, 0x42, 0xdf, 0xa3, 0x7f, 0x48, 0x8d, 0x5f, - 0xef, 0x47, 0x66, 0x39, 0xcb, 0x79, 0x4e, 0xa1, 0x21, 0xa9, 0xc6, 0x2e, 0xac, 0x80, 0xb4, 0xb2, - 0x1a, 0xe9, 0xa5, 0x4c, 0xc3, 0x36, 0xf9, 0xa4, 0x53, 0x2e, 0xb0, 0xdb, 0x6f, 0x67, 0x24, 0xb8, - 0x6c, 0xc1, 0x38, 0xc4, 0x65, 0x6b, 0xdf, 0x7f, 0xea, 0x97, 0xe8, 0x99, 0x8c, 0x8b, 0xaf, 0x9f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x5c, 0xf5, 0xcb, 0x29, 0x01, 0x00, 0x00, +var file_agent_grpc_private_internal_testservice_simple_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_agent_grpc_private_internal_testservice_simple_proto_goTypes = []interface{}{ + (*Req)(nil), // 0: testservice.Req + (*Resp)(nil), // 1: testservice.Resp +} +var file_agent_grpc_private_internal_testservice_simple_proto_depIdxs = []int32{ + 0, // 0: testservice.Simple.Something:input_type -> testservice.Req + 0, // 1: testservice.Simple.Flow:input_type -> testservice.Req + 1, // 2: testservice.Simple.Something:output_type -> testservice.Resp + 1, // 3: testservice.Simple.Flow:output_type -> testservice.Resp + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_agent_grpc_private_internal_testservice_simple_proto_init() } +func file_agent_grpc_private_internal_testservice_simple_proto_init() { + if File_agent_grpc_private_internal_testservice_simple_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_agent_grpc_private_internal_testservice_simple_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Req); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_agent_grpc_private_internal_testservice_simple_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_agent_grpc_private_internal_testservice_simple_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agent_grpc_private_internal_testservice_simple_proto_goTypes, + DependencyIndexes: file_agent_grpc_private_internal_testservice_simple_proto_depIdxs, + MessageInfos: file_agent_grpc_private_internal_testservice_simple_proto_msgTypes, + }.Build() + File_agent_grpc_private_internal_testservice_simple_proto = out.File + file_agent_grpc_private_internal_testservice_simple_proto_rawDesc = nil + file_agent_grpc_private_internal_testservice_simple_proto_goTypes = nil + file_agent_grpc_private_internal_testservice_simple_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. @@ -210,10 +310,10 @@ type SimpleServer interface { type UnimplementedSimpleServer struct { } -func (*UnimplementedSimpleServer) Something(ctx context.Context, req *Req) (*Resp, error) { +func (*UnimplementedSimpleServer) Something(context.Context, *Req) (*Resp, error) { return nil, status.Errorf(codes.Unimplemented, "method Something not implemented") } -func (*UnimplementedSimpleServer) Flow(req *Req, srv Simple_FlowServer) error { +func (*UnimplementedSimpleServer) Flow(*Req, Simple_FlowServer) error { return status.Errorf(codes.Unimplemented, "method Flow not implemented") } diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index aea7669c9..95df5fb13 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" @@ -28,6 +27,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/types" ) @@ -206,7 +206,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true}, }, } - assertDeepEqual(t, expected, snapshotEvents) + prototest.AssertDeepEqual(t, expected, snapshotEvents) }) runStep(t, "update the registration by adding a check", func(t *testing.T) { @@ -271,7 +271,7 @@ func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { }, }, } - assertDeepEqual(t, expectedEvent, event) + prototest.AssertDeepEqual(t, expectedEvent, event) }) } @@ -311,13 +311,6 @@ func getEvent(t *testing.T, ch chan eventOrError) *pbsubscribe.Event { return nil } -func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { - t.Helper() - if diff := cmp.Diff(x, y, opts...); diff != "" { - t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) - } -} - type testBackend struct { store *state.Store authorizer func(token string, entMeta *structs.EnterpriseMeta) acl.Authorizer @@ -571,7 +564,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true}, }, } - assertDeepEqual(t, expected, snapshotEvents) + prototest.AssertDeepEqual(t, expected, snapshotEvents) }) runStep(t, "update the registration by adding a check", func(t *testing.T) { @@ -636,7 +629,7 @@ func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { }, }, } - assertDeepEqual(t, expectedEvent, 
event) + prototest.AssertDeepEqual(t, expectedEvent, event) }) } @@ -949,20 +942,20 @@ func TestNewEventFromSteamEvent(t *testing.T) { type testCase struct { name string event stream.Event - expected pbsubscribe.Event + expected *pbsubscribe.Event } fn := func(t *testing.T, tc testCase) { expected := tc.expected actual := newEventFromStreamEvent(tc.event) - assertDeepEqual(t, &expected, actual, cmpopts.EquateEmpty()) + prototest.AssertDeepEqual(t, expected, actual, cmpopts.EquateEmpty()) } var testCases = []testCase{ { name: "end of snapshot", event: newEventFromSubscription(t, 0), - expected: pbsubscribe.Event{ + expected: &pbsubscribe.Event{ Index: 1, Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true}, }, @@ -970,7 +963,7 @@ func TestNewEventFromSteamEvent(t *testing.T) { { name: "new snapshot to follow", event: newEventFromSubscription(t, 22), - expected: pbsubscribe.Event{ + expected: &pbsubscribe.Event{ Payload: &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true}, }, }, @@ -1000,7 +993,7 @@ func TestNewEventFromSteamEvent(t *testing.T) { }, }), }, - expected: pbsubscribe.Event{ + expected: &pbsubscribe.Event{ Index: 2002, Payload: &pbsubscribe.Event_EventBatch{ EventBatch: &pbsubscribe.EventBatch{ @@ -1066,7 +1059,7 @@ func TestNewEventFromSteamEvent(t *testing.T) { }, }, }, - expected: pbsubscribe.Event{ + expected: &pbsubscribe.Event{ Index: 2002, Payload: &pbsubscribe.Event_ServiceHealth{ ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ diff --git a/agent/grpc/private/stats_test.go b/agent/grpc/private/stats_test.go index 78e63647e..0d7268e4e 100644 --- a/agent/grpc/private/stats_test.go +++ b/agent/grpc/private/stats_test.go @@ -15,6 +15,7 @@ import ( "google.golang.org/grpc" "github.com/hashicorp/consul/agent/grpc/private/internal/testservice" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/go-hclog" ) @@ -86,21 +87,14 @@ func TestHandler_EmitsStats(t *testing.T) { {key: []string{"testing", "grpc", "server", "connections"}, val: 0}, {key: []string{"testing", "grpc", "server", "streams"}, val: 0}, } - assertDeepEqual(t, expectedGauge, sink.gaugeCalls, cmpMetricCalls) + prototest.AssertDeepEqual(t, expectedGauge, sink.gaugeCalls, cmpMetricCalls) expectedCounter := []metricCall{ {key: []string{"testing", "grpc", "server", "connection", "count"}, val: 1}, {key: []string{"testing", "grpc", "server", "request", "count"}, val: 1}, {key: []string{"testing", "grpc", "server", "stream", "count"}, val: 1}, } - assertDeepEqual(t, expectedCounter, sink.incrCounterCalls, cmpMetricCalls) -} - -func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { - t.Helper() - if diff := cmp.Diff(x, y, opts...); diff != "" { - t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) - } + prototest.AssertDeepEqual(t, expectedCounter, sink.incrCounterCalls, cmpMetricCalls) } func patchGlobalMetrics(t *testing.T) (*fakeMetricsSink, func()) { diff --git a/agent/rpcclient/health/view.go b/agent/rpcclient/health/view.go index 7ec0eca26..6343032d9 100644 --- a/agent/rpcclient/health/view.go +++ b/agent/rpcclient/health/view.go @@ -21,9 +21,9 @@ type MaterializerDeps struct { Logger hclog.Logger } -func newMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index uint64) pbsubscribe.SubscribeRequest { - return func(index uint64) pbsubscribe.SubscribeRequest { - req := pbsubscribe.SubscribeRequest{ +func newMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index uint64) 
*pbsubscribe.SubscribeRequest { + return func(index uint64) *pbsubscribe.SubscribeRequest { + req := &pbsubscribe.SubscribeRequest{ Topic: pbsubscribe.Topic_ServiceHealth, Key: srvReq.ServiceName, Token: srvReq.Token, diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index 96bae37a1..c2a7ea79b 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/types" ) @@ -291,7 +292,7 @@ func TestHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T) { require.Equal(t, uint64(5), result.Index) expected := newExpectedNodes("node1", "node2", "node3") expected.Index = 5 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) req.QueryOptions.MinQueryIndex = result.Index }) @@ -319,7 +320,7 @@ func TestHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T) { require.Equal(t, uint64(20), result.Index) expected := newExpectedNodes("node2", "node3") expected.Index = 20 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) req.QueryOptions.MinQueryIndex = result.Index }) @@ -349,7 +350,7 @@ func TestHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T) { require.Equal(t, uint64(50), result.Index) expected := newExpectedNodes("node3", "node4", "node5") expected.Index = 50 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) req.QueryOptions.MinQueryIndex = result.Index }) @@ -376,7 +377,7 @@ func TestHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T) { require.Equal(t, uint64(50), result.Index) expected := newExpectedNodes("node3", "node4", "node5") expected.Index = 50 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) }) } @@ -399,13 +400,6 @@ var cmpCheckServiceNodeNames = cmp.Options{ }), } -func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { - t.Helper() - if diff := cmp.Diff(x, y, opts...); diff != "" { - t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) - } -} - func TestHealthView_IntegrationWithStore_EventBatches(t *testing.T) { namespace := getNamespace("ns3") client := newStreamClient(validateNamespace(namespace)) @@ -444,7 +438,7 @@ func TestHealthView_IntegrationWithStore_EventBatches(t *testing.T) { expected := newExpectedNodes("node1", "node2", "node3") expected.Index = 5 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) req.QueryOptions.MinQueryIndex = result.Index }) @@ -465,7 +459,7 @@ func TestHealthView_IntegrationWithStore_EventBatches(t *testing.T) { require.Equal(t, uint64(20), result.Index) expected := newExpectedNodes("node2", "node3", "node4") expected.Index = 20 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) req.QueryOptions.MinQueryIndex = result.Index }) @@ -512,7 +506,7 @@ func 
TestHealthView_IntegrationWithStore_Filtering(t *testing.T) { require.Equal(t, uint64(5), result.Index) expected := newExpectedNodes("node2") expected.Index = 5 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) req.QueryOptions.MinQueryIndex = result.Index }) @@ -532,7 +526,7 @@ func TestHealthView_IntegrationWithStore_Filtering(t *testing.T) { require.Equal(t, uint64(20), result.Index) expected := newExpectedNodes("node2") expected.Index = 20 - assertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) + prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) }) } diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 80f27dd95..1a678f5c0 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -2566,19 +2566,12 @@ func Encode(t MessageType, msg interface{}) ([]byte, error) { return buf.Bytes(), err } -type ProtoMarshaller interface { - Size() int - MarshalTo([]byte) (int, error) - Unmarshal([]byte) error - ProtoMessage() -} - func EncodeProtoInterface(t MessageType, message interface{}) ([]byte, error) { if marshaller, ok := message.(proto.Message); ok { return EncodeProto(t, marshaller) } - return nil, fmt.Errorf("message does not implement the ProtoMarshaller interface: %T", message) + return nil, fmt.Errorf("message does not implement proto.Message: %T", message) } func EncodeProto(t MessageType, pb proto.Message) ([]byte, error) { diff --git a/agent/submatview/materializer.go b/agent/submatview/materializer.go index b830689e6..3b870d9e1 100644 --- a/agent/submatview/materializer.go +++ b/agent/submatview/materializer.go @@ -63,7 +63,7 @@ type Deps struct { Client StreamClient Logger hclog.Logger Waiter *retry.Waiter - Request func(index uint64) pbsubscribe.SubscribeRequest + Request func(index uint64) *pbsubscribe.SubscribeRequest } // StreamClient provides a subscription to state change events. @@ -136,13 +136,13 @@ func isNonTemporaryOrConsecutiveFailure(err error, failures int) bool { // runSubscription opens a new subscribe streaming call to the servers and runs // for it's lifetime or until the view is closed. 
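Throughout the test diffs above, the local `assertDeepEqual` helpers built directly on `cmp.Diff` are replaced by `prototest.AssertDeepEqual`. The helper's implementation is not part of this diff; a minimal sketch of what a protobuf-aware version can look like, assuming it wraps go-cmp with `protocmp.Transform` so the regenerated messages are compared by field content rather than by their unexported internals:

```go
package prototest

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/testing/protocmp"
)

// AssertDeepEqual compares two values that may contain protobuf messages.
// protocmp.Transform teaches go-cmp to compare messages by their fields,
// ignoring the state/sizeCache/unknownFields bookkeeping added by the new
// protoc-gen-go output.
func AssertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
	t.Helper()
	opts = append(opts, protocmp.Transform())
	if diff := cmp.Diff(x, y, opts...); diff != "" {
		t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
	}
}
```

Callers such as the view and subscribe tests can keep passing extra options like `cmpopts.EquateEmpty()` unchanged.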
-func (m *Materializer) runSubscription(ctx context.Context, req pbsubscribe.SubscribeRequest) error { +func (m *Materializer) runSubscription(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { ctx, cancel := context.WithCancel(ctx) defer cancel() m.handler = initialHandler(req.Index) - s, err := m.deps.Client.Subscribe(ctx, &req) + s, err := m.deps.Client.Subscribe(ctx, req) if err != nil { return err } diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index 93b04d1e8..b177380d3 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -225,8 +225,8 @@ func (r *fakeRequest) NewMaterializer() (*Materializer, error) { View: &fakeView{srvs: make(map[string]*pbservice.CheckServiceNode)}, Client: r.client, Logger: hclog.New(nil), - Request: func(index uint64) pbsubscribe.SubscribeRequest { - req := pbsubscribe.SubscribeRequest{ + Request: func(index uint64) *pbsubscribe.SubscribeRequest { + req := &pbsubscribe.SubscribeRequest{ Topic: pbsubscribe.Topic_ServiceHealth, Key: "key", Token: "abcd", diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 716c320d3..fac6d0cfd 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -10,6 +10,7 @@ import ( envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + envoy_upstreams_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3" envoy_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" @@ -18,6 +19,7 @@ import ( "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/proxycfg" @@ -486,7 +488,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam protocol = cfg.Protocol } if protocol == "http2" || protocol == "grpc" { - c.Http2ProtocolOptions = &envoy_core_v3.Http2ProtocolOptions{} + s.setHttp2ProtocolOptions(c) } return c, err @@ -537,7 +539,7 @@ func (s *ResourceGenerator) makeUpstreamClusterForPreparedQuery(upstream structs OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck), } if cfg.Protocol == "http2" || cfg.Protocol == "grpc" { - c.Http2ProtocolOptions = &envoy_core_v3.Http2ProtocolOptions{} + s.setHttp2ProtocolOptions(c) } } @@ -742,7 +744,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( } if proto == "http2" || proto == "grpc" { - c.Http2ProtocolOptions = &envoy_core_v3.Http2ProtocolOptions{} + s.setHttp2ProtocolOptions(c) } commonTLSContext := makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()) @@ -1037,3 +1039,21 @@ func injectLBToCluster(ec *structs.LoadBalancer, c *envoy_cluster_v3.Cluster) er } return nil } + +func (s *ResourceGenerator) setHttp2ProtocolOptions(c *envoy_cluster_v3.Cluster) { + typedExtensionProtocolOptions := &envoy_upstreams_v3.HttpProtocolOptions{ + UpstreamProtocolOptions: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_{ + ExplicitHttpConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig{ + ProtocolConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{ + Http2ProtocolOptions: 
&envoy_core_v3.Http2ProtocolOptions{}, + }, + }, + }, + } + typedExtensionProtocolOptionsEncoded, err := anypb.New(typedExtensionProtocolOptions) + if err != nil { + s.Logger.Warn("failed to convert http protocol options to anypb") + } + c.TypedExtensionProtocolOptions = make(map[string]*anypb.Any) + c.TypedExtensionProtocolOptions["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] = typedExtensionProtocolOptionsEncoded +} diff --git a/agent/xds/testdata/clusters/connect-proxy-with-chain-and-overrides.envoy-1-20-x.golden b/agent/xds/testdata/clusters/connect-proxy-with-chain-and-overrides.envoy-1-20-x.golden index b0e4298f5..fd63324de 100644 --- a/agent/xds/testdata/clusters/connect-proxy-with-chain-and-overrides.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/connect-proxy-with-chain-and-overrides.envoy-1-20-x.golden @@ -18,8 +18,15 @@ "circuitBreakers": { }, - "http2ProtocolOptions": { + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "explicitHttpConfig": { + "http2ProtocolOptions": { + } + } + } }, "outlierDetection": { diff --git a/agent/xds/testdata/clusters/expose-paths-grpc-new-cluster-http1.envoy-1-20-x.golden b/agent/xds/testdata/clusters/expose-paths-grpc-new-cluster-http1.envoy-1-20-x.golden index 11d8684b4..b86715755 100644 --- a/agent/xds/testdata/clusters/expose-paths-grpc-new-cluster-http1.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/expose-paths-grpc-new-cluster-http1.envoy-1-20-x.golden @@ -50,8 +50,15 @@ } ] }, - "http2ProtocolOptions": { + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "explicitHttpConfig": { + "http2ProtocolOptions": { + } + } + } } } ], diff --git a/agent/xds/testdata/clusters/expose-paths-new-cluster-http2.envoy-1-20-x.golden b/agent/xds/testdata/clusters/expose-paths-new-cluster-http2.envoy-1-20-x.golden index 624b8d8e6..b42be025b 100644 --- a/agent/xds/testdata/clusters/expose-paths-new-cluster-http2.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/expose-paths-new-cluster-http2.envoy-1-20-x.golden @@ -25,8 +25,15 @@ } ] }, - "http2ProtocolOptions": { + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "explicitHttpConfig": { + "http2ProtocolOptions": { + } + } + } } }, { diff --git a/agent/xds/testdata/clusters/ingress-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/clusters/ingress-gateway-no-services.envoy-1-20-x.golden index 24861388e..cd8f56517 100644 --- a/agent/xds/testdata/clusters/ingress-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/ingress-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/clusters/mesh-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/clusters/mesh-gateway-no-services.envoy-1-20-x.golden index 24861388e..cd8f56517 100644 --- a/agent/xds/testdata/clusters/mesh-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/mesh-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - 
"resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/clusters/terminating-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/clusters/terminating-gateway-no-services.envoy-1-20-x.golden index 24861388e..cd8f56517 100644 --- a/agent/xds/testdata/clusters/terminating-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/terminating-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/ingress-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/endpoints/ingress-gateway-no-services.envoy-1-20-x.golden index 4e316f149..8504dae2b 100644 --- a/agent/xds/testdata/endpoints/ingress-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/endpoints/ingress-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/mesh-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/endpoints/mesh-gateway-no-services.envoy-1-20-x.golden index 4e316f149..8504dae2b 100644 --- a/agent/xds/testdata/endpoints/mesh-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/endpoints/mesh-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/terminating-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/endpoints/terminating-gateway-no-services.envoy-1-20-x.golden index 4e316f149..8504dae2b 100644 --- a/agent/xds/testdata/endpoints/terminating-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/endpoints/terminating-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/listeners/ingress-gateway-no-services.envoy-1-20-x.golden b/agent/xds/testdata/listeners/ingress-gateway-no-services.envoy-1-20-x.golden index c587fc277..53b67bb37 100644 --- a/agent/xds/testdata/listeners/ingress-gateway-no-services.envoy-1-20-x.golden +++ b/agent/xds/testdata/listeners/ingress-gateway-no-services.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/routes/defaults.envoy-1-20-x.golden b/agent/xds/testdata/routes/defaults.envoy-1-20-x.golden index 6bbc48e6d..9c050cbe6 100644 --- a/agent/xds/testdata/routes/defaults.envoy-1-20-x.golden +++ b/agent/xds/testdata/routes/defaults.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/routes/ingress-defaults-no-chain.envoy-1-20-x.golden 
b/agent/xds/testdata/routes/ingress-defaults-no-chain.envoy-1-20-x.golden index 6bbc48e6d..9c050cbe6 100644 --- a/agent/xds/testdata/routes/ingress-defaults-no-chain.envoy-1-20-x.golden +++ b/agent/xds/testdata/routes/ingress-defaults-no-chain.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/routes/ingress-with-chain-external-sni.envoy-1-20-x.golden b/agent/xds/testdata/routes/ingress-with-chain-external-sni.envoy-1-20-x.golden index 6bbc48e6d..9c050cbe6 100644 --- a/agent/xds/testdata/routes/ingress-with-chain-external-sni.envoy-1-20-x.golden +++ b/agent/xds/testdata/routes/ingress-with-chain-external-sni.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/testdata/routes/ingress-with-chain.envoy-1-20-x.golden b/agent/xds/testdata/routes/ingress-with-chain.envoy-1-20-x.golden index 6bbc48e6d..9c050cbe6 100644 --- a/agent/xds/testdata/routes/ingress-with-chain.envoy-1-20-x.golden +++ b/agent/xds/testdata/routes/ingress-with-chain.envoy-1-20-x.golden @@ -1,7 +1,5 @@ { "versionInfo": "00000001", - "resources": [ - ], "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", "nonce": "00000001" } \ No newline at end of file diff --git a/agent/xds/xds_protocol_helpers_test.go b/agent/xds/xds_protocol_helpers_test.go index 228cf543b..6f2863f31 100644 --- a/agent/xds/xds_protocol_helpers_test.go +++ b/agent/xds/xds_protocol_helpers_test.go @@ -18,6 +18,7 @@ import ( envoy_network_rbac_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v3" envoy_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + envoy_upstreams_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3" envoy_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" @@ -27,6 +28,7 @@ import ( "github.com/golang/protobuf/ptypes/wrappers" "github.com/mitchellh/copystructure" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" @@ -421,7 +423,7 @@ func makeTestCluster(t *testing.T, snap *proxycfg.ConfigSnapshot, fixtureName st TransportSocket: xdsNewUpstreamTransportSocket(t, snap, dbSNI, dbURI), } case "http2:db": - return &envoy_cluster_v3.Cluster{ + c := &envoy_cluster_v3.Cluster{ Name: dbSNI, ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{ Type: envoy_cluster_v3.Cluster_EDS, @@ -435,10 +437,23 @@ func makeTestCluster(t *testing.T, snap *proxycfg.ConfigSnapshot, fixtureName st CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{ HealthyPanicThreshold: &envoy_type_v3.Percent{Value: 0}, }, - ConnectTimeout: ptypes.DurationProto(5 * time.Second), - TransportSocket: xdsNewUpstreamTransportSocket(t, snap, dbSNI, dbURI), - Http2ProtocolOptions: &envoy_core_v3.Http2ProtocolOptions{}, + ConnectTimeout: ptypes.DurationProto(5 * time.Second), + TransportSocket: xdsNewUpstreamTransportSocket(t, snap, dbSNI, dbURI), } + 
typedExtensionProtocolOptions := &envoy_upstreams_v3.HttpProtocolOptions{ + UpstreamProtocolOptions: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_{ + ExplicitHttpConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig{ + ProtocolConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{ + Http2ProtocolOptions: &envoy_core_v3.Http2ProtocolOptions{}, + }, + }, + }, + } + typedExtensionProtocolOptionsEncoded, err := anypb.New(typedExtensionProtocolOptions) + require.NoError(t, err) + c.TypedExtensionProtocolOptions = make(map[string]*anypb.Any) + c.TypedExtensionProtocolOptions["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] = typedExtensionProtocolOptionsEncoded + return c case "http:db": return &envoy_cluster_v3.Cluster{ Name: dbSNI, diff --git a/agent/xds/z_xds_packages.go b/agent/xds/z_xds_packages.go index 2c781214a..4528e3ae2 100644 --- a/agent/xds/z_xds_packages.go +++ b/agent/xds/z_xds_packages.go @@ -5,6 +5,7 @@ package xds import ( _ "github.com/envoyproxy/go-control-plane/envoy/admin/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/admin/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" @@ -16,15 +17,21 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" _ "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/cluster/aggregate/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/cluster/dynamic_forward_proxy/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/cluster/redis" _ "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/common/dynamic_forward_proxy/v2alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/common/tap/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/core/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/accesslog/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/dubbo/router/v2alpha1" @@ -83,21 +90,24 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/zookeeper_proxy/v1alpha1" _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/thrift/rate_limit/v2alpha1" _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/thrift/router/v2alpha1" - _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/udp/dns_filter/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/filter/udp/udp_proxy/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/grpc_credential/v2alpha" _ 
"github.com/envoyproxy/go-control-plane/envoy/config/grpc_credential/v3" _ "github.com/envoyproxy/go-control-plane/envoy/config/health_checker/redis/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/listener/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/overload/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3" _ "github.com/envoyproxy/go-control-plane/envoy/config/ratelimit/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/ratelimit/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/ratelimit/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/resource_monitor/fixed_heap/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/resource_monitor/injected_resource/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/retry/omit_canary_hosts/v2" @@ -105,14 +115,16 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/config/retry/previous_hosts/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/retry/previous_priorities" _ "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/route/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/tap/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/tap/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/trace/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/trace/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/config/trace/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/transport_socket/alts/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/config/transport_socket/raw_buffer/v2" _ "github.com/envoyproxy/go-control-plane/envoy/config/transport_socket/tap/v2alpha" - _ "github.com/envoyproxy/go-control-plane/envoy/config/wasm/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v2" _ "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" _ "github.com/envoyproxy/go-control-plane/envoy/data/cluster/v2alpha" @@ -121,48 +133,88 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/data/core/v3" _ "github.com/envoyproxy/go-control-plane/envoy/data/dns/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/data/dns/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/data/dns/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/data/tap/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/data/tap/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/grpc/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/grpc/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/open_telemetry/v3alpha" + _ 
"github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/open_telemetry/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/stream/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/stream/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/wasm/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/cache/simple_http_cache/v3alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/dynamic_forward_proxy/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/redis/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/common/dynamic_forward_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/common/matching/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/common/matching/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/common/tap/v3" - _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filter/udp/dns_filter/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/common/tap/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/compression/brotli/compressor/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/compression/brotli/decompressor/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/compression/gzip/compressor/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/compression/gzip/decompressor/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/dependency/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/matcher/action/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/adaptive_concurrency/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/admission_control/v3alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/aws_lambda/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/aws_request_signing/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/buffer/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cache/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cache/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cdn_loop/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/composite/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/compressor/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/compressor/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/csrf/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/csrf/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/decompressor/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/dynamic_forward_proxy/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/dynamo/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ext_authz/v3" + 
_ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ext_authz/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ext_proc/v3alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_http1_bridge/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_json_transcoder/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_stats/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_web/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/gzip/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/gzip/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/header_to_metadata/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/header_to_metadata/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/health_check/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/health_check/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ip_tagging/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/kill_request/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/local_ratelimit/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/oauth2/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/oauth2/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/on_demand/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/original_src/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ratelimit/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ratelimit/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/squash/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/tap/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/tap/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/wasm/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_dst/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_src/v3" @@ -172,45 +224,110 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/direct_response/v3" _ 
"github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/dubbo_proxy/router/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/dubbo_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/dubbo_proxy/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/echo/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/ext_authz/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/ext_authz/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/kafka_broker/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/local_ratelimit/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/mongo_proxy/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/mysql_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/postgres_proxy/v3alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/ratelimit/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/ratelimit/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/redis_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rocketmq_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rocketmq_proxy/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/sni_cluster/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/thrift_proxy/router/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/thrift_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/thrift_proxy/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/wasm/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/zookeeper_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/udp/dns_filter/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/udp/dns_filter/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/udp/udp_proxy/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/health_checkers/redis/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/http/header_formatters/preserve_case/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/http/original_ip_detection/custom_header/v3" + _ 
"github.com/envoyproxy/go-control-plane/envoy/extensions/http/original_ip_detection/xff/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/internal_redirect/allow_listed_routes/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/internal_redirect/previous_routes/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/internal_redirect/safe_cross_scheme/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/matching/common_inputs/environment_variable/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/matching/input_matchers/consistent_hashing/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/network/socket_interface/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/rate_limit_descriptors/expr/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/request_id/uuid/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/resource_monitors/fixed_heap/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/resource_monitors/injected_resource/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/retry/host/omit_canary_hosts/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/retry/host/omit_host_metadata/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/retry/host/previous_hosts/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/retry/priority/previous_priorities/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/stat_sinks/wasm/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/datadog/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/dynamic_ot/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/lightstep/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/opencensus/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/skywalking/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/xray/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/zipkin/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/alts/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/proxy_protocol/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/quic/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/quic/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/raw_buffer/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/s2a/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/starttls/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/starttls/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tap/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tap/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/generic/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/http/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/tcp/v3" + _ 
"github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/tcp/generic/v3" _ "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/extensions/watchdog/profile_action/v3alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/auth/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/auth/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/auth/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3" _ "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/endpoint/v3" _ "github.com/envoyproxy/go-control-plane/envoy/service/event_reporting/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/event_reporting/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/event_reporting/v4alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/service/ext_proc/v3alpha" + _ "github.com/envoyproxy/go-control-plane/envoy/service/extension/v3" _ "github.com/envoyproxy/go-control-plane/envoy/service/health/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/health/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/listener/v3" _ "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" _ "github.com/envoyproxy/go-control-plane/envoy/service/route/v3" @@ -218,16 +335,22 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3" _ "github.com/envoyproxy/go-control-plane/envoy/service/status/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/status/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/tap/v2alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/tap/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/tap/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/service/trace/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/trace/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/service/trace/v4alpha" _ "github.com/envoyproxy/go-control-plane/envoy/type" + _ "github.com/envoyproxy/go-control-plane/envoy/type/http/v3" _ "github.com/envoyproxy/go-control-plane/envoy/type/matcher" _ "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v4alpha" _ 
"github.com/envoyproxy/go-control-plane/envoy/type/metadata/v2" _ "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" _ "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v2" _ "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" _ "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/watchdog/v3alpha" ) diff --git a/build-support/scripts/envoy-library-references.sh b/build-support/scripts/envoy-library-references.sh index b91285fa5..819f01e01 100644 --- a/build-support/scripts/envoy-library-references.sh +++ b/build-support/scripts/envoy-library-references.sh @@ -66,5 +66,5 @@ goimports -w "${OUTFILE}" mv -f "${OUTFILE}" ../../agent/xds ) -echo "updating vendored code..." -make update-vendor +echo "tidying dependencies..." +make go-mod-tidy diff --git a/build-support/scripts/proto-gen.sh b/build-support/scripts/proto-gen.sh index 022b2ff44..eff35b09a 100755 --- a/build-support/scripts/proto-gen.sh +++ b/build-support/scripts/proto-gen.sh @@ -147,13 +147,9 @@ function main { return 1 fi - BUILD_TAGS=$(sed -e '/^[[:space:]]*$/,$d' < "${proto_path}" | grep '// +build') + BUILD_TAGS=$(head -n 2 "${proto_path}" | grep '^//go:build\|// +build') if test -n "${BUILD_TAGS}" then - echo -e "${BUILD_TAGS}\n" >> "${proto_go_path}.new" - cat "${proto_go_path}" >> "${proto_go_path}.new" - mv "${proto_go_path}.new" "${proto_go_path}" - echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" diff --git a/connect/tls_test.go b/connect/tls_test.go index 46d20a466..9659cf5be 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" ) @@ -265,7 +266,7 @@ func TestServerSideVerifier(t *testing.T) { // allows expecting a leaf cert different from the one in expect func requireEqualTLSConfig(t *testing.T, expect, got *tls.Config) { require.Equal(t, expect.RootCAs, got.RootCAs) - assertDeepEqual(t, expect.ClientCAs, got.ClientCAs, cmpCertPool) + prototest.AssertDeepEqual(t, expect.ClientCAs, got.ClientCAs, cmpCertPool) require.Equal(t, expect.InsecureSkipVerify, got.InsecureSkipVerify) require.Equal(t, expect.MinVersion, got.MinVersion) require.Equal(t, expect.CipherSuites, got.CipherSuites) @@ -298,13 +299,6 @@ var cmpCertPool = cmp.Comparer(func(x, y *x509.CertPool) bool { return cmp.Equal(x.Subjects(), y.Subjects()) }) -func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { - t.Helper() - if diff := cmp.Diff(x, y, opts...); diff != "" { - t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) - } -} - // requireCorrectVerifier invokes got.VerifyPeerCertificate and expects the // tls.Config arg to be returned on the provided channel. This ensures the // correct verifier func was attached to got. 
diff --git a/go.mod b/go.mod index 9f43af2bb..456623d40 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ replace github.com/hashicorp/consul/sdk => ./sdk replace launchpad.net/gocheck => github.com/go-check/check v0.0.0-20140225173054-eb6ee6f84d0a require ( + cloud.google.com/go v0.59.0 // indirect github.com/Microsoft/go-winio v0.4.3 // indirect github.com/NYTimes/gziphandler v1.0.1 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e @@ -20,11 +21,11 @@ require ( github.com/digitalocean/godo v1.10.0 // indirect github.com/docker/go-connections v0.3.0 github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0 - github.com/envoyproxy/go-control-plane v0.9.5 + github.com/envoyproxy/go-control-plane v0.9.9 github.com/frankban/quicktest v1.11.0 // indirect github.com/fsnotify/fsnotify v1.5.1 github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.3.5 + github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.6 github.com/google/go-querystring v1.0.0 // indirect github.com/google/gofuzz v1.2.0 @@ -83,18 +84,16 @@ require ( github.com/shirou/gopsutil/v3 v3.21.10 github.com/stretchr/testify v1.7.0 go.etcd.io/bbolt v1.3.5 - go.opencensus.io v0.22.0 // indirect go.uber.org/goleak v1.1.10 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/net v0.0.0-20211216030914-fe4d6282115f - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20211013075003-97ac67df715c golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e - google.golang.org/api v0.9.0 // indirect - google.golang.org/appengine v1.6.0 // indirect - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 - google.golang.org/grpc v1.27.1 + google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb + google.golang.org/grpc v1.36.0 + google.golang.org/protobuf v1.25.0 gopkg.in/square/go-jose.v2 v2.5.1 gotest.tools/v3 v3.0.3 k8s.io/api v0.18.2 diff --git a/go.sum b/go.sum index 972a1634c..bf61a6bf0 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,35 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.59.0 h1:BM3svUDU3itpc2m5cu5wCyThIYNDlFlts9GASw31GW8= +cloud.google.com/go v0.59.0/go.mod h1:qJxNOVCRTxHfwLhvDxxSI9vQc1zI59b9pEglp1Iv60E= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= 
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible h1:e82Yv2HNpS0kuyeCrV29OPKvEiqfs2/uJHic3/3iKdg= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -34,6 +62,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -53,6 +82,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -91,8 +121,10 @@ 
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coredns/coredns v1.1.2 h1:bAFHrSsBeTeRG5W3Nf2su3lUGw7Npw2UKeCJm/3A638= github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= @@ -131,8 +163,10 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= -github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA= +github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -148,6 +182,9 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit 
v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= @@ -170,18 +207,35 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -191,6 +245,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -203,11 +259,19 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 h1:ub2sxhs2A0HRa2dWHavvmWxiVGXNfE9wI+gcTMwED8A= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 h1:AtvtonGEH/fZK0XPNNBdB6swgy7Iudfx88wzyIpwqJ8= github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -223,6 +287,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmo github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway 
v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 h1:Com/5n/omNSBusX11zdyIYtidiqewLIanchbm//McZA= github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4/go.mod h1:vWEAHAeAqfOwB3pSgHMQpIu8VH1jL+Ltg54Tw0wt/NI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -319,6 +384,7 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493 h1:brI5vBRUlAlM34VFmnLPwjnCL/FxAJp9XvOdX6Zt+XE= github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= @@ -342,6 +408,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -480,6 +547,8 @@ github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -522,6 +591,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -540,14 +610,19 @@ github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -559,6 +634,8 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -567,13 +644,34 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -592,12 +690,25 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= @@ -605,14 +716,17 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklz golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -629,6 +743,7 @@ golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -636,19 +751,32 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -661,6 +789,7 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -669,6 +798,7 @@ golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -679,14 +809,43 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -695,35 +854,89 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= -google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine 
v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb h1:PUcq6RTy8Gp9xukBme8m2+2Z8pQCmJ7TbPpQd6xNDvk= +google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -736,6 +949,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= @@ -748,6 +962,9 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= @@ -762,6 +979,9 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= diff --git a/proto/pbacl/acl.pb.go b/proto/pbacl/acl.pb.go index baaa994e9..b3c4e68a7 100644 --- a/proto/pbacl/acl.pb.go +++ b/proto/pbacl/acl.pb.go @@ -1,89 +1,158 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbacl/acl.proto package pbacl import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type ACLLink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` // @gotags: hash:ignore-" - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` } -func (m *ACLLink) Reset() { *m = ACLLink{} } -func (m *ACLLink) String() string { return proto.CompactTextString(m) } -func (*ACLLink) ProtoMessage() {} +func (x *ACLLink) Reset() { + *x = ACLLink{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbacl_acl_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ACLLink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ACLLink) ProtoMessage() {} + +func (x *ACLLink) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbacl_acl_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ACLLink.ProtoReflect.Descriptor instead. 
func (*ACLLink) Descriptor() ([]byte, []int) { - return fileDescriptor_ad2d2c73a6a0d8b5, []int{0} + return file_proto_pbacl_acl_proto_rawDescGZIP(), []int{0} } -func (m *ACLLink) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ACLLink.Unmarshal(m, b) -} -func (m *ACLLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ACLLink.Marshal(b, m, deterministic) -} -func (m *ACLLink) XXX_Merge(src proto.Message) { - xxx_messageInfo_ACLLink.Merge(m, src) -} -func (m *ACLLink) XXX_Size() int { - return xxx_messageInfo_ACLLink.Size(m) -} -func (m *ACLLink) XXX_DiscardUnknown() { - xxx_messageInfo_ACLLink.DiscardUnknown(m) -} - -var xxx_messageInfo_ACLLink proto.InternalMessageInfo - -func (m *ACLLink) GetID() string { - if m != nil { - return m.ID +func (x *ACLLink) GetID() string { + if x != nil { + return x.ID } return "" } -func (m *ACLLink) GetName() string { - if m != nil { - return m.Name +func (x *ACLLink) GetName() string { + if x != nil { + return x.Name } return "" } -func init() { - proto.RegisterType((*ACLLink)(nil), "acl.ACLLink") +var File_proto_pbacl_acl_proto protoreflect.FileDescriptor + +var file_proto_pbacl_acl_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x61, 0x63, 0x6c, 0x2f, 0x61, 0x63, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x61, 0x63, 0x6c, 0x22, 0x2d, 0x0a, 0x07, + 0x41, 0x43, 0x4c, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x61, 0x63, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbacl/acl.proto", fileDescriptor_ad2d2c73a6a0d8b5) +var ( + file_proto_pbacl_acl_proto_rawDescOnce sync.Once + file_proto_pbacl_acl_proto_rawDescData = file_proto_pbacl_acl_proto_rawDesc +) + +func file_proto_pbacl_acl_proto_rawDescGZIP() []byte { + file_proto_pbacl_acl_proto_rawDescOnce.Do(func() { + file_proto_pbacl_acl_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbacl_acl_proto_rawDescData) + }) + return file_proto_pbacl_acl_proto_rawDescData } -var fileDescriptor_ad2d2c73a6a0d8b5 = []byte{ - // 128 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x28, 0xca, 0x2f, - 0xc9, 0xd7, 0x2f, 0x48, 0x4a, 0x4c, 0xce, 0xd1, 0x4f, 0x4c, 0xce, 0xd1, 0x03, 0xf3, 0x85, 0x98, - 0x13, 0x93, 0x73, 0x94, 0x74, 0xb9, 0xd8, 0x1d, 0x9d, 0x7d, 0x7c, 0x32, 0xf3, 0xb2, 0x85, 0xf8, - 0xb8, 0x98, 0x3c, 0x5d, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x84, 0x84, - 0xb8, 0x58, 0xfc, 0x12, 0x73, 0x53, 0x25, 0x98, 0xc0, 0x22, 0x60, 0xb6, 0x93, 0x66, 0x94, 0x7a, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x46, 0x62, 0x71, 0x46, 0x66, - 0x72, 0x7e, 0x51, 0x81, 0x7e, 0x72, 0x7e, 0x5e, 0x71, 0x69, 0x8e, 0x3e, 0x92, 0x45, 0x49, 0x6c, - 0x60, 0x8e, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x25, 0x54, 0x7f, 0x7e, 0x00, 0x00, 0x00, +var file_proto_pbacl_acl_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_proto_pbacl_acl_proto_goTypes = []interface{}{ + (*ACLLink)(nil), // 0: acl.ACLLink +} +var 
file_proto_pbacl_acl_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_proto_pbacl_acl_proto_init() } +func file_proto_pbacl_acl_proto_init() { + if File_proto_pbacl_acl_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbacl_acl_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ACLLink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbacl_acl_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbacl_acl_proto_goTypes, + DependencyIndexes: file_proto_pbacl_acl_proto_depIdxs, + MessageInfos: file_proto_pbacl_acl_proto_msgTypes, + }.Build() + File_proto_pbacl_acl_proto = out.File + file_proto_pbacl_acl_proto_rawDesc = nil + file_proto_pbacl_acl_proto_goTypes = nil + file_proto_pbacl_acl_proto_depIdxs = nil } diff --git a/proto/pbautoconf/auto_config.pb.go b/proto/pbautoconf/auto_config.pb.go index 0f64d9688..4e4a60e1c 100644 --- a/proto/pbautoconf/auto_config.pb.go +++ b/proto/pbautoconf/auto_config.pb.go @@ -1,30 +1,39 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbautoconf/auto_config.proto package pbautoconf import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" pbconfig "github.com/hashicorp/consul/proto/pbconfig" pbconnect "github.com/hashicorp/consul/proto/pbconnect" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // AutoConfigRequest is the data structure to be sent along with the // AutoConfig.InitialConfiguration RPC type AutoConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Datacenter is the local datacenter name. This wont actually be set by clients // but rather will be set by the servers to allow for forwarding to // the leader. 
If it ever happens to be set and differs from the local datacenters @@ -44,88 +53,96 @@ type AutoConfigRequest struct { ConsulToken string `protobuf:"bytes,6,opt,name=ConsulToken,proto3" json:"ConsulToken,omitempty"` // CSR is a certificate signing request to be used when generating the // agents TLS certificate - CSR string `protobuf:"bytes,7,opt,name=CSR,proto3" json:"CSR,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CSR string `protobuf:"bytes,7,opt,name=CSR,proto3" json:"CSR,omitempty"` } -func (m *AutoConfigRequest) Reset() { *m = AutoConfigRequest{} } -func (m *AutoConfigRequest) String() string { return proto.CompactTextString(m) } -func (*AutoConfigRequest) ProtoMessage() {} +func (x *AutoConfigRequest) Reset() { + *x = AutoConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbautoconf_auto_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AutoConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AutoConfigRequest) ProtoMessage() {} + +func (x *AutoConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbautoconf_auto_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AutoConfigRequest.ProtoReflect.Descriptor instead. func (*AutoConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ccc5af992e5daf69, []int{0} + return file_proto_pbautoconf_auto_config_proto_rawDescGZIP(), []int{0} } -func (m *AutoConfigRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AutoConfigRequest.Unmarshal(m, b) -} -func (m *AutoConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AutoConfigRequest.Marshal(b, m, deterministic) -} -func (m *AutoConfigRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AutoConfigRequest.Merge(m, src) -} -func (m *AutoConfigRequest) XXX_Size() int { - return xxx_messageInfo_AutoConfigRequest.Size(m) -} -func (m *AutoConfigRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AutoConfigRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AutoConfigRequest proto.InternalMessageInfo - -func (m *AutoConfigRequest) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *AutoConfigRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } -func (m *AutoConfigRequest) GetNode() string { - if m != nil { - return m.Node +func (x *AutoConfigRequest) GetNode() string { + if x != nil { + return x.Node } return "" } -func (m *AutoConfigRequest) GetSegment() string { - if m != nil { - return m.Segment +func (x *AutoConfigRequest) GetSegment() string { + if x != nil { + return x.Segment } return "" } -func (m *AutoConfigRequest) GetPartition() string { - if m != nil { - return m.Partition +func (x *AutoConfigRequest) GetPartition() string { + if x != nil { + return x.Partition } return "" } -func (m *AutoConfigRequest) GetJWT() string { - if m != nil { - return m.JWT +func (x *AutoConfigRequest) GetJWT() string { + if x != nil { + return x.JWT } return "" } -func (m *AutoConfigRequest) GetConsulToken() string { - if m != nil { - return m.ConsulToken +func (x *AutoConfigRequest) GetConsulToken() string { + if x != nil { + return x.ConsulToken } return "" 
} -func (m *AutoConfigRequest) GetCSR() string { - if m != nil { - return m.CSR +func (x *AutoConfigRequest) GetCSR() string { + if x != nil { + return x.CSR } return "" } // AutoConfigResponse is the data structure sent in response to a AutoConfig.InitialConfiguration request type AutoConfigResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Config is the partial Consul configuration to inject into the agents own configuration Config *pbconfig.Config `protobuf:"bytes,1,opt,name=Config,proto3" json:"Config,omitempty"` // CARoots is the current list of Connect CA Roots @@ -134,96 +151,188 @@ type AutoConfigResponse struct { Certificate *pbconnect.IssuedCert `protobuf:"bytes,3,opt,name=Certificate,proto3" json:"Certificate,omitempty"` // ExtraCACertificates holds non-Connect certificates that may be necessary // to verify TLS connections with the Consul servers - ExtraCACertificates []string `protobuf:"bytes,4,rep,name=ExtraCACertificates,proto3" json:"ExtraCACertificates,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ExtraCACertificates []string `protobuf:"bytes,4,rep,name=ExtraCACertificates,proto3" json:"ExtraCACertificates,omitempty"` } -func (m *AutoConfigResponse) Reset() { *m = AutoConfigResponse{} } -func (m *AutoConfigResponse) String() string { return proto.CompactTextString(m) } -func (*AutoConfigResponse) ProtoMessage() {} +func (x *AutoConfigResponse) Reset() { + *x = AutoConfigResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbautoconf_auto_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AutoConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AutoConfigResponse) ProtoMessage() {} + +func (x *AutoConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbautoconf_auto_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AutoConfigResponse.ProtoReflect.Descriptor instead. 
func (*AutoConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ccc5af992e5daf69, []int{1} + return file_proto_pbautoconf_auto_config_proto_rawDescGZIP(), []int{1} } -func (m *AutoConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AutoConfigResponse.Unmarshal(m, b) -} -func (m *AutoConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AutoConfigResponse.Marshal(b, m, deterministic) -} -func (m *AutoConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AutoConfigResponse.Merge(m, src) -} -func (m *AutoConfigResponse) XXX_Size() int { - return xxx_messageInfo_AutoConfigResponse.Size(m) -} -func (m *AutoConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AutoConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AutoConfigResponse proto.InternalMessageInfo - -func (m *AutoConfigResponse) GetConfig() *pbconfig.Config { - if m != nil { - return m.Config +func (x *AutoConfigResponse) GetConfig() *pbconfig.Config { + if x != nil { + return x.Config } return nil } -func (m *AutoConfigResponse) GetCARoots() *pbconnect.CARoots { - if m != nil { - return m.CARoots +func (x *AutoConfigResponse) GetCARoots() *pbconnect.CARoots { + if x != nil { + return x.CARoots } return nil } -func (m *AutoConfigResponse) GetCertificate() *pbconnect.IssuedCert { - if m != nil { - return m.Certificate +func (x *AutoConfigResponse) GetCertificate() *pbconnect.IssuedCert { + if x != nil { + return x.Certificate } return nil } -func (m *AutoConfigResponse) GetExtraCACertificates() []string { - if m != nil { - return m.ExtraCACertificates +func (x *AutoConfigResponse) GetExtraCACertificates() []string { + if x != nil { + return x.ExtraCACertificates } return nil } -func init() { - proto.RegisterType((*AutoConfigRequest)(nil), "autoconf.AutoConfigRequest") - proto.RegisterType((*AutoConfigResponse)(nil), "autoconf.AutoConfigResponse") +var File_proto_pbautoconf_auto_config_proto protoreflect.FileDescriptor + +var file_proto_pbautoconf_auto_config_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6f, + 0x6e, 0x66, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6f, 0x6e, 0x66, 0x1a, 0x1b, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x2f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x11, 0x41, + 0x75, 0x74, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, + 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, + 
0x4a, 0x57, 0x54, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4a, 0x57, 0x54, 0x12, 0x20, + 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x43, 0x53, 0x52, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x43, + 0x53, 0x52, 0x22, 0xd1, 0x01, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2a, 0x0a, 0x07, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x2e, 0x43, 0x41, 0x52, + 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x07, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x35, 0x0a, + 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x2e, 0x49, 0x73, 0x73, + 0x75, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x52, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x43, 0x41, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x43, 0x41, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x61, 0x75, + 0x74, 0x6f, 0x63, 0x6f, 0x6e, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbautoconf/auto_config.proto", fileDescriptor_ccc5af992e5daf69) +var ( + file_proto_pbautoconf_auto_config_proto_rawDescOnce sync.Once + file_proto_pbautoconf_auto_config_proto_rawDescData = file_proto_pbautoconf_auto_config_proto_rawDesc +) + +func file_proto_pbautoconf_auto_config_proto_rawDescGZIP() []byte { + file_proto_pbautoconf_auto_config_proto_rawDescOnce.Do(func() { + file_proto_pbautoconf_auto_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbautoconf_auto_config_proto_rawDescData) + }) + return file_proto_pbautoconf_auto_config_proto_rawDescData } -var fileDescriptor_ccc5af992e5daf69 = []byte{ - // 340 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcd, 0x4a, 0xc3, 0x40, - 0x10, 0xc7, 0x89, 0xad, 0xfd, 0x98, 0x80, 0xd4, 0xed, 0x65, 0xa9, 0x1f, 0x94, 0x1e, 0xa4, 0x88, - 0x24, 0x52, 0xf1, 0x01, 0x6a, 0xf4, 0xa0, 0x07, 0x91, 0x6d, 0x41, 0xf0, 0x22, 0x69, 0xba, 0x6d, - 0x83, 0xed, 0x4e, 0xcc, 0xce, 0x82, 0x2f, 0xe8, 0x03, 0xf8, 0x46, 0x92, 0x4d, 0xa2, 0x8b, 0x78, - 0xca, 0x3f, 0xbf, 0xdf, 0x7f, 0x0e, 0x33, 0x09, 0x8c, 0xb2, 0x1c, 0x09, 0xc3, 0x6c, 0x11, 0x1b, - 0xc2, 0x04, 0xd5, 0x2a, 0x2c, 0xc2, 0x6b, 0x91, 0xd2, 0x75, 0x60, 0x25, 0xeb, 0xd4, 0x6e, 0x70, - 0x54, 0xb7, 0x4b, 0x1f, 0xba, 0xb5, 0xc1, 0x89, 0x23, 0x95, 0x4c, 0x28, 0xac, 0x9e, 0xa5, 0x1e, - 0x7d, 0x7a, 0x70, 0x38, 0x35, 0x84, 0x91, 0x9d, 0x11, 0xf2, 0xdd, 0x48, 0x4d, 0xec, 0x14, 0xe0, - 
0x36, 0xa6, 0x38, 0x91, 0x8a, 0x64, 0xce, 0xbd, 0xa1, 0x37, 0xee, 0x0a, 0x87, 0x30, 0x06, 0xcd, - 0x47, 0x5c, 0x4a, 0xbe, 0x67, 0x8d, 0xcd, 0x8c, 0x43, 0x7b, 0x26, 0xd7, 0x3b, 0xa9, 0x88, 0x37, - 0x2d, 0xae, 0x5f, 0xd9, 0x31, 0x74, 0x9f, 0xe2, 0x9c, 0x52, 0x4a, 0x51, 0xf1, 0x8e, 0x75, 0xbf, - 0x80, 0xf5, 0xa0, 0xf1, 0xf0, 0x3c, 0xe7, 0xfb, 0x96, 0x17, 0x91, 0x0d, 0xc1, 0x8f, 0x50, 0x69, - 0xb3, 0x9d, 0xe3, 0x9b, 0x54, 0xbc, 0x65, 0x8d, 0x8b, 0x8a, 0x99, 0x68, 0x26, 0x78, 0xbb, 0x9c, - 0x89, 0x66, 0x62, 0xf4, 0xe5, 0x01, 0x73, 0xf7, 0xd0, 0x19, 0x2a, 0x2d, 0xd9, 0x19, 0xb4, 0x4a, - 0x62, 0x97, 0xf0, 0x27, 0x07, 0x41, 0x75, 0x9c, 0xaa, 0x57, 0x59, 0x76, 0x0e, 0xed, 0x68, 0x2a, - 0x10, 0x49, 0xdb, 0x9d, 0xfc, 0x49, 0x2f, 0xa8, 0xef, 0x54, 0x71, 0x51, 0x17, 0xd8, 0x35, 0xf8, - 0x91, 0xcc, 0x29, 0x5d, 0xa5, 0x49, 0x4c, 0x92, 0x37, 0x6c, 0xbf, 0xff, 0xd3, 0xbf, 0xd7, 0xda, - 0xc8, 0x65, 0xd1, 0x10, 0x6e, 0x8f, 0x5d, 0x42, 0xff, 0xee, 0x83, 0xf2, 0x38, 0x9a, 0x3a, 0x54, - 0xf3, 0xe6, 0xb0, 0x31, 0xee, 0x8a, 0xff, 0xd4, 0x4d, 0xf0, 0x72, 0xb1, 0x4e, 0x69, 0x63, 0x16, - 0x41, 0x82, 0xbb, 0x70, 0x13, 0xeb, 0x4d, 0x9a, 0x60, 0x9e, 0x15, 0x5f, 0x50, 0x9b, 0x6d, 0xf8, - 0xf7, 0x1f, 0x59, 0xb4, 0x2c, 0xb9, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x29, 0xae, 0x66, 0x30, - 0x3e, 0x02, 0x00, 0x00, +var file_proto_pbautoconf_auto_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_pbautoconf_auto_config_proto_goTypes = []interface{}{ + (*AutoConfigRequest)(nil), // 0: autoconf.AutoConfigRequest + (*AutoConfigResponse)(nil), // 1: autoconf.AutoConfigResponse + (*pbconfig.Config)(nil), // 2: config.Config + (*pbconnect.CARoots)(nil), // 3: connect.CARoots + (*pbconnect.IssuedCert)(nil), // 4: connect.IssuedCert +} +var file_proto_pbautoconf_auto_config_proto_depIdxs = []int32{ + 2, // 0: autoconf.AutoConfigResponse.Config:type_name -> config.Config + 3, // 1: autoconf.AutoConfigResponse.CARoots:type_name -> connect.CARoots + 4, // 2: autoconf.AutoConfigResponse.Certificate:type_name -> connect.IssuedCert + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_proto_pbautoconf_auto_config_proto_init() } +func file_proto_pbautoconf_auto_config_proto_init() { + if File_proto_pbautoconf_auto_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbautoconf_auto_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AutoConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbautoconf_auto_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AutoConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbautoconf_auto_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbautoconf_auto_config_proto_goTypes, + DependencyIndexes: file_proto_pbautoconf_auto_config_proto_depIdxs, + MessageInfos: file_proto_pbautoconf_auto_config_proto_msgTypes, + 
}.Build() + File_proto_pbautoconf_auto_config_proto = out.File + file_proto_pbautoconf_auto_config_proto_rawDesc = nil + file_proto_pbautoconf_auto_config_proto_goTypes = nil + file_proto_pbautoconf_auto_config_proto_depIdxs = nil } diff --git a/proto/pbcommon/common.go b/proto/pbcommon/common.go index f8211604d..79b1592e5 100644 --- a/proto/pbcommon/common.go +++ b/proto/pbcommon/common.go @@ -74,7 +74,7 @@ func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) { q.StaleIfError = structs.DurationToProto(staleIfError) } -func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { +func (q *QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { maxTime := structs.DurationFromProto(q.MaxQueryTime) o := structs.QueryOptions{ MaxQueryTime: maxTime, @@ -91,12 +91,12 @@ func (q *QueryOptions) SetFilter(filter string) { // WriteRequest only applies to writes, always false // // IsRead implements structs.RPCInfo -func (w WriteRequest) IsRead() bool { +func (w *WriteRequest) IsRead() bool { return false } // SetTokenSecret implements structs.RPCInfo -func (w WriteRequest) TokenSecret() string { +func (w *WriteRequest) TokenSecret() string { return w.Token } @@ -108,12 +108,12 @@ func (w *WriteRequest) SetTokenSecret(s string) { // AllowStaleRead returns whether a stale read should be allowed // // AllowStaleRead implements structs.RPCInfo -func (w WriteRequest) AllowStaleRead() bool { +func (w *WriteRequest) AllowStaleRead() bool { return false } // HasTimedOut implements structs.RPCInfo -func (w WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { +func (w *WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { return time.Since(start) > rpcHoldTimeout, nil } @@ -144,7 +144,7 @@ func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, } // RequestDatacenter implements structs.RPCInfo -func (td TargetDatacenter) RequestDatacenter() string { +func (td *TargetDatacenter) RequestDatacenter() string { return td.Datacenter } diff --git a/proto/pbcommon/common.pb.go b/proto/pbcommon/common.pb.go index a04042cac..e67bf6139 100644 --- a/proto/pbcommon/common.pb.go +++ b/proto/pbcommon/common.pb.go @@ -1,25 +1,30 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbcommon/common.proto package pbcommon import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // RaftIndex is used to track the index used while creating // or modifying a given struct type. @@ -31,50 +36,58 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // name=Structs // ignore-fields=state,sizeCache,unknownFields type RaftIndex struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // @gotags: bexpr:"-" CreateIndex uint64 `protobuf:"varint,1,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"` // @gotags: bexpr:"-" - ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ModifyIndex uint64 `protobuf:"varint,2,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` } -func (m *RaftIndex) Reset() { *m = RaftIndex{} } -func (m *RaftIndex) String() string { return proto.CompactTextString(m) } -func (*RaftIndex) ProtoMessage() {} +func (x *RaftIndex) Reset() { + *x = RaftIndex{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RaftIndex) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RaftIndex) ProtoMessage() {} + +func (x *RaftIndex) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RaftIndex.ProtoReflect.Descriptor instead. 
func (*RaftIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{0} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{0} } -func (m *RaftIndex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RaftIndex.Unmarshal(m, b) -} -func (m *RaftIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RaftIndex.Marshal(b, m, deterministic) -} -func (m *RaftIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_RaftIndex.Merge(m, src) -} -func (m *RaftIndex) XXX_Size() int { - return xxx_messageInfo_RaftIndex.Size(m) -} -func (m *RaftIndex) XXX_DiscardUnknown() { - xxx_messageInfo_RaftIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_RaftIndex proto.InternalMessageInfo - -func (m *RaftIndex) GetCreateIndex() uint64 { - if m != nil { - return m.CreateIndex +func (x *RaftIndex) GetCreateIndex() uint64 { + if x != nil { + return x.CreateIndex } return 0 } -func (m *RaftIndex) GetModifyIndex() uint64 { - if m != nil { - return m.ModifyIndex +func (x *RaftIndex) GetModifyIndex() uint64 { + if x != nil { + return x.ModifyIndex } return 0 } @@ -82,40 +95,48 @@ func (m *RaftIndex) GetModifyIndex() uint64 { // TargetDatacenter is intended to be used within other messages used for RPC routing // amongst the various Consul datacenters type TargetDatacenter struct { - Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } -func (m *TargetDatacenter) Reset() { *m = TargetDatacenter{} } -func (m *TargetDatacenter) String() string { return proto.CompactTextString(m) } -func (*TargetDatacenter) ProtoMessage() {} +func (x *TargetDatacenter) Reset() { + *x = TargetDatacenter{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TargetDatacenter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TargetDatacenter) ProtoMessage() {} + +func (x *TargetDatacenter) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TargetDatacenter.ProtoReflect.Descriptor instead. 
func (*TargetDatacenter) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{1} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{1} } -func (m *TargetDatacenter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TargetDatacenter.Unmarshal(m, b) -} -func (m *TargetDatacenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TargetDatacenter.Marshal(b, m, deterministic) -} -func (m *TargetDatacenter) XXX_Merge(src proto.Message) { - xxx_messageInfo_TargetDatacenter.Merge(m, src) -} -func (m *TargetDatacenter) XXX_Size() int { - return xxx_messageInfo_TargetDatacenter.Size(m) -} -func (m *TargetDatacenter) XXX_DiscardUnknown() { - xxx_messageInfo_TargetDatacenter.DiscardUnknown(m) -} - -var xxx_messageInfo_TargetDatacenter proto.InternalMessageInfo - -func (m *TargetDatacenter) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *TargetDatacenter) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } @@ -127,42 +148,50 @@ func (m *TargetDatacenter) GetDatacenter() string { // name=Structs // ignore-fields=state,sizeCache,unknownFields type WriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` } -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (m *WriteRequest) String() string { return proto.CompactTextString(m) } -func (*WriteRequest) ProtoMessage() {} +func (x *WriteRequest) Reset() { + *x = WriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteRequest) ProtoMessage() {} + +func (x *WriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteRequest.ProtoReflect.Descriptor instead. 
func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{2} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{2} } -func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WriteRequest.Unmarshal(m, b) -} -func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) -} -func (m *WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(m, src) -} -func (m *WriteRequest) XXX_Size() int { - return xxx_messageInfo_WriteRequest.Size(m) -} -func (m *WriteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteRequest proto.InternalMessageInfo - -func (m *WriteRequest) GetToken() string { - if m != nil { - return m.Token +func (x *WriteRequest) GetToken() string { + if x != nil { + return x.Token } return "" } @@ -174,51 +203,59 @@ func (m *WriteRequest) GetToken() string { // It is also similar to WriteRequest. It is a separate type so that in the // future we can introduce fields that may only be relevant for reads. type ReadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` // RequireConsistent indicates that the request must be sent to the leader. - RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RequireConsistent bool `protobuf:"varint,2,opt,name=RequireConsistent,proto3" json:"RequireConsistent,omitempty"` } -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (m *ReadRequest) String() string { return proto.CompactTextString(m) } -func (*ReadRequest) ProtoMessage() {} +func (x *ReadRequest) Reset() { + *x = ReadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadRequest) ProtoMessage() {} + +func (x *ReadRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadRequest.ProtoReflect.Descriptor instead. 
func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{3} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{3} } -func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadRequest.Unmarshal(m, b) -} -func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) -} -func (m *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(m, src) -} -func (m *ReadRequest) XXX_Size() int { - return xxx_messageInfo_ReadRequest.Size(m) -} -func (m *ReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadRequest proto.InternalMessageInfo - -func (m *ReadRequest) GetToken() string { - if m != nil { - return m.Token +func (x *ReadRequest) GetToken() string { + if x != nil { + return x.Token } return "" } -func (m *ReadRequest) GetRequireConsistent() bool { - if m != nil { - return m.RequireConsistent +func (x *ReadRequest) GetRequireConsistent() bool { + if x != nil { + return x.RequireConsistent } return false } @@ -232,6 +269,10 @@ func (m *ReadRequest) GetRequireConsistent() bool { // name=Structs // ignore-fields=StaleIfError,AllowNotModifiedResponse,state,sizeCache,unknownFields type QueryOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Token is the ACL token ID. If not provided, the 'anonymous' // token is assumed for backwards compatibility. Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` @@ -240,7 +281,7 @@ type QueryOptions struct { MinQueryIndex uint64 `protobuf:"varint,2,opt,name=MinQueryIndex,proto3" json:"MinQueryIndex,omitempty"` // Provided with MinQueryIndex to wait for change. // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - MaxQueryTime *duration.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime,omitempty"` + MaxQueryTime *durationpb.Duration `protobuf:"bytes,3,opt,name=MaxQueryTime,proto3" json:"MaxQueryTime,omitempty"` // If set, any follower can service the request. Results // may be arbitrarily stale. AllowStale bool `protobuf:"varint,4,opt,name=AllowStale,proto3" json:"AllowStale,omitempty"` @@ -259,7 +300,7 @@ type QueryOptions struct { // read, and then will perform a consistent read if stale // read is older than value. // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - MaxStaleDuration *duration.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration,omitempty"` + MaxStaleDuration *durationpb.Duration `protobuf:"bytes,7,opt,name=MaxStaleDuration,proto3" json:"MaxStaleDuration,omitempty"` // MaxAge limits how old a cached value will be returned if UseCache is true. // If there is a cached response that is older than the MaxAge, it is treated // as a cache miss and a new fetch invoked. If the fetch fails, the error is @@ -268,7 +309,7 @@ type QueryOptions struct { // if the endpoint supports background refresh caching. See // https://www.consul.io/api/index.html#agent-caching for more details. 
// mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - MaxAge *duration.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge,omitempty"` + MaxAge *durationpb.Duration `protobuf:"bytes,8,opt,name=MaxAge,proto3" json:"MaxAge,omitempty"` // MustRevalidate forces the agent to fetch a fresh version of a cached // resource or at least validate that the cached version is still fresh. It is // implied by either max-age=0 or must-revalidate Cache-Control headers. It @@ -280,113 +321,117 @@ type QueryOptions struct { // UseCache is true and MaxAge is set to a lower, non-zero value. It is // ignored if the endpoint supports background refresh caching. See // https://www.consul.io/api/index.html#agent-caching for more details. - StaleIfError *duration.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError,omitempty"` + StaleIfError *durationpb.Duration `protobuf:"bytes,10,opt,name=StaleIfError,proto3" json:"StaleIfError,omitempty"` // Filter specifies the go-bexpr filter expression to be used for // filtering the data prior to returning a response - Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filter string `protobuf:"bytes,11,opt,name=Filter,proto3" json:"Filter,omitempty"` } -func (m *QueryOptions) Reset() { *m = QueryOptions{} } -func (m *QueryOptions) String() string { return proto.CompactTextString(m) } -func (*QueryOptions) ProtoMessage() {} +func (x *QueryOptions) Reset() { + *x = QueryOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryOptions) ProtoMessage() {} + +func (x *QueryOptions) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryOptions.ProtoReflect.Descriptor instead. 
func (*QueryOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{4} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{4} } -func (m *QueryOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryOptions.Unmarshal(m, b) -} -func (m *QueryOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryOptions.Marshal(b, m, deterministic) -} -func (m *QueryOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOptions.Merge(m, src) -} -func (m *QueryOptions) XXX_Size() int { - return xxx_messageInfo_QueryOptions.Size(m) -} -func (m *QueryOptions) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOptions proto.InternalMessageInfo - -func (m *QueryOptions) GetToken() string { - if m != nil { - return m.Token +func (x *QueryOptions) GetToken() string { + if x != nil { + return x.Token } return "" } -func (m *QueryOptions) GetMinQueryIndex() uint64 { - if m != nil { - return m.MinQueryIndex +func (x *QueryOptions) GetMinQueryIndex() uint64 { + if x != nil { + return x.MinQueryIndex } return 0 } -func (m *QueryOptions) GetMaxQueryTime() *duration.Duration { - if m != nil { - return m.MaxQueryTime +func (x *QueryOptions) GetMaxQueryTime() *durationpb.Duration { + if x != nil { + return x.MaxQueryTime } return nil } -func (m *QueryOptions) GetAllowStale() bool { - if m != nil { - return m.AllowStale +func (x *QueryOptions) GetAllowStale() bool { + if x != nil { + return x.AllowStale } return false } -func (m *QueryOptions) GetRequireConsistent() bool { - if m != nil { - return m.RequireConsistent +func (x *QueryOptions) GetRequireConsistent() bool { + if x != nil { + return x.RequireConsistent } return false } -func (m *QueryOptions) GetUseCache() bool { - if m != nil { - return m.UseCache +func (x *QueryOptions) GetUseCache() bool { + if x != nil { + return x.UseCache } return false } -func (m *QueryOptions) GetMaxStaleDuration() *duration.Duration { - if m != nil { - return m.MaxStaleDuration +func (x *QueryOptions) GetMaxStaleDuration() *durationpb.Duration { + if x != nil { + return x.MaxStaleDuration } return nil } -func (m *QueryOptions) GetMaxAge() *duration.Duration { - if m != nil { - return m.MaxAge +func (x *QueryOptions) GetMaxAge() *durationpb.Duration { + if x != nil { + return x.MaxAge } return nil } -func (m *QueryOptions) GetMustRevalidate() bool { - if m != nil { - return m.MustRevalidate +func (x *QueryOptions) GetMustRevalidate() bool { + if x != nil { + return x.MustRevalidate } return false } -func (m *QueryOptions) GetStaleIfError() *duration.Duration { - if m != nil { - return m.StaleIfError +func (x *QueryOptions) GetStaleIfError() *durationpb.Duration { + if x != nil { + return x.StaleIfError } return nil } -func (m *QueryOptions) GetFilter() string { - if m != nil { - return m.Filter +func (x *QueryOptions) GetFilter() string { + if x != nil { + return x.Filter } return "" } @@ -401,13 +446,17 @@ func (m *QueryOptions) GetFilter() string { // name=Structs // ignore-fields=NotModified,Backend,state,sizeCache,unknownFields type QueryMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This is the index associated with the read Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` // If AllowStale is used, this is time elapsed since // last contact between the follower and leader. This // can be used to gauge staleness. 
// mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - LastContact *duration.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact,omitempty"` + LastContact *durationpb.Duration `protobuf:"bytes,2,opt,name=LastContact,proto3" json:"LastContact,omitempty"` // Used to indicate if there is a known leader node KnownLeader bool `protobuf:"varint,3,opt,name=KnownLeader,proto3" json:"KnownLeader,omitempty"` // Consistencylevel returns the consistency used to serve the query @@ -417,68 +466,72 @@ type QueryMeta struct { // ResultsFilteredByACLs is true when some of the query's results were // filtered out by enforcing ACLs. It may be false because nothing was // removed, or because the endpoint does not yet support this flag. - ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` } -func (m *QueryMeta) Reset() { *m = QueryMeta{} } -func (m *QueryMeta) String() string { return proto.CompactTextString(m) } -func (*QueryMeta) ProtoMessage() {} +func (x *QueryMeta) Reset() { + *x = QueryMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryMeta) ProtoMessage() {} + +func (x *QueryMeta) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryMeta.ProtoReflect.Descriptor instead. 
func (*QueryMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{5} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{5} } -func (m *QueryMeta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryMeta.Unmarshal(m, b) -} -func (m *QueryMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryMeta.Marshal(b, m, deterministic) -} -func (m *QueryMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryMeta.Merge(m, src) -} -func (m *QueryMeta) XXX_Size() int { - return xxx_messageInfo_QueryMeta.Size(m) -} -func (m *QueryMeta) XXX_DiscardUnknown() { - xxx_messageInfo_QueryMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryMeta proto.InternalMessageInfo - -func (m *QueryMeta) GetIndex() uint64 { - if m != nil { - return m.Index +func (x *QueryMeta) GetIndex() uint64 { + if x != nil { + return x.Index } return 0 } -func (m *QueryMeta) GetLastContact() *duration.Duration { - if m != nil { - return m.LastContact +func (x *QueryMeta) GetLastContact() *durationpb.Duration { + if x != nil { + return x.LastContact } return nil } -func (m *QueryMeta) GetKnownLeader() bool { - if m != nil { - return m.KnownLeader +func (x *QueryMeta) GetKnownLeader() bool { + if x != nil { + return x.KnownLeader } return false } -func (m *QueryMeta) GetConsistencyLevel() string { - if m != nil { - return m.ConsistencyLevel +func (x *QueryMeta) GetConsistencyLevel() string { + if x != nil { + return x.ConsistencyLevel } return "" } -func (m *QueryMeta) GetResultsFilteredByACLs() bool { - if m != nil { - return m.ResultsFilteredByACLs +func (x *QueryMeta) GetResultsFilteredByACLs() bool { + if x != nil { + return x.ResultsFilteredByACLs } return false } @@ -486,103 +539,285 @@ func (m *QueryMeta) GetResultsFilteredByACLs() bool { // EnterpriseMeta contains metadata that is only used by the Enterprise version // of Consul. type EnterpriseMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Namespace in which the entity exists. Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` // Partition in which the entity exists. - Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` } -func (m *EnterpriseMeta) Reset() { *m = EnterpriseMeta{} } -func (m *EnterpriseMeta) String() string { return proto.CompactTextString(m) } -func (*EnterpriseMeta) ProtoMessage() {} +func (x *EnterpriseMeta) Reset() { + *x = EnterpriseMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbcommon_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnterpriseMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnterpriseMeta) ProtoMessage() {} + +func (x *EnterpriseMeta) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbcommon_common_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnterpriseMeta.ProtoReflect.Descriptor instead. 
func (*EnterpriseMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a6f5ac44994d718c, []int{6} + return file_proto_pbcommon_common_proto_rawDescGZIP(), []int{6} } -func (m *EnterpriseMeta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnterpriseMeta.Unmarshal(m, b) -} -func (m *EnterpriseMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnterpriseMeta.Marshal(b, m, deterministic) -} -func (m *EnterpriseMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnterpriseMeta.Merge(m, src) -} -func (m *EnterpriseMeta) XXX_Size() int { - return xxx_messageInfo_EnterpriseMeta.Size(m) -} -func (m *EnterpriseMeta) XXX_DiscardUnknown() { - xxx_messageInfo_EnterpriseMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_EnterpriseMeta proto.InternalMessageInfo - -func (m *EnterpriseMeta) GetNamespace() string { - if m != nil { - return m.Namespace +func (x *EnterpriseMeta) GetNamespace() string { + if x != nil { + return x.Namespace } return "" } -func (m *EnterpriseMeta) GetPartition() string { - if m != nil { - return m.Partition +func (x *EnterpriseMeta) GetPartition() string { + if x != nil { + return x.Partition } return "" } -func init() { - proto.RegisterType((*RaftIndex)(nil), "common.RaftIndex") - proto.RegisterType((*TargetDatacenter)(nil), "common.TargetDatacenter") - proto.RegisterType((*WriteRequest)(nil), "common.WriteRequest") - proto.RegisterType((*ReadRequest)(nil), "common.ReadRequest") - proto.RegisterType((*QueryOptions)(nil), "common.QueryOptions") - proto.RegisterType((*QueryMeta)(nil), "common.QueryMeta") - proto.RegisterType((*EnterpriseMeta)(nil), "common.EnterpriseMeta") +var File_proto_pbcommon_common_proto protoreflect.FileDescriptor + +var file_proto_pbcommon_common_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x32, 0x0a, 0x10, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, + 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x24, 0x0a, 0x0c, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x51, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 
0x65, + 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x74, 0x22, 0xec, 0x03, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x4d, 0x69, + 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0d, 0x4d, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x3d, 0x0a, 0x0c, 0x4d, 0x61, 0x78, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x4d, 0x61, 0x78, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x1e, 0x0a, 0x0a, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x12, + 0x2c, 0x0a, 0x11, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x55, 0x73, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x55, 0x73, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x45, 0x0a, 0x10, 0x4d, 0x61, 0x78, + 0x53, 0x74, 0x61, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x4d, 0x61, 0x78, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x31, 0x0a, 0x06, 0x4d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x4d, 0x61, 0x78, + 0x41, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x4d, 0x75, 0x73, 0x74, 0x52, 0x65, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x4d, 0x75, 0x73, + 0x74, 0x52, 0x65, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x53, + 0x74, 0x61, 0x6c, 0x65, 0x49, 0x66, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x53, 0x74, + 0x61, 0x6c, 0x65, 0x49, 0x66, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x22, 0xee, 0x01, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x3b, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 
0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x34, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x65, 0x64, 0x42, 0x79, 0x41, 0x43, 0x4c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x15, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, + 0x64, 0x42, 0x79, 0x41, 0x43, 0x4c, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, + 0x06, 0x10, 0x07, 0x22, 0x4c, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbcommon/common.proto", fileDescriptor_a6f5ac44994d718c) +var ( + file_proto_pbcommon_common_proto_rawDescOnce sync.Once + file_proto_pbcommon_common_proto_rawDescData = file_proto_pbcommon_common_proto_rawDesc +) + +func file_proto_pbcommon_common_proto_rawDescGZIP() []byte { + file_proto_pbcommon_common_proto_rawDescOnce.Do(func() { + file_proto_pbcommon_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbcommon_common_proto_rawDescData) + }) + return file_proto_pbcommon_common_proto_rawDescData } -var fileDescriptor_a6f5ac44994d718c = []byte{ - // 558 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x51, 0x6f, 0xd3, 0x30, - 0x10, 0x56, 0xb7, 0x2e, 0x4b, 0xae, 0x65, 0x2a, 0x16, 0xa0, 0x30, 0xd0, 0x54, 0x45, 0x13, 0x9a, - 0xa6, 0xa9, 0x11, 0x83, 0x37, 0xc4, 0x43, 0xd7, 0x15, 0x69, 0xa3, 0x61, 0xcc, 0x14, 0x21, 0xf1, - 0xe6, 0x26, 0xd7, 0xd6, 0x22, 0x8d, 0x83, 0xed, 0x6c, 0xed, 0x7f, 0x46, 0xfc, 0x06, 0x14, 0xa7, - 0xed, 0x52, 0xba, 0xad, 0x4f, 0xd1, 0xf7, 0xdd, 0xe7, 0xf3, 0xdd, 0x7d, 0xe7, 0xc0, 0xab, 0x54, - 0x0a, 0x2d, 0xfc, 0x74, 0x10, 0x8a, 0xc9, 0x44, 0x24, 0x7e, 0xf1, 0x69, 0x19, 0x96, 0x58, 0x05, - 0xda, 0x3f, 0x18, 0x09, 0x31, 0x8a, 0xd1, 0x37, 0xec, 0x20, 0x1b, 0xfa, 0x51, 0x26, 0x99, 0xe6, - 0x0b, 0x9d, 0x77, 0x05, 0x0e, 0x65, 0x43, 0x7d, 0x91, 0x44, 0x38, 0x25, 0x4d, 0xa8, 0x75, 0x24, - 0x32, 0x8d, 0x06, 0xba, 0x95, 0x66, 0xe5, 0xa8, 0x4a, 0xcb, 0x54, 0xae, 0x08, 0x44, 0xc4, 0x87, - 0xb3, 0x42, 0xb1, 0x55, 
0x28, 0x4a, 0x94, 0x77, 0x0a, 0x8d, 0x3e, 0x93, 0x23, 0xd4, 0xe7, 0x4c, - 0xb3, 0x10, 0x13, 0x8d, 0x92, 0x1c, 0x00, 0xdc, 0x21, 0x93, 0xd6, 0xa1, 0x25, 0xc6, 0x3b, 0x84, - 0xfa, 0x0f, 0xc9, 0x35, 0x52, 0xfc, 0x9d, 0xa1, 0xd2, 0xe4, 0x19, 0xec, 0xf4, 0xc5, 0x2f, 0x4c, - 0xe6, 0xd2, 0x02, 0x78, 0xd7, 0x50, 0xa3, 0xc8, 0xa2, 0x47, 0x45, 0xe4, 0x04, 0x9e, 0xe6, 0x02, - 0x2e, 0xb1, 0x23, 0x12, 0xc5, 0x95, 0xc6, 0x44, 0x9b, 0x32, 0x6d, 0xba, 0x1e, 0xf0, 0xfe, 0x6c, - 0x43, 0xfd, 0x3a, 0x43, 0x39, 0xbb, 0x4a, 0xf3, 0x99, 0xa8, 0x07, 0x92, 0x1e, 0xc2, 0x93, 0x80, - 0x27, 0x46, 0x58, 0xee, 0x7b, 0x95, 0x24, 0x1f, 0xa1, 0x1e, 0xb0, 0xa9, 0x21, 0xfa, 0x7c, 0x82, - 0xee, 0x76, 0xb3, 0x72, 0x54, 0x3b, 0x7d, 0xd9, 0x2a, 0x1c, 0x68, 0x2d, 0x1c, 0x68, 0x9d, 0xcf, - 0x1d, 0xa0, 0x2b, 0xf2, 0x7c, 0x48, 0xed, 0x38, 0x16, 0xb7, 0xdf, 0x34, 0x8b, 0xd1, 0xad, 0x9a, - 0x92, 0x4b, 0xcc, 0xfd, 0x9d, 0xed, 0x3c, 0xd0, 0x19, 0xd9, 0x07, 0xfb, 0xbb, 0xc2, 0x0e, 0x0b, - 0xc7, 0xe8, 0x5a, 0x46, 0xb4, 0xc4, 0xa4, 0x0b, 0x8d, 0x80, 0x4d, 0x4d, 0xd6, 0x45, 0x2d, 0xee, - 0xee, 0xa6, 0x62, 0xd7, 0x8e, 0x90, 0xb7, 0x60, 0x05, 0x6c, 0xda, 0x1e, 0xa1, 0x6b, 0x6f, 0x3a, - 0x3c, 0x17, 0x92, 0x37, 0xb0, 0x17, 0x64, 0x4a, 0x53, 0xbc, 0x61, 0x31, 0x8f, 0x98, 0x46, 0xd7, - 0x31, 0xb5, 0xfd, 0xc7, 0xe6, 0xa3, 0x34, 0x77, 0x5d, 0x0c, 0xbb, 0x52, 0x0a, 0xe9, 0xc2, 0xc6, - 0x51, 0x96, 0xe5, 0xe4, 0x05, 0x58, 0x9f, 0x78, 0x9c, 0xef, 0x5a, 0xcd, 0xd8, 0x38, 0x47, 0xde, - 0xdf, 0x0a, 0x38, 0x66, 0xe0, 0x01, 0x6a, 0x96, 0x7b, 0x5d, 0xde, 0xf3, 0x02, 0x90, 0x0f, 0x50, - 0xeb, 0x31, 0xa5, 0x3b, 0x22, 0xd1, 0x2c, 0x2c, 0x56, 0xe7, 0xd1, 0x9b, 0xcb, 0xea, 0xfc, 0x79, - 0x7c, 0x4e, 0xc4, 0x6d, 0xd2, 0x43, 0x16, 0xa1, 0x34, 0x1b, 0x60, 0xd3, 0x32, 0x45, 0x8e, 0xa1, - 0xb1, 0x74, 0x29, 0x9c, 0xf5, 0xf0, 0x06, 0x63, 0xe3, 0xb5, 0x43, 0xd7, 0x78, 0xf2, 0x1e, 0x9e, - 0x53, 0x54, 0x59, 0xac, 0x55, 0x51, 0x3f, 0x46, 0x67, 0xb3, 0x76, 0xa7, 0xa7, 0x8c, 0x59, 0x36, - 0xbd, 0x3f, 0x78, 0x59, 0xb5, 0x77, 0x1a, 0xd6, 0x65, 0xd5, 0xb6, 0x1a, 0xbb, 0x5e, 0x0f, 0xf6, - 0xba, 0xf9, 0x0b, 0x4b, 0x25, 0x57, 0x68, 0x9a, 0x7e, 0x0d, 0xce, 0x17, 0x36, 0x41, 0x95, 0xb2, - 0x10, 0xe7, 0x4b, 0x7e, 0x47, 0xe4, 0xd1, 0xaf, 0x4c, 0x6a, 0x6e, 0x56, 0x62, 0xab, 0x88, 0x2e, - 0x89, 0xb3, 0x93, 0x9f, 0xc7, 0x23, 0xae, 0xc7, 0xd9, 0xa0, 0x15, 0x8a, 0x89, 0x3f, 0x66, 0x6a, - 0xcc, 0x43, 0x21, 0x53, 0x3f, 0x14, 0x89, 0xca, 0x62, 0x7f, 0xf5, 0x77, 0x34, 0xb0, 0x0c, 0x7e, - 0xf7, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x35, 0xe5, 0x62, 0x05, 0xa7, 0x04, 0x00, 0x00, +var file_proto_pbcommon_common_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_proto_pbcommon_common_proto_goTypes = []interface{}{ + (*RaftIndex)(nil), // 0: common.RaftIndex + (*TargetDatacenter)(nil), // 1: common.TargetDatacenter + (*WriteRequest)(nil), // 2: common.WriteRequest + (*ReadRequest)(nil), // 3: common.ReadRequest + (*QueryOptions)(nil), // 4: common.QueryOptions + (*QueryMeta)(nil), // 5: common.QueryMeta + (*EnterpriseMeta)(nil), // 6: common.EnterpriseMeta + (*durationpb.Duration)(nil), // 7: google.protobuf.Duration +} +var file_proto_pbcommon_common_proto_depIdxs = []int32{ + 7, // 0: common.QueryOptions.MaxQueryTime:type_name -> google.protobuf.Duration + 7, // 1: common.QueryOptions.MaxStaleDuration:type_name -> google.protobuf.Duration + 7, // 2: common.QueryOptions.MaxAge:type_name -> google.protobuf.Duration + 7, // 3: common.QueryOptions.StaleIfError:type_name -> google.protobuf.Duration + 7, // 4: common.QueryMeta.LastContact:type_name -> google.protobuf.Duration + 5, // [5:5] is the 
sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_proto_pbcommon_common_proto_init() } +func file_proto_pbcommon_common_proto_init() { + if File_proto_pbcommon_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbcommon_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftIndex); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbcommon_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TargetDatacenter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbcommon_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbcommon_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbcommon_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbcommon_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbcommon_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnterpriseMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbcommon_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbcommon_common_proto_goTypes, + DependencyIndexes: file_proto_pbcommon_common_proto_depIdxs, + MessageInfos: file_proto_pbcommon_common_proto_msgTypes, + }.Build() + File_proto_pbcommon_common_proto = out.File + file_proto_pbcommon_common_proto_rawDesc = nil + file_proto_pbcommon_common_proto_goTypes = nil + file_proto_pbcommon_common_proto_depIdxs = nil } diff --git a/proto/pbconfig/config.pb.go b/proto/pbconfig/config.pb.go index a3dfa4c81..e7443fc37 100644 --- a/proto/pbconfig/config.pb.go +++ b/proto/pbconfig/config.pb.go @@ -1,305 +1,348 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbconfig/config.proto package pbconfig import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type Config struct { - Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - PrimaryDatacenter string `protobuf:"bytes,2,opt,name=PrimaryDatacenter,proto3" json:"PrimaryDatacenter,omitempty"` - NodeName string `protobuf:"bytes,3,opt,name=NodeName,proto3" json:"NodeName,omitempty"` - SegmentName string `protobuf:"bytes,4,opt,name=SegmentName,proto3" json:"SegmentName,omitempty"` - Partition string `protobuf:"bytes,9,opt,name=Partition,proto3" json:"Partition,omitempty"` - ACL *ACL `protobuf:"bytes,5,opt,name=ACL,proto3" json:"ACL,omitempty"` - AutoEncrypt *AutoEncrypt `protobuf:"bytes,6,opt,name=AutoEncrypt,proto3" json:"AutoEncrypt,omitempty"` - Gossip *Gossip `protobuf:"bytes,7,opt,name=Gossip,proto3" json:"Gossip,omitempty"` - TLS *TLS `protobuf:"bytes,8,opt,name=TLS,proto3" json:"TLS,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Datacenter string `protobuf:"bytes,1,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + PrimaryDatacenter string `protobuf:"bytes,2,opt,name=PrimaryDatacenter,proto3" json:"PrimaryDatacenter,omitempty"` + NodeName string `protobuf:"bytes,3,opt,name=NodeName,proto3" json:"NodeName,omitempty"` + SegmentName string `protobuf:"bytes,4,opt,name=SegmentName,proto3" json:"SegmentName,omitempty"` + Partition string `protobuf:"bytes,9,opt,name=Partition,proto3" json:"Partition,omitempty"` + ACL *ACL `protobuf:"bytes,5,opt,name=ACL,proto3" json:"ACL,omitempty"` + AutoEncrypt *AutoEncrypt `protobuf:"bytes,6,opt,name=AutoEncrypt,proto3" json:"AutoEncrypt,omitempty"` + Gossip *Gossip `protobuf:"bytes,7,opt,name=Gossip,proto3" json:"Gossip,omitempty"` + TLS *TLS `protobuf:"bytes,8,opt,name=TLS,proto3" json:"TLS,omitempty"` } -func (m *Config) Reset() { *m = Config{} } -func (m *Config) String() string { return proto.CompactTextString(m) } -func (*Config) ProtoMessage() {} +func (x *Config) Reset() { + *x = Config{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Config) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*Config) ProtoMessage() {} + +func (x *Config) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Config.ProtoReflect.Descriptor instead. func (*Config) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{0} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{0} } -func (m *Config) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Config.Unmarshal(m, b) -} -func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Config.Marshal(b, m, deterministic) -} -func (m *Config) XXX_Merge(src proto.Message) { - xxx_messageInfo_Config.Merge(m, src) -} -func (m *Config) XXX_Size() int { - return xxx_messageInfo_Config.Size(m) -} -func (m *Config) XXX_DiscardUnknown() { - xxx_messageInfo_Config.DiscardUnknown(m) -} - -var xxx_messageInfo_Config proto.InternalMessageInfo - -func (m *Config) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *Config) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } -func (m *Config) GetPrimaryDatacenter() string { - if m != nil { - return m.PrimaryDatacenter +func (x *Config) GetPrimaryDatacenter() string { + if x != nil { + return x.PrimaryDatacenter } return "" } -func (m *Config) GetNodeName() string { - if m != nil { - return m.NodeName +func (x *Config) GetNodeName() string { + if x != nil { + return x.NodeName } return "" } -func (m *Config) GetSegmentName() string { - if m != nil { - return m.SegmentName +func (x *Config) GetSegmentName() string { + if x != nil { + return x.SegmentName } return "" } -func (m *Config) GetPartition() string { - if m != nil { - return m.Partition +func (x *Config) GetPartition() string { + if x != nil { + return x.Partition } return "" } -func (m *Config) GetACL() *ACL { - if m != nil { - return m.ACL +func (x *Config) GetACL() *ACL { + if x != nil { + return x.ACL } return nil } -func (m *Config) GetAutoEncrypt() *AutoEncrypt { - if m != nil { - return m.AutoEncrypt +func (x *Config) GetAutoEncrypt() *AutoEncrypt { + if x != nil { + return x.AutoEncrypt } return nil } -func (m *Config) GetGossip() *Gossip { - if m != nil { - return m.Gossip +func (x *Config) GetGossip() *Gossip { + if x != nil { + return x.Gossip } return nil } -func (m *Config) GetTLS() *TLS { - if m != nil { - return m.TLS +func (x *Config) GetTLS() *TLS { + if x != nil { + return x.TLS } return nil } type Gossip struct { - Encryption *GossipEncryption `protobuf:"bytes,1,opt,name=Encryption,proto3" json:"Encryption,omitempty"` - RetryJoinLAN []string `protobuf:"bytes,2,rep,name=RetryJoinLAN,proto3" json:"RetryJoinLAN,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Encryption *GossipEncryption `protobuf:"bytes,1,opt,name=Encryption,proto3" json:"Encryption,omitempty"` + RetryJoinLAN []string `protobuf:"bytes,2,rep,name=RetryJoinLAN,proto3" json:"RetryJoinLAN,omitempty"` } -func (m *Gossip) Reset() { *m = Gossip{} } -func (m *Gossip) String() string { return proto.CompactTextString(m) } -func (*Gossip) ProtoMessage() {} +func (x *Gossip) 
Reset() { + *x = Gossip{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Gossip) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Gossip) ProtoMessage() {} + +func (x *Gossip) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Gossip.ProtoReflect.Descriptor instead. func (*Gossip) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{1} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{1} } -func (m *Gossip) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gossip.Unmarshal(m, b) -} -func (m *Gossip) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gossip.Marshal(b, m, deterministic) -} -func (m *Gossip) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gossip.Merge(m, src) -} -func (m *Gossip) XXX_Size() int { - return xxx_messageInfo_Gossip.Size(m) -} -func (m *Gossip) XXX_DiscardUnknown() { - xxx_messageInfo_Gossip.DiscardUnknown(m) -} - -var xxx_messageInfo_Gossip proto.InternalMessageInfo - -func (m *Gossip) GetEncryption() *GossipEncryption { - if m != nil { - return m.Encryption +func (x *Gossip) GetEncryption() *GossipEncryption { + if x != nil { + return x.Encryption } return nil } -func (m *Gossip) GetRetryJoinLAN() []string { - if m != nil { - return m.RetryJoinLAN +func (x *Gossip) GetRetryJoinLAN() []string { + if x != nil { + return x.RetryJoinLAN } return nil } type GossipEncryption struct { - Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` - VerifyIncoming bool `protobuf:"varint,2,opt,name=VerifyIncoming,proto3" json:"VerifyIncoming,omitempty"` - VerifyOutgoing bool `protobuf:"varint,3,opt,name=VerifyOutgoing,proto3" json:"VerifyOutgoing,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` + VerifyIncoming bool `protobuf:"varint,2,opt,name=VerifyIncoming,proto3" json:"VerifyIncoming,omitempty"` + VerifyOutgoing bool `protobuf:"varint,3,opt,name=VerifyOutgoing,proto3" json:"VerifyOutgoing,omitempty"` } -func (m *GossipEncryption) Reset() { *m = GossipEncryption{} } -func (m *GossipEncryption) String() string { return proto.CompactTextString(m) } -func (*GossipEncryption) ProtoMessage() {} +func (x *GossipEncryption) Reset() { + *x = GossipEncryption{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GossipEncryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GossipEncryption) ProtoMessage() {} + +func (x *GossipEncryption) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use GossipEncryption.ProtoReflect.Descriptor instead. func (*GossipEncryption) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{2} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{2} } -func (m *GossipEncryption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GossipEncryption.Unmarshal(m, b) -} -func (m *GossipEncryption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GossipEncryption.Marshal(b, m, deterministic) -} -func (m *GossipEncryption) XXX_Merge(src proto.Message) { - xxx_messageInfo_GossipEncryption.Merge(m, src) -} -func (m *GossipEncryption) XXX_Size() int { - return xxx_messageInfo_GossipEncryption.Size(m) -} -func (m *GossipEncryption) XXX_DiscardUnknown() { - xxx_messageInfo_GossipEncryption.DiscardUnknown(m) -} - -var xxx_messageInfo_GossipEncryption proto.InternalMessageInfo - -func (m *GossipEncryption) GetKey() string { - if m != nil { - return m.Key +func (x *GossipEncryption) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *GossipEncryption) GetVerifyIncoming() bool { - if m != nil { - return m.VerifyIncoming +func (x *GossipEncryption) GetVerifyIncoming() bool { + if x != nil { + return x.VerifyIncoming } return false } -func (m *GossipEncryption) GetVerifyOutgoing() bool { - if m != nil { - return m.VerifyOutgoing +func (x *GossipEncryption) GetVerifyOutgoing() bool { + if x != nil { + return x.VerifyOutgoing } return false } type TLS struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + VerifyOutgoing bool `protobuf:"varint,1,opt,name=VerifyOutgoing,proto3" json:"VerifyOutgoing,omitempty"` VerifyServerHostname bool `protobuf:"varint,2,opt,name=VerifyServerHostname,proto3" json:"VerifyServerHostname,omitempty"` CipherSuites string `protobuf:"bytes,3,opt,name=CipherSuites,proto3" json:"CipherSuites,omitempty"` MinVersion string `protobuf:"bytes,4,opt,name=MinVersion,proto3" json:"MinVersion,omitempty"` // Deprecated_PreferServerCipherSuites is deprecated. It is no longer // populated and should be ignored by clients. - Deprecated_PreferServerCipherSuites bool `protobuf:"varint,5,opt,name=Deprecated_PreferServerCipherSuites,json=DeprecatedPreferServerCipherSuites,proto3" json:"Deprecated_PreferServerCipherSuites,omitempty"` // Deprecated: Do not use. - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // + // Deprecated: Do not use. 
+ Deprecated_PreferServerCipherSuites bool `protobuf:"varint,5,opt,name=Deprecated_PreferServerCipherSuites,json=DeprecatedPreferServerCipherSuites,proto3" json:"Deprecated_PreferServerCipherSuites,omitempty"` } -func (m *TLS) Reset() { *m = TLS{} } -func (m *TLS) String() string { return proto.CompactTextString(m) } -func (*TLS) ProtoMessage() {} +func (x *TLS) Reset() { + *x = TLS{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLS) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLS) ProtoMessage() {} + +func (x *TLS) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLS.ProtoReflect.Descriptor instead. func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{3} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{3} } -func (m *TLS) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TLS.Unmarshal(m, b) -} -func (m *TLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TLS.Marshal(b, m, deterministic) -} -func (m *TLS) XXX_Merge(src proto.Message) { - xxx_messageInfo_TLS.Merge(m, src) -} -func (m *TLS) XXX_Size() int { - return xxx_messageInfo_TLS.Size(m) -} -func (m *TLS) XXX_DiscardUnknown() { - xxx_messageInfo_TLS.DiscardUnknown(m) -} - -var xxx_messageInfo_TLS proto.InternalMessageInfo - -func (m *TLS) GetVerifyOutgoing() bool { - if m != nil { - return m.VerifyOutgoing +func (x *TLS) GetVerifyOutgoing() bool { + if x != nil { + return x.VerifyOutgoing } return false } -func (m *TLS) GetVerifyServerHostname() bool { - if m != nil { - return m.VerifyServerHostname +func (x *TLS) GetVerifyServerHostname() bool { + if x != nil { + return x.VerifyServerHostname } return false } -func (m *TLS) GetCipherSuites() string { - if m != nil { - return m.CipherSuites +func (x *TLS) GetCipherSuites() string { + if x != nil { + return x.CipherSuites } return "" } -func (m *TLS) GetMinVersion() string { - if m != nil { - return m.MinVersion +func (x *TLS) GetMinVersion() string { + if x != nil { + return x.MinVersion } return "" } // Deprecated: Do not use. -func (m *TLS) GetDeprecated_PreferServerCipherSuites() bool { - if m != nil { - return m.Deprecated_PreferServerCipherSuites +func (x *TLS) GetDeprecated_PreferServerCipherSuites() bool { + if x != nil { + return x.Deprecated_PreferServerCipherSuites } return false } type ACL struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Enabled bool `protobuf:"varint,1,opt,name=Enabled,proto3" json:"Enabled,omitempty"` PolicyTTL string `protobuf:"bytes,2,opt,name=PolicyTTL,proto3" json:"PolicyTTL,omitempty"` RoleTTL string `protobuf:"bytes,3,opt,name=RoleTTL,proto3" json:"RoleTTL,omitempty"` @@ -310,372 +353,612 @@ type ACL struct { Tokens *ACLTokens `protobuf:"bytes,8,opt,name=Tokens,proto3" json:"Tokens,omitempty"` // Deprecated_DisabledTTL is deprecated. It is no longer populated and should // be ignored by clients. 
- Deprecated_DisabledTTL string `protobuf:"bytes,9,opt,name=Deprecated_DisabledTTL,json=DeprecatedDisabledTTL,proto3" json:"Deprecated_DisabledTTL,omitempty"` // Deprecated: Do not use. - EnableTokenPersistence bool `protobuf:"varint,10,opt,name=EnableTokenPersistence,proto3" json:"EnableTokenPersistence,omitempty"` - MSPDisableBootstrap bool `protobuf:"varint,11,opt,name=MSPDisableBootstrap,proto3" json:"MSPDisableBootstrap,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // + // Deprecated: Do not use. + Deprecated_DisabledTTL string `protobuf:"bytes,9,opt,name=Deprecated_DisabledTTL,json=DeprecatedDisabledTTL,proto3" json:"Deprecated_DisabledTTL,omitempty"` + EnableTokenPersistence bool `protobuf:"varint,10,opt,name=EnableTokenPersistence,proto3" json:"EnableTokenPersistence,omitempty"` + MSPDisableBootstrap bool `protobuf:"varint,11,opt,name=MSPDisableBootstrap,proto3" json:"MSPDisableBootstrap,omitempty"` } -func (m *ACL) Reset() { *m = ACL{} } -func (m *ACL) String() string { return proto.CompactTextString(m) } -func (*ACL) ProtoMessage() {} +func (x *ACL) Reset() { + *x = ACL{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ACL) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ACL) ProtoMessage() {} + +func (x *ACL) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ACL.ProtoReflect.Descriptor instead. 
func (*ACL) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{4} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{4} } -func (m *ACL) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ACL.Unmarshal(m, b) -} -func (m *ACL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ACL.Marshal(b, m, deterministic) -} -func (m *ACL) XXX_Merge(src proto.Message) { - xxx_messageInfo_ACL.Merge(m, src) -} -func (m *ACL) XXX_Size() int { - return xxx_messageInfo_ACL.Size(m) -} -func (m *ACL) XXX_DiscardUnknown() { - xxx_messageInfo_ACL.DiscardUnknown(m) -} - -var xxx_messageInfo_ACL proto.InternalMessageInfo - -func (m *ACL) GetEnabled() bool { - if m != nil { - return m.Enabled +func (x *ACL) GetEnabled() bool { + if x != nil { + return x.Enabled } return false } -func (m *ACL) GetPolicyTTL() string { - if m != nil { - return m.PolicyTTL +func (x *ACL) GetPolicyTTL() string { + if x != nil { + return x.PolicyTTL } return "" } -func (m *ACL) GetRoleTTL() string { - if m != nil { - return m.RoleTTL +func (x *ACL) GetRoleTTL() string { + if x != nil { + return x.RoleTTL } return "" } -func (m *ACL) GetTokenTTL() string { - if m != nil { - return m.TokenTTL +func (x *ACL) GetTokenTTL() string { + if x != nil { + return x.TokenTTL } return "" } -func (m *ACL) GetDownPolicy() string { - if m != nil { - return m.DownPolicy +func (x *ACL) GetDownPolicy() string { + if x != nil { + return x.DownPolicy } return "" } -func (m *ACL) GetDefaultPolicy() string { - if m != nil { - return m.DefaultPolicy +func (x *ACL) GetDefaultPolicy() string { + if x != nil { + return x.DefaultPolicy } return "" } -func (m *ACL) GetEnableKeyListPolicy() bool { - if m != nil { - return m.EnableKeyListPolicy +func (x *ACL) GetEnableKeyListPolicy() bool { + if x != nil { + return x.EnableKeyListPolicy } return false } -func (m *ACL) GetTokens() *ACLTokens { - if m != nil { - return m.Tokens +func (x *ACL) GetTokens() *ACLTokens { + if x != nil { + return x.Tokens } return nil } // Deprecated: Do not use. 
-func (m *ACL) GetDeprecated_DisabledTTL() string { - if m != nil { - return m.Deprecated_DisabledTTL +func (x *ACL) GetDeprecated_DisabledTTL() string { + if x != nil { + return x.Deprecated_DisabledTTL } return "" } -func (m *ACL) GetEnableTokenPersistence() bool { - if m != nil { - return m.EnableTokenPersistence +func (x *ACL) GetEnableTokenPersistence() bool { + if x != nil { + return x.EnableTokenPersistence } return false } -func (m *ACL) GetMSPDisableBootstrap() bool { - if m != nil { - return m.MSPDisableBootstrap +func (x *ACL) GetMSPDisableBootstrap() bool { + if x != nil { + return x.MSPDisableBootstrap } return false } type ACLTokens struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + InitialManagement string `protobuf:"bytes,1,opt,name=InitialManagement,proto3" json:"InitialManagement,omitempty"` Replication string `protobuf:"bytes,2,opt,name=Replication,proto3" json:"Replication,omitempty"` AgentRecovery string `protobuf:"bytes,3,opt,name=AgentRecovery,proto3" json:"AgentRecovery,omitempty"` Default string `protobuf:"bytes,4,opt,name=Default,proto3" json:"Default,omitempty"` Agent string `protobuf:"bytes,5,opt,name=Agent,proto3" json:"Agent,omitempty"` ManagedServiceProvider []*ACLServiceProviderToken `protobuf:"bytes,6,rep,name=ManagedServiceProvider,proto3" json:"ManagedServiceProvider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *ACLTokens) Reset() { *m = ACLTokens{} } -func (m *ACLTokens) String() string { return proto.CompactTextString(m) } -func (*ACLTokens) ProtoMessage() {} +func (x *ACLTokens) Reset() { + *x = ACLTokens{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ACLTokens) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ACLTokens) ProtoMessage() {} + +func (x *ACLTokens) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ACLTokens.ProtoReflect.Descriptor instead. 
func (*ACLTokens) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{5} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{5} } -func (m *ACLTokens) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ACLTokens.Unmarshal(m, b) -} -func (m *ACLTokens) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ACLTokens.Marshal(b, m, deterministic) -} -func (m *ACLTokens) XXX_Merge(src proto.Message) { - xxx_messageInfo_ACLTokens.Merge(m, src) -} -func (m *ACLTokens) XXX_Size() int { - return xxx_messageInfo_ACLTokens.Size(m) -} -func (m *ACLTokens) XXX_DiscardUnknown() { - xxx_messageInfo_ACLTokens.DiscardUnknown(m) -} - -var xxx_messageInfo_ACLTokens proto.InternalMessageInfo - -func (m *ACLTokens) GetInitialManagement() string { - if m != nil { - return m.InitialManagement +func (x *ACLTokens) GetInitialManagement() string { + if x != nil { + return x.InitialManagement } return "" } -func (m *ACLTokens) GetReplication() string { - if m != nil { - return m.Replication +func (x *ACLTokens) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *ACLTokens) GetAgentRecovery() string { - if m != nil { - return m.AgentRecovery +func (x *ACLTokens) GetAgentRecovery() string { + if x != nil { + return x.AgentRecovery } return "" } -func (m *ACLTokens) GetDefault() string { - if m != nil { - return m.Default +func (x *ACLTokens) GetDefault() string { + if x != nil { + return x.Default } return "" } -func (m *ACLTokens) GetAgent() string { - if m != nil { - return m.Agent +func (x *ACLTokens) GetAgent() string { + if x != nil { + return x.Agent } return "" } -func (m *ACLTokens) GetManagedServiceProvider() []*ACLServiceProviderToken { - if m != nil { - return m.ManagedServiceProvider +func (x *ACLTokens) GetManagedServiceProvider() []*ACLServiceProviderToken { + if x != nil { + return x.ManagedServiceProvider } return nil } type ACLServiceProviderToken struct { - AccessorID string `protobuf:"bytes,1,opt,name=AccessorID,proto3" json:"AccessorID,omitempty"` - SecretID string `protobuf:"bytes,2,opt,name=SecretID,proto3" json:"SecretID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AccessorID string `protobuf:"bytes,1,opt,name=AccessorID,proto3" json:"AccessorID,omitempty"` + SecretID string `protobuf:"bytes,2,opt,name=SecretID,proto3" json:"SecretID,omitempty"` } -func (m *ACLServiceProviderToken) Reset() { *m = ACLServiceProviderToken{} } -func (m *ACLServiceProviderToken) String() string { return proto.CompactTextString(m) } -func (*ACLServiceProviderToken) ProtoMessage() {} +func (x *ACLServiceProviderToken) Reset() { + *x = ACLServiceProviderToken{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ACLServiceProviderToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ACLServiceProviderToken) ProtoMessage() {} + +func (x *ACLServiceProviderToken) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use ACLServiceProviderToken.ProtoReflect.Descriptor instead. func (*ACLServiceProviderToken) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{6} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{6} } -func (m *ACLServiceProviderToken) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ACLServiceProviderToken.Unmarshal(m, b) -} -func (m *ACLServiceProviderToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ACLServiceProviderToken.Marshal(b, m, deterministic) -} -func (m *ACLServiceProviderToken) XXX_Merge(src proto.Message) { - xxx_messageInfo_ACLServiceProviderToken.Merge(m, src) -} -func (m *ACLServiceProviderToken) XXX_Size() int { - return xxx_messageInfo_ACLServiceProviderToken.Size(m) -} -func (m *ACLServiceProviderToken) XXX_DiscardUnknown() { - xxx_messageInfo_ACLServiceProviderToken.DiscardUnknown(m) -} - -var xxx_messageInfo_ACLServiceProviderToken proto.InternalMessageInfo - -func (m *ACLServiceProviderToken) GetAccessorID() string { - if m != nil { - return m.AccessorID +func (x *ACLServiceProviderToken) GetAccessorID() string { + if x != nil { + return x.AccessorID } return "" } -func (m *ACLServiceProviderToken) GetSecretID() string { - if m != nil { - return m.SecretID +func (x *ACLServiceProviderToken) GetSecretID() string { + if x != nil { + return x.SecretID } return "" } type AutoEncrypt struct { - TLS bool `protobuf:"varint,1,opt,name=TLS,proto3" json:"TLS,omitempty"` - DNSSAN []string `protobuf:"bytes,2,rep,name=DNSSAN,proto3" json:"DNSSAN,omitempty"` - IPSAN []string `protobuf:"bytes,3,rep,name=IPSAN,proto3" json:"IPSAN,omitempty"` - AllowTLS bool `protobuf:"varint,4,opt,name=AllowTLS,proto3" json:"AllowTLS,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TLS bool `protobuf:"varint,1,opt,name=TLS,proto3" json:"TLS,omitempty"` + DNSSAN []string `protobuf:"bytes,2,rep,name=DNSSAN,proto3" json:"DNSSAN,omitempty"` + IPSAN []string `protobuf:"bytes,3,rep,name=IPSAN,proto3" json:"IPSAN,omitempty"` + AllowTLS bool `protobuf:"varint,4,opt,name=AllowTLS,proto3" json:"AllowTLS,omitempty"` } -func (m *AutoEncrypt) Reset() { *m = AutoEncrypt{} } -func (m *AutoEncrypt) String() string { return proto.CompactTextString(m) } -func (*AutoEncrypt) ProtoMessage() {} +func (x *AutoEncrypt) Reset() { + *x = AutoEncrypt{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconfig_config_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AutoEncrypt) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AutoEncrypt) ProtoMessage() {} + +func (x *AutoEncrypt) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconfig_config_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AutoEncrypt.ProtoReflect.Descriptor instead. 
func (*AutoEncrypt) Descriptor() ([]byte, []int) { - return fileDescriptor_aefa824db7b74d77, []int{7} + return file_proto_pbconfig_config_proto_rawDescGZIP(), []int{7} } -func (m *AutoEncrypt) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AutoEncrypt.Unmarshal(m, b) -} -func (m *AutoEncrypt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AutoEncrypt.Marshal(b, m, deterministic) -} -func (m *AutoEncrypt) XXX_Merge(src proto.Message) { - xxx_messageInfo_AutoEncrypt.Merge(m, src) -} -func (m *AutoEncrypt) XXX_Size() int { - return xxx_messageInfo_AutoEncrypt.Size(m) -} -func (m *AutoEncrypt) XXX_DiscardUnknown() { - xxx_messageInfo_AutoEncrypt.DiscardUnknown(m) -} - -var xxx_messageInfo_AutoEncrypt proto.InternalMessageInfo - -func (m *AutoEncrypt) GetTLS() bool { - if m != nil { - return m.TLS +func (x *AutoEncrypt) GetTLS() bool { + if x != nil { + return x.TLS } return false } -func (m *AutoEncrypt) GetDNSSAN() []string { - if m != nil { - return m.DNSSAN +func (x *AutoEncrypt) GetDNSSAN() []string { + if x != nil { + return x.DNSSAN } return nil } -func (m *AutoEncrypt) GetIPSAN() []string { - if m != nil { - return m.IPSAN +func (x *AutoEncrypt) GetIPSAN() []string { + if x != nil { + return x.IPSAN } return nil } -func (m *AutoEncrypt) GetAllowTLS() bool { - if m != nil { - return m.AllowTLS +func (x *AutoEncrypt) GetAllowTLS() bool { + if x != nil { + return x.AllowTLS } return false } -func init() { - proto.RegisterType((*Config)(nil), "config.Config") - proto.RegisterType((*Gossip)(nil), "config.Gossip") - proto.RegisterType((*GossipEncryption)(nil), "config.GossipEncryption") - proto.RegisterType((*TLS)(nil), "config.TLS") - proto.RegisterType((*ACL)(nil), "config.ACL") - proto.RegisterType((*ACLTokens)(nil), "config.ACLTokens") - proto.RegisterType((*ACLServiceProviderToken)(nil), "config.ACLServiceProviderToken") - proto.RegisterType((*AutoEncrypt)(nil), "config.AutoEncrypt") +var File_proto_pbconfig_config_proto protoreflect.FileDescriptor + +var file_proto_pbconfig_config_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xcf, 0x02, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, + 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x03, 0x41, 0x43, + 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, + 0x2e, 0x41, 0x43, 0x4c, 0x52, 0x03, 0x41, 0x43, 0x4c, 0x12, 0x35, 0x0a, 0x0b, 0x41, 0x75, 0x74, + 0x6f, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x52, 0x0b, 0x41, 0x75, 0x74, 0x6f, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x12, 0x26, 0x0a, 0x06, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x52, 0x06, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x1d, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, + 0x4c, 0x53, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x22, 0x66, 0x0a, 0x06, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x12, 0x38, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x4c, 0x41, 0x4e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x4c, 0x41, 0x4e, 0x22, + 0x74, 0x0a, 0x10, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x49, + 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a, + 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4f, 0x75, 0x74, + 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x22, 0xfa, 0x01, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x26, 0x0a, + 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4f, 0x75, 0x74, + 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x14, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, + 0x23, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x50, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, + 0x44, 0x65, 0x70, 
0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x22, 0xbb, 0x03, 0x0a, 0x03, 0x41, 0x43, 0x4c, 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x54, + 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, + 0x54, 0x4c, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x6c, 0x65, 0x54, 0x54, 0x4c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x6c, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x1a, 0x0a, 0x08, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x54, 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x6f, 0x77, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x6f, + 0x77, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, + 0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x29, 0x0a, 0x06, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x43, 0x4c, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x73, 0x52, 0x06, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x39, 0x0a, 0x16, 0x44, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x54, 0x54, 0x4c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x15, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x54, 0x54, 0x4c, 0x12, 0x36, 0x0a, 0x16, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x30, + 0x0a, 0x13, 0x4d, 0x53, 0x50, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x4d, 0x53, 0x50, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x22, 0x8a, 0x02, 0x0a, 0x09, 0x41, 0x43, 0x4c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x2c, + 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x0a, 0x0d, 0x41, 0x67, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x41, + 0x67, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, 0x16, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x43, + 0x4c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x16, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0x55, 0x0a, + 0x17, 0x41, 0x43, 0x4c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6f, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x49, 0x44, 0x22, 0x69, 0x0a, 0x0b, 0x41, 0x75, 0x74, 0x6f, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x41, 0x4e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x41, 0x4e, 0x12, 0x14, 0x0a, + 0x05, 0x49, 0x50, 0x53, 0x41, 0x4e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x49, 0x50, + 0x53, 0x41, 0x4e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x4c, 0x53, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x4c, 0x53, 0x42, + 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbconfig/config.proto", fileDescriptor_aefa824db7b74d77) +var ( + file_proto_pbconfig_config_proto_rawDescOnce sync.Once + file_proto_pbconfig_config_proto_rawDescData = file_proto_pbconfig_config_proto_rawDesc +) + +func file_proto_pbconfig_config_proto_rawDescGZIP() []byte { + file_proto_pbconfig_config_proto_rawDescOnce.Do(func() { + file_proto_pbconfig_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbconfig_config_proto_rawDescData) + }) + return file_proto_pbconfig_config_proto_rawDescData } -var fileDescriptor_aefa824db7b74d77 = []byte{ - // 805 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdd, 0x8e, 0x22, 0x45, - 0x14, 0x0e, 0xf4, 0x6e, 0x0f, 0x14, 0xba, 0xd9, 0xad, 0x5d, 0xb1, 0xe3, 0x2f, 0x69, 0xcd, 0x06, - 0xcd, 0x66, 0x30, 0x63, 0x34, 0x7a, 0xc9, 0xc0, 0x46, 0x71, 0x19, 0x24, 0xdd, 0x38, 0x26, 0xde, - 0x98, 0xa6, 0x39, 0x40, 0xc5, 0xa6, 0xaa, 0x53, 0x5d, 0xcc, 
0xa4, 0x5f, 0xc1, 0x57, 0xf2, 0x21, - 0x7c, 0x1f, 0xaf, 0xcc, 0xa9, 0xaa, 0xfe, 0x9b, 0x81, 0x2b, 0x38, 0xdf, 0xf7, 0xd5, 0xa9, 0x73, - 0xea, 0xfc, 0x34, 0xf9, 0x38, 0x95, 0x42, 0x89, 0x51, 0xba, 0x8e, 0x05, 0xdf, 0xb2, 0xdd, 0xc8, - 0xfc, 0x5c, 0x6a, 0x94, 0xba, 0xc6, 0xf2, 0xff, 0x6d, 0x13, 0x77, 0xa2, 0xff, 0xd2, 0xcf, 0x08, - 0x99, 0x46, 0x2a, 0x8a, 0x81, 0x2b, 0x90, 0x5e, 0x6b, 0xd0, 0x1a, 0x76, 0x83, 0x1a, 0x42, 0xdf, - 0x90, 0x17, 0x4b, 0xc9, 0x0e, 0x91, 0xcc, 0x6b, 0xb2, 0xb6, 0x96, 0x3d, 0x26, 0xe8, 0x47, 0xa4, - 0xb3, 0x10, 0x1b, 0x58, 0x44, 0x07, 0xf0, 0x1c, 0x2d, 0x2a, 0x6d, 0x3a, 0x20, 0xbd, 0x10, 0x76, - 0x07, 0xe0, 0x4a, 0xd3, 0x4f, 0x34, 0x5d, 0x87, 0xe8, 0x27, 0xa4, 0xbb, 0x8c, 0xa4, 0x62, 0x8a, - 0x09, 0xee, 0x75, 0x35, 0x5f, 0x01, 0xf4, 0x53, 0xe2, 0x8c, 0x27, 0x73, 0xef, 0xe9, 0xa0, 0x35, - 0xec, 0x5d, 0xf5, 0x2e, 0x6d, 0x62, 0xe3, 0xc9, 0x3c, 0x40, 0x9c, 0x7e, 0x47, 0x7a, 0xe3, 0xa3, - 0x12, 0x6f, 0x79, 0x2c, 0xf3, 0x54, 0x79, 0xae, 0x96, 0xbd, 0x2c, 0x65, 0x15, 0x15, 0xd4, 0x75, - 0xf4, 0x35, 0x71, 0x7f, 0x12, 0x59, 0xc6, 0x52, 0xef, 0x42, 0x9f, 0x78, 0x56, 0x9c, 0x30, 0x68, - 0x60, 0x59, 0xbc, 0x7d, 0x35, 0x0f, 0xbd, 0x4e, 0xf3, 0xf6, 0xd5, 0x3c, 0x0c, 0x10, 0xf7, 0xb7, - 0x85, 0x1b, 0xfa, 0x03, 0x21, 0xd6, 0x37, 0x66, 0xd1, 0xd2, 0x7a, 0xaf, 0xe9, 0xb4, 0xe2, 0x83, - 0x9a, 0x96, 0xfa, 0xe4, 0xbd, 0x00, 0x94, 0xcc, 0x7f, 0x11, 0x8c, 0xcf, 0xc7, 0x0b, 0xaf, 0x3d, - 0x70, 0x86, 0xdd, 0xa0, 0x81, 0xf9, 0x8a, 0x3c, 0x7f, 0xe8, 0x83, 0x3e, 0x27, 0xce, 0x3b, 0xc8, - 0x6d, 0xed, 0xf0, 0x2f, 0x7d, 0x4d, 0x9e, 0xdd, 0x82, 0x64, 0xdb, 0x7c, 0xc6, 0x63, 0x71, 0x60, - 0x7c, 0xa7, 0x2b, 0xd6, 0x09, 0x1e, 0xa0, 0x95, 0xee, 0xd7, 0xa3, 0xda, 0x09, 0xd4, 0x39, 0x75, - 0x5d, 0x81, 0xfa, 0xff, 0xb5, 0x74, 0xf6, 0x27, 0xf4, 0xad, 0x53, 0x7a, 0x7a, 0x45, 0x5e, 0x19, - 0x24, 0x04, 0x79, 0x07, 0xf2, 0x67, 0x91, 0x29, 0x8e, 0x35, 0x37, 0x51, 0x9c, 0xe4, 0x30, 0xfb, - 0x09, 0x4b, 0xf7, 0x20, 0xc3, 0x23, 0x53, 0x90, 0xd9, 0xf6, 0x69, 0x60, 0xd8, 0xac, 0x37, 0x8c, - 0xdf, 0x82, 0xcc, 0xf0, 0x6d, 0x4d, 0x07, 0xd5, 0x10, 0x1a, 0x92, 0x2f, 0xa6, 0x90, 0x4a, 0x88, - 0x23, 0x05, 0x9b, 0x3f, 0x97, 0x12, 0xb6, 0x20, 0xcd, 0x35, 0x0d, 0xd7, 0xd8, 0x42, 0x9d, 0xeb, - 0xb6, 0xd7, 0x0a, 0xfc, 0x4a, 0x7e, 0x4e, 0xed, 0xff, 0xe3, 0xe8, 0xc6, 0xa3, 0x1e, 0xb9, 0x78, - 0xcb, 0xa3, 0x75, 0x02, 0x1b, 0x9b, 0x75, 0x61, 0xea, 0xbe, 0x15, 0x09, 0x8b, 0xf3, 0xd5, 0x6a, - 0x6e, 0x67, 0xa3, 0x02, 0xf0, 0x5c, 0x20, 0x12, 0x40, 0xce, 0xe4, 0x54, 0x98, 0x38, 0x2d, 0x2b, - 0xf1, 0x17, 0x70, 0xa4, 0x4c, 0x32, 0xa5, 0xad, 0xe7, 0x52, 0xdc, 0x73, 0xe3, 0x46, 0x47, 0x8c, - 0x73, 0x59, 0x22, 0xf4, 0x4b, 0xf2, 0xfe, 0x14, 0xb6, 0xd1, 0x31, 0x51, 0x56, 0xe2, 0x6a, 0x49, - 0x13, 0xa4, 0xdf, 0x90, 0x97, 0x26, 0xc8, 0x77, 0x90, 0xcf, 0x59, 0x56, 0x68, 0x2f, 0x74, 0xfc, - 0xa7, 0x28, 0xfa, 0x15, 0x71, 0x75, 0x0c, 0x99, 0x6d, 0xf5, 0x17, 0xb5, 0x41, 0x33, 0x44, 0x60, - 0x05, 0xf4, 0x47, 0xd2, 0xaf, 0xbd, 0xf6, 0x94, 0x65, 0xfa, 0x35, 0x30, 0x19, 0x3d, 0xbb, 0xfa, - 0x81, 0x3f, 0xa8, 0x14, 0x35, 0x01, 0xfd, 0x9e, 0xf4, 0xcd, 0xe5, 0xda, 0xd5, 0x12, 0xcb, 0x97, - 0x29, 0xe0, 0x31, 0x78, 0x44, 0x87, 0x76, 0x86, 0xc5, 0x7c, 0x6e, 0xc2, 0xa5, 0xf5, 0x74, 0x2d, - 0x84, 0xca, 0x94, 0x8c, 0x52, 0xaf, 0x67, 0xf2, 0x39, 0x41, 0xf9, 0x7f, 0xb7, 0x49, 0xb7, 0x0c, - 0x1d, 0xb7, 0xd9, 0x8c, 0x33, 0xc5, 0xa2, 0xe4, 0x26, 0xe2, 0xd1, 0x0e, 0x70, 0xf5, 0xd8, 0xc1, - 0x79, 0x4c, 0xe0, 0xc6, 0x0a, 0x20, 0x4d, 0x58, 0x1c, 0xe9, 0x59, 0x36, 0x95, 0xad, 0x43, 0x58, - 0x85, 0xf1, 0x0e, 0xb8, 0x0a, 0x20, 0x16, 0x77, 0x20, 0x73, 0x5b, 0xe1, 0x26, 0x88, 
0x1d, 0x60, - 0xcb, 0x62, 0xcb, 0x5c, 0x98, 0xf4, 0x15, 0x79, 0xaa, 0xa5, 0xb6, 0xc0, 0xc6, 0xa0, 0xbf, 0x93, - 0xbe, 0x89, 0x62, 0x83, 0xed, 0xc8, 0x62, 0x58, 0x4a, 0x71, 0xc7, 0x36, 0x20, 0x3d, 0x77, 0xe0, - 0x0c, 0x7b, 0x57, 0x9f, 0xd7, 0x6a, 0xf2, 0x40, 0xa1, 0xf3, 0x0c, 0xce, 0x1c, 0xf7, 0x7f, 0x23, - 0x1f, 0x9e, 0x39, 0x82, 0xfd, 0x36, 0x8e, 0x63, 0xc8, 0x32, 0x21, 0x67, 0xd3, 0xe2, 0x3b, 0x50, - 0x21, 0xd8, 0xab, 0x21, 0xc4, 0x12, 0xd4, 0x6c, 0x6a, 0x1f, 0xa2, 0xb4, 0x7d, 0xd6, 0x58, 0xbd, - 0xb8, 0x8f, 0x70, 0x55, 0x9a, 0x21, 0xd1, 0x7b, 0xa3, 0x4f, 0xdc, 0xe9, 0x22, 0x0c, 0xcb, 0x9d, - 0x66, 0x2d, 0x4c, 0x7f, 0xb6, 0x44, 0xd8, 0xd1, 0xb0, 0x31, 0xf0, 0xaa, 0x71, 0x92, 0x88, 0x7b, - 0x74, 0xf2, 0x44, 0x3b, 0x29, 0xed, 0xeb, 0x37, 0x7f, 0x7c, 0xbd, 0x63, 0x6a, 0x7f, 0x5c, 0x5f, - 0xc6, 0xe2, 0x30, 0xda, 0x47, 0xd9, 0x9e, 0xc5, 0x42, 0xa6, 0xf8, 0x99, 0xcb, 0x8e, 0xc9, 0xa8, - 0xf9, 0xf1, 0x5b, 0xbb, 0xda, 0xfe, 0xf6, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x97, 0xb9, - 0xb8, 0x15, 0x07, 0x00, 0x00, +var file_proto_pbconfig_config_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_proto_pbconfig_config_proto_goTypes = []interface{}{ + (*Config)(nil), // 0: config.Config + (*Gossip)(nil), // 1: config.Gossip + (*GossipEncryption)(nil), // 2: config.GossipEncryption + (*TLS)(nil), // 3: config.TLS + (*ACL)(nil), // 4: config.ACL + (*ACLTokens)(nil), // 5: config.ACLTokens + (*ACLServiceProviderToken)(nil), // 6: config.ACLServiceProviderToken + (*AutoEncrypt)(nil), // 7: config.AutoEncrypt +} +var file_proto_pbconfig_config_proto_depIdxs = []int32{ + 4, // 0: config.Config.ACL:type_name -> config.ACL + 7, // 1: config.Config.AutoEncrypt:type_name -> config.AutoEncrypt + 1, // 2: config.Config.Gossip:type_name -> config.Gossip + 3, // 3: config.Config.TLS:type_name -> config.TLS + 2, // 4: config.Gossip.Encryption:type_name -> config.GossipEncryption + 5, // 5: config.ACL.Tokens:type_name -> config.ACLTokens + 6, // 6: config.ACLTokens.ManagedServiceProvider:type_name -> config.ACLServiceProviderToken + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_proto_pbconfig_config_proto_init() } +func file_proto_pbconfig_config_proto_init() { + if File_proto_pbconfig_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbconfig_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Config); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconfig_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gossip); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconfig_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GossipEncryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconfig_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLS); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_proto_pbconfig_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ACL); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconfig_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ACLTokens); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconfig_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ACLServiceProviderToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconfig_config_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AutoEncrypt); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbconfig_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbconfig_config_proto_goTypes, + DependencyIndexes: file_proto_pbconfig_config_proto_depIdxs, + MessageInfos: file_proto_pbconfig_config_proto_msgTypes, + }.Build() + File_proto_pbconfig_config_proto = out.File + file_proto_pbconfig_config_proto_rawDesc = nil + file_proto_pbconfig_config_proto_goTypes = nil + file_proto_pbconfig_config_proto_depIdxs = nil } diff --git a/proto/pbconnect/connect.pb.go b/proto/pbconnect/connect.pb.go index 97ce17def..4197003bc 100644 --- a/proto/pbconnect/connect.pb.go +++ b/proto/pbconnect/connect.pb.go @@ -1,26 +1,31 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbconnect/connect.proto package pbconnect import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" pbcommon "github.com/hashicorp/consul/proto/pbcommon" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // CARoots is the list of all currently trusted CA Roots. 
// @@ -30,6 +35,10 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // output=connect.gen.go // name=StructsIndexedCARoots type CARoots struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ActiveRootID is the ID of a root in Roots that is the active CA root. // Other roots are still valid if they're in the Roots list but are in // the process of being rotated out. @@ -62,61 +71,65 @@ type CARoots struct { // QueryMeta here is mainly used to contain the latest Raft Index that could // be used to perform a blocking query. // mog: func-to=QueryMetaTo func-from=QueryMetaFrom - QueryMeta *pbcommon.QueryMeta `protobuf:"bytes,4,opt,name=QueryMeta,proto3" json:"QueryMeta,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + QueryMeta *pbcommon.QueryMeta `protobuf:"bytes,4,opt,name=QueryMeta,proto3" json:"QueryMeta,omitempty"` } -func (m *CARoots) Reset() { *m = CARoots{} } -func (m *CARoots) String() string { return proto.CompactTextString(m) } -func (*CARoots) ProtoMessage() {} +func (x *CARoots) Reset() { + *x = CARoots{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconnect_connect_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CARoots) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CARoots) ProtoMessage() {} + +func (x *CARoots) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconnect_connect_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CARoots.ProtoReflect.Descriptor instead. 
func (*CARoots) Descriptor() ([]byte, []int) { - return fileDescriptor_80627e709958eb04, []int{0} + return file_proto_pbconnect_connect_proto_rawDescGZIP(), []int{0} } -func (m *CARoots) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CARoots.Unmarshal(m, b) -} -func (m *CARoots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CARoots.Marshal(b, m, deterministic) -} -func (m *CARoots) XXX_Merge(src proto.Message) { - xxx_messageInfo_CARoots.Merge(m, src) -} -func (m *CARoots) XXX_Size() int { - return xxx_messageInfo_CARoots.Size(m) -} -func (m *CARoots) XXX_DiscardUnknown() { - xxx_messageInfo_CARoots.DiscardUnknown(m) -} - -var xxx_messageInfo_CARoots proto.InternalMessageInfo - -func (m *CARoots) GetActiveRootID() string { - if m != nil { - return m.ActiveRootID +func (x *CARoots) GetActiveRootID() string { + if x != nil { + return x.ActiveRootID } return "" } -func (m *CARoots) GetTrustDomain() string { - if m != nil { - return m.TrustDomain +func (x *CARoots) GetTrustDomain() string { + if x != nil { + return x.TrustDomain } return "" } -func (m *CARoots) GetRoots() []*CARoot { - if m != nil { - return m.Roots +func (x *CARoots) GetRoots() []*CARoot { + if x != nil { + return x.Roots } return nil } -func (m *CARoots) GetQueryMeta() *pbcommon.QueryMeta { - if m != nil { - return m.QueryMeta +func (x *CARoots) GetQueryMeta() *pbcommon.QueryMeta { + if x != nil { + return x.QueryMeta } return nil } @@ -129,6 +142,10 @@ func (m *CARoots) GetQueryMeta() *pbcommon.QueryMeta { // output=connect.gen.go // name=StructsCARoot type CARoot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID is a globally unique ID (UUID) representing this CA root. ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` // Name is a human-friendly name for this CA root. This value is @@ -151,9 +168,9 @@ type CARoot struct { ExternalTrustDomain string `protobuf:"bytes,5,opt,name=ExternalTrustDomain,proto3" json:"ExternalTrustDomain,omitempty"` // Time validity bounds. // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto - NotBefore *timestamp.Timestamp `protobuf:"bytes,6,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"` + NotBefore *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"` // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto - NotAfter *timestamp.Timestamp `protobuf:"bytes,7,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"` + NotAfter *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"` // RootCert is the PEM-encoded public certificate. RootCert string `protobuf:"bytes,8,opt,name=RootCert,proto3" json:"RootCert,omitempty"` // IntermediateCerts is a list of PEM-encoded intermediate certs to @@ -173,7 +190,7 @@ type CARoot struct { // This will only be set on roots that have been rotated out from being the // active root. // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto - RotatedOutAt *timestamp.Timestamp `protobuf:"bytes,13,opt,name=RotatedOutAt,proto3" json:"RotatedOutAt,omitempty"` + RotatedOutAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=RotatedOutAt,proto3" json:"RotatedOutAt,omitempty"` // PrivateKeyType is the type of the private key used to sign certificates. It // may be "rsa" or "ec". This is provided as a convenience to avoid parsing // the public key to from the certificate to infer the type. 
@@ -184,145 +201,149 @@ type CARoot struct { // mog: func-to=int func-from=int32 PrivateKeyBits int32 `protobuf:"varint,15,opt,name=PrivateKeyBits,proto3" json:"PrivateKeyBits,omitempty"` // mog: func-to=RaftIndexTo func-from=RaftIndexFrom - RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,16,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,16,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` } -func (m *CARoot) Reset() { *m = CARoot{} } -func (m *CARoot) String() string { return proto.CompactTextString(m) } -func (*CARoot) ProtoMessage() {} +func (x *CARoot) Reset() { + *x = CARoot{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconnect_connect_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CARoot) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CARoot) ProtoMessage() {} + +func (x *CARoot) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconnect_connect_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CARoot.ProtoReflect.Descriptor instead. func (*CARoot) Descriptor() ([]byte, []int) { - return fileDescriptor_80627e709958eb04, []int{1} + return file_proto_pbconnect_connect_proto_rawDescGZIP(), []int{1} } -func (m *CARoot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CARoot.Unmarshal(m, b) -} -func (m *CARoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CARoot.Marshal(b, m, deterministic) -} -func (m *CARoot) XXX_Merge(src proto.Message) { - xxx_messageInfo_CARoot.Merge(m, src) -} -func (m *CARoot) XXX_Size() int { - return xxx_messageInfo_CARoot.Size(m) -} -func (m *CARoot) XXX_DiscardUnknown() { - xxx_messageInfo_CARoot.DiscardUnknown(m) -} - -var xxx_messageInfo_CARoot proto.InternalMessageInfo - -func (m *CARoot) GetID() string { - if m != nil { - return m.ID +func (x *CARoot) GetID() string { + if x != nil { + return x.ID } return "" } -func (m *CARoot) GetName() string { - if m != nil { - return m.Name +func (x *CARoot) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *CARoot) GetSerialNumber() uint64 { - if m != nil { - return m.SerialNumber +func (x *CARoot) GetSerialNumber() uint64 { + if x != nil { + return x.SerialNumber } return 0 } -func (m *CARoot) GetSigningKeyID() string { - if m != nil { - return m.SigningKeyID +func (x *CARoot) GetSigningKeyID() string { + if x != nil { + return x.SigningKeyID } return "" } -func (m *CARoot) GetExternalTrustDomain() string { - if m != nil { - return m.ExternalTrustDomain +func (x *CARoot) GetExternalTrustDomain() string { + if x != nil { + return x.ExternalTrustDomain } return "" } -func (m *CARoot) GetNotBefore() *timestamp.Timestamp { - if m != nil { - return m.NotBefore +func (x *CARoot) GetNotBefore() *timestamppb.Timestamp { + if x != nil { + return x.NotBefore } return nil } -func (m *CARoot) GetNotAfter() *timestamp.Timestamp { - if m != nil { - return m.NotAfter +func (x *CARoot) GetNotAfter() *timestamppb.Timestamp { + if x != nil { + return x.NotAfter } return nil } -func (m *CARoot) GetRootCert() string { - if m != nil { - return m.RootCert +func (x *CARoot) GetRootCert() 
string { + if x != nil { + return x.RootCert } return "" } -func (m *CARoot) GetIntermediateCerts() []string { - if m != nil { - return m.IntermediateCerts +func (x *CARoot) GetIntermediateCerts() []string { + if x != nil { + return x.IntermediateCerts } return nil } -func (m *CARoot) GetSigningCert() string { - if m != nil { - return m.SigningCert +func (x *CARoot) GetSigningCert() string { + if x != nil { + return x.SigningCert } return "" } -func (m *CARoot) GetSigningKey() string { - if m != nil { - return m.SigningKey +func (x *CARoot) GetSigningKey() string { + if x != nil { + return x.SigningKey } return "" } -func (m *CARoot) GetActive() bool { - if m != nil { - return m.Active +func (x *CARoot) GetActive() bool { + if x != nil { + return x.Active } return false } -func (m *CARoot) GetRotatedOutAt() *timestamp.Timestamp { - if m != nil { - return m.RotatedOutAt +func (x *CARoot) GetRotatedOutAt() *timestamppb.Timestamp { + if x != nil { + return x.RotatedOutAt } return nil } -func (m *CARoot) GetPrivateKeyType() string { - if m != nil { - return m.PrivateKeyType +func (x *CARoot) GetPrivateKeyType() string { + if x != nil { + return x.PrivateKeyType } return "" } -func (m *CARoot) GetPrivateKeyBits() int32 { - if m != nil { - return m.PrivateKeyBits +func (x *CARoot) GetPrivateKeyBits() int32 { + if x != nil { + return x.PrivateKeyBits } return 0 } -func (m *CARoot) GetRaftIndex() *pbcommon.RaftIndex { - if m != nil { - return m.RaftIndex +func (x *CARoot) GetRaftIndex() *pbcommon.RaftIndex { + if x != nil { + return x.RaftIndex } return nil } @@ -336,6 +357,10 @@ func (m *CARoot) GetRaftIndex() *pbcommon.RaftIndex { // output=connect.gen.go // name=StructsIssuedCert type IssuedCert struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // SerialNumber is the unique serial number for this certificate. // This is encoded in standard hex separated by :. SerialNumber string `protobuf:"bytes,1,opt,name=SerialNumber,proto3" json:"SerialNumber,omitempty"` @@ -355,171 +380,319 @@ type IssuedCert struct { // ValidAfter and ValidBefore are the validity periods for the // certificate. 
// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto - ValidAfter *timestamp.Timestamp `protobuf:"bytes,8,opt,name=ValidAfter,proto3" json:"ValidAfter,omitempty"` + ValidAfter *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=ValidAfter,proto3" json:"ValidAfter,omitempty"` // mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto - ValidBefore *timestamp.Timestamp `protobuf:"bytes,9,opt,name=ValidBefore,proto3" json:"ValidBefore,omitempty"` + ValidBefore *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=ValidBefore,proto3" json:"ValidBefore,omitempty"` // EnterpriseMeta is the Consul Enterprise specific metadata // mog: func-to=EnterpriseMetaTo func-from=EnterpriseMetaFrom EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,10,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=RaftIndexTo func-from=RaftIndexFrom - RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` } -func (m *IssuedCert) Reset() { *m = IssuedCert{} } -func (m *IssuedCert) String() string { return proto.CompactTextString(m) } -func (*IssuedCert) ProtoMessage() {} +func (x *IssuedCert) Reset() { + *x = IssuedCert{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbconnect_connect_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IssuedCert) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IssuedCert) ProtoMessage() {} + +func (x *IssuedCert) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbconnect_connect_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IssuedCert.ProtoReflect.Descriptor instead. 
func (*IssuedCert) Descriptor() ([]byte, []int) { - return fileDescriptor_80627e709958eb04, []int{2} + return file_proto_pbconnect_connect_proto_rawDescGZIP(), []int{2} } -func (m *IssuedCert) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IssuedCert.Unmarshal(m, b) -} -func (m *IssuedCert) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IssuedCert.Marshal(b, m, deterministic) -} -func (m *IssuedCert) XXX_Merge(src proto.Message) { - xxx_messageInfo_IssuedCert.Merge(m, src) -} -func (m *IssuedCert) XXX_Size() int { - return xxx_messageInfo_IssuedCert.Size(m) -} -func (m *IssuedCert) XXX_DiscardUnknown() { - xxx_messageInfo_IssuedCert.DiscardUnknown(m) -} - -var xxx_messageInfo_IssuedCert proto.InternalMessageInfo - -func (m *IssuedCert) GetSerialNumber() string { - if m != nil { - return m.SerialNumber +func (x *IssuedCert) GetSerialNumber() string { + if x != nil { + return x.SerialNumber } return "" } -func (m *IssuedCert) GetCertPEM() string { - if m != nil { - return m.CertPEM +func (x *IssuedCert) GetCertPEM() string { + if x != nil { + return x.CertPEM } return "" } -func (m *IssuedCert) GetPrivateKeyPEM() string { - if m != nil { - return m.PrivateKeyPEM +func (x *IssuedCert) GetPrivateKeyPEM() string { + if x != nil { + return x.PrivateKeyPEM } return "" } -func (m *IssuedCert) GetService() string { - if m != nil { - return m.Service +func (x *IssuedCert) GetService() string { + if x != nil { + return x.Service } return "" } -func (m *IssuedCert) GetServiceURI() string { - if m != nil { - return m.ServiceURI +func (x *IssuedCert) GetServiceURI() string { + if x != nil { + return x.ServiceURI } return "" } -func (m *IssuedCert) GetAgent() string { - if m != nil { - return m.Agent +func (x *IssuedCert) GetAgent() string { + if x != nil { + return x.Agent } return "" } -func (m *IssuedCert) GetAgentURI() string { - if m != nil { - return m.AgentURI +func (x *IssuedCert) GetAgentURI() string { + if x != nil { + return x.AgentURI } return "" } -func (m *IssuedCert) GetValidAfter() *timestamp.Timestamp { - if m != nil { - return m.ValidAfter +func (x *IssuedCert) GetValidAfter() *timestamppb.Timestamp { + if x != nil { + return x.ValidAfter } return nil } -func (m *IssuedCert) GetValidBefore() *timestamp.Timestamp { - if m != nil { - return m.ValidBefore +func (x *IssuedCert) GetValidBefore() *timestamppb.Timestamp { + if x != nil { + return x.ValidBefore } return nil } -func (m *IssuedCert) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { - if m != nil { - return m.EnterpriseMeta +func (x *IssuedCert) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if x != nil { + return x.EnterpriseMeta } return nil } -func (m *IssuedCert) GetRaftIndex() *pbcommon.RaftIndex { - if m != nil { - return m.RaftIndex +func (x *IssuedCert) GetRaftIndex() *pbcommon.RaftIndex { + if x != nil { + return x.RaftIndex } return nil } -func init() { - proto.RegisterType((*CARoots)(nil), "connect.CARoots") - proto.RegisterType((*CARoot)(nil), "connect.CARoot") - proto.RegisterType((*IssuedCert)(nil), "connect.IssuedCert") +var File_proto_pbconnect_connect_proto protoreflect.FileDescriptor + +var file_proto_pbconnect_connect_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x07, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, 0x01, 0x0a, 0x07, 0x43, 0x41, 0x52, 0x6f, 0x6f, + 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x6f, 0x6f, 0x74, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x52, 0x6f, 0x6f, 0x74, 0x49, 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x05, 0x52, 0x6f, 0x6f, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x2e, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x05, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, + 0x2f, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x22, 0xfd, 0x04, 0x0a, 0x06, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x22, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, + 0x79, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x72, + 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x4e, 0x6f, 0x74, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x08, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x52, + 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x52, + 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0b, 
0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, + 0x43, 0x65, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x53, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x43, 0x65, 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x69, 0x67, 0x6e, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x69, 0x67, + 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, + 0x3e, 0x0a, 0x0c, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x41, 0x74, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0c, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x41, 0x74, 0x12, + 0x26, 0x0a, 0x0e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x69, 0x74, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x69, 0x74, 0x73, 0x12, + 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x22, 0xc7, 0x03, 0x0a, 0x0a, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x12, + 0x22, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x65, 0x72, 0x74, 0x50, 0x45, 0x4d, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, 0x65, 0x72, 0x74, 0x50, 0x45, 0x4d, 0x12, 0x24, 0x0a, + 0x0d, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x45, 0x4d, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x50, 0x45, 0x4d, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1e, 0x0a, + 0x0a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x52, 0x49, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x52, 0x49, 0x12, 0x14, 0x0a, + 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x55, 0x52, 0x49, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x55, 0x52, 0x49, 0x12, + 0x3a, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x09, 0x20, 
0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x45, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, + 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func init() { - proto.RegisterFile("proto/pbconnect/connect.proto", fileDescriptor_80627e709958eb04) +var ( + file_proto_pbconnect_connect_proto_rawDescOnce sync.Once + file_proto_pbconnect_connect_proto_rawDescData = file_proto_pbconnect_connect_proto_rawDesc +) + +func file_proto_pbconnect_connect_proto_rawDescGZIP() []byte { + file_proto_pbconnect_connect_proto_rawDescOnce.Do(func() { + file_proto_pbconnect_connect_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbconnect_connect_proto_rawDescData) + }) + return file_proto_pbconnect_connect_proto_rawDescData } -var fileDescriptor_80627e709958eb04 = []byte{ - // 632 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x5d, 0x6f, 0xd3, 0x30, - 0x14, 0x55, 0xd7, 0xcf, 0xdc, 0x6e, 0x1d, 0x33, 0x68, 0xb2, 0x8a, 0x80, 0xa8, 0x02, 0x14, 0x09, - 0x68, 0xd0, 0x90, 0x10, 0x42, 0x68, 0x52, 0xb7, 0xee, 0x21, 0x9a, 0x56, 0x86, 0x37, 0x78, 0xe0, - 0x2d, 0x6d, 0x6f, 0x3b, 0x4b, 0x4d, 0x5c, 0x39, 0xce, 0xb4, 0xfe, 0x22, 0x7e, 0x0a, 0xbf, 0x0a, - 0x09, 0xd9, 0x4e, 0xda, 0xa4, 0x20, 0xf5, 0x29, 0xbe, 0xe7, 0x1e, 0x5f, 0xdf, 0xeb, 0x73, 0x62, - 0x78, 0xb6, 0x94, 0x42, 0x09, 0x7f, 0x39, 0x9e, 0x88, 0x38, 0xc6, 0x89, 0xf2, 0xb3, 0x6f, 0xdf, - 0xe0, 0xa4, 0x99, 0x85, 0xdd, 0x17, 0x73, 0x21, 0xe6, 0x0b, 0xf4, 0x0d, 0x3c, 0x4e, 0x67, 0xbe, - 0xe2, 0x11, 0x26, 0x2a, 0x8c, 0x96, 0x96, 0xd9, 0x7d, 0xba, 0x29, 0x14, 0x45, 0x22, 0xf6, 0xed, - 0xc7, 0x26, 0x7b, 0xbf, 0x2a, 0xd0, 0x3c, 0x1f, 0x30, 0x21, 0x54, 0x42, 0x7a, 0xb0, 0x3f, 0x98, - 0x28, 0x7e, 0x8f, 0x3a, 0x0c, 0x86, 0xb4, 0xe2, 0x56, 0x3c, 0x87, 0x95, 0x30, 0xe2, 0x42, 0xfb, - 0x56, 0xa6, 0x89, 0x1a, 0x8a, 0x28, 0xe4, 0x31, 0xdd, 0x33, 0x94, 0x22, 0x44, 0x5e, 0x41, 0xdd, - 0x94, 0xa3, 0x55, 0xb7, 0xea, 0xb5, 0x4f, 0x0e, 0xfb, 0x79, 0xdf, 0xf6, 0x18, 0x66, 0xb3, 0xc4, - 0x07, 0xe7, 0x5b, 0x8a, 0x72, 0x75, 0x85, 0x2a, 0xa4, 0x35, 0xb7, 0xe2, 0xb5, 0x4f, 0x8e, 0xfa, - 0x59, 0x6b, 0xeb, 0x04, 0xdb, 0x70, 0x7a, 0x7f, 0x6a, 0xd0, 0xb0, 0x25, 0x48, 0x07, 0xf6, 0xd6, - 0xed, 0xed, 0x05, 0x43, 0x42, 0xa0, 0x36, 0x0a, 0x23, 0xcc, 0xba, 0x31, 0x6b, 0x3d, 0xcc, 0x0d, - 0x4a, 0x1e, 0x2e, 0x46, 0x69, 0x34, 0x46, 0x49, 0xab, 0x6e, 0xc5, 0xab, 0xb1, 0x12, 0x66, 0x38, - 0x7c, 
0x1e, 0xf3, 0x78, 0x7e, 0x89, 0xab, 0x60, 0x68, 0xda, 0x70, 0x58, 0x09, 0x23, 0xef, 0xe1, - 0xf1, 0xc5, 0x83, 0x42, 0x19, 0x87, 0x8b, 0xe2, 0xe0, 0x75, 0x43, 0xfd, 0x5f, 0x8a, 0x7c, 0x02, - 0x67, 0x24, 0xd4, 0x19, 0xce, 0x84, 0x44, 0xda, 0x30, 0x93, 0x75, 0xfb, 0x56, 0xa4, 0x7e, 0x2e, - 0x52, 0xff, 0x36, 0x17, 0x89, 0x6d, 0xc8, 0xe4, 0x23, 0xb4, 0x46, 0x42, 0x0d, 0x66, 0x0a, 0x25, - 0x6d, 0xee, 0xdc, 0xb8, 0xe6, 0x92, 0x2e, 0xb4, 0xf4, 0xbd, 0x9c, 0xa3, 0x54, 0xb4, 0x65, 0x1a, - 0x5b, 0xc7, 0xe4, 0x2d, 0x1c, 0x05, 0xb1, 0x42, 0x19, 0xe1, 0x94, 0x87, 0x0a, 0x35, 0x96, 0x50, - 0xc7, 0xad, 0x7a, 0x0e, 0xfb, 0x37, 0xa1, 0xe5, 0xcd, 0xa6, 0x37, 0xc5, 0xc0, 0xca, 0x5b, 0x80, - 0xc8, 0x73, 0x80, 0xcd, 0xfd, 0xd0, 0xb6, 0x21, 0x14, 0x10, 0x72, 0x0c, 0x0d, 0x6b, 0x18, 0xba, - 0xef, 0x56, 0xbc, 0x16, 0xcb, 0x22, 0x72, 0x0a, 0xfb, 0x4c, 0xa8, 0x50, 0xe1, 0xf4, 0x6b, 0xaa, - 0x06, 0x8a, 0x1e, 0xec, 0x9c, 0xaf, 0xc4, 0x27, 0xaf, 0xa1, 0x73, 0x2d, 0xf9, 0x7d, 0xa8, 0xf0, - 0x12, 0x57, 0xb7, 0xab, 0x25, 0xd2, 0x8e, 0x39, 0x7b, 0x0b, 0x2d, 0xf3, 0xce, 0xb8, 0x4a, 0xe8, - 0xa1, 0x5b, 0xf1, 0xea, 0x6c, 0x0b, 0xd5, 0xfe, 0x63, 0xe1, 0x4c, 0x05, 0xf1, 0x14, 0x1f, 0xe8, - 0xa3, 0xb2, 0xff, 0xd6, 0x09, 0xb6, 0xe1, 0xf4, 0x7e, 0x57, 0x01, 0x82, 0x24, 0x49, 0x71, 0x6a, - 0xee, 0x61, 0xdb, 0x5f, 0xd9, 0xcf, 0x52, 0xf2, 0x17, 0x85, 0xa6, 0xe6, 0x5e, 0x5f, 0x5c, 0x65, - 0xd6, 0xcc, 0x43, 0xf2, 0x12, 0x0e, 0x36, 0xfd, 0xe8, 0x7c, 0xd5, 0xe4, 0xcb, 0xa0, 0xde, 0x7f, - 0x83, 0xf2, 0x9e, 0x4f, 0x30, 0xb3, 0x66, 0x1e, 0x1a, 0x15, 0xec, 0xf2, 0x3b, 0x0b, 0x32, 0x33, - 0x16, 0x10, 0xf2, 0x04, 0xea, 0x83, 0x39, 0xc6, 0xca, 0xf8, 0xcf, 0x61, 0x36, 0xd0, 0x3e, 0x31, - 0x0b, 0xbd, 0xa7, 0x69, 0x7d, 0x92, 0xc7, 0xe4, 0x33, 0xc0, 0x8f, 0x70, 0xc1, 0xa7, 0xd6, 0x7d, - 0xad, 0x9d, 0xea, 0x14, 0xd8, 0xe4, 0x0b, 0xb4, 0x4d, 0x94, 0x79, 0xde, 0xd9, 0xb9, 0xb9, 0x48, - 0x27, 0xa7, 0xd0, 0xb9, 0xd0, 0x46, 0x5c, 0x4a, 0x9e, 0xa0, 0x79, 0x0e, 0xc0, 0x14, 0x38, 0xce, - 0xe5, 0x28, 0x67, 0xd9, 0x16, 0xbb, 0xac, 0x64, 0x7b, 0xb7, 0x92, 0x67, 0xef, 0x7e, 0xbe, 0x99, - 0x73, 0x75, 0x97, 0x8e, 0x35, 0xcb, 0xbf, 0x0b, 0x93, 0x3b, 0x3e, 0x11, 0x72, 0xa9, 0x1f, 0xd8, - 0x24, 0x5d, 0xf8, 0x5b, 0xef, 0xee, 0xb8, 0x61, 0x80, 0x0f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, - 0x77, 0x18, 0x20, 0xcd, 0x91, 0x05, 0x00, 0x00, +var file_proto_pbconnect_connect_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_proto_pbconnect_connect_proto_goTypes = []interface{}{ + (*CARoots)(nil), // 0: connect.CARoots + (*CARoot)(nil), // 1: connect.CARoot + (*IssuedCert)(nil), // 2: connect.IssuedCert + (*pbcommon.QueryMeta)(nil), // 3: common.QueryMeta + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp + (*pbcommon.RaftIndex)(nil), // 5: common.RaftIndex + (*pbcommon.EnterpriseMeta)(nil), // 6: common.EnterpriseMeta +} +var file_proto_pbconnect_connect_proto_depIdxs = []int32{ + 1, // 0: connect.CARoots.Roots:type_name -> connect.CARoot + 3, // 1: connect.CARoots.QueryMeta:type_name -> common.QueryMeta + 4, // 2: connect.CARoot.NotBefore:type_name -> google.protobuf.Timestamp + 4, // 3: connect.CARoot.NotAfter:type_name -> google.protobuf.Timestamp + 4, // 4: connect.CARoot.RotatedOutAt:type_name -> google.protobuf.Timestamp + 5, // 5: connect.CARoot.RaftIndex:type_name -> common.RaftIndex + 4, // 6: connect.IssuedCert.ValidAfter:type_name -> google.protobuf.Timestamp + 4, // 7: connect.IssuedCert.ValidBefore:type_name -> google.protobuf.Timestamp + 6, // 8: connect.IssuedCert.EnterpriseMeta:type_name -> common.EnterpriseMeta + 5, // 9: 
connect.IssuedCert.RaftIndex:type_name -> common.RaftIndex + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_proto_pbconnect_connect_proto_init() } +func file_proto_pbconnect_connect_proto_init() { + if File_proto_pbconnect_connect_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbconnect_connect_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CARoots); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconnect_connect_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CARoot); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbconnect_connect_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IssuedCert); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbconnect_connect_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbconnect_connect_proto_goTypes, + DependencyIndexes: file_proto_pbconnect_connect_proto_depIdxs, + MessageInfos: file_proto_pbconnect_connect_proto_msgTypes, + }.Build() + File_proto_pbconnect_connect_proto = out.File + file_proto_pbconnect_connect_proto_rawDesc = nil + file_proto_pbconnect_connect_proto_goTypes = nil + file_proto_pbconnect_connect_proto_depIdxs = nil } diff --git a/proto/pbservice/healthcheck.pb.go b/proto/pbservice/healthcheck.pb.go index 3f5fe637b..a0dbe715c 100644 --- a/proto/pbservice/healthcheck.pb.go +++ b/proto/pbservice/healthcheck.pb.go @@ -1,26 +1,31 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbservice/healthcheck.proto package pbservice import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" pbcommon "github.com/hashicorp/consul/proto/pbcommon" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // HealthCheck represents a single check on a given node // @@ -30,203 +35,219 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // output=healthcheck.gen.go // name=Structs type HealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Node string `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` // mog: func-to=CheckIDType func-from=string CheckID string `protobuf:"bytes,2,opt,name=CheckID,proto3" json:"CheckID,omitempty"` Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` - Status string `protobuf:"bytes,4,opt,name=Status,proto3" json:"Status,omitempty"` - Notes string `protobuf:"bytes,5,opt,name=Notes,proto3" json:"Notes,omitempty"` - Output string `protobuf:"bytes,6,opt,name=Output,proto3" json:"Output,omitempty"` - ServiceID string `protobuf:"bytes,7,opt,name=ServiceID,proto3" json:"ServiceID,omitempty"` - ServiceName string `protobuf:"bytes,8,opt,name=ServiceName,proto3" json:"ServiceName,omitempty"` - ServiceTags []string `protobuf:"bytes,9,rep,name=ServiceTags,proto3" json:"ServiceTags,omitempty"` - Type string `protobuf:"bytes,12,opt,name=Type,proto3" json:"Type,omitempty"` + Status string `protobuf:"bytes,4,opt,name=Status,proto3" json:"Status,omitempty"` // The current check status + Notes string `protobuf:"bytes,5,opt,name=Notes,proto3" json:"Notes,omitempty"` // Additional notes with the status + Output string `protobuf:"bytes,6,opt,name=Output,proto3" json:"Output,omitempty"` // Holds output of script runs + ServiceID string `protobuf:"bytes,7,opt,name=ServiceID,proto3" json:"ServiceID,omitempty"` // optional associated service + ServiceName string `protobuf:"bytes,8,opt,name=ServiceName,proto3" json:"ServiceName,omitempty"` // optional service name + ServiceTags []string `protobuf:"bytes,9,rep,name=ServiceTags,proto3" json:"ServiceTags,omitempty"` // optional service tags + Type string `protobuf:"bytes,12,opt,name=Type,proto3" json:"Type,omitempty"` // Check type: http/ttl/tcp/etc Definition *HealthCheckDefinition `protobuf:"bytes,10,opt,name=Definition,proto3" json:"Definition,omitempty"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,13,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=int func-from=int32 - ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` - Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` - Timeout string `protobuf:"bytes,16,opt,name=Timeout,proto3" json:"Timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` + Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` + Timeout string `protobuf:"bytes,16,opt,name=Timeout,proto3" json:"Timeout,omitempty"` } -func (m *HealthCheck) Reset() { *m = HealthCheck{} 
} -func (m *HealthCheck) String() string { return proto.CompactTextString(m) } -func (*HealthCheck) ProtoMessage() {} +func (x *HealthCheck) Reset() { + *x = HealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck) ProtoMessage() {} + +func (x *HealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead. func (*HealthCheck) Descriptor() ([]byte, []int) { - return fileDescriptor_8a6f7448747c9fbe, []int{0} + return file_proto_pbservice_healthcheck_proto_rawDescGZIP(), []int{0} } -func (m *HealthCheck) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HealthCheck.Unmarshal(m, b) -} -func (m *HealthCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HealthCheck.Marshal(b, m, deterministic) -} -func (m *HealthCheck) XXX_Merge(src proto.Message) { - xxx_messageInfo_HealthCheck.Merge(m, src) -} -func (m *HealthCheck) XXX_Size() int { - return xxx_messageInfo_HealthCheck.Size(m) -} -func (m *HealthCheck) XXX_DiscardUnknown() { - xxx_messageInfo_HealthCheck.DiscardUnknown(m) -} - -var xxx_messageInfo_HealthCheck proto.InternalMessageInfo - -func (m *HealthCheck) GetNode() string { - if m != nil { - return m.Node +func (x *HealthCheck) GetNode() string { + if x != nil { + return x.Node } return "" } -func (m *HealthCheck) GetCheckID() string { - if m != nil { - return m.CheckID +func (x *HealthCheck) GetCheckID() string { + if x != nil { + return x.CheckID } return "" } -func (m *HealthCheck) GetName() string { - if m != nil { - return m.Name +func (x *HealthCheck) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *HealthCheck) GetStatus() string { - if m != nil { - return m.Status +func (x *HealthCheck) GetStatus() string { + if x != nil { + return x.Status } return "" } -func (m *HealthCheck) GetNotes() string { - if m != nil { - return m.Notes +func (x *HealthCheck) GetNotes() string { + if x != nil { + return x.Notes } return "" } -func (m *HealthCheck) GetOutput() string { - if m != nil { - return m.Output +func (x *HealthCheck) GetOutput() string { + if x != nil { + return x.Output } return "" } -func (m *HealthCheck) GetServiceID() string { - if m != nil { - return m.ServiceID +func (x *HealthCheck) GetServiceID() string { + if x != nil { + return x.ServiceID } return "" } -func (m *HealthCheck) GetServiceName() string { - if m != nil { - return m.ServiceName +func (x *HealthCheck) GetServiceName() string { + if x != nil { + return x.ServiceName } return "" } -func (m *HealthCheck) GetServiceTags() []string { - if m != nil { - return m.ServiceTags +func (x *HealthCheck) GetServiceTags() []string { + if x != nil { + return x.ServiceTags } return nil } -func (m *HealthCheck) GetType() string { - if m != nil { - return m.Type +func (x *HealthCheck) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *HealthCheck) GetDefinition() *HealthCheckDefinition { - if m != nil { - return m.Definition +func (x *HealthCheck) GetDefinition() 
*HealthCheckDefinition { + if x != nil { + return x.Definition } return nil } -func (m *HealthCheck) GetRaftIndex() *pbcommon.RaftIndex { - if m != nil { - return m.RaftIndex +func (x *HealthCheck) GetRaftIndex() *pbcommon.RaftIndex { + if x != nil { + return x.RaftIndex } return nil } -func (m *HealthCheck) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { - if m != nil { - return m.EnterpriseMeta +func (x *HealthCheck) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if x != nil { + return x.EnterpriseMeta } return nil } -func (m *HealthCheck) GetExposedPort() int32 { - if m != nil { - return m.ExposedPort +func (x *HealthCheck) GetExposedPort() int32 { + if x != nil { + return x.ExposedPort } return 0 } -func (m *HealthCheck) GetInterval() string { - if m != nil { - return m.Interval +func (x *HealthCheck) GetInterval() string { + if x != nil { + return x.Interval } return "" } -func (m *HealthCheck) GetTimeout() string { - if m != nil { - return m.Timeout +func (x *HealthCheck) GetTimeout() string { + if x != nil { + return x.Timeout } return "" } type HeaderValue struct { - Value []string `protobuf:"bytes,1,rep,name=Value,proto3" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=Value,proto3" json:"Value,omitempty"` } -func (m *HeaderValue) Reset() { *m = HeaderValue{} } -func (m *HeaderValue) String() string { return proto.CompactTextString(m) } -func (*HeaderValue) ProtoMessage() {} +func (x *HeaderValue) Reset() { + *x = HeaderValue{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderValue) ProtoMessage() {} + +func (x *HeaderValue) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead. 
func (*HeaderValue) Descriptor() ([]byte, []int) { - return fileDescriptor_8a6f7448747c9fbe, []int{1} + return file_proto_pbservice_healthcheck_proto_rawDescGZIP(), []int{1} } -func (m *HeaderValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HeaderValue.Unmarshal(m, b) -} -func (m *HeaderValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HeaderValue.Marshal(b, m, deterministic) -} -func (m *HeaderValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_HeaderValue.Merge(m, src) -} -func (m *HeaderValue) XXX_Size() int { - return xxx_messageInfo_HeaderValue.Size(m) -} -func (m *HeaderValue) XXX_DiscardUnknown() { - xxx_messageInfo_HeaderValue.DiscardUnknown(m) -} - -var xxx_messageInfo_HeaderValue proto.InternalMessageInfo - -func (m *HeaderValue) GetValue() []string { - if m != nil { - return m.Value +func (x *HeaderValue) GetValue() []string { + if x != nil { + return x.Value } return nil } @@ -239,6 +260,10 @@ func (m *HeaderValue) GetValue() []string { // output=healthcheck.gen.go // name=Structs type HealthCheckDefinition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + HTTP string `protobuf:"bytes,1,opt,name=HTTP,proto3" json:"HTTP,omitempty"` TLSServerName string `protobuf:"bytes,19,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` TLSSkipVerify bool `protobuf:"varint,2,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` @@ -248,197 +273,201 @@ type HealthCheckDefinition struct { Body string `protobuf:"bytes,18,opt,name=Body,proto3" json:"Body,omitempty"` TCP string `protobuf:"bytes,5,opt,name=TCP,proto3" json:"TCP,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - Interval *duration.Duration `protobuf:"bytes,6,opt,name=Interval,proto3" json:"Interval,omitempty"` + Interval *durationpb.Duration `protobuf:"bytes,6,opt,name=Interval,proto3" json:"Interval,omitempty"` // mog: func-to=uint func-from=uint32 OutputMaxSize uint32 `protobuf:"varint,9,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - Timeout *duration.Duration `protobuf:"bytes,7,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,7,opt,name=Timeout,proto3" json:"Timeout,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - DeregisterCriticalServiceAfter *duration.Duration `protobuf:"bytes,8,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter,omitempty"` - ScriptArgs []string `protobuf:"bytes,10,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` - DockerContainerID string `protobuf:"bytes,11,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` - Shell string `protobuf:"bytes,12,opt,name=Shell,proto3" json:"Shell,omitempty"` - H2PING string `protobuf:"bytes,20,opt,name=H2PING,proto3" json:"H2PING,omitempty"` - H2PingUseTLS bool `protobuf:"varint,21,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` - GRPC string `protobuf:"bytes,13,opt,name=GRPC,proto3" json:"GRPC,omitempty"` - GRPCUseTLS bool `protobuf:"varint,14,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` - AliasNode string `protobuf:"bytes,15,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` - AliasService string `protobuf:"bytes,16,opt,name=AliasService,proto3" json:"AliasService,omitempty"` + DeregisterCriticalServiceAfter 
*durationpb.Duration `protobuf:"bytes,8,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter,omitempty"` + ScriptArgs []string `protobuf:"bytes,10,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` + DockerContainerID string `protobuf:"bytes,11,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` + Shell string `protobuf:"bytes,12,opt,name=Shell,proto3" json:"Shell,omitempty"` + H2PING string `protobuf:"bytes,20,opt,name=H2PING,proto3" json:"H2PING,omitempty"` + H2PingUseTLS bool `protobuf:"varint,21,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` + GRPC string `protobuf:"bytes,13,opt,name=GRPC,proto3" json:"GRPC,omitempty"` + GRPCUseTLS bool `protobuf:"varint,14,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` + AliasNode string `protobuf:"bytes,15,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` + AliasService string `protobuf:"bytes,16,opt,name=AliasService,proto3" json:"AliasService,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - TTL *duration.Duration `protobuf:"bytes,17,opt,name=TTL,proto3" json:"TTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + TTL *durationpb.Duration `protobuf:"bytes,17,opt,name=TTL,proto3" json:"TTL,omitempty"` } -func (m *HealthCheckDefinition) Reset() { *m = HealthCheckDefinition{} } -func (m *HealthCheckDefinition) String() string { return proto.CompactTextString(m) } -func (*HealthCheckDefinition) ProtoMessage() {} +func (x *HealthCheckDefinition) Reset() { + *x = HealthCheckDefinition{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckDefinition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckDefinition) ProtoMessage() {} + +func (x *HealthCheckDefinition) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckDefinition.ProtoReflect.Descriptor instead. 
func (*HealthCheckDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_8a6f7448747c9fbe, []int{2} + return file_proto_pbservice_healthcheck_proto_rawDescGZIP(), []int{2} } -func (m *HealthCheckDefinition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HealthCheckDefinition.Unmarshal(m, b) -} -func (m *HealthCheckDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HealthCheckDefinition.Marshal(b, m, deterministic) -} -func (m *HealthCheckDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_HealthCheckDefinition.Merge(m, src) -} -func (m *HealthCheckDefinition) XXX_Size() int { - return xxx_messageInfo_HealthCheckDefinition.Size(m) -} -func (m *HealthCheckDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_HealthCheckDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_HealthCheckDefinition proto.InternalMessageInfo - -func (m *HealthCheckDefinition) GetHTTP() string { - if m != nil { - return m.HTTP +func (x *HealthCheckDefinition) GetHTTP() string { + if x != nil { + return x.HTTP } return "" } -func (m *HealthCheckDefinition) GetTLSServerName() string { - if m != nil { - return m.TLSServerName +func (x *HealthCheckDefinition) GetTLSServerName() string { + if x != nil { + return x.TLSServerName } return "" } -func (m *HealthCheckDefinition) GetTLSSkipVerify() bool { - if m != nil { - return m.TLSSkipVerify +func (x *HealthCheckDefinition) GetTLSSkipVerify() bool { + if x != nil { + return x.TLSSkipVerify } return false } -func (m *HealthCheckDefinition) GetHeader() map[string]*HeaderValue { - if m != nil { - return m.Header +func (x *HealthCheckDefinition) GetHeader() map[string]*HeaderValue { + if x != nil { + return x.Header } return nil } -func (m *HealthCheckDefinition) GetMethod() string { - if m != nil { - return m.Method +func (x *HealthCheckDefinition) GetMethod() string { + if x != nil { + return x.Method } return "" } -func (m *HealthCheckDefinition) GetBody() string { - if m != nil { - return m.Body +func (x *HealthCheckDefinition) GetBody() string { + if x != nil { + return x.Body } return "" } -func (m *HealthCheckDefinition) GetTCP() string { - if m != nil { - return m.TCP +func (x *HealthCheckDefinition) GetTCP() string { + if x != nil { + return x.TCP } return "" } -func (m *HealthCheckDefinition) GetInterval() *duration.Duration { - if m != nil { - return m.Interval +func (x *HealthCheckDefinition) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval } return nil } -func (m *HealthCheckDefinition) GetOutputMaxSize() uint32 { - if m != nil { - return m.OutputMaxSize +func (x *HealthCheckDefinition) GetOutputMaxSize() uint32 { + if x != nil { + return x.OutputMaxSize } return 0 } -func (m *HealthCheckDefinition) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout +func (x *HealthCheckDefinition) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout } return nil } -func (m *HealthCheckDefinition) GetDeregisterCriticalServiceAfter() *duration.Duration { - if m != nil { - return m.DeregisterCriticalServiceAfter +func (x *HealthCheckDefinition) GetDeregisterCriticalServiceAfter() *durationpb.Duration { + if x != nil { + return x.DeregisterCriticalServiceAfter } return nil } -func (m *HealthCheckDefinition) GetScriptArgs() []string { - if m != nil { - return m.ScriptArgs +func (x *HealthCheckDefinition) GetScriptArgs() []string { + if x != nil { + return x.ScriptArgs } return nil } -func (m *HealthCheckDefinition) GetDockerContainerID() string { - 
if m != nil { - return m.DockerContainerID +func (x *HealthCheckDefinition) GetDockerContainerID() string { + if x != nil { + return x.DockerContainerID } return "" } -func (m *HealthCheckDefinition) GetShell() string { - if m != nil { - return m.Shell +func (x *HealthCheckDefinition) GetShell() string { + if x != nil { + return x.Shell } return "" } -func (m *HealthCheckDefinition) GetH2PING() string { - if m != nil { - return m.H2PING +func (x *HealthCheckDefinition) GetH2PING() string { + if x != nil { + return x.H2PING } return "" } -func (m *HealthCheckDefinition) GetH2PingUseTLS() bool { - if m != nil { - return m.H2PingUseTLS +func (x *HealthCheckDefinition) GetH2PingUseTLS() bool { + if x != nil { + return x.H2PingUseTLS } return false } -func (m *HealthCheckDefinition) GetGRPC() string { - if m != nil { - return m.GRPC +func (x *HealthCheckDefinition) GetGRPC() string { + if x != nil { + return x.GRPC } return "" } -func (m *HealthCheckDefinition) GetGRPCUseTLS() bool { - if m != nil { - return m.GRPCUseTLS +func (x *HealthCheckDefinition) GetGRPCUseTLS() bool { + if x != nil { + return x.GRPCUseTLS } return false } -func (m *HealthCheckDefinition) GetAliasNode() string { - if m != nil { - return m.AliasNode +func (x *HealthCheckDefinition) GetAliasNode() string { + if x != nil { + return x.AliasNode } return "" } -func (m *HealthCheckDefinition) GetAliasService() string { - if m != nil { - return m.AliasService +func (x *HealthCheckDefinition) GetAliasService() string { + if x != nil { + return x.AliasService } return "" } -func (m *HealthCheckDefinition) GetTTL() *duration.Duration { - if m != nil { - return m.TTL +func (x *HealthCheckDefinition) GetTTL() *durationpb.Duration { + if x != nil { + return x.TTL } return nil } @@ -456,6 +485,10 @@ func (m *HealthCheckDefinition) GetTTL() *duration.Duration { // output=healthcheck.gen.go // name=Structs type CheckType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // mog: func-to=CheckIDType func-from=string CheckID string `protobuf:"bytes,1,opt,name=CheckID,proto3" json:"CheckID,omitempty"` Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` @@ -469,21 +502,21 @@ type CheckType struct { Body string `protobuf:"bytes,26,opt,name=Body,proto3" json:"Body,omitempty"` TCP string `protobuf:"bytes,8,opt,name=TCP,proto3" json:"TCP,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - Interval *duration.Duration `protobuf:"bytes,9,opt,name=Interval,proto3" json:"Interval,omitempty"` - AliasNode string `protobuf:"bytes,10,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` - AliasService string `protobuf:"bytes,11,opt,name=AliasService,proto3" json:"AliasService,omitempty"` - DockerContainerID string `protobuf:"bytes,12,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` - Shell string `protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"` - H2PING string `protobuf:"bytes,28,opt,name=H2PING,proto3" json:"H2PING,omitempty"` - H2PingUseTLS bool `protobuf:"varint,30,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` - GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"` - GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` - TLSServerName string `protobuf:"bytes,27,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` - TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" 
json:"TLSSkipVerify,omitempty"` + Interval *durationpb.Duration `protobuf:"bytes,9,opt,name=Interval,proto3" json:"Interval,omitempty"` + AliasNode string `protobuf:"bytes,10,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` + AliasService string `protobuf:"bytes,11,opt,name=AliasService,proto3" json:"AliasService,omitempty"` + DockerContainerID string `protobuf:"bytes,12,opt,name=DockerContainerID,proto3" json:"DockerContainerID,omitempty"` + Shell string `protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"` + H2PING string `protobuf:"bytes,28,opt,name=H2PING,proto3" json:"H2PING,omitempty"` + H2PingUseTLS bool `protobuf:"varint,30,opt,name=H2PingUseTLS,proto3" json:"H2PingUseTLS,omitempty"` + GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"` + GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` + TLSServerName string `protobuf:"bytes,27,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` + TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - Timeout *duration.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3" json:"Timeout,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - TTL *duration.Duration `protobuf:"bytes,18,opt,name=TTL,proto3" json:"TTL,omitempty"` + TTL *durationpb.Duration `protobuf:"bytes,18,opt,name=TTL,proto3" json:"TTL,omitempty"` // mog: func-to=int func-from=int32 SuccessBeforePassing int32 `protobuf:"varint,21,opt,name=SuccessBeforePassing,proto3" json:"SuccessBeforePassing,omitempty"` // mog: func-to=int func-from=int32 @@ -497,325 +530,554 @@ type CheckType struct { // service, if any, to be deregistered if this check is critical for // longer than this duration. 
// mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto - DeregisterCriticalServiceAfter *duration.Duration `protobuf:"bytes,19,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter,omitempty"` + DeregisterCriticalServiceAfter *durationpb.Duration `protobuf:"bytes,19,opt,name=DeregisterCriticalServiceAfter,proto3" json:"DeregisterCriticalServiceAfter,omitempty"` // mog: func-to=int func-from=int32 - OutputMaxSize int32 `protobuf:"varint,25,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + OutputMaxSize int32 `protobuf:"varint,25,opt,name=OutputMaxSize,proto3" json:"OutputMaxSize,omitempty"` } -func (m *CheckType) Reset() { *m = CheckType{} } -func (m *CheckType) String() string { return proto.CompactTextString(m) } -func (*CheckType) ProtoMessage() {} +func (x *CheckType) Reset() { + *x = CheckType{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckType) ProtoMessage() {} + +func (x *CheckType) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_healthcheck_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckType.ProtoReflect.Descriptor instead. func (*CheckType) Descriptor() ([]byte, []int) { - return fileDescriptor_8a6f7448747c9fbe, []int{3} + return file_proto_pbservice_healthcheck_proto_rawDescGZIP(), []int{3} } -func (m *CheckType) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CheckType.Unmarshal(m, b) -} -func (m *CheckType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CheckType.Marshal(b, m, deterministic) -} -func (m *CheckType) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckType.Merge(m, src) -} -func (m *CheckType) XXX_Size() int { - return xxx_messageInfo_CheckType.Size(m) -} -func (m *CheckType) XXX_DiscardUnknown() { - xxx_messageInfo_CheckType.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckType proto.InternalMessageInfo - -func (m *CheckType) GetCheckID() string { - if m != nil { - return m.CheckID +func (x *CheckType) GetCheckID() string { + if x != nil { + return x.CheckID } return "" } -func (m *CheckType) GetName() string { - if m != nil { - return m.Name +func (x *CheckType) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *CheckType) GetStatus() string { - if m != nil { - return m.Status +func (x *CheckType) GetStatus() string { + if x != nil { + return x.Status } return "" } -func (m *CheckType) GetNotes() string { - if m != nil { - return m.Notes +func (x *CheckType) GetNotes() string { + if x != nil { + return x.Notes } return "" } -func (m *CheckType) GetScriptArgs() []string { - if m != nil { - return m.ScriptArgs +func (x *CheckType) GetScriptArgs() []string { + if x != nil { + return x.ScriptArgs } return nil } -func (m *CheckType) GetHTTP() string { - if m != nil { - return m.HTTP +func (x *CheckType) GetHTTP() string { + if x != nil { + return x.HTTP } return "" } -func (m *CheckType) GetHeader() map[string]*HeaderValue { - if m != nil { - return m.Header +func 
(x *CheckType) GetHeader() map[string]*HeaderValue { + if x != nil { + return x.Header } return nil } -func (m *CheckType) GetMethod() string { - if m != nil { - return m.Method +func (x *CheckType) GetMethod() string { + if x != nil { + return x.Method } return "" } -func (m *CheckType) GetBody() string { - if m != nil { - return m.Body +func (x *CheckType) GetBody() string { + if x != nil { + return x.Body } return "" } -func (m *CheckType) GetTCP() string { - if m != nil { - return m.TCP +func (x *CheckType) GetTCP() string { + if x != nil { + return x.TCP } return "" } -func (m *CheckType) GetInterval() *duration.Duration { - if m != nil { - return m.Interval +func (x *CheckType) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval } return nil } -func (m *CheckType) GetAliasNode() string { - if m != nil { - return m.AliasNode +func (x *CheckType) GetAliasNode() string { + if x != nil { + return x.AliasNode } return "" } -func (m *CheckType) GetAliasService() string { - if m != nil { - return m.AliasService +func (x *CheckType) GetAliasService() string { + if x != nil { + return x.AliasService } return "" } -func (m *CheckType) GetDockerContainerID() string { - if m != nil { - return m.DockerContainerID +func (x *CheckType) GetDockerContainerID() string { + if x != nil { + return x.DockerContainerID } return "" } -func (m *CheckType) GetShell() string { - if m != nil { - return m.Shell +func (x *CheckType) GetShell() string { + if x != nil { + return x.Shell } return "" } -func (m *CheckType) GetH2PING() string { - if m != nil { - return m.H2PING +func (x *CheckType) GetH2PING() string { + if x != nil { + return x.H2PING } return "" } -func (m *CheckType) GetH2PingUseTLS() bool { - if m != nil { - return m.H2PingUseTLS +func (x *CheckType) GetH2PingUseTLS() bool { + if x != nil { + return x.H2PingUseTLS } return false } -func (m *CheckType) GetGRPC() string { - if m != nil { - return m.GRPC +func (x *CheckType) GetGRPC() string { + if x != nil { + return x.GRPC } return "" } -func (m *CheckType) GetGRPCUseTLS() bool { - if m != nil { - return m.GRPCUseTLS +func (x *CheckType) GetGRPCUseTLS() bool { + if x != nil { + return x.GRPCUseTLS } return false } -func (m *CheckType) GetTLSServerName() string { - if m != nil { - return m.TLSServerName +func (x *CheckType) GetTLSServerName() string { + if x != nil { + return x.TLSServerName } return "" } -func (m *CheckType) GetTLSSkipVerify() bool { - if m != nil { - return m.TLSSkipVerify +func (x *CheckType) GetTLSSkipVerify() bool { + if x != nil { + return x.TLSSkipVerify } return false } -func (m *CheckType) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout +func (x *CheckType) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout } return nil } -func (m *CheckType) GetTTL() *duration.Duration { - if m != nil { - return m.TTL +func (x *CheckType) GetTTL() *durationpb.Duration { + if x != nil { + return x.TTL } return nil } -func (m *CheckType) GetSuccessBeforePassing() int32 { - if m != nil { - return m.SuccessBeforePassing +func (x *CheckType) GetSuccessBeforePassing() int32 { + if x != nil { + return x.SuccessBeforePassing } return 0 } -func (m *CheckType) GetFailuresBeforeWarning() int32 { - if m != nil { - return m.FailuresBeforeWarning +func (x *CheckType) GetFailuresBeforeWarning() int32 { + if x != nil { + return x.FailuresBeforeWarning } return 0 } -func (m *CheckType) GetFailuresBeforeCritical() int32 { - if m != nil { - return m.FailuresBeforeCritical +func (x *CheckType) 
GetFailuresBeforeCritical() int32 { + if x != nil { + return x.FailuresBeforeCritical } return 0 } -func (m *CheckType) GetProxyHTTP() string { - if m != nil { - return m.ProxyHTTP +func (x *CheckType) GetProxyHTTP() string { + if x != nil { + return x.ProxyHTTP } return "" } -func (m *CheckType) GetProxyGRPC() string { - if m != nil { - return m.ProxyGRPC +func (x *CheckType) GetProxyGRPC() string { + if x != nil { + return x.ProxyGRPC } return "" } -func (m *CheckType) GetDeregisterCriticalServiceAfter() *duration.Duration { - if m != nil { - return m.DeregisterCriticalServiceAfter +func (x *CheckType) GetDeregisterCriticalServiceAfter() *durationpb.Duration { + if x != nil { + return x.DeregisterCriticalServiceAfter } return nil } -func (m *CheckType) GetOutputMaxSize() int32 { - if m != nil { - return m.OutputMaxSize +func (x *CheckType) GetOutputMaxSize() int32 { + if x != nil { + return x.OutputMaxSize } return 0 } -func init() { - proto.RegisterType((*HealthCheck)(nil), "pbservice.HealthCheck") - proto.RegisterType((*HeaderValue)(nil), "pbservice.HeaderValue") - proto.RegisterType((*HealthCheckDefinition)(nil), "pbservice.HealthCheckDefinition") - proto.RegisterMapType((map[string]*HeaderValue)(nil), "pbservice.HealthCheckDefinition.HeaderEntry") - proto.RegisterType((*CheckType)(nil), "pbservice.CheckType") - proto.RegisterMapType((map[string]*HeaderValue)(nil), "pbservice.CheckType.HeaderEntry") +var File_proto_pbservice_healthcheck_proto protoreflect.FileDescriptor + +var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x04, 0x0a, 0x0b, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x4e, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x44, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 
0x20, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, + 0x67, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x54, 0x61, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0a, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x09, 0x52, + 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x3e, 0x0a, 0x0e, + 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x20, 0x0a, 0x0b, + 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0b, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x22, 0x23, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x86, 0x07, 0x0a, 0x15, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, + 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x12, 0x44, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, 0x64, 
0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, + 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, + 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, + 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, + 0x65, 0x6c, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, + 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, + 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, + 0x47, 0x52, 0x50, 0x43, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, + 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, + 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, + 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, + 0x51, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xa8, 0x09, 0x0a, 0x09, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, + 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x48, 0x54, 0x54, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, + 0x12, 0x38, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, + 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, + 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, + 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, + 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, + 
0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, + 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x1e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, + 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, + 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, + 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, + 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, + 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, + 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, + 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, + 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, + 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, + 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, + 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, + 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, + 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, + 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x43, 0x72, 0x69, 0x74, 
0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, + 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x51, 0x0a, 0x0b, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, + 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) +var ( + file_proto_pbservice_healthcheck_proto_rawDescOnce sync.Once + file_proto_pbservice_healthcheck_proto_rawDescData = file_proto_pbservice_healthcheck_proto_rawDesc +) + +func file_proto_pbservice_healthcheck_proto_rawDescGZIP() []byte { + file_proto_pbservice_healthcheck_proto_rawDescOnce.Do(func() { + file_proto_pbservice_healthcheck_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbservice_healthcheck_proto_rawDescData) + }) + return file_proto_pbservice_healthcheck_proto_rawDescData } -var fileDescriptor_8a6f7448747c9fbe = []byte{ - // 994 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xd1, 0x6e, 0x22, 0x37, - 0x14, 0x15, 0x21, 0x40, 0xc6, 0x84, 0x6c, 0xe2, 0x4d, 0x52, 0x2f, 0xbb, 0x8d, 0x28, 0xdd, 0x07, - 0xa4, 0x4d, 0x41, 0x62, 0xdb, 0x6a, 0xd5, 0x87, 0xaa, 0x09, 0xa4, 0x1b, 0xa4, 0x24, 0xa5, 0xc3, - 0x74, 0x2b, 0xf5, 0xcd, 0x19, 0x0c, 0x58, 0x19, 0xc6, 0x23, 0x8f, 0x27, 0x0a, 0xfd, 0x80, 0x7e, - 0x42, 0xbf, 0xa1, 0x9f, 0x59, 0xf9, 0x7a, 0x06, 0x66, 0x96, 0x49, 0x88, 0x54, 0xf5, 0x09, 0xdf, - 0x7b, 0xae, 0x8d, 0x7d, 0xef, 0x39, 0x07, 0xd0, 0x57, 0x81, 0x14, 0x4a, 0x74, 0x82, 0xdb, 0x90, - 0xc9, 0x7b, 0xee, 0xb2, 0xce, 0x8c, 0x51, 0x4f, 0xcd, 0xdc, 0x19, 0x73, 0xef, 0xda, 0x80, 0x61, - 0x6b, 0x09, 0xd6, 0x4f, 0xa6, 0x42, 0x4c, 0x3d, 0xd6, 0x01, 0xe0, 0x36, 0x9a, 0x74, 0xc6, 0x91, - 0xa4, 0x8a, 0x0b, 0xdf, 0x94, 0xd6, 0x5f, 0x27, 0xa7, 0xb9, 0x62, 0x3e, 0x17, 0x7e, 0xc7, 0x7c, - 0x18, 0xb0, 0xf9, 0xf7, 0x36, 0xaa, 0x5e, 0xc2, 0xe9, 0x3d, 0x7d, 0x3a, 0xc6, 0x68, 0xfb, 0x46, - 0x8c, 0x19, 0x29, 0x34, 0x0a, 0x2d, 0xcb, 0x86, 0x35, 0x26, 0xa8, 0x02, 0xe0, 0xa0, 0x4f, 0xb6, - 0x20, 0x9d, 0x84, 0x50, 0x4d, 0xe7, 0x8c, 0x14, 0xe3, 0x6a, 0x3a, 0x67, 0xf8, 0x18, 0x95, 0x47, - 0x8a, 0xaa, 0x28, 0x24, 0xdb, 0x90, 0x8d, 0x23, 0x7c, 0x88, 0x4a, 0x37, 0x42, 0xb1, 0x90, 0x94, - 0x20, 0x6d, 0x02, 0x5d, 0xfd, 0x4b, 0xa4, 0x82, 0x48, 0x91, 0xb2, 0xa9, 0x36, 0x11, 0x7e, 0x83, - 0xac, 0x91, 0x79, 0xdf, 0xa0, 0x4f, 0x2a, 0x00, 0xad, 0x12, 0xb8, 0x81, 0xaa, 0x71, 0x00, 0x5f, - 0xbf, 0x03, 0x78, 0x3a, 0x95, 0xaa, 0x70, 0xe8, 0x34, 0x24, 0x56, 0xa3, 0x98, 0xaa, 0xd0, 0x29, - 0x7d, 0x77, 0x67, 0x11, 0x30, 0xb2, 0x6b, 0xee, 0xae, 0xd7, 0xf8, 0x27, 0x84, 0xfa, 0x6c, 0xc2, 
- 0x7d, 0xae, 0xdb, 0x47, 0x50, 0xa3, 0xd0, 0xaa, 0x76, 0x1b, 0xed, 0x65, 0xab, 0xdb, 0xa9, 0x4e, - 0xad, 0xea, 0xec, 0xd4, 0x1e, 0xdc, 0x41, 0x96, 0x4d, 0x27, 0x6a, 0xe0, 0x8f, 0xd9, 0x03, 0xa9, - 0xc2, 0x01, 0x07, 0xed, 0xb8, 0xe3, 0x4b, 0xc0, 0x5e, 0xd5, 0xe0, 0x1f, 0xd1, 0xde, 0x85, 0xaf, - 0x98, 0x0c, 0x24, 0x0f, 0xd9, 0x35, 0x53, 0x94, 0xd4, 0x60, 0xd7, 0x71, 0xb2, 0x2b, 0x8b, 0xda, - 0x9f, 0x55, 0xeb, 0x87, 0x5e, 0x3c, 0x04, 0x22, 0x64, 0xe3, 0xa1, 0x90, 0x8a, 0xec, 0x35, 0x0a, - 0xad, 0x92, 0x9d, 0x4e, 0xe1, 0x3a, 0xda, 0x19, 0xe8, 0x3d, 0xf7, 0xd4, 0x23, 0x2f, 0xe0, 0xb1, - 0xcb, 0x58, 0x8f, 0xd6, 0xe1, 0x73, 0x26, 0x22, 0x45, 0xf6, 0xcd, 0x68, 0xe3, 0xb0, 0xf9, 0x35, - 0xf0, 0x62, 0xcc, 0xe4, 0x27, 0xea, 0x45, 0x4c, 0x4f, 0x0f, 0x16, 0xa4, 0x00, 0x9d, 0x34, 0x41, - 0xf3, 0xaf, 0x0a, 0x3a, 0xca, 0xed, 0x89, 0xee, 0xee, 0xa5, 0xe3, 0x0c, 0x13, 0x1e, 0xe9, 0x35, - 0x7e, 0x8b, 0x6a, 0xce, 0xd5, 0x48, 0xcf, 0x80, 0x49, 0x98, 0xdb, 0x4b, 0x00, 0xb3, 0xc9, 0xa4, - 0xea, 0x8e, 0x07, 0x9f, 0x98, 0xe4, 0x93, 0x05, 0x70, 0x6e, 0xc7, 0xce, 0x26, 0x71, 0x1f, 0x95, - 0xcd, 0xf5, 0x48, 0xb1, 0x51, 0x6c, 0x55, 0xbb, 0xa7, 0x9b, 0xa6, 0xd4, 0x36, 0xe5, 0x17, 0xbe, - 0x92, 0x0b, 0x3b, 0xde, 0xab, 0xd9, 0x77, 0xcd, 0xd4, 0x4c, 0x8c, 0x13, 0xae, 0x9a, 0x48, 0xdf, - 0xfe, 0x5c, 0x8c, 0x17, 0x04, 0x9b, 0xdb, 0xeb, 0x35, 0xde, 0x47, 0x45, 0xa7, 0x37, 0x8c, 0xd9, - 0xab, 0x97, 0xf8, 0xbb, 0x54, 0x63, 0xcb, 0x30, 0xb4, 0x57, 0x6d, 0xa3, 0xc5, 0x76, 0xa2, 0xc5, - 0x76, 0x3f, 0xd6, 0x62, 0xaa, 0xe7, 0x6f, 0x51, 0xcd, 0x90, 0xfc, 0x9a, 0x3e, 0x8c, 0xf8, 0x9f, - 0x8c, 0x58, 0x8d, 0x42, 0xab, 0x66, 0x67, 0x93, 0xf8, 0xfd, 0x6a, 0x32, 0x95, 0x4d, 0x67, 0x27, - 0x95, 0x98, 0xa2, 0x93, 0x3e, 0x93, 0x6c, 0xca, 0x43, 0xc5, 0x64, 0x4f, 0x72, 0xc5, 0x5d, 0xea, - 0xc5, 0xa4, 0x3f, 0x9b, 0x28, 0x26, 0x41, 0x2a, 0x4f, 0x9e, 0xb5, 0xe1, 0x00, 0x7c, 0x82, 0xd0, - 0xc8, 0x95, 0x3c, 0x50, 0x67, 0x72, 0x1a, 0x12, 0x04, 0x6c, 0x48, 0x65, 0xf0, 0x29, 0x3a, 0xe8, - 0x0b, 0xf7, 0x8e, 0xc9, 0x9e, 0xf0, 0x15, 0xe5, 0x3e, 0x93, 0x83, 0x3e, 0x08, 0xc1, 0xb2, 0xd7, - 0x01, 0x4d, 0xab, 0xd1, 0x8c, 0x79, 0x5e, 0xac, 0x42, 0x13, 0xe8, 0xb1, 0x5c, 0x76, 0x87, 0x83, - 0x9b, 0x8f, 0xe4, 0xd0, 0x8c, 0xc5, 0x44, 0xb8, 0x89, 0x76, 0x2f, 0xbb, 0x43, 0xee, 0x4f, 0x7f, - 0x0b, 0x99, 0x73, 0x35, 0x22, 0x47, 0xc0, 0x8c, 0x4c, 0x4e, 0x8f, 0xee, 0xa3, 0x3d, 0xec, 0x81, - 0x8a, 0x2c, 0x1b, 0xd6, 0xfa, 0xce, 0xfa, 0x33, 0xde, 0xb5, 0x07, 0xbb, 0x52, 0x19, 0x6d, 0x36, - 0x67, 0x1e, 0xa7, 0x21, 0x38, 0x9f, 0x91, 0xc8, 0x2a, 0xa1, 0xbf, 0x15, 0x82, 0xb8, 0x0d, 0xb1, - 0x50, 0x32, 0x39, 0xfc, 0x0e, 0x15, 0x1d, 0xe7, 0x8a, 0x1c, 0x6c, 0xea, 0xae, 0xae, 0xaa, 0xff, - 0x9a, 0x48, 0x0b, 0xc8, 0xa8, 0x89, 0x75, 0xc7, 0x16, 0xb1, 0x52, 0xf4, 0x12, 0x9f, 0xa2, 0xd2, - 0x3d, 0x88, 0x6d, 0x2b, 0xb6, 0x82, 0x0c, 0xb7, 0x13, 0x4d, 0xda, 0xa6, 0xe8, 0x87, 0xad, 0x0f, - 0x85, 0xe6, 0x3f, 0x16, 0xb2, 0x80, 0xf0, 0x60, 0x63, 0x29, 0xc3, 0x2e, 0xe4, 0x1b, 0xf6, 0x56, - 0xae, 0x61, 0x17, 0xf3, 0x0d, 0x7b, 0x3b, 0x6d, 0xd8, 0xd9, 0xf9, 0x97, 0xd6, 0xe6, 0x9f, 0x08, - 0xbf, 0x9c, 0x12, 0xfe, 0x87, 0xa5, 0x58, 0x0f, 0x41, 0xac, 0x69, 0x4b, 0x5d, 0xde, 0x7a, 0x83, - 0x40, 0x2b, 0xb9, 0x02, 0xad, 0xaf, 0x0b, 0x74, 0x27, 0x5f, 0xa0, 0xd6, 0xf3, 0x05, 0x9a, 0xa1, - 0x03, 0xda, 0x44, 0x87, 0x6a, 0x0e, 0x1d, 0x72, 0x45, 0xb0, 0xbb, 0x51, 0x04, 0xb5, 0x7c, 0x11, - 0xbc, 0x79, 0x52, 0x04, 0x27, 0x4f, 0x88, 0x60, 0xef, 0x51, 0x11, 0xbc, 0x58, 0x13, 0xc1, 0x9a, - 0x3b, 0xbf, 0x7e, 0x96, 0x3b, 0xef, 0xe7, 0xb9, 0x73, 0xca, 0xbc, 0x0e, 0x9e, 0x6d, 0x5e, 0xb1, - 0x86, 0xf0, 0x73, 0x34, 
0x84, 0xbb, 0xe8, 0x70, 0x14, 0xb9, 0x2e, 0x0b, 0xc3, 0x73, 0x36, 0x11, - 0x92, 0x0d, 0x69, 0x18, 0x72, 0x7f, 0x0a, 0x96, 0x50, 0xb2, 0x73, 0x31, 0xfc, 0x2d, 0x3a, 0xfa, - 0x99, 0x72, 0x2f, 0x92, 0x2c, 0x06, 0x7e, 0xa7, 0xd2, 0xd7, 0x9b, 0xbe, 0x84, 0x4d, 0xf9, 0x20, - 0xfe, 0x1e, 0x1d, 0x67, 0x81, 0xc4, 0x16, 0xc9, 0x31, 0x6c, 0x7b, 0x04, 0xd5, 0x2c, 0x1a, 0x4a, - 0xf1, 0xb0, 0x00, 0x35, 0x7c, 0x61, 0x58, 0xb4, 0x4c, 0x2c, 0x51, 0x18, 0x13, 0x49, 0xa1, 0x30, - 0xab, 0xcd, 0x3e, 0xfe, 0xf2, 0xbf, 0xfa, 0xf8, 0xda, 0xaf, 0xd0, 0x2b, 0x78, 0x4d, 0x36, 0xf9, - 0x3f, 0x58, 0xd5, 0xf9, 0x37, 0x7f, 0xbc, 0x9b, 0x72, 0x35, 0x8b, 0x6e, 0xf5, 0x1f, 0x9c, 0xce, - 0x8c, 0x86, 0x33, 0xee, 0x0a, 0x19, 0x74, 0x5c, 0xe1, 0x87, 0x91, 0xd7, 0xf9, 0xec, 0xaf, 0xef, - 0x6d, 0x19, 0x12, 0xef, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x33, 0x6c, 0x46, 0x9d, 0x14, 0x0b, - 0x00, 0x00, +var file_proto_pbservice_healthcheck_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_proto_pbservice_healthcheck_proto_goTypes = []interface{}{ + (*HealthCheck)(nil), // 0: pbservice.HealthCheck + (*HeaderValue)(nil), // 1: pbservice.HeaderValue + (*HealthCheckDefinition)(nil), // 2: pbservice.HealthCheckDefinition + (*CheckType)(nil), // 3: pbservice.CheckType + nil, // 4: pbservice.HealthCheckDefinition.HeaderEntry + nil, // 5: pbservice.CheckType.HeaderEntry + (*pbcommon.RaftIndex)(nil), // 6: common.RaftIndex + (*pbcommon.EnterpriseMeta)(nil), // 7: common.EnterpriseMeta + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration +} +var file_proto_pbservice_healthcheck_proto_depIdxs = []int32{ + 2, // 0: pbservice.HealthCheck.Definition:type_name -> pbservice.HealthCheckDefinition + 6, // 1: pbservice.HealthCheck.RaftIndex:type_name -> common.RaftIndex + 7, // 2: pbservice.HealthCheck.EnterpriseMeta:type_name -> common.EnterpriseMeta + 4, // 3: pbservice.HealthCheckDefinition.Header:type_name -> pbservice.HealthCheckDefinition.HeaderEntry + 8, // 4: pbservice.HealthCheckDefinition.Interval:type_name -> google.protobuf.Duration + 8, // 5: pbservice.HealthCheckDefinition.Timeout:type_name -> google.protobuf.Duration + 8, // 6: pbservice.HealthCheckDefinition.DeregisterCriticalServiceAfter:type_name -> google.protobuf.Duration + 8, // 7: pbservice.HealthCheckDefinition.TTL:type_name -> google.protobuf.Duration + 5, // 8: pbservice.CheckType.Header:type_name -> pbservice.CheckType.HeaderEntry + 8, // 9: pbservice.CheckType.Interval:type_name -> google.protobuf.Duration + 8, // 10: pbservice.CheckType.Timeout:type_name -> google.protobuf.Duration + 8, // 11: pbservice.CheckType.TTL:type_name -> google.protobuf.Duration + 8, // 12: pbservice.CheckType.DeregisterCriticalServiceAfter:type_name -> google.protobuf.Duration + 1, // 13: pbservice.HealthCheckDefinition.HeaderEntry.value:type_name -> pbservice.HeaderValue + 1, // 14: pbservice.CheckType.HeaderEntry.value:type_name -> pbservice.HeaderValue + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_proto_pbservice_healthcheck_proto_init() } +func file_proto_pbservice_healthcheck_proto_init() { + if File_proto_pbservice_healthcheck_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbservice_healthcheck_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_healthcheck_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_healthcheck_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckDefinition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_healthcheck_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbservice_healthcheck_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbservice_healthcheck_proto_goTypes, + DependencyIndexes: file_proto_pbservice_healthcheck_proto_depIdxs, + MessageInfos: file_proto_pbservice_healthcheck_proto_msgTypes, + }.Build() + File_proto_pbservice_healthcheck_proto = out.File + file_proto_pbservice_healthcheck_proto_rawDesc = nil + file_proto_pbservice_healthcheck_proto_goTypes = nil + file_proto_pbservice_healthcheck_proto_depIdxs = nil } diff --git a/proto/pbservice/ids_test.go b/proto/pbservice/ids_test.go index 2d534e902..09d459aaf 100644 --- a/proto/pbservice/ids_test.go +++ b/proto/pbservice/ids_test.go @@ -11,17 +11,17 @@ import ( func TestCheckServiceNode_UniqueID(t *testing.T) { type testCase struct { name string - csn CheckServiceNode + csn *CheckServiceNode expected string } - fn := func(t *testing.T, tc testCase) { + fn := func(t *testing.T, tc *testCase) { require.Equal(t, tc.expected, tc.csn.UniqueID()) } var testCases = []testCase{ { name: "full", - csn: CheckServiceNode{ + csn: &CheckServiceNode{ Node: &Node{Node: "the-node-name"}, Service: &NodeService{ ID: "the-service-id", @@ -32,7 +32,7 @@ func TestCheckServiceNode_UniqueID(t *testing.T) { }, { name: "without node", - csn: CheckServiceNode{ + csn: &CheckServiceNode{ Service: &NodeService{ ID: "the-service-id", EnterpriseMeta: &pbcommon.EnterpriseMeta{Namespace: "the-namespace"}, @@ -42,14 +42,14 @@ func TestCheckServiceNode_UniqueID(t *testing.T) { }, { name: "without service", - csn: CheckServiceNode{ + csn: &CheckServiceNode{ Node: &Node{Node: "the-node-name"}, }, expected: "/the-node-name/", }, { name: "without namespace", - csn: CheckServiceNode{ + csn: &CheckServiceNode{ Node: &Node{Node: "the-node-name"}, Service: &NodeService{ ID: "the-service-id", @@ -60,7 +60,7 @@ func TestCheckServiceNode_UniqueID(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - fn(t, tc) + fn(t, &tc) }) } diff --git a/proto/pbservice/node.pb.go b/proto/pbservice/node.pb.go index 556f37d57..44340c9aa 100644 --- a/proto/pbservice/node.pb.go +++ b/proto/pbservice/node.pb.go @@ -1,79 +1,92 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbservice/node.proto package pbservice import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" pbcommon "github.com/hashicorp/consul/proto/pbcommon" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. type CheckServiceNode struct { - Node *Node `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` - Service *NodeService `protobuf:"bytes,2,opt,name=Service,proto3" json:"Service,omitempty"` - Checks []*HealthCheck `protobuf:"bytes,3,rep,name=Checks,proto3" json:"Checks,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Node *Node `protobuf:"bytes,1,opt,name=Node,proto3" json:"Node,omitempty"` + Service *NodeService `protobuf:"bytes,2,opt,name=Service,proto3" json:"Service,omitempty"` + Checks []*HealthCheck `protobuf:"bytes,3,rep,name=Checks,proto3" json:"Checks,omitempty"` } -func (m *CheckServiceNode) Reset() { *m = CheckServiceNode{} } -func (m *CheckServiceNode) String() string { return proto.CompactTextString(m) } -func (*CheckServiceNode) ProtoMessage() {} +func (x *CheckServiceNode) Reset() { + *x = CheckServiceNode{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckServiceNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckServiceNode) ProtoMessage() {} + +func (x *CheckServiceNode) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckServiceNode.ProtoReflect.Descriptor instead. 
func (*CheckServiceNode) Descriptor() ([]byte, []int) { - return fileDescriptor_bbc215b78fa95fe5, []int{0} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{0} } -func (m *CheckServiceNode) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CheckServiceNode.Unmarshal(m, b) -} -func (m *CheckServiceNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CheckServiceNode.Marshal(b, m, deterministic) -} -func (m *CheckServiceNode) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckServiceNode.Merge(m, src) -} -func (m *CheckServiceNode) XXX_Size() int { - return xxx_messageInfo_CheckServiceNode.Size(m) -} -func (m *CheckServiceNode) XXX_DiscardUnknown() { - xxx_messageInfo_CheckServiceNode.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckServiceNode proto.InternalMessageInfo - -func (m *CheckServiceNode) GetNode() *Node { - if m != nil { - return m.Node +func (x *CheckServiceNode) GetNode() *Node { + if x != nil { + return x.Node } return nil } -func (m *CheckServiceNode) GetService() *NodeService { - if m != nil { - return m.Service +func (x *CheckServiceNode) GetService() *NodeService { + if x != nil { + return x.Service } return nil } -func (m *CheckServiceNode) GetChecks() []*HealthCheck { - if m != nil { - return m.Checks +func (x *CheckServiceNode) GetChecks() []*HealthCheck { + if x != nil { + return x.Checks } return nil } @@ -86,6 +99,10 @@ func (m *CheckServiceNode) GetChecks() []*HealthCheck { // output=node.gen.go // name=Structs type Node struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // mog: func-to=NodeIDType func-from=string ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` @@ -95,89 +112,93 @@ type Node struct { TaggedAddresses map[string]string `protobuf:"bytes,5,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,7,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,7,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` } -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} +func (x *Node) Reset() { + *x = Node{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_node_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Node) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Node) ProtoMessage() {} + +func (x *Node) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_node_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Node.ProtoReflect.Descriptor instead. 
func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_bbc215b78fa95fe5, []int{1} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{1} } -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetID() string { - if m != nil { - return m.ID +func (x *Node) GetID() string { + if x != nil { + return x.ID } return "" } -func (m *Node) GetNode() string { - if m != nil { - return m.Node +func (x *Node) GetNode() string { + if x != nil { + return x.Node } return "" } -func (m *Node) GetPartition() string { - if m != nil { - return m.Partition +func (x *Node) GetPartition() string { + if x != nil { + return x.Partition } return "" } -func (m *Node) GetAddress() string { - if m != nil { - return m.Address +func (x *Node) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Node) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *Node) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } -func (m *Node) GetTaggedAddresses() map[string]string { - if m != nil { - return m.TaggedAddresses +func (x *Node) GetTaggedAddresses() map[string]string { + if x != nil { + return x.TaggedAddresses } return nil } -func (m *Node) GetMeta() map[string]string { - if m != nil { - return m.Meta +func (x *Node) GetMeta() map[string]string { + if x != nil { + return x.Meta } return nil } -func (m *Node) GetRaftIndex() *pbcommon.RaftIndex { - if m != nil { - return m.RaftIndex +func (x *Node) GetRaftIndex() *pbcommon.RaftIndex { + if x != nil { + return x.RaftIndex } return nil } @@ -190,6 +211,10 @@ func (m *Node) GetRaftIndex() *pbcommon.RaftIndex { // output=node.gen.go // name=Structs type NodeService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Kind is the kind of service this is. Different kinds of services may // have differing validation, DNS behavior, etc. An empty kind will default // to the Default kind. See ServiceKind for the full list of kinds. 
@@ -243,204 +268,372 @@ type NodeService struct { // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs - RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` } -func (m *NodeService) Reset() { *m = NodeService{} } -func (m *NodeService) String() string { return proto.CompactTextString(m) } -func (*NodeService) ProtoMessage() {} +func (x *NodeService) Reset() { + *x = NodeService{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_node_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeService) ProtoMessage() {} + +func (x *NodeService) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_node_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeService.ProtoReflect.Descriptor instead. func (*NodeService) Descriptor() ([]byte, []int) { - return fileDescriptor_bbc215b78fa95fe5, []int{2} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{2} } -func (m *NodeService) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeService.Unmarshal(m, b) -} -func (m *NodeService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeService.Marshal(b, m, deterministic) -} -func (m *NodeService) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeService.Merge(m, src) -} -func (m *NodeService) XXX_Size() int { - return xxx_messageInfo_NodeService.Size(m) -} -func (m *NodeService) XXX_DiscardUnknown() { - xxx_messageInfo_NodeService.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeService proto.InternalMessageInfo - -func (m *NodeService) GetKind() string { - if m != nil { - return m.Kind +func (x *NodeService) GetKind() string { + if x != nil { + return x.Kind } return "" } -func (m *NodeService) GetID() string { - if m != nil { - return m.ID +func (x *NodeService) GetID() string { + if x != nil { + return x.ID } return "" } -func (m *NodeService) GetService() string { - if m != nil { - return m.Service +func (x *NodeService) GetService() string { + if x != nil { + return x.Service } return "" } -func (m *NodeService) GetTags() []string { - if m != nil { - return m.Tags +func (x *NodeService) GetTags() []string { + if x != nil { + return x.Tags } return nil } -func (m *NodeService) GetAddress() string { - if m != nil { - return m.Address +func (x *NodeService) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *NodeService) GetTaggedAddresses() map[string]*ServiceAddress { - if m != nil { - return m.TaggedAddresses +func (x *NodeService) GetTaggedAddresses() map[string]*ServiceAddress { + if x != nil { + return x.TaggedAddresses } return nil } -func (m *NodeService) GetMeta() map[string]string { - if m != nil { - return m.Meta +func (x *NodeService) GetMeta() 
map[string]string { + if x != nil { + return x.Meta } return nil } -func (m *NodeService) GetPort() int32 { - if m != nil { - return m.Port +func (x *NodeService) GetPort() int32 { + if x != nil { + return x.Port } return 0 } -func (m *NodeService) GetSocketPath() string { - if m != nil { - return m.SocketPath +func (x *NodeService) GetSocketPath() string { + if x != nil { + return x.SocketPath } return "" } -func (m *NodeService) GetWeights() *Weights { - if m != nil { - return m.Weights +func (x *NodeService) GetWeights() *Weights { + if x != nil { + return x.Weights } return nil } -func (m *NodeService) GetEnableTagOverride() bool { - if m != nil { - return m.EnableTagOverride +func (x *NodeService) GetEnableTagOverride() bool { + if x != nil { + return x.EnableTagOverride } return false } -func (m *NodeService) GetProxy() *ConnectProxyConfig { - if m != nil { - return m.Proxy +func (x *NodeService) GetProxy() *ConnectProxyConfig { + if x != nil { + return x.Proxy } return nil } -func (m *NodeService) GetConnect() *ServiceConnect { - if m != nil { - return m.Connect +func (x *NodeService) GetConnect() *ServiceConnect { + if x != nil { + return x.Connect } return nil } -func (m *NodeService) GetLocallyRegisteredAsSidecar() bool { - if m != nil { - return m.LocallyRegisteredAsSidecar +func (x *NodeService) GetLocallyRegisteredAsSidecar() bool { + if x != nil { + return x.LocallyRegisteredAsSidecar } return false } -func (m *NodeService) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { - if m != nil { - return m.EnterpriseMeta +func (x *NodeService) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if x != nil { + return x.EnterpriseMeta } return nil } -func (m *NodeService) GetRaftIndex() *pbcommon.RaftIndex { - if m != nil { - return m.RaftIndex +func (x *NodeService) GetRaftIndex() *pbcommon.RaftIndex { + if x != nil { + return x.RaftIndex } return nil } -func init() { - proto.RegisterType((*CheckServiceNode)(nil), "pbservice.CheckServiceNode") - proto.RegisterType((*Node)(nil), "pbservice.Node") - proto.RegisterMapType((map[string]string)(nil), "pbservice.Node.MetaEntry") - proto.RegisterMapType((map[string]string)(nil), "pbservice.Node.TaggedAddressesEntry") - proto.RegisterType((*NodeService)(nil), "pbservice.NodeService") - proto.RegisterMapType((map[string]string)(nil), "pbservice.NodeService.MetaEntry") - proto.RegisterMapType((map[string]*ServiceAddress)(nil), "pbservice.NodeService.TaggedAddressesEntry") +var File_proto_pbservice_node_proto protoreflect.FileDescriptor + +var file_proto_pbservice_node_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x70, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, + 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x99, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x04, 
0x4e, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x30, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x22, 0xaf, 0x03, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, + 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc9, 0x06, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 
0x65, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x55, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x2c, 0x0a, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x52, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x12, + 0x2c, 0x0a, 0x11, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x33, 0x0a, + 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, 0x1a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, 0x53, 0x69, + 0x64, 0x65, 0x63, 0x61, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, + 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x3e, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 
0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, + 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x5d, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, + 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbservice/node.proto", fileDescriptor_bbc215b78fa95fe5) +var ( + file_proto_pbservice_node_proto_rawDescOnce sync.Once + file_proto_pbservice_node_proto_rawDescData = file_proto_pbservice_node_proto_rawDesc +) + +func file_proto_pbservice_node_proto_rawDescGZIP() []byte { + file_proto_pbservice_node_proto_rawDescOnce.Do(func() { + file_proto_pbservice_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbservice_node_proto_rawDescData) + }) + return file_proto_pbservice_node_proto_rawDescData } -var fileDescriptor_bbc215b78fa95fe5 = []byte{ - // 646 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdb, 0x6e, 0xd3, 0x40, - 0x10, 0x55, 0xee, 0xcd, 0x04, 0x7a, 0x59, 0x55, 0x68, 0x09, 0x14, 0x85, 0xc2, 0x43, 0xa5, 0xb6, - 0x31, 0x6a, 0x91, 0x40, 0x3c, 0x54, 0xea, 0x4d, 0xa2, 0x02, 0x4a, 0xb4, 0x2d, 0x42, 0x42, 0xe2, - 0x61, 0x63, 0x4f, 0x6d, 0xab, 0xa9, 0x37, 0x5a, 0x6f, 0xab, 0xe6, 0x53, 0xf8, 0x0a, 0xbe, 0x83, - 0xbf, 0x42, 0x3b, 0xde, 0x24, 0x8e, 0x5b, 0x50, 0x90, 0x78, 0xf2, 0x7a, 0xce, 0x39, 0xb3, 0xe3, - 0x39, 0x33, 0x86, 0xf6, 0x50, 0x2b, 0xa3, 0xbc, 0x61, 0x3f, 0x45, 0x7d, 0x13, 0xfb, 0xe8, 0x25, - 0x2a, 0xc0, 0x2e, 0x05, 0x59, 0x73, 0x12, 0x6d, 0x3f, 0x19, 0xd3, 0x7c, 0x75, 0x75, 0xa5, 0x12, - 0x2f, 0x7b, 0x64, 0xbc, 0xf6, 0xf3, 0x62, 0x8e, 0x08, 0xe5, 0xc0, 0x44, 0x7e, 0x84, 0xfe, 0xa5, - 0xa3, 0xac, 0x15, 0x29, 0xee, 0x99, 0xc1, 0xeb, 0x3f, 0x4a, 0xb0, 0x7c, 0x68, 0xe9, 0x67, 0x59, - 0xf8, 0x54, 0x05, 0xc8, 0x5e, 0x40, 0xd5, 0x3e, 0x79, 0xa9, 0x53, 0xda, 0x68, 0xed, 0x2c, 0x75, - 0x27, 0xe2, 0xae, 0x0d, 0x0b, 0x02, 0xd9, 0x2b, 0x68, 0x38, 
0x0d, 0x2f, 0x13, 0xef, 0x51, 0x81, - 0xe7, 0x50, 0x31, 0xa6, 0xb1, 0x2e, 0xd4, 0xe9, 0xaa, 0x94, 0x57, 0x3a, 0x95, 0x82, 0xe0, 0x3d, - 0x15, 0x4e, 0xb0, 0x70, 0xac, 0xf5, 0x9f, 0x95, 0xac, 0x0e, 0xb6, 0x08, 0xe5, 0x93, 0x23, 0xaa, - 0xa6, 0x29, 0xca, 0x27, 0x47, 0x8c, 0xb9, 0xfa, 0xca, 0x14, 0xc9, 0x38, 0x4f, 0xa1, 0xd9, 0x93, - 0xda, 0xc4, 0x26, 0x56, 0x09, 0x5f, 0x20, 0x60, 0x1a, 0x60, 0x1c, 0x1a, 0xfb, 0x41, 0xa0, 0x31, - 0xb5, 0x77, 0x5b, 0x6c, 0xfc, 0xca, 0x9e, 0x01, 0x1c, 0x49, 0x23, 0x7d, 0x4c, 0x0c, 0x6a, 0x5e, - 0x25, 0x30, 0x17, 0x61, 0xa7, 0xb0, 0x74, 0x2e, 0xc3, 0x10, 0x03, 0x27, 0xc0, 0x94, 0xd7, 0xa8, - 0xfa, 0x97, 0x85, 0xcf, 0xed, 0x16, 0x68, 0xc7, 0x89, 0xd1, 0x23, 0x51, 0x14, 0xb3, 0x6d, 0xa8, - 0x7e, 0x42, 0x23, 0x79, 0x9d, 0x92, 0x3c, 0x2e, 0x26, 0xb1, 0x58, 0xa6, 0x24, 0x1a, 0xf3, 0xa0, - 0x29, 0xe4, 0x85, 0x39, 0x49, 0x02, 0xbc, 0xe5, 0x0d, 0xea, 0xf3, 0x4a, 0xd7, 0xcd, 0xc0, 0x04, - 0x10, 0x53, 0x4e, 0xfb, 0x00, 0x56, 0xef, 0x2b, 0x84, 0x2d, 0x43, 0xe5, 0x12, 0x47, 0xae, 0x89, - 0xf6, 0xc8, 0x56, 0xa1, 0x76, 0x23, 0x07, 0xd7, 0xe3, 0x36, 0x66, 0x2f, 0xef, 0xca, 0x6f, 0x4b, - 0xed, 0x37, 0xd0, 0x9c, 0xd4, 0xf1, 0x2f, 0xc2, 0xf5, 0x5f, 0x75, 0x68, 0xe5, 0xac, 0xb7, 0x46, - 0x7d, 0x88, 0x93, 0xc0, 0x89, 0xe9, 0xec, 0xcc, 0x2c, 0x4f, 0xcc, 0xe4, 0xd3, 0x39, 0x72, 0xd6, - 0xe4, 0xd4, 0xe7, 0x32, 0x4c, 0x79, 0xb5, 0x53, 0xb1, 0x6a, 0x7b, 0xce, 0x1b, 0x59, 0x9b, 0x35, - 0xf2, 0xcb, 0x5d, 0xa3, 0x96, 0xa8, 0xc7, 0x9b, 0xf7, 0xcf, 0xe5, 0x9c, 0x7e, 0xbd, 0x9e, 0xf1, - 0xab, 0xf3, 0x87, 0x5c, 0x45, 0xdb, 0x18, 0x54, 0x7b, 0x4a, 0x1b, 0x72, 0xac, 0x26, 0xe8, 0x6c, - 0x27, 0xed, 0x4c, 0xf9, 0x97, 0x68, 0x7a, 0xd2, 0x44, 0x7c, 0x25, 0x9b, 0xb4, 0x69, 0x84, 0x6d, - 0x41, 0xe3, 0x2b, 0xc6, 0x61, 0x64, 0x52, 0x9a, 0xdf, 0xd6, 0x0e, 0xcb, 0x5d, 0xe6, 0x10, 0x31, - 0xa6, 0xb0, 0x2d, 0x58, 0x39, 0x4e, 0x64, 0x7f, 0x80, 0xe7, 0x32, 0xfc, 0x7c, 0x83, 0x5a, 0xc7, - 0x01, 0xf2, 0x66, 0xa7, 0xb4, 0xb1, 0x20, 0xee, 0x02, 0x6c, 0x17, 0x6a, 0x3d, 0xad, 0x6e, 0x47, - 0xbc, 0x45, 0x99, 0xd7, 0x72, 0x99, 0x0f, 0x55, 0x92, 0xa0, 0x6f, 0x08, 0x3e, 0x54, 0xc9, 0x45, - 0x1c, 0x8a, 0x8c, 0xcb, 0x76, 0xa1, 0xe1, 0x40, 0xfe, 0x80, 0x64, 0xf9, 0x69, 0x75, 0x5f, 0xee, - 0x08, 0x62, 0xcc, 0x64, 0x7b, 0xd0, 0xfe, 0xa8, 0x7c, 0x39, 0x18, 0x8c, 0x04, 0x86, 0x71, 0x6a, - 0x50, 0x63, 0xb0, 0x9f, 0x9e, 0xc5, 0x01, 0xfa, 0x52, 0xf3, 0x87, 0x54, 0xe0, 0x5f, 0x18, 0x6c, - 0x0f, 0x16, 0x8f, 0xed, 0xe2, 0x0d, 0x75, 0x9c, 0x22, 0x75, 0x7e, 0xd9, 0xfd, 0x5d, 0xdc, 0xd4, - 0xcf, 0xa2, 0xa2, 0xc0, 0x9e, 0x5d, 0x98, 0xc5, 0x39, 0x16, 0xe6, 0xfb, 0xdc, 0x0b, 0xe3, 0xe5, - 0xe7, 0xfe, 0xde, 0x6e, 0xb8, 0x14, 0xff, 0x63, 0x97, 0x0e, 0xb6, 0xbf, 0x6d, 0x86, 0xb1, 0x89, - 0xae, 0xfb, 0xb6, 0x7a, 0x2f, 0x92, 0x69, 0x14, 0xfb, 0x4a, 0x0f, 0x3d, 0x5f, 0x25, 0xe9, 0xf5, - 0xc0, 0x2b, 0xfc, 0xd6, 0xfb, 0x75, 0x0a, 0xec, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x69, - 0xf0, 0xb9, 0x57, 0x06, 0x00, 0x00, +var file_proto_pbservice_node_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_proto_pbservice_node_proto_goTypes = []interface{}{ + (*CheckServiceNode)(nil), // 0: pbservice.CheckServiceNode + (*Node)(nil), // 1: pbservice.Node + (*NodeService)(nil), // 2: pbservice.NodeService + nil, // 3: pbservice.Node.TaggedAddressesEntry + nil, // 4: pbservice.Node.MetaEntry + nil, // 5: pbservice.NodeService.TaggedAddressesEntry + nil, // 6: pbservice.NodeService.MetaEntry + (*HealthCheck)(nil), // 7: pbservice.HealthCheck + (*pbcommon.RaftIndex)(nil), // 8: common.RaftIndex + (*Weights)(nil), // 9: pbservice.Weights + 
(*ConnectProxyConfig)(nil), // 10: pbservice.ConnectProxyConfig + (*ServiceConnect)(nil), // 11: pbservice.ServiceConnect + (*pbcommon.EnterpriseMeta)(nil), // 12: common.EnterpriseMeta + (*ServiceAddress)(nil), // 13: pbservice.ServiceAddress +} +var file_proto_pbservice_node_proto_depIdxs = []int32{ + 1, // 0: pbservice.CheckServiceNode.Node:type_name -> pbservice.Node + 2, // 1: pbservice.CheckServiceNode.Service:type_name -> pbservice.NodeService + 7, // 2: pbservice.CheckServiceNode.Checks:type_name -> pbservice.HealthCheck + 3, // 3: pbservice.Node.TaggedAddresses:type_name -> pbservice.Node.TaggedAddressesEntry + 4, // 4: pbservice.Node.Meta:type_name -> pbservice.Node.MetaEntry + 8, // 5: pbservice.Node.RaftIndex:type_name -> common.RaftIndex + 5, // 6: pbservice.NodeService.TaggedAddresses:type_name -> pbservice.NodeService.TaggedAddressesEntry + 6, // 7: pbservice.NodeService.Meta:type_name -> pbservice.NodeService.MetaEntry + 9, // 8: pbservice.NodeService.Weights:type_name -> pbservice.Weights + 10, // 9: pbservice.NodeService.Proxy:type_name -> pbservice.ConnectProxyConfig + 11, // 10: pbservice.NodeService.Connect:type_name -> pbservice.ServiceConnect + 12, // 11: pbservice.NodeService.EnterpriseMeta:type_name -> common.EnterpriseMeta + 8, // 12: pbservice.NodeService.RaftIndex:type_name -> common.RaftIndex + 13, // 13: pbservice.NodeService.TaggedAddressesEntry.value:type_name -> pbservice.ServiceAddress + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_proto_pbservice_node_proto_init() } +func file_proto_pbservice_node_proto_init() { + if File_proto_pbservice_node_proto != nil { + return + } + file_proto_pbservice_healthcheck_proto_init() + file_proto_pbservice_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_proto_pbservice_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckServiceNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbservice_node_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbservice_node_proto_goTypes, + DependencyIndexes: file_proto_pbservice_node_proto_depIdxs, + MessageInfos: file_proto_pbservice_node_proto_msgTypes, + }.Build() + File_proto_pbservice_node_proto = out.File + file_proto_pbservice_node_proto_rawDesc = nil + file_proto_pbservice_node_proto_goTypes = nil + file_proto_pbservice_node_proto_depIdxs = nil } diff --git a/proto/pbservice/service.pb.go b/proto/pbservice/service.pb.go index b71a0a387..5749c3928 100644 --- 
a/proto/pbservice/service.pb.go +++ b/proto/pbservice/service.pb.go @@ -1,26 +1,31 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbservice/service.proto package pbservice import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - _struct "github.com/golang/protobuf/ptypes/struct" pbcommon "github.com/hashicorp/consul/proto/pbcommon" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // ConnectProxyConfig describes the configuration needed for any proxy managed // or unmanaged. It describes a single logical service's listener and optionally @@ -34,6 +39,10 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // output=service.gen.go // name=Structs type ConnectProxyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // DestinationServiceName is required and is the name of the service to accept // traffic for. DestinationServiceName string `protobuf:"bytes,1,opt,name=DestinationServiceName,proto3" json:"DestinationServiceName,omitempty"` @@ -58,7 +67,7 @@ type ConnectProxyConfig struct { // Config is the arbitrary configuration data provided with the proxy // registration. // mog: func-to=ProtobufTypesStructToMapStringInterface func-from=MapStringInterfaceToProtobufTypesStruct - Config *_struct.Struct `protobuf:"bytes,5,opt,name=Config,proto3" json:"Config,omitempty"` + Config *structpb.Struct `protobuf:"bytes,5,opt,name=Config,proto3" json:"Config,omitempty"` // Upstreams describes any upstream dependencies the proxy instance should // setup. // mog: func-to=UpstreamsToStructs func-from=NewUpstreamsFromStructs @@ -74,110 +83,114 @@ type ConnectProxyConfig struct { // transparent mode. 
TransparentProxy *TransparentProxyConfig `protobuf:"bytes,10,opt,name=TransparentProxy,proto3" json:"TransparentProxy,omitempty"` // LocalServiceSocketPath is the path to the unix domain socket for the local service instance - LocalServiceSocketPath string `protobuf:"bytes,11,opt,name=LocalServiceSocketPath,proto3" json:"LocalServiceSocketPath,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LocalServiceSocketPath string `protobuf:"bytes,11,opt,name=LocalServiceSocketPath,proto3" json:"LocalServiceSocketPath,omitempty"` } -func (m *ConnectProxyConfig) Reset() { *m = ConnectProxyConfig{} } -func (m *ConnectProxyConfig) String() string { return proto.CompactTextString(m) } -func (*ConnectProxyConfig) ProtoMessage() {} +func (x *ConnectProxyConfig) Reset() { + *x = ConnectProxyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectProxyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectProxyConfig) ProtoMessage() {} + +func (x *ConnectProxyConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectProxyConfig.ProtoReflect.Descriptor instead. func (*ConnectProxyConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{0} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{0} } -func (m *ConnectProxyConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectProxyConfig.Unmarshal(m, b) -} -func (m *ConnectProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectProxyConfig.Marshal(b, m, deterministic) -} -func (m *ConnectProxyConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectProxyConfig.Merge(m, src) -} -func (m *ConnectProxyConfig) XXX_Size() int { - return xxx_messageInfo_ConnectProxyConfig.Size(m) -} -func (m *ConnectProxyConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectProxyConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectProxyConfig proto.InternalMessageInfo - -func (m *ConnectProxyConfig) GetDestinationServiceName() string { - if m != nil { - return m.DestinationServiceName +func (x *ConnectProxyConfig) GetDestinationServiceName() string { + if x != nil { + return x.DestinationServiceName } return "" } -func (m *ConnectProxyConfig) GetDestinationServiceID() string { - if m != nil { - return m.DestinationServiceID +func (x *ConnectProxyConfig) GetDestinationServiceID() string { + if x != nil { + return x.DestinationServiceID } return "" } -func (m *ConnectProxyConfig) GetLocalServiceAddress() string { - if m != nil { - return m.LocalServiceAddress +func (x *ConnectProxyConfig) GetLocalServiceAddress() string { + if x != nil { + return x.LocalServiceAddress } return "" } -func (m *ConnectProxyConfig) GetLocalServicePort() int32 { - if m != nil { - return m.LocalServicePort +func (x *ConnectProxyConfig) GetLocalServicePort() int32 { + if x != nil { + return x.LocalServicePort } return 0 } -func (m *ConnectProxyConfig) GetConfig() *_struct.Struct { - if m != nil { - return m.Config +func (x *ConnectProxyConfig) GetConfig() 
*structpb.Struct { + if x != nil { + return x.Config } return nil } -func (m *ConnectProxyConfig) GetUpstreams() []*Upstream { - if m != nil { - return m.Upstreams +func (x *ConnectProxyConfig) GetUpstreams() []*Upstream { + if x != nil { + return x.Upstreams } return nil } -func (m *ConnectProxyConfig) GetMeshGateway() *MeshGatewayConfig { - if m != nil { - return m.MeshGateway +func (x *ConnectProxyConfig) GetMeshGateway() *MeshGatewayConfig { + if x != nil { + return x.MeshGateway } return nil } -func (m *ConnectProxyConfig) GetExpose() *ExposeConfig { - if m != nil { - return m.Expose +func (x *ConnectProxyConfig) GetExpose() *ExposeConfig { + if x != nil { + return x.Expose } return nil } -func (m *ConnectProxyConfig) GetMode() string { - if m != nil { - return m.Mode +func (x *ConnectProxyConfig) GetMode() string { + if x != nil { + return x.Mode } return "" } -func (m *ConnectProxyConfig) GetTransparentProxy() *TransparentProxyConfig { - if m != nil { - return m.TransparentProxy +func (x *ConnectProxyConfig) GetTransparentProxy() *TransparentProxyConfig { + if x != nil { + return x.TransparentProxy } return nil } -func (m *ConnectProxyConfig) GetLocalServiceSocketPath() string { - if m != nil { - return m.LocalServiceSocketPath +func (x *ConnectProxyConfig) GetLocalServiceSocketPath() string { + if x != nil { + return x.LocalServiceSocketPath } return "" } @@ -194,6 +207,10 @@ func (m *ConnectProxyConfig) GetLocalServiceSocketPath() string { // name=Structs // ignore-fields=IngressHosts type Upstream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Destination fields are the required ones for determining what this upstream // points to. Depending on DestinationType some other fields below might // further restrict the set of instances allowable. @@ -219,126 +236,130 @@ type Upstream struct { // It can be used to pass arbitrary configuration for this specific upstream // to the proxy. // mog: func-to=ProtobufTypesStructToMapStringInterface func-from=MapStringInterfaceToProtobufTypesStruct - Config *_struct.Struct `protobuf:"bytes,7,opt,name=Config,proto3" json:"Config,omitempty"` + Config *structpb.Struct `protobuf:"bytes,7,opt,name=Config,proto3" json:"Config,omitempty"` // MeshGateway is the configuration for mesh gateway usage of this upstream MeshGateway *MeshGatewayConfig `protobuf:"bytes,8,opt,name=MeshGateway,proto3" json:"MeshGateway,omitempty"` // CentrallyConfigured indicates whether the upstream was defined in a proxy // instance registration or whether it was generated from a config entry. 
CentrallyConfigured bool `protobuf:"varint,9,opt,name=CentrallyConfigured,proto3" json:"CentrallyConfigured,omitempty"` // LocalBindSocketPath is the socket to create to connect to the upstream service - LocalBindSocketPath string `protobuf:"bytes,10,opt,name=LocalBindSocketPath,proto3" json:"LocalBindSocketPath,omitempty"` - LocalBindSocketMode string `protobuf:"bytes,11,opt,name=LocalBindSocketMode,proto3" json:"LocalBindSocketMode,omitempty"` - DestinationPartition string `protobuf:"bytes,12,opt,name=DestinationPartition,proto3" json:"DestinationPartition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LocalBindSocketPath string `protobuf:"bytes,10,opt,name=LocalBindSocketPath,proto3" json:"LocalBindSocketPath,omitempty"` + LocalBindSocketMode string `protobuf:"bytes,11,opt,name=LocalBindSocketMode,proto3" json:"LocalBindSocketMode,omitempty"` + DestinationPartition string `protobuf:"bytes,12,opt,name=DestinationPartition,proto3" json:"DestinationPartition,omitempty"` } -func (m *Upstream) Reset() { *m = Upstream{} } -func (m *Upstream) String() string { return proto.CompactTextString(m) } -func (*Upstream) ProtoMessage() {} +func (x *Upstream) Reset() { + *x = Upstream{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Upstream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Upstream) ProtoMessage() {} + +func (x *Upstream) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Upstream.ProtoReflect.Descriptor instead. 
func (*Upstream) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{1} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{1} } -func (m *Upstream) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Upstream.Unmarshal(m, b) -} -func (m *Upstream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Upstream.Marshal(b, m, deterministic) -} -func (m *Upstream) XXX_Merge(src proto.Message) { - xxx_messageInfo_Upstream.Merge(m, src) -} -func (m *Upstream) XXX_Size() int { - return xxx_messageInfo_Upstream.Size(m) -} -func (m *Upstream) XXX_DiscardUnknown() { - xxx_messageInfo_Upstream.DiscardUnknown(m) -} - -var xxx_messageInfo_Upstream proto.InternalMessageInfo - -func (m *Upstream) GetDestinationType() string { - if m != nil { - return m.DestinationType +func (x *Upstream) GetDestinationType() string { + if x != nil { + return x.DestinationType } return "" } -func (m *Upstream) GetDestinationNamespace() string { - if m != nil { - return m.DestinationNamespace +func (x *Upstream) GetDestinationNamespace() string { + if x != nil { + return x.DestinationNamespace } return "" } -func (m *Upstream) GetDestinationName() string { - if m != nil { - return m.DestinationName +func (x *Upstream) GetDestinationName() string { + if x != nil { + return x.DestinationName } return "" } -func (m *Upstream) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *Upstream) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } -func (m *Upstream) GetLocalBindAddress() string { - if m != nil { - return m.LocalBindAddress +func (x *Upstream) GetLocalBindAddress() string { + if x != nil { + return x.LocalBindAddress } return "" } -func (m *Upstream) GetLocalBindPort() int32 { - if m != nil { - return m.LocalBindPort +func (x *Upstream) GetLocalBindPort() int32 { + if x != nil { + return x.LocalBindPort } return 0 } -func (m *Upstream) GetConfig() *_struct.Struct { - if m != nil { - return m.Config +func (x *Upstream) GetConfig() *structpb.Struct { + if x != nil { + return x.Config } return nil } -func (m *Upstream) GetMeshGateway() *MeshGatewayConfig { - if m != nil { - return m.MeshGateway +func (x *Upstream) GetMeshGateway() *MeshGatewayConfig { + if x != nil { + return x.MeshGateway } return nil } -func (m *Upstream) GetCentrallyConfigured() bool { - if m != nil { - return m.CentrallyConfigured +func (x *Upstream) GetCentrallyConfigured() bool { + if x != nil { + return x.CentrallyConfigured } return false } -func (m *Upstream) GetLocalBindSocketPath() string { - if m != nil { - return m.LocalBindSocketPath +func (x *Upstream) GetLocalBindSocketPath() string { + if x != nil { + return x.LocalBindSocketPath } return "" } -func (m *Upstream) GetLocalBindSocketMode() string { - if m != nil { - return m.LocalBindSocketMode +func (x *Upstream) GetLocalBindSocketMode() string { + if x != nil { + return x.LocalBindSocketMode } return "" } -func (m *Upstream) GetDestinationPartition() string { - if m != nil { - return m.DestinationPartition +func (x *Upstream) GetDestinationPartition() string { + if x != nil { + return x.DestinationPartition } return "" } @@ -351,6 +372,10 @@ func (m *Upstream) GetDestinationPartition() string { // output=service.gen.go // name=Structs type ServiceConnect struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Native is true when this service can natively understand Connect. 
Native bool `protobuf:"varint,1,opt,name=Native,proto3" json:"Native,omitempty"` // SidecarService is a nested Service Definition to register at the same time. @@ -361,47 +386,51 @@ type ServiceConnect struct { // result is identical to just making a second service registration via any // other means. // mog: func-to=ServiceDefinitionPtrToStructs func-from=NewServiceDefinitionPtrFromStructs - SidecarService *ServiceDefinition `protobuf:"bytes,3,opt,name=SidecarService,proto3" json:"SidecarService,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SidecarService *ServiceDefinition `protobuf:"bytes,3,opt,name=SidecarService,proto3" json:"SidecarService,omitempty"` } -func (m *ServiceConnect) Reset() { *m = ServiceConnect{} } -func (m *ServiceConnect) String() string { return proto.CompactTextString(m) } -func (*ServiceConnect) ProtoMessage() {} +func (x *ServiceConnect) Reset() { + *x = ServiceConnect{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceConnect) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceConnect) ProtoMessage() {} + +func (x *ServiceConnect) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceConnect.ProtoReflect.Descriptor instead. func (*ServiceConnect) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{2} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{2} } -func (m *ServiceConnect) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceConnect.Unmarshal(m, b) -} -func (m *ServiceConnect) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceConnect.Marshal(b, m, deterministic) -} -func (m *ServiceConnect) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceConnect.Merge(m, src) -} -func (m *ServiceConnect) XXX_Size() int { - return xxx_messageInfo_ServiceConnect.Size(m) -} -func (m *ServiceConnect) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceConnect.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceConnect proto.InternalMessageInfo - -func (m *ServiceConnect) GetNative() bool { - if m != nil { - return m.Native +func (x *ServiceConnect) GetNative() bool { + if x != nil { + return x.Native } return false } -func (m *ServiceConnect) GetSidecarService() *ServiceDefinition { - if m != nil { - return m.SidecarService +func (x *ServiceConnect) GetSidecarService() *ServiceDefinition { + if x != nil { + return x.SidecarService } return nil } @@ -415,52 +444,60 @@ func (m *ServiceConnect) GetSidecarService() *ServiceDefinition { // output=service.gen.go // name=Structs type ExposeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Checks defines whether paths associated with Consul checks will be exposed. // This flag triggers exposing all HTTP and GRPC check paths registered for the service. Checks bool `protobuf:"varint,1,opt,name=Checks,proto3" json:"Checks,omitempty"` // Paths is the list of paths exposed through the proxy. 
// mog: func-to=ExposePathSliceToStructs func-from=NewExposePathSliceFromStructs - Paths []*ExposePath `protobuf:"bytes,2,rep,name=Paths,proto3" json:"Paths,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Paths []*ExposePath `protobuf:"bytes,2,rep,name=Paths,proto3" json:"Paths,omitempty"` } -func (m *ExposeConfig) Reset() { *m = ExposeConfig{} } -func (m *ExposeConfig) String() string { return proto.CompactTextString(m) } -func (*ExposeConfig) ProtoMessage() {} +func (x *ExposeConfig) Reset() { + *x = ExposeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExposeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposeConfig) ProtoMessage() {} + +func (x *ExposeConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposeConfig.ProtoReflect.Descriptor instead. func (*ExposeConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{3} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{3} } -func (m *ExposeConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExposeConfig.Unmarshal(m, b) -} -func (m *ExposeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExposeConfig.Marshal(b, m, deterministic) -} -func (m *ExposeConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExposeConfig.Merge(m, src) -} -func (m *ExposeConfig) XXX_Size() int { - return xxx_messageInfo_ExposeConfig.Size(m) -} -func (m *ExposeConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ExposeConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ExposeConfig proto.InternalMessageInfo - -func (m *ExposeConfig) GetChecks() bool { - if m != nil { - return m.Checks +func (x *ExposeConfig) GetChecks() bool { + if x != nil { + return x.Checks } return false } -func (m *ExposeConfig) GetPaths() []*ExposePath { - if m != nil { - return m.Paths +func (x *ExposeConfig) GetPaths() []*ExposePath { + if x != nil { + return x.Paths } return nil } @@ -471,6 +508,10 @@ func (m *ExposeConfig) GetPaths() []*ExposePath { // output=service.gen.go // name=Structs type ExposePath struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ListenerPort defines the port of the proxy's listener for exposed paths. 
// mog: func-to=int func-from=int32 ListenerPort int32 `protobuf:"varint,1,opt,name=ListenerPort,proto3" json:"ListenerPort,omitempty"` @@ -483,68 +524,72 @@ type ExposePath struct { // Valid values are "http" and "http2", defaults to "http" Protocol string `protobuf:"bytes,4,opt,name=Protocol,proto3" json:"Protocol,omitempty"` // ParsedFromCheck is set if this path was parsed from a registered check - ParsedFromCheck bool `protobuf:"varint,5,opt,name=ParsedFromCheck,proto3" json:"ParsedFromCheck,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ParsedFromCheck bool `protobuf:"varint,5,opt,name=ParsedFromCheck,proto3" json:"ParsedFromCheck,omitempty"` } -func (m *ExposePath) Reset() { *m = ExposePath{} } -func (m *ExposePath) String() string { return proto.CompactTextString(m) } -func (*ExposePath) ProtoMessage() {} +func (x *ExposePath) Reset() { + *x = ExposePath{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExposePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposePath) ProtoMessage() {} + +func (x *ExposePath) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposePath.ProtoReflect.Descriptor instead. func (*ExposePath) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{4} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{4} } -func (m *ExposePath) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExposePath.Unmarshal(m, b) -} -func (m *ExposePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExposePath.Marshal(b, m, deterministic) -} -func (m *ExposePath) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExposePath.Merge(m, src) -} -func (m *ExposePath) XXX_Size() int { - return xxx_messageInfo_ExposePath.Size(m) -} -func (m *ExposePath) XXX_DiscardUnknown() { - xxx_messageInfo_ExposePath.DiscardUnknown(m) -} - -var xxx_messageInfo_ExposePath proto.InternalMessageInfo - -func (m *ExposePath) GetListenerPort() int32 { - if m != nil { - return m.ListenerPort +func (x *ExposePath) GetListenerPort() int32 { + if x != nil { + return x.ListenerPort } return 0 } -func (m *ExposePath) GetPath() string { - if m != nil { - return m.Path +func (x *ExposePath) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *ExposePath) GetLocalPathPort() int32 { - if m != nil { - return m.LocalPathPort +func (x *ExposePath) GetLocalPathPort() int32 { + if x != nil { + return x.LocalPathPort } return 0 } -func (m *ExposePath) GetProtocol() string { - if m != nil { - return m.Protocol +func (x *ExposePath) GetProtocol() string { + if x != nil { + return x.Protocol } return "" } -func (m *ExposePath) GetParsedFromCheck() bool { - if m != nil { - return m.ParsedFromCheck +func (x *ExposePath) GetParsedFromCheck() bool { + if x != nil { + return x.ParsedFromCheck } return false } @@ -555,41 +600,49 @@ func (m *ExposePath) GetParsedFromCheck() bool { // output=service.gen.go // name=Structs type MeshGatewayConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + 
unknownFields protoimpl.UnknownFields + // mog: func-to=structs.MeshGatewayMode func-from=string - Mode string `protobuf:"bytes,1,opt,name=Mode,proto3" json:"Mode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Mode string `protobuf:"bytes,1,opt,name=Mode,proto3" json:"Mode,omitempty"` } -func (m *MeshGatewayConfig) Reset() { *m = MeshGatewayConfig{} } -func (m *MeshGatewayConfig) String() string { return proto.CompactTextString(m) } -func (*MeshGatewayConfig) ProtoMessage() {} +func (x *MeshGatewayConfig) Reset() { + *x = MeshGatewayConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MeshGatewayConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MeshGatewayConfig) ProtoMessage() {} + +func (x *MeshGatewayConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MeshGatewayConfig.ProtoReflect.Descriptor instead. func (*MeshGatewayConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{5} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{5} } -func (m *MeshGatewayConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MeshGatewayConfig.Unmarshal(m, b) -} -func (m *MeshGatewayConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MeshGatewayConfig.Marshal(b, m, deterministic) -} -func (m *MeshGatewayConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_MeshGatewayConfig.Merge(m, src) -} -func (m *MeshGatewayConfig) XXX_Size() int { - return xxx_messageInfo_MeshGatewayConfig.Size(m) -} -func (m *MeshGatewayConfig) XXX_DiscardUnknown() { - xxx_messageInfo_MeshGatewayConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_MeshGatewayConfig proto.InternalMessageInfo - -func (m *MeshGatewayConfig) GetMode() string { - if m != nil { - return m.Mode +func (x *MeshGatewayConfig) GetMode() string { + if x != nil { + return x.Mode } return "" } @@ -600,52 +653,60 @@ func (m *MeshGatewayConfig) GetMode() string { // output=service.gen.go // name=Structs type TransparentProxyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // mog: func-to=int func-from=int32 OutboundListenerPort int32 `protobuf:"varint,1,opt,name=OutboundListenerPort,proto3" json:"OutboundListenerPort,omitempty"` // DialedDirectly indicates whether transparent proxies can dial this proxy instance directly. // The discovery chain is not considered when dialing a service instance directly. // This setting is useful when addressing stateful services, such as a database cluster with a leader node. 
- DialedDirectly bool `protobuf:"varint,2,opt,name=DialedDirectly,proto3" json:"DialedDirectly,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + DialedDirectly bool `protobuf:"varint,2,opt,name=DialedDirectly,proto3" json:"DialedDirectly,omitempty"` } -func (m *TransparentProxyConfig) Reset() { *m = TransparentProxyConfig{} } -func (m *TransparentProxyConfig) String() string { return proto.CompactTextString(m) } -func (*TransparentProxyConfig) ProtoMessage() {} +func (x *TransparentProxyConfig) Reset() { + *x = TransparentProxyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransparentProxyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparentProxyConfig) ProtoMessage() {} + +func (x *TransparentProxyConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparentProxyConfig.ProtoReflect.Descriptor instead. func (*TransparentProxyConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{6} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{6} } -func (m *TransparentProxyConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TransparentProxyConfig.Unmarshal(m, b) -} -func (m *TransparentProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TransparentProxyConfig.Marshal(b, m, deterministic) -} -func (m *TransparentProxyConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransparentProxyConfig.Merge(m, src) -} -func (m *TransparentProxyConfig) XXX_Size() int { - return xxx_messageInfo_TransparentProxyConfig.Size(m) -} -func (m *TransparentProxyConfig) XXX_DiscardUnknown() { - xxx_messageInfo_TransparentProxyConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_TransparentProxyConfig proto.InternalMessageInfo - -func (m *TransparentProxyConfig) GetOutboundListenerPort() int32 { - if m != nil { - return m.OutboundListenerPort +func (x *TransparentProxyConfig) GetOutboundListenerPort() int32 { + if x != nil { + return x.OutboundListenerPort } return 0 } -func (m *TransparentProxyConfig) GetDialedDirectly() bool { - if m != nil { - return m.DialedDirectly +func (x *TransparentProxyConfig) GetDialedDirectly() bool { + if x != nil { + return x.DialedDirectly } return false } @@ -659,6 +720,10 @@ func (m *TransparentProxyConfig) GetDialedDirectly() bool { // output=service.gen.go // name=Structs type ServiceDefinition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // mog: func-to=structs.ServiceKind func-from=string Kind string `protobuf:"bytes,1,opt,name=Kind,proto3" json:"Kind,omitempty"` ID string `protobuf:"bytes,2,opt,name=ID,proto3" json:"ID,omitempty"` @@ -694,342 +759,660 @@ type ServiceDefinition struct { // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,17,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` // mog: func-to=ServiceConnectPtrToStructs func-from=NewServiceConnectPtrFromStructs - 
Connect *ServiceConnect `protobuf:"bytes,15,opt,name=Connect,proto3" json:"Connect,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Connect *ServiceConnect `protobuf:"bytes,15,opt,name=Connect,proto3" json:"Connect,omitempty"` } -func (m *ServiceDefinition) Reset() { *m = ServiceDefinition{} } -func (m *ServiceDefinition) String() string { return proto.CompactTextString(m) } -func (*ServiceDefinition) ProtoMessage() {} +func (x *ServiceDefinition) Reset() { + *x = ServiceDefinition{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDefinition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDefinition) ProtoMessage() {} + +func (x *ServiceDefinition) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDefinition.ProtoReflect.Descriptor instead. func (*ServiceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{7} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{7} } -func (m *ServiceDefinition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDefinition.Unmarshal(m, b) -} -func (m *ServiceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDefinition.Marshal(b, m, deterministic) -} -func (m *ServiceDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDefinition.Merge(m, src) -} -func (m *ServiceDefinition) XXX_Size() int { - return xxx_messageInfo_ServiceDefinition.Size(m) -} -func (m *ServiceDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDefinition proto.InternalMessageInfo - -func (m *ServiceDefinition) GetKind() string { - if m != nil { - return m.Kind +func (x *ServiceDefinition) GetKind() string { + if x != nil { + return x.Kind } return "" } -func (m *ServiceDefinition) GetID() string { - if m != nil { - return m.ID +func (x *ServiceDefinition) GetID() string { + if x != nil { + return x.ID } return "" } -func (m *ServiceDefinition) GetName() string { - if m != nil { - return m.Name +func (x *ServiceDefinition) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *ServiceDefinition) GetTags() []string { - if m != nil { - return m.Tags +func (x *ServiceDefinition) GetTags() []string { + if x != nil { + return x.Tags } return nil } -func (m *ServiceDefinition) GetAddress() string { - if m != nil { - return m.Address +func (x *ServiceDefinition) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *ServiceDefinition) GetTaggedAddresses() map[string]*ServiceAddress { - if m != nil { - return m.TaggedAddresses +func (x *ServiceDefinition) GetTaggedAddresses() map[string]*ServiceAddress { + if x != nil { + return x.TaggedAddresses } return nil } -func (m *ServiceDefinition) GetMeta() map[string]string { - if m != nil { - return m.Meta +func (x *ServiceDefinition) GetMeta() map[string]string { + if x != nil { + return x.Meta } return nil } -func (m *ServiceDefinition) GetPort() int32 { - if m != nil { - return 
m.Port +func (x *ServiceDefinition) GetPort() int32 { + if x != nil { + return x.Port } return 0 } -func (m *ServiceDefinition) GetSocketPath() string { - if m != nil { - return m.SocketPath +func (x *ServiceDefinition) GetSocketPath() string { + if x != nil { + return x.SocketPath } return "" } -func (m *ServiceDefinition) GetCheck() *CheckType { - if m != nil { - return m.Check +func (x *ServiceDefinition) GetCheck() *CheckType { + if x != nil { + return x.Check } return nil } -func (m *ServiceDefinition) GetChecks() []*CheckType { - if m != nil { - return m.Checks +func (x *ServiceDefinition) GetChecks() []*CheckType { + if x != nil { + return x.Checks } return nil } -func (m *ServiceDefinition) GetWeights() *Weights { - if m != nil { - return m.Weights +func (x *ServiceDefinition) GetWeights() *Weights { + if x != nil { + return x.Weights } return nil } -func (m *ServiceDefinition) GetToken() string { - if m != nil { - return m.Token +func (x *ServiceDefinition) GetToken() string { + if x != nil { + return x.Token } return "" } -func (m *ServiceDefinition) GetEnableTagOverride() bool { - if m != nil { - return m.EnableTagOverride +func (x *ServiceDefinition) GetEnableTagOverride() bool { + if x != nil { + return x.EnableTagOverride } return false } -func (m *ServiceDefinition) GetProxy() *ConnectProxyConfig { - if m != nil { - return m.Proxy +func (x *ServiceDefinition) GetProxy() *ConnectProxyConfig { + if x != nil { + return x.Proxy } return nil } -func (m *ServiceDefinition) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { - if m != nil { - return m.EnterpriseMeta +func (x *ServiceDefinition) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if x != nil { + return x.EnterpriseMeta } return nil } -func (m *ServiceDefinition) GetConnect() *ServiceConnect { - if m != nil { - return m.Connect +func (x *ServiceDefinition) GetConnect() *ServiceConnect { + if x != nil { + return x.Connect } return nil } // Type to hold an address and port of a service type ServiceAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Address string `protobuf:"bytes,1,opt,name=Address,proto3" json:"Address,omitempty"` // mog: func-to=int func-from=int32 - Port int32 `protobuf:"varint,2,opt,name=Port,proto3" json:"Port,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Port int32 `protobuf:"varint,2,opt,name=Port,proto3" json:"Port,omitempty"` } -func (m *ServiceAddress) Reset() { *m = ServiceAddress{} } -func (m *ServiceAddress) String() string { return proto.CompactTextString(m) } -func (*ServiceAddress) ProtoMessage() {} +func (x *ServiceAddress) Reset() { + *x = ServiceAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceAddress) ProtoMessage() {} + +func (x *ServiceAddress) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceAddress.ProtoReflect.Descriptor instead. 
func (*ServiceAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{8} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{8} } -func (m *ServiceAddress) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceAddress.Unmarshal(m, b) -} -func (m *ServiceAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceAddress.Marshal(b, m, deterministic) -} -func (m *ServiceAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceAddress.Merge(m, src) -} -func (m *ServiceAddress) XXX_Size() int { - return xxx_messageInfo_ServiceAddress.Size(m) -} -func (m *ServiceAddress) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceAddress.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceAddress proto.InternalMessageInfo - -func (m *ServiceAddress) GetAddress() string { - if m != nil { - return m.Address +func (x *ServiceAddress) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *ServiceAddress) GetPort() int32 { - if m != nil { - return m.Port +func (x *ServiceAddress) GetPort() int32 { + if x != nil { + return x.Port } return 0 } // Weights represent the weight used by DNS for a given status type Weights struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // mog: func-to=int func-from=int32 Passing int32 `protobuf:"varint,1,opt,name=Passing,proto3" json:"Passing,omitempty"` // mog: func-to=int func-from=int32 - Warning int32 `protobuf:"varint,2,opt,name=Warning,proto3" json:"Warning,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Warning int32 `protobuf:"varint,2,opt,name=Warning,proto3" json:"Warning,omitempty"` } -func (m *Weights) Reset() { *m = Weights{} } -func (m *Weights) String() string { return proto.CompactTextString(m) } -func (*Weights) ProtoMessage() {} +func (x *Weights) Reset() { + *x = Weights{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Weights) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Weights) ProtoMessage() {} + +func (x *Weights) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Weights.ProtoReflect.Descriptor instead. 
func (*Weights) Descriptor() ([]byte, []int) { - return fileDescriptor_cbb99233b75fb80b, []int{9} + return file_proto_pbservice_service_proto_rawDescGZIP(), []int{9} } -func (m *Weights) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Weights.Unmarshal(m, b) -} -func (m *Weights) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Weights.Marshal(b, m, deterministic) -} -func (m *Weights) XXX_Merge(src proto.Message) { - xxx_messageInfo_Weights.Merge(m, src) -} -func (m *Weights) XXX_Size() int { - return xxx_messageInfo_Weights.Size(m) -} -func (m *Weights) XXX_DiscardUnknown() { - xxx_messageInfo_Weights.DiscardUnknown(m) -} - -var xxx_messageInfo_Weights proto.InternalMessageInfo - -func (m *Weights) GetPassing() int32 { - if m != nil { - return m.Passing +func (x *Weights) GetPassing() int32 { + if x != nil { + return x.Passing } return 0 } -func (m *Weights) GetWarning() int32 { - if m != nil { - return m.Warning +func (x *Weights) GetWarning() int32 { + if x != nil { + return x.Warning } return 0 } -func init() { - proto.RegisterType((*ConnectProxyConfig)(nil), "pbservice.ConnectProxyConfig") - proto.RegisterType((*Upstream)(nil), "pbservice.Upstream") - proto.RegisterType((*ServiceConnect)(nil), "pbservice.ServiceConnect") - proto.RegisterType((*ExposeConfig)(nil), "pbservice.ExposeConfig") - proto.RegisterType((*ExposePath)(nil), "pbservice.ExposePath") - proto.RegisterType((*MeshGatewayConfig)(nil), "pbservice.MeshGatewayConfig") - proto.RegisterType((*TransparentProxyConfig)(nil), "pbservice.TransparentProxyConfig") - proto.RegisterType((*ServiceDefinition)(nil), "pbservice.ServiceDefinition") - proto.RegisterMapType((map[string]string)(nil), "pbservice.ServiceDefinition.MetaEntry") - proto.RegisterMapType((map[string]*ServiceAddress)(nil), "pbservice.ServiceDefinition.TaggedAddressesEntry") - proto.RegisterType((*ServiceAddress)(nil), "pbservice.ServiceAddress") - proto.RegisterType((*Weights)(nil), "pbservice.Weights") +var File_proto_pbservice_service_proto protoreflect.FileDescriptor + +var file_proto_pbservice_service_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x09, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xce, 0x04, 0x0a, 0x12, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x36, 0x0a, 0x16, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x16, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x44, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x13, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x0a, 0x09, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x52, 0x09, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x3e, 0x0a, + 0x0b, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, + 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0b, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x2f, 0x0a, + 0x06, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x12, 0x36, 0x0a, 0x16, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x16, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x22, 0xbf, 0x04, 0x0a, 0x08, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x32, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x14, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x44, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, + 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x2a, + 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, + 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x2f, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x12, 0x30, 0x0a, 0x13, 0x43, 0x65, 0x6e, 0x74, 0x72, 0x61, 0x6c, 0x6c, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x43, 0x65, 0x6e, 0x74, 0x72, 0x61, 0x6c, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, + 0x6e, 0x64, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6e, 0x0a, 0x0e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x4e, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x12, 0x44, 0x0a, 0x0e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, + 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x53, 0x69, 0x64, + 0x65, 0x63, 0x61, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x53, 0x0a, 0x0c, 0x45, + 0x78, 0x70, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, + 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, + 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x22, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x50, 0x61, 0x74, 0x68, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x50, 0x61, 0x72, + 0x73, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0f, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x22, 0x27, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x74, 0x0a, 0x16, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x14, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x44, 0x69, + 0x61, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6c, 0x79, 0x22, 0xd4, 0x06, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, + 0x54, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5b, + 0x0a, 0x0f, 0x54, 
0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, + 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x04, 0x4d, + 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x05, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x05, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2c, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x52, 0x07, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x3e, 0x0a, 0x0e, + 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x1a, 0x5d, 0x0a, 0x14, 0x54, 0x61, 
0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x3d, 0x0a, 0x07, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x18, + 0x0a, 0x07, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x07, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbservice/service.proto", fileDescriptor_cbb99233b75fb80b) +var ( + file_proto_pbservice_service_proto_rawDescOnce sync.Once + file_proto_pbservice_service_proto_rawDescData = file_proto_pbservice_service_proto_rawDesc +) + +func file_proto_pbservice_service_proto_rawDescGZIP() []byte { + file_proto_pbservice_service_proto_rawDescOnce.Do(func() { + file_proto_pbservice_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbservice_service_proto_rawDescData) + }) + return file_proto_pbservice_service_proto_rawDescData } -var fileDescriptor_cbb99233b75fb80b = []byte{ - // 1086 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xef, 0x6e, 0x1b, 0x45, - 0x10, 0x97, 0xff, 0xdb, 0x93, 0xe0, 0x24, 0x9b, 0x10, 0x8e, 0xd0, 0xa2, 0xf4, 0x84, 0x4a, 0xd4, - 0x06, 0xbb, 0x4d, 0x24, 0x40, 0x95, 0x88, 0x44, 0xe3, 0x80, 0x2a, 0x9a, 0xd6, 0xba, 0x18, 0x55, - 0x02, 0xf1, 0x61, 0x7d, 0xb7, 0xb1, 0x4f, 0xb9, 0xec, 0x59, 0xbb, 0xeb, 0x50, 0xbf, 0x15, 0x6f, - 0xc0, 0x1b, 0xf0, 0x89, 0x07, 0x42, 0x3b, 0xbb, 0x77, 0x59, 0xdf, 0x1d, 0x15, 0x7c, 0xf2, 0xee, - 0xfc, 0xe6, 0x37, 0xbb, 0x9e, 0xf9, 0xcd, 0xec, 0xc1, 0xc3, 0x85, 0x48, 0x55, 0x3a, 0x5c, 0x4c, - 0x25, 0x13, 0x77, 0x71, 0xc8, 0x86, 0xf6, 0x77, 0x80, 0x76, 0xd2, 0xcb, 0x81, 0x83, 0x07, 0xb3, - 0x34, 0x9d, 0x25, 0x6c, 0x88, 0xc0, 0x74, 0x79, 0x3d, 0x94, 0x4a, 0x2c, 0x43, 0x65, 0x1c, 0x0f, - 0x3e, 0xcb, 0xe2, 0x84, 0xe9, 0xed, 0x6d, 0xca, 0x87, 0xe6, 0xc7, 0x82, 0x8f, 0x8a, 
0x87, 0xcc, - 0x19, 0x4d, 0xd4, 0x3c, 0x9c, 0xb3, 0xf0, 0xc6, 0xb8, 0xf8, 0x7f, 0x35, 0x81, 0x9c, 0xa7, 0x9c, - 0xb3, 0x50, 0x8d, 0x45, 0xfa, 0x7e, 0x75, 0x9e, 0xf2, 0xeb, 0x78, 0x46, 0xbe, 0x86, 0xfd, 0x11, - 0x93, 0x2a, 0xe6, 0x54, 0xc5, 0x29, 0xbf, 0x32, 0xf4, 0x37, 0xf4, 0x96, 0x79, 0xb5, 0xc3, 0xda, - 0x51, 0x2f, 0xf8, 0x17, 0x94, 0x9c, 0xc0, 0x5e, 0x19, 0x79, 0x35, 0xf2, 0xea, 0xc8, 0xaa, 0xc4, - 0xc8, 0x33, 0xd8, 0x7d, 0x9d, 0x86, 0x34, 0xb1, 0x96, 0xef, 0xa3, 0x48, 0x30, 0x29, 0xbd, 0x06, - 0x52, 0xaa, 0x20, 0xf2, 0x04, 0xb6, 0x5d, 0xf3, 0x38, 0x15, 0xca, 0x6b, 0x1e, 0xd6, 0x8e, 0x5a, - 0x41, 0xc9, 0x4e, 0x86, 0xd0, 0x36, 0xff, 0xc9, 0x6b, 0x1d, 0xd6, 0x8e, 0x36, 0x4e, 0x3e, 0x19, - 0x98, 0x7c, 0x0e, 0xb2, 0x7c, 0x0e, 0xae, 0x30, 0x9f, 0x81, 0x75, 0x23, 0xcf, 0xa1, 0xf7, 0xf3, - 0x42, 0x2a, 0xc1, 0xe8, 0xad, 0xf4, 0xda, 0x87, 0x8d, 0xa3, 0x8d, 0x93, 0xdd, 0x41, 0x9e, 0xc2, - 0x41, 0x86, 0x05, 0xf7, 0x5e, 0xe4, 0x0c, 0x36, 0x2e, 0x99, 0x9c, 0xff, 0x48, 0x15, 0xfb, 0x9d, - 0xae, 0xbc, 0x0e, 0x1e, 0xf4, 0xc0, 0x21, 0x39, 0xa8, 0x39, 0x25, 0x70, 0x09, 0xfa, 0x8e, 0x17, - 0xef, 0x17, 0xa9, 0x64, 0x5e, 0xd7, 0xde, 0xf1, 0x9e, 0x6a, 0x00, 0xcb, 0xb2, 0x6e, 0x84, 0x40, - 0xf3, 0x32, 0x8d, 0x98, 0xd7, 0xc3, 0x1c, 0xe1, 0x9a, 0x5c, 0xc2, 0xf6, 0x44, 0x50, 0x2e, 0x17, - 0x54, 0x30, 0x6e, 0x8a, 0xe9, 0x01, 0x86, 0x7b, 0xe4, 0x84, 0x2b, 0xba, 0xd8, 0xc0, 0x25, 0xaa, - 0x56, 0x80, 0x9b, 0xcb, 0xab, 0x34, 0xbc, 0x61, 0x6a, 0x4c, 0xd5, 0xdc, 0xdb, 0x30, 0x0a, 0xa8, - 0x46, 0xfd, 0x3f, 0x9b, 0xd0, 0xcd, 0x32, 0x43, 0x8e, 0x60, 0xcb, 0x29, 0xf9, 0x64, 0xb5, 0xc8, - 0xf4, 0x53, 0x34, 0x17, 0x84, 0xa3, 0xb5, 0x24, 0x17, 0x34, 0x64, 0x15, 0xc2, 0xc9, 0xb1, 0x42, - 0x74, 0x54, 0x67, 0xa3, 0x14, 0x1d, 0x65, 0xf9, 0x39, 0xc0, 0x88, 0x2a, 0x1a, 0x32, 0xae, 0x98, - 0x40, 0xa9, 0xf4, 0x02, 0xc7, 0x92, 0x0b, 0xea, 0x65, 0xcc, 0xa3, 0x4c, 0x7f, 0x2d, 0xf4, 0x2a, - 0xd9, 0xc9, 0x17, 0xf0, 0x51, 0x6e, 0x43, 0xe5, 0xb5, 0x51, 0x79, 0xeb, 0x46, 0x47, 0x76, 0x9d, - 0xff, 0x26, 0xbb, 0x82, 0x86, 0xba, 0xff, 0x57, 0x43, 0xcf, 0x60, 0xf7, 0x9c, 0x71, 0x25, 0x68, - 0x92, 0x58, 0x7c, 0x29, 0x58, 0x84, 0x0a, 0xe9, 0x06, 0x55, 0x50, 0xde, 0x77, 0xfa, 0xce, 0x4e, - 0x79, 0xc1, 0xe9, 0xbb, 0x75, 0xa8, 0x82, 0x81, 0x2a, 0xdc, 0xa8, 0x64, 0xa0, 0x28, 0xd7, 0xcb, - 0x3a, 0xa6, 0x42, 0xc5, 0x7a, 0xe1, 0x6d, 0x96, 0xca, 0x9a, 0x63, 0x3e, 0x87, 0xbe, 0x95, 0x95, - 0x1d, 0x4c, 0x64, 0x1f, 0xda, 0x6f, 0xa8, 0x8a, 0xef, 0x8c, 0x7a, 0xba, 0x81, 0xdd, 0x91, 0x11, - 0xf4, 0xaf, 0xe2, 0x88, 0x85, 0x54, 0x58, 0x02, 0xd6, 0x7f, 0x3d, 0x6d, 0x16, 0x19, 0xb1, 0xeb, - 0x98, 0x63, 0xfc, 0xa0, 0xc0, 0xf1, 0xaf, 0x60, 0xd3, 0x6d, 0x32, 0x7d, 0xda, 0xb9, 0x9e, 0x90, - 0x32, 0x3b, 0xcd, 0xec, 0xc8, 0x53, 0x68, 0xe9, 0x2c, 0x48, 0xaf, 0x8e, 0x43, 0xe1, 0xe3, 0x52, - 0x93, 0x6a, 0x34, 0x30, 0x3e, 0xfe, 0x1f, 0x35, 0x80, 0x7b, 0x2b, 0xf1, 0x61, 0xf3, 0x75, 0x2c, - 0x15, 0xe3, 0x4c, 0xa0, 0x66, 0x6a, 0xa8, 0x99, 0x35, 0x9b, 0x6e, 0x6a, 0x2c, 0x80, 0x91, 0x3c, - 0xae, 0x73, 0xb1, 0xe9, 0x0d, 0x12, 0x1b, 0x8e, 0xd8, 0x32, 0x23, 0x39, 0x80, 0xee, 0x58, 0xcb, - 0x2a, 0x4c, 0x13, 0x2b, 0xee, 0x7c, 0xaf, 0x9b, 0x64, 0x4c, 0x85, 0x64, 0xd1, 0x0f, 0x22, 0xbd, - 0xc5, 0x7f, 0x82, 0xca, 0xee, 0x06, 0x45, 0xb3, 0xff, 0x25, 0xec, 0x94, 0x34, 0x96, 0x4f, 0x9a, - 0xda, 0xfd, 0xa4, 0xf1, 0x15, 0xec, 0x57, 0x8f, 0x11, 0x5d, 0xee, 0xb7, 0x4b, 0x35, 0x4d, 0x97, - 0x3c, 0xaa, 0xf8, 0xbb, 0x95, 0x18, 0x79, 0x0c, 0xfd, 0x51, 0x4c, 0x13, 0x16, 0x8d, 0x62, 0xc1, - 0x42, 0x95, 0xac, 0x30, 0x01, 0xdd, 0xa0, 0x60, 0xf5, 0xff, 0x6e, 0xc3, 0x4e, 0xa9, 0x98, 0xfa, - 0x7e, 0x3f, 
0xc5, 0x3c, 0xca, 0xee, 0xa7, 0xd7, 0xa4, 0x0f, 0xf5, 0xfc, 0xc9, 0xa9, 0xbf, 0x1a, - 0x69, 0x1f, 0x67, 0x38, 0xe0, 0x5a, 0xdb, 0x26, 0x74, 0x26, 0xbd, 0xe6, 0x61, 0x43, 0xdb, 0xf4, - 0x9a, 0x78, 0xd0, 0x59, 0x6f, 0xfe, 0x6c, 0x4b, 0x7e, 0x85, 0xad, 0x09, 0x9d, 0xcd, 0x58, 0x36, - 0x04, 0x98, 0xf4, 0xb6, 0x51, 0x04, 0xcf, 0x3f, 0xa4, 0xb4, 0x41, 0x81, 0x73, 0xc1, 0x95, 0x58, - 0x05, 0xc5, 0x48, 0xe4, 0x05, 0x34, 0x2f, 0x99, 0xa2, 0xf6, 0xad, 0x79, 0xfc, 0xc1, 0x88, 0xda, - 0xd1, 0x84, 0x41, 0x0e, 0x6a, 0x46, 0x27, 0xb8, 0x83, 0x09, 0xc6, 0xb5, 0x1e, 0x76, 0x4e, 0x3b, - 0x13, 0x33, 0xec, 0x9c, 0x2e, 0x7e, 0x02, 0x2d, 0xa3, 0x03, 0x33, 0x63, 0xf6, 0x9c, 0x03, 0xd1, - 0xae, 0xe7, 0x71, 0x60, 0x5c, 0xc8, 0x71, 0xde, 0x0b, 0x3d, 0xbc, 0x5d, 0xb5, 0x73, 0xd6, 0x21, - 0xc7, 0xd0, 0x79, 0xc7, 0xe2, 0xd9, 0x5c, 0x49, 0xfb, 0xf2, 0x10, 0xc7, 0xdd, 0x22, 0x41, 0xe6, - 0x42, 0xf6, 0xa0, 0x35, 0x49, 0x6f, 0x18, 0xb7, 0xf3, 0xc3, 0x6c, 0xc8, 0x31, 0xec, 0x5c, 0x70, - 0x3a, 0x4d, 0xd8, 0x84, 0xce, 0xde, 0xde, 0x31, 0x21, 0xe2, 0x88, 0xe1, 0xb8, 0xe8, 0x06, 0x65, - 0x80, 0x9c, 0x42, 0xcb, 0xbc, 0x74, 0x7d, 0x3c, 0xef, 0xa1, 0x7b, 0xbd, 0xd2, 0x57, 0x4d, 0x60, - 0x7c, 0xc9, 0x19, 0xf4, 0x2f, 0xf4, 0xd8, 0x5f, 0x88, 0x58, 0x32, 0x4c, 0xfd, 0x0e, 0xb2, 0xf7, - 0x07, 0xf6, 0xeb, 0x69, 0x1d, 0x0d, 0x0a, 0xde, 0xe4, 0x14, 0x3a, 0x36, 0xb8, 0xb7, 0x85, 0xc4, - 0x4f, 0xcb, 0x35, 0xb3, 0x0e, 0x41, 0xe6, 0x79, 0xf0, 0x1b, 0xec, 0x55, 0xc9, 0x81, 0x6c, 0x43, - 0xe3, 0x86, 0xad, 0xac, 0x7e, 0xf5, 0x92, 0x0c, 0xa1, 0x75, 0x47, 0x93, 0xa5, 0x79, 0xfb, 0x2a, - 0x83, 0xdb, 0x10, 0x81, 0xf1, 0x7b, 0x51, 0xff, 0xb6, 0x76, 0xf0, 0x0d, 0xf4, 0x72, 0x6d, 0x54, - 0xc4, 0xdc, 0x73, 0x63, 0xf6, 0x1c, 0xa2, 0x7f, 0x96, 0x4f, 0xdb, 0x4c, 0xec, 0x4e, 0x1b, 0xd4, - 0xd6, 0xdb, 0x20, 0x53, 0x5b, 0xfd, 0x5e, 0x6d, 0xfe, 0x77, 0x79, 0xcd, 0x35, 0x71, 0x4c, 0xa5, - 0x8c, 0xf9, 0xcc, 0x36, 0x7c, 0xb6, 0xd5, 0xc8, 0x3b, 0x2a, 0xb8, 0x46, 0x0c, 0x37, 0xdb, 0xbe, - 0xfc, 0xea, 0x97, 0xa7, 0xb3, 0x58, 0xcd, 0x97, 0x53, 0x9d, 0xfb, 0xe1, 0x9c, 0xca, 0x79, 0x1c, - 0xa6, 0x62, 0x31, 0x0c, 0x53, 0x2e, 0x97, 0xc9, 0xb0, 0xf0, 0x01, 0x3b, 0x6d, 0xa3, 0xe1, 0xf4, - 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0x23, 0xd7, 0x20, 0x3f, 0x0b, 0x00, 0x00, +var file_proto_pbservice_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_proto_pbservice_service_proto_goTypes = []interface{}{ + (*ConnectProxyConfig)(nil), // 0: pbservice.ConnectProxyConfig + (*Upstream)(nil), // 1: pbservice.Upstream + (*ServiceConnect)(nil), // 2: pbservice.ServiceConnect + (*ExposeConfig)(nil), // 3: pbservice.ExposeConfig + (*ExposePath)(nil), // 4: pbservice.ExposePath + (*MeshGatewayConfig)(nil), // 5: pbservice.MeshGatewayConfig + (*TransparentProxyConfig)(nil), // 6: pbservice.TransparentProxyConfig + (*ServiceDefinition)(nil), // 7: pbservice.ServiceDefinition + (*ServiceAddress)(nil), // 8: pbservice.ServiceAddress + (*Weights)(nil), // 9: pbservice.Weights + nil, // 10: pbservice.ServiceDefinition.TaggedAddressesEntry + nil, // 11: pbservice.ServiceDefinition.MetaEntry + (*structpb.Struct)(nil), // 12: google.protobuf.Struct + (*CheckType)(nil), // 13: pbservice.CheckType + (*pbcommon.EnterpriseMeta)(nil), // 14: common.EnterpriseMeta +} +var file_proto_pbservice_service_proto_depIdxs = []int32{ + 12, // 0: pbservice.ConnectProxyConfig.Config:type_name -> google.protobuf.Struct + 1, // 1: pbservice.ConnectProxyConfig.Upstreams:type_name -> pbservice.Upstream + 5, // 2: pbservice.ConnectProxyConfig.MeshGateway:type_name -> pbservice.MeshGatewayConfig + 
3, // 3: pbservice.ConnectProxyConfig.Expose:type_name -> pbservice.ExposeConfig + 6, // 4: pbservice.ConnectProxyConfig.TransparentProxy:type_name -> pbservice.TransparentProxyConfig + 12, // 5: pbservice.Upstream.Config:type_name -> google.protobuf.Struct + 5, // 6: pbservice.Upstream.MeshGateway:type_name -> pbservice.MeshGatewayConfig + 7, // 7: pbservice.ServiceConnect.SidecarService:type_name -> pbservice.ServiceDefinition + 4, // 8: pbservice.ExposeConfig.Paths:type_name -> pbservice.ExposePath + 10, // 9: pbservice.ServiceDefinition.TaggedAddresses:type_name -> pbservice.ServiceDefinition.TaggedAddressesEntry + 11, // 10: pbservice.ServiceDefinition.Meta:type_name -> pbservice.ServiceDefinition.MetaEntry + 13, // 11: pbservice.ServiceDefinition.Check:type_name -> pbservice.CheckType + 13, // 12: pbservice.ServiceDefinition.Checks:type_name -> pbservice.CheckType + 9, // 13: pbservice.ServiceDefinition.Weights:type_name -> pbservice.Weights + 0, // 14: pbservice.ServiceDefinition.Proxy:type_name -> pbservice.ConnectProxyConfig + 14, // 15: pbservice.ServiceDefinition.EnterpriseMeta:type_name -> common.EnterpriseMeta + 2, // 16: pbservice.ServiceDefinition.Connect:type_name -> pbservice.ServiceConnect + 8, // 17: pbservice.ServiceDefinition.TaggedAddressesEntry.value:type_name -> pbservice.ServiceAddress + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_proto_pbservice_service_proto_init() } +func file_proto_pbservice_service_proto_init() { + if File_proto_pbservice_service_proto != nil { + return + } + file_proto_pbservice_healthcheck_proto_init() + if !protoimpl.UnsafeEnabled { + file_proto_pbservice_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectProxyConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Upstream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceConnect); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExposeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExposePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MeshGatewayConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*TransparentProxyConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDefinition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Weights); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbservice_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbservice_service_proto_goTypes, + DependencyIndexes: file_proto_pbservice_service_proto_depIdxs, + MessageInfos: file_proto_pbservice_service_proto_msgTypes, + }.Build() + File_proto_pbservice_service_proto = out.File + file_proto_pbservice_service_proto_rawDesc = nil + file_proto_pbservice_service_proto_goTypes = nil + file_proto_pbservice_service_proto_depIdxs = nil } diff --git a/proto/pbsubscribe/subscribe.pb.go b/proto/pbsubscribe/subscribe.pb.go index 9df5bba25..851991ed0 100644 --- a/proto/pbsubscribe/subscribe.pb.go +++ b/proto/pbsubscribe/subscribe.pb.go @@ -1,29 +1,37 @@ +// +//Package event provides a service for subscribing to state change events. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 // source: proto/pbsubscribe/subscribe.proto package pbsubscribe import ( context "context" - fmt "fmt" proto "github.com/golang/protobuf/proto" pbservice "github.com/hashicorp/consul/proto/pbservice" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Topic enumerates the supported event topics. 
type Topic int32 @@ -37,24 +45,45 @@ const ( Topic_ServiceHealthConnect Topic = 2 ) -var Topic_name = map[int32]string{ - 0: "Unknown", - 1: "ServiceHealth", - 2: "ServiceHealthConnect", -} +// Enum value maps for Topic. +var ( + Topic_name = map[int32]string{ + 0: "Unknown", + 1: "ServiceHealth", + 2: "ServiceHealthConnect", + } + Topic_value = map[string]int32{ + "Unknown": 0, + "ServiceHealth": 1, + "ServiceHealthConnect": 2, + } +) -var Topic_value = map[string]int32{ - "Unknown": 0, - "ServiceHealth": 1, - "ServiceHealthConnect": 2, +func (x Topic) Enum() *Topic { + p := new(Topic) + *p = x + return p } func (x Topic) String() string { - return proto.EnumName(Topic_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (Topic) Descriptor() protoreflect.EnumDescriptor { + return file_proto_pbsubscribe_subscribe_proto_enumTypes[0].Descriptor() +} + +func (Topic) Type() protoreflect.EnumType { + return &file_proto_pbsubscribe_subscribe_proto_enumTypes[0] +} + +func (x Topic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Topic.Descriptor instead. func (Topic) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ab3eb8c810e315fb, []int{0} + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{0} } type CatalogOp int32 @@ -64,26 +93,51 @@ const ( CatalogOp_Deregister CatalogOp = 1 ) -var CatalogOp_name = map[int32]string{ - 0: "Register", - 1: "Deregister", -} +// Enum value maps for CatalogOp. +var ( + CatalogOp_name = map[int32]string{ + 0: "Register", + 1: "Deregister", + } + CatalogOp_value = map[string]int32{ + "Register": 0, + "Deregister": 1, + } +) -var CatalogOp_value = map[string]int32{ - "Register": 0, - "Deregister": 1, +func (x CatalogOp) Enum() *CatalogOp { + p := new(CatalogOp) + *p = x + return p } func (x CatalogOp) String() string { - return proto.EnumName(CatalogOp_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (CatalogOp) Descriptor() protoreflect.EnumDescriptor { + return file_proto_pbsubscribe_subscribe_proto_enumTypes[1].Descriptor() +} + +func (CatalogOp) Type() protoreflect.EnumType { + return &file_proto_pbsubscribe_subscribe_proto_enumTypes[1] +} + +func (x CatalogOp) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CatalogOp.Descriptor instead. func (CatalogOp) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ab3eb8c810e315fb, []int{1} + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{1} } // SubscribeRequest used to subscribe to a topic. type SubscribeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Topic identifies the set of events the subscriber is interested in. Topic Topic `protobuf:"varint,1,opt,name=Topic,proto3,enum=subscribe.Topic" json:"Topic,omitempty"` // Key is a topic-specific identifier that restricts the scope of the @@ -115,82 +169,86 @@ type SubscribeRequest struct { // default partition will be used. // // Partition is an enterprise-only feature. 
- Partition string `protobuf:"bytes,7,opt,name=Partition,proto3" json:"Partition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Partition string `protobuf:"bytes,7,opt,name=Partition,proto3" json:"Partition,omitempty"` } -func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} } -func (m *SubscribeRequest) String() string { return proto.CompactTextString(m) } -func (*SubscribeRequest) ProtoMessage() {} +func (x *SubscribeRequest) Reset() { + *x = SubscribeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeRequest) ProtoMessage() {} + +func (x *SubscribeRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeRequest.ProtoReflect.Descriptor instead. func (*SubscribeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ab3eb8c810e315fb, []int{0} + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{0} } -func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SubscribeRequest.Unmarshal(m, b) -} -func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic) -} -func (m *SubscribeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubscribeRequest.Merge(m, src) -} -func (m *SubscribeRequest) XXX_Size() int { - return xxx_messageInfo_SubscribeRequest.Size(m) -} -func (m *SubscribeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SubscribeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo - -func (m *SubscribeRequest) GetTopic() Topic { - if m != nil { - return m.Topic +func (x *SubscribeRequest) GetTopic() Topic { + if x != nil { + return x.Topic } return Topic_Unknown } -func (m *SubscribeRequest) GetKey() string { - if m != nil { - return m.Key +func (x *SubscribeRequest) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *SubscribeRequest) GetToken() string { - if m != nil { - return m.Token +func (x *SubscribeRequest) GetToken() string { + if x != nil { + return x.Token } return "" } -func (m *SubscribeRequest) GetIndex() uint64 { - if m != nil { - return m.Index +func (x *SubscribeRequest) GetIndex() uint64 { + if x != nil { + return x.Index } return 0 } -func (m *SubscribeRequest) GetDatacenter() string { - if m != nil { - return m.Datacenter +func (x *SubscribeRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter } return "" } -func (m *SubscribeRequest) GetNamespace() string { - if m != nil { - return m.Namespace +func (x *SubscribeRequest) GetNamespace() string { + if x != nil { + return x.Namespace } return "" } -func (m *SubscribeRequest) GetPartition() string { - if m != nil { - return m.Partition +func (x *SubscribeRequest) GetPartition() string { + if x != nil { + return x.Partition } return "" } @@ -199,6 +257,10 @@ func (m *SubscribeRequest) GetPartition() string { // describe the current "snapshot" of the result as well as ongoing 
mutations to // that snapshot. type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Index is the raft index at which the mutation took place. At the top // level of a subscription there will always be at most one Event per index. // If multiple events are published to the same topic in a single raft @@ -207,66 +269,118 @@ type Event struct { Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` // Payload is the actual event content. // - // Types that are valid to be assigned to Payload: + // Types that are assignable to Payload: // *Event_EndOfSnapshot // *Event_NewSnapshotToFollow // *Event_EventBatch // *Event_ServiceHealth - Payload isEvent_Payload `protobuf_oneof:"Payload"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Payload isEvent_Payload `protobuf_oneof:"Payload"` } -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. 
func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_ab3eb8c810e315fb, []int{1} + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{1} } -func (m *Event) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Event.Unmarshal(m, b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return xxx_messageInfo_Event.Size(m) -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *Event) GetIndex() uint64 { - if m != nil { - return m.Index +func (x *Event) GetIndex() uint64 { + if x != nil { + return x.Index } return 0 } +func (m *Event) GetPayload() isEvent_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *Event) GetEndOfSnapshot() bool { + if x, ok := x.GetPayload().(*Event_EndOfSnapshot); ok { + return x.EndOfSnapshot + } + return false +} + +func (x *Event) GetNewSnapshotToFollow() bool { + if x, ok := x.GetPayload().(*Event_NewSnapshotToFollow); ok { + return x.NewSnapshotToFollow + } + return false +} + +func (x *Event) GetEventBatch() *EventBatch { + if x, ok := x.GetPayload().(*Event_EventBatch); ok { + return x.EventBatch + } + return nil +} + +func (x *Event) GetServiceHealth() *ServiceHealthUpdate { + if x, ok := x.GetPayload().(*Event_ServiceHealth); ok { + return x.ServiceHealth + } + return nil +} + type isEvent_Payload interface { isEvent_Payload() } type Event_EndOfSnapshot struct { + // EndOfSnapshot indicates the event stream for the initial snapshot has + // ended. Subsequent Events delivered will be mutations to that result. EndOfSnapshot bool `protobuf:"varint,2,opt,name=EndOfSnapshot,proto3,oneof"` } type Event_NewSnapshotToFollow struct { + // NewSnapshotToFollow indicates that the client view is stale. The client + // must reset its view before handing any more events. Subsequent events + // in the stream will be for a new snapshot until an EndOfSnapshot event + // is received. NewSnapshotToFollow bool `protobuf:"varint,3,opt,name=NewSnapshotToFollow,proto3,oneof"` } type Event_EventBatch struct { + // EventBatch is a set of events. This is typically used as the payload + // type where multiple events are emitted in a single topic and raft + // index (e.g. transactional updates). In this case the Topic and Index + // values of all events will match and the whole set should be delivered + // and consumed atomically. EventBatch *EventBatch `protobuf:"bytes,4,opt,name=EventBatch,proto3,oneof"` } type Event_ServiceHealth struct { + // ServiceHealth is used for ServiceHealth and ServiceHealthConnect + // topics. 
ServiceHealth *ServiceHealthUpdate `protobuf:"bytes,10,opt,name=ServiceHealth,proto3,oneof"` } @@ -278,185 +392,295 @@ func (*Event_EventBatch) isEvent_Payload() {} func (*Event_ServiceHealth) isEvent_Payload() {} -func (m *Event) GetPayload() isEvent_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *Event) GetEndOfSnapshot() bool { - if x, ok := m.GetPayload().(*Event_EndOfSnapshot); ok { - return x.EndOfSnapshot - } - return false -} - -func (m *Event) GetNewSnapshotToFollow() bool { - if x, ok := m.GetPayload().(*Event_NewSnapshotToFollow); ok { - return x.NewSnapshotToFollow - } - return false -} - -func (m *Event) GetEventBatch() *EventBatch { - if x, ok := m.GetPayload().(*Event_EventBatch); ok { - return x.EventBatch - } - return nil -} - -func (m *Event) GetServiceHealth() *ServiceHealthUpdate { - if x, ok := m.GetPayload().(*Event_ServiceHealth); ok { - return x.ServiceHealth - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Event) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Event_EndOfSnapshot)(nil), - (*Event_NewSnapshotToFollow)(nil), - (*Event_EventBatch)(nil), - (*Event_ServiceHealth)(nil), - } -} - type EventBatch struct { - Events []*Event `protobuf:"bytes,1,rep,name=Events,proto3" json:"Events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Events []*Event `protobuf:"bytes,1,rep,name=Events,proto3" json:"Events,omitempty"` } -func (m *EventBatch) Reset() { *m = EventBatch{} } -func (m *EventBatch) String() string { return proto.CompactTextString(m) } -func (*EventBatch) ProtoMessage() {} +func (x *EventBatch) Reset() { + *x = EventBatch{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventBatch) ProtoMessage() {} + +func (x *EventBatch) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventBatch.ProtoReflect.Descriptor instead. 
func (*EventBatch) Descriptor() ([]byte, []int) { - return fileDescriptor_ab3eb8c810e315fb, []int{2} + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{2} } -func (m *EventBatch) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EventBatch.Unmarshal(m, b) -} -func (m *EventBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EventBatch.Marshal(b, m, deterministic) -} -func (m *EventBatch) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventBatch.Merge(m, src) -} -func (m *EventBatch) XXX_Size() int { - return xxx_messageInfo_EventBatch.Size(m) -} -func (m *EventBatch) XXX_DiscardUnknown() { - xxx_messageInfo_EventBatch.DiscardUnknown(m) -} - -var xxx_messageInfo_EventBatch proto.InternalMessageInfo - -func (m *EventBatch) GetEvents() []*Event { - if m != nil { - return m.Events +func (x *EventBatch) GetEvents() []*Event { + if x != nil { + return x.Events } return nil } type ServiceHealthUpdate struct { - Op CatalogOp `protobuf:"varint,1,opt,name=Op,proto3,enum=subscribe.CatalogOp" json:"Op,omitempty"` - CheckServiceNode *pbservice.CheckServiceNode `protobuf:"bytes,2,opt,name=CheckServiceNode,proto3" json:"CheckServiceNode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Op CatalogOp `protobuf:"varint,1,opt,name=Op,proto3,enum=subscribe.CatalogOp" json:"Op,omitempty"` + CheckServiceNode *pbservice.CheckServiceNode `protobuf:"bytes,2,opt,name=CheckServiceNode,proto3" json:"CheckServiceNode,omitempty"` } -func (m *ServiceHealthUpdate) Reset() { *m = ServiceHealthUpdate{} } -func (m *ServiceHealthUpdate) String() string { return proto.CompactTextString(m) } -func (*ServiceHealthUpdate) ProtoMessage() {} +func (x *ServiceHealthUpdate) Reset() { + *x = ServiceHealthUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceHealthUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceHealthUpdate) ProtoMessage() {} + +func (x *ServiceHealthUpdate) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceHealthUpdate.ProtoReflect.Descriptor instead. 
func (*ServiceHealthUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_ab3eb8c810e315fb, []int{3} + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{3} } -func (m *ServiceHealthUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceHealthUpdate.Unmarshal(m, b) -} -func (m *ServiceHealthUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceHealthUpdate.Marshal(b, m, deterministic) -} -func (m *ServiceHealthUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceHealthUpdate.Merge(m, src) -} -func (m *ServiceHealthUpdate) XXX_Size() int { - return xxx_messageInfo_ServiceHealthUpdate.Size(m) -} -func (m *ServiceHealthUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceHealthUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceHealthUpdate proto.InternalMessageInfo - -func (m *ServiceHealthUpdate) GetOp() CatalogOp { - if m != nil { - return m.Op +func (x *ServiceHealthUpdate) GetOp() CatalogOp { + if x != nil { + return x.Op } return CatalogOp_Register } -func (m *ServiceHealthUpdate) GetCheckServiceNode() *pbservice.CheckServiceNode { - if m != nil { - return m.CheckServiceNode +func (x *ServiceHealthUpdate) GetCheckServiceNode() *pbservice.CheckServiceNode { + if x != nil { + return x.CheckServiceNode } return nil } -func init() { - proto.RegisterEnum("subscribe.Topic", Topic_name, Topic_value) - proto.RegisterEnum("subscribe.CatalogOp", CatalogOp_name, CatalogOp_value) - proto.RegisterType((*SubscribeRequest)(nil), "subscribe.SubscribeRequest") - proto.RegisterType((*Event)(nil), "subscribe.Event") - proto.RegisterType((*EventBatch)(nil), "subscribe.EventBatch") - proto.RegisterType((*ServiceHealthUpdate)(nil), "subscribe.ServiceHealthUpdate") +var File_proto_pbsubscribe_subscribe_proto protoreflect.FileDescriptor + +var file_proto_pbsubscribe_subscribe_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x1a, 0x1a, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x01, 0x0a, 0x10, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x26, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, + 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 
0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x85, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x26, 0x0a, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x45, 0x6e, 0x64, 0x4f, + 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x13, 0x4e, 0x65, 0x77, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x37, 0x0a, + 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, + 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x42, 0x09, + 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0x84, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, + 0x47, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x2a, 0x41, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x10, + 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x09, 0x43, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x72, 0x65, 0x67, 
0x69, + 0x73, 0x74, 0x65, 0x72, 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1b, + 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, + 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("proto/pbsubscribe/subscribe.proto", fileDescriptor_ab3eb8c810e315fb) +var ( + file_proto_pbsubscribe_subscribe_proto_rawDescOnce sync.Once + file_proto_pbsubscribe_subscribe_proto_rawDescData = file_proto_pbsubscribe_subscribe_proto_rawDesc +) + +func file_proto_pbsubscribe_subscribe_proto_rawDescGZIP() []byte { + file_proto_pbsubscribe_subscribe_proto_rawDescOnce.Do(func() { + file_proto_pbsubscribe_subscribe_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbsubscribe_subscribe_proto_rawDescData) + }) + return file_proto_pbsubscribe_subscribe_proto_rawDescData } -var fileDescriptor_ab3eb8c810e315fb = []byte{ - // 527 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6f, 0xda, 0x3c, - 0x14, 0xc6, 0xb4, 0x85, 0xe6, 0xf0, 0xb6, 0xca, 0xeb, 0x32, 0x2d, 0xa2, 0x53, 0xc5, 0xd0, 0x54, - 0xb1, 0x4a, 0x23, 0x13, 0x93, 0xb6, 0xbb, 0x49, 0x83, 0xb6, 0x63, 0x9a, 0x04, 0x55, 0x68, 0x2f, - 0xb6, 0x3b, 0xe3, 0x9c, 0x91, 0x88, 0xd4, 0xf6, 0x12, 0x53, 0xd6, 0xfb, 0xed, 0x1f, 0xee, 0x07, - 0x4d, 0x31, 0x21, 0x04, 0xe8, 0x9d, 0xcf, 0xf3, 0xe1, 0x63, 0x9f, 0x0f, 0x78, 0xa9, 0x62, 0xa9, - 0xa5, 0xab, 0x26, 0xc9, 0x7c, 0x92, 0xf0, 0x38, 0x9c, 0xa0, 0x9b, 0x9f, 0x3a, 0x86, 0xa3, 0x56, - 0x0e, 0x34, 0x1a, 0xb9, 0x1a, 0xe3, 0x87, 0x90, 0xa3, 0x2b, 0xa4, 0x9f, 0xc9, 0x5a, 0x7f, 0x09, - 0xd8, 0xe3, 0x95, 0xd2, 0xc3, 0x9f, 0x73, 0x4c, 0x34, 0x3d, 0x87, 0x83, 0x5b, 0xa9, 0x42, 0xee, - 0x90, 0x26, 0x69, 0x1f, 0x77, 0xed, 0xce, 0xfa, 0x72, 0x83, 0x7b, 0x4b, 0x9a, 0xda, 0xb0, 0xf7, - 0x15, 0x1f, 0x9d, 0x72, 0x93, 0xb4, 0x2d, 0x2f, 0x3d, 0xd2, 0x7a, 0xea, 0x9c, 0xa1, 0x70, 0xf6, - 0x0c, 0xb6, 0x0c, 0x52, 0xf4, 0x8b, 0xf0, 0xf1, 0x97, 0xb3, 0xdf, 0x24, 0xed, 0x7d, 0x6f, 0x19, - 0xd0, 0x33, 0x80, 0x4b, 0xa6, 0x19, 0x47, 0xa1, 0x31, 0x76, 0x0e, 0x8c, 0xa1, 0x80, 0xd0, 0x17, - 0x60, 0x0d, 0xd9, 0x3d, 0x26, 0x8a, 0x71, 0x74, 0x2a, 0x86, 0x5e, 0x03, 0x29, 0x7b, 0xc3, 0x62, - 0x1d, 0xea, 0x50, 0x0a, 0xa7, 0xba, 0x64, 0x73, 0xa0, 0xf5, 0xa7, 0x0c, 0x07, 0x57, 0x0f, 0x28, - 0xf4, 0x3a, 0x37, 0x29, 0xe6, 0x3e, 0x87, 0xa3, 0x2b, 0xe1, 0x8f, 0x7e, 0x8c, 0x05, 0x53, 0x49, - 0x20, 0xb5, 0xf9, 0xc3, 0xe1, 0xa0, 0xe4, 0x6d, 0xc2, 0xb4, 0x0b, 0x27, 0x43, 0x5c, 0xac, 0xc2, - 0x5b, 0x79, 0x2d, 0xa3, 0x48, 0x2e, 0xcc, 0xef, 0x52, 0xf5, 0x53, 0x24, 0xfd, 0x00, 0x60, 0x52, - 0xf7, 0x98, 0xe6, 0x81, 0xf9, 0x72, 0xad, 0xfb, 0xac, 0x50, 0xc2, 0x35, 0x39, 0x28, 0x79, 0x05, - 0x29, 0xbd, 0x86, 0xa3, 0xf1, 0xb2, 0x43, 0x03, 0x64, 0x91, 0x0e, 0x1c, 0x30, 0xde, 0xb3, 0x82, - 0x77, 0x83, 0xbf, 0x53, 
0x3e, 0xd3, 0x98, 0x3e, 0x7a, 0x03, 0xee, 0x59, 0x50, 0xbd, 0x61, 0x8f, - 0x91, 0x64, 0x7e, 0xeb, 0x7d, 0xf1, 0x2d, 0xb4, 0x0d, 0x15, 0x13, 0x25, 0x0e, 0x69, 0xee, 0xb5, - 0x6b, 0x1b, 0x8d, 0x35, 0x84, 0x97, 0xf1, 0xad, 0xdf, 0x04, 0x4e, 0x9e, 0xc8, 0x45, 0x5f, 0x41, - 0x79, 0xa4, 0xb2, 0xb1, 0xa8, 0x17, 0xdc, 0x7d, 0xa6, 0x59, 0x24, 0xa7, 0x23, 0xe5, 0x95, 0x47, - 0x8a, 0x7e, 0x06, 0xbb, 0x1f, 0x20, 0x9f, 0x65, 0x37, 0x0c, 0xa5, 0x8f, 0xa6, 0xc0, 0xb5, 0xee, - 0x69, 0x27, 0x9f, 0xc2, 0xce, 0xb6, 0xc4, 0xdb, 0x31, 0x5d, 0x7c, 0xca, 0x06, 0x91, 0xd6, 0xa0, - 0x7a, 0x27, 0x66, 0x42, 0x2e, 0x84, 0x5d, 0xa2, 0xff, 0x6f, 0xd5, 0xc9, 0x26, 0xd4, 0x81, 0xfa, - 0x06, 0xd4, 0x97, 0x42, 0x20, 0xd7, 0x76, 0xf9, 0xe2, 0x35, 0x58, 0xf9, 0xe3, 0xe8, 0x7f, 0x70, - 0xe8, 0xe1, 0x34, 0x4c, 0x34, 0xc6, 0x76, 0x89, 0x1e, 0x03, 0x5c, 0x62, 0xbc, 0x8a, 0x49, 0xf7, - 0x1b, 0x3c, 0x1f, 0x6b, 0xa6, 0xb1, 0x1f, 0x30, 0x31, 0xc5, 0x6c, 0x2b, 0x54, 0x3a, 0x4f, 0xf4, - 0x23, 0x58, 0xf9, 0x96, 0xd0, 0xd3, 0x62, 0x43, 0xb6, 0x76, 0xa7, 0xb1, 0x53, 0xd3, 0x56, 0xe9, - 0x2d, 0xe9, 0xb9, 0xdf, 0xdf, 0x4c, 0x43, 0x1d, 0xcc, 0x27, 0x1d, 0x2e, 0xef, 0xdd, 0x80, 0x25, - 0x41, 0xc8, 0x65, 0xac, 0x5c, 0x2e, 0x45, 0x32, 0x8f, 0xdc, 0x9d, 0x75, 0x9e, 0x54, 0x0c, 0xf4, - 0xee, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa0, 0xb3, 0x69, 0x51, 0xea, 0x03, 0x00, 0x00, +var file_proto_pbsubscribe_subscribe_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_proto_pbsubscribe_subscribe_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_proto_pbsubscribe_subscribe_proto_goTypes = []interface{}{ + (Topic)(0), // 0: subscribe.Topic + (CatalogOp)(0), // 1: subscribe.CatalogOp + (*SubscribeRequest)(nil), // 2: subscribe.SubscribeRequest + (*Event)(nil), // 3: subscribe.Event + (*EventBatch)(nil), // 4: subscribe.EventBatch + (*ServiceHealthUpdate)(nil), // 5: subscribe.ServiceHealthUpdate + (*pbservice.CheckServiceNode)(nil), // 6: pbservice.CheckServiceNode +} +var file_proto_pbsubscribe_subscribe_proto_depIdxs = []int32{ + 0, // 0: subscribe.SubscribeRequest.Topic:type_name -> subscribe.Topic + 4, // 1: subscribe.Event.EventBatch:type_name -> subscribe.EventBatch + 5, // 2: subscribe.Event.ServiceHealth:type_name -> subscribe.ServiceHealthUpdate + 3, // 3: subscribe.EventBatch.Events:type_name -> subscribe.Event + 1, // 4: subscribe.ServiceHealthUpdate.Op:type_name -> subscribe.CatalogOp + 6, // 5: subscribe.ServiceHealthUpdate.CheckServiceNode:type_name -> pbservice.CheckServiceNode + 2, // 6: subscribe.StateChangeSubscription.Subscribe:input_type -> subscribe.SubscribeRequest + 3, // 7: subscribe.StateChangeSubscription.Subscribe:output_type -> subscribe.Event + 7, // [7:8] is the sub-list for method output_type + 6, // [6:7] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_proto_pbsubscribe_subscribe_proto_init() } +func file_proto_pbsubscribe_subscribe_proto_init() { + if File_proto_pbsubscribe_subscribe_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbsubscribe_subscribe_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbsubscribe_subscribe_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbsubscribe_subscribe_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventBatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbsubscribe_subscribe_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceHealthUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proto_pbsubscribe_subscribe_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Event_EndOfSnapshot)(nil), + (*Event_NewSnapshotToFollow)(nil), + (*Event_EventBatch)(nil), + (*Event_ServiceHealth)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbsubscribe_subscribe_proto_rawDesc, + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_pbsubscribe_subscribe_proto_goTypes, + DependencyIndexes: file_proto_pbsubscribe_subscribe_proto_depIdxs, + EnumInfos: file_proto_pbsubscribe_subscribe_proto_enumTypes, + MessageInfos: file_proto_pbsubscribe_subscribe_proto_msgTypes, + }.Build() + File_proto_pbsubscribe_subscribe_proto = out.File + file_proto_pbsubscribe_subscribe_proto_rawDesc = nil + file_proto_pbsubscribe_subscribe_proto_goTypes = nil + file_proto_pbsubscribe_subscribe_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. @@ -561,7 +785,7 @@ type StateChangeSubscriptionServer interface { type UnimplementedStateChangeSubscriptionServer struct { } -func (*UnimplementedStateChangeSubscriptionServer) Subscribe(req *SubscribeRequest, srv StateChangeSubscription_SubscribeServer) error { +func (*UnimplementedStateChangeSubscriptionServer) Subscribe(*SubscribeRequest, StateChangeSubscription_SubscribeServer) error { return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") } From 47693e3ebf3412405bc2c2fea72688bdaa76ff12 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Wed, 30 Mar 2022 13:27:49 -0500 Subject: [PATCH 045/785] fail on error and use ptypes.MarshalAny for now instead of anypb.New --- agent/xds/clusters.go | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index fac6d0cfd..d9906058d 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -488,7 +488,9 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam protocol = cfg.Protocol } if protocol == "http2" || protocol == "grpc" { - s.setHttp2ProtocolOptions(c) + if err := s.setHttp2ProtocolOptions(c); err != nil { + return c, err + } } return c, err @@ -539,7 +541,9 @@ func (s *ResourceGenerator) makeUpstreamClusterForPreparedQuery(upstream structs OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck), } if cfg.Protocol == "http2" || cfg.Protocol == "grpc" { - s.setHttp2ProtocolOptions(c) + if err := s.setHttp2ProtocolOptions(c); err != nil { + return c, err + } } } @@ -744,7 +748,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( } if proto == "http2" || proto == "grpc" { - s.setHttp2ProtocolOptions(c) + if err := s.setHttp2ProtocolOptions(c); err != nil { + return nil, err + } } commonTLSContext := makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()) @@ -1040,8 +1046,8 @@ func injectLBToCluster(ec *structs.LoadBalancer, c *envoy_cluster_v3.Cluster) er return nil } -func (s *ResourceGenerator) setHttp2ProtocolOptions(c *envoy_cluster_v3.Cluster) { - typedExtensionProtocolOptions := &envoy_upstreams_v3.HttpProtocolOptions{ +func (s *ResourceGenerator) setHttp2ProtocolOptions(c *envoy_cluster_v3.Cluster) error { + cfg := &envoy_upstreams_v3.HttpProtocolOptions{ UpstreamProtocolOptions: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_{ ExplicitHttpConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig{ ProtocolConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{ @@ -1050,10 +1056,13 @@ func (s *ResourceGenerator) setHttp2ProtocolOptions(c *envoy_cluster_v3.Cluster) }, }, } - typedExtensionProtocolOptionsEncoded, err := anypb.New(typedExtensionProtocolOptions) + any, err := ptypes.MarshalAny(cfg) if err != nil { - s.Logger.Warn("failed to convert http protocol options to anypb") + return err } - c.TypedExtensionProtocolOptions = make(map[string]*anypb.Any) - c.TypedExtensionProtocolOptions["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] = typedExtensionProtocolOptionsEncoded + c.TypedExtensionProtocolOptions = map[string]*anypb.Any{ + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": any, + } + + return nil } From ee11dff5a5edb46dcd703c5dcbd3c60ab809749d Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Wed, 30 Mar 2022 13:28:00 -0500 Subject: [PATCH 046/785] similar bump --- agent/xds/xds_protocol_helpers_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/agent/xds/xds_protocol_helpers_test.go b/agent/xds/xds_protocol_helpers_test.go index 6f2863f31..c4d5f8673 100644 --- a/agent/xds/xds_protocol_helpers_test.go +++ b/agent/xds/xds_protocol_helpers_test.go @@ -449,10 +449,11 @@ func makeTestCluster(t *testing.T, snap *proxycfg.ConfigSnapshot, fixtureName st }, }, } - typedExtensionProtocolOptionsEncoded, err := anypb.New(typedExtensionProtocolOptions) + typedExtensionProtocolOptionsEncoded, err := ptypes.MarshalAny(typedExtensionProtocolOptions) require.NoError(t, err) - c.TypedExtensionProtocolOptions = make(map[string]*anypb.Any) - c.TypedExtensionProtocolOptions["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] = typedExtensionProtocolOptionsEncoded + c.TypedExtensionProtocolOptions = map[string]*anypb.Any{ + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": typedExtensionProtocolOptionsEncoded, + } return c case "http:db": return &envoy_cluster_v3.Cluster{ From e9230e93d81a8fca9a05c2f589dc0af696f760dc Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Wed, 30 Mar 2022 13:43:59 -0500 Subject: [PATCH 047/785] xds: adding control of the mesh-wide min/max TLS versions and cipher suites from the mesh config entry (#12601) - `tls.incoming`: applies to the inbound mTLS targeting the public listener on `connect-proxy` and `terminating-gateway` envoy instances - `tls.outgoing`: applies to the outbound mTLS dialing upstreams from `connect-proxy` and `ingress-gateway` envoy instances Fixes #11966 --- .changelog/12601.txt | 3 + agent/proxycfg/connect_proxy.go | 40 +-- agent/proxycfg/ingress_gateway.go | 12 + agent/proxycfg/manager_test.go | 30 +- agent/proxycfg/snapshot.go | 65 ++++- agent/proxycfg/state_test.go | 100 ++++--- agent/proxycfg/terminating_gateway.go | 29 ++ agent/proxycfg/testing.go | 3 + agent/proxycfg/testing_connect_proxy.go | 6 + agent/proxycfg/upstreams.go | 17 ++ agent/structs/config_entry_gateways.go | 32 +-- agent/structs/config_entry_mesh.go | 76 +++++ agent/structs/config_entry_test.go | 54 ++++ agent/xds/clusters.go | 23 +- agent/xds/clusters_test.go | 141 +++++++++ agent/xds/listeners.go | 90 +++++- agent/xds/listeners_ingress.go | 45 +-- agent/xds/listeners_test.go | 139 +++++++++ ...outgoing-cipher-suites.envoy-1-20-x.golden | 151 ++++++++++ ...s-outgoing-max-version.envoy-1-20-x.golden | 145 ++++++++++ ...going-min-version-auto.envoy-1-20-x.golden | 145 ++++++++++ ...s-outgoing-min-version.envoy-1-20-x.golden | 145 ++++++++++ ...outgoing-cipher-suites.envoy-1-20-x.golden | 68 +++++ ...s-outgoing-max-version.envoy-1-20-x.golden | 65 +++++ ...s-outgoing-min-version.envoy-1-20-x.golden | 65 +++++ ...incoming-cipher-suites.envoy-1-20-x.golden | 122 ++++++++ ...s-incoming-max-version.envoy-1-20-x.golden | 119 ++++++++ ...s-incoming-min-version.envoy-1-20-x.golden | 119 ++++++++ ...going-min-version-auto.envoy-1-20-x.golden | 119 ++++++++ ...incoming-cipher-suites.envoy-1-20-x.golden | 268 ++++++++++++++++++ ...s-incoming-max-version.envoy-1-20-x.golden | 256 +++++++++++++++++ ...s-incoming-min-version.envoy-1-20-x.golden | 256 +++++++++++++++++ api/config_entry.go | 7 +- api/config_entry_mesh.go | 13 + api/config_entry_test.go | 36 +++ command/config/write/config_write_test.go | 90 ++++++ .../docs/connect/config-entries/mesh.mdx | 188 ++++++++++++ 37 files changed, 3116 
insertions(+), 166 deletions(-) create mode 100644 .changelog/12601.txt create mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-max-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-max-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-min-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-cipher-suites.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-max-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-min-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.envoy-1-20-x.golden diff --git a/.changelog/12601.txt b/.changelog/12601.txt new file mode 100644 index 000000000..078da4439 --- /dev/null +++ b/.changelog/12601.txt @@ -0,0 +1,3 @@ +```release-note:feature +xds: adding control of the mesh-wide min/max TLS versions and cipher suites from the mesh config entry +``` diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 64ce9020c..d0849a01e 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -70,6 +70,18 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e return snap, err } + // Get information about the entire service mesh. 
+ err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{ + Kind: structs.MeshConfig, + Name: structs.MeshConfigMesh, + Datacenter: s.source.Datacenter, + QueryOptions: structs.QueryOptions{Token: s.token}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()), + }, meshConfigEntryID, s.ch) + if err != nil { + return snap, err + } + // Watch for service check updates err = s.cache.Notify(ctx, cachetype.ServiceHTTPChecksName, &cachetype.ServiceHTTPChecksRequest{ ServiceID: s.proxyCfg.DestinationServiceID, @@ -90,17 +102,6 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e if err != nil { return snap, err } - - err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{ - Kind: structs.MeshConfig, - Name: structs.MeshConfigMesh, - Datacenter: s.source.Datacenter, - QueryOptions: structs.QueryOptions{Token: s.token}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()), - }, meshConfigEntryID, s.ch) - if err != nil { - return snap, err - } } // Watch for updates to service endpoints for all upstreams @@ -368,23 +369,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u cache.UpdateEv svcID := structs.ServiceIDFromString(strings.TrimPrefix(u.CorrelationID, svcChecksWatchIDPrefix)) snap.ConnectProxy.WatchedServiceChecks[svcID] = resp - case u.CorrelationID == meshConfigEntryID: - resp, ok := u.Result.(*structs.ConfigEntryResponse) - if !ok { - return fmt.Errorf("invalid type for response: %T", u.Result) - } - - if resp.Entry != nil { - meshConf, ok := resp.Entry.(*structs.MeshConfigEntry) - if !ok { - return fmt.Errorf("invalid type for config entry: %T", resp.Entry) - } - snap.ConnectProxy.MeshConfig = meshConf - } else { - snap.ConnectProxy.MeshConfig = nil - } - snap.ConnectProxy.MeshConfigSet = true - default: return (*handlerUpstreams)(s).handleUpdateUpstreams(ctx, u, snap) } diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 1a5eb5ed9..34566201b 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -25,6 +25,18 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot, return snap, err } + // Get information about the entire service mesh. 
+ err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{ + Kind: structs.MeshConfig, + Name: structs.MeshConfigMesh, + Datacenter: s.source.Datacenter, + QueryOptions: structs.QueryOptions{Token: s.token}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()), + }, meshConfigEntryID, s.ch) + if err != nil { + return snap, err + } + // Watch this ingress gateway's config entry err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{ Kind: structs.IngressGateway, diff --git a/agent/proxycfg/manager_test.go b/agent/proxycfg/manager_test.go index 5ac37b793..61454a074 100644 --- a/agent/proxycfg/manager_test.go +++ b/agent/proxycfg/manager_test.go @@ -147,6 +147,13 @@ func TestManager_BasicLifecycle(t *testing.T) { }, }, }) + meshCacheKey := testGenCacheKey(&structs.ConfigEntryQuery{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: "my-token"}, + Kind: structs.MeshConfig, + Name: structs.MeshConfigMesh, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }) dbChainCacheKey := testGenCacheKey(&structs.DiscoveryChainRequest{ Name: "db", @@ -214,7 +221,8 @@ func TestManager_BasicLifecycle(t *testing.T) { Roots: roots, ConnectProxy: configSnapshotConnectProxy{ ConfigSnapshotUpstreams: ConfigSnapshotUpstreams{ - Leaf: leaf, + Leaf: leaf, + MeshConfigSet: true, DiscoveryChain: map[UpstreamID]*structs.CompiledDiscoveryChain{ dbUID: dbDefaultChain(), }, @@ -272,7 +280,8 @@ func TestManager_BasicLifecycle(t *testing.T) { Roots: roots, ConnectProxy: configSnapshotConnectProxy{ ConfigSnapshotUpstreams: ConfigSnapshotUpstreams{ - Leaf: leaf, + Leaf: leaf, + MeshConfigSet: true, DiscoveryChain: map[UpstreamID]*structs.CompiledDiscoveryChain{ dbUID: dbSplitChain(), }, @@ -319,6 +328,7 @@ func TestManager_BasicLifecycle(t *testing.T) { types.roots.Set(rootsCacheKey, roots) types.leaf.Set(leafCacheKey, leaf) types.intentions.Set(intentionCacheKey, TestIntentions()) + types.configEntry.Set(meshCacheKey, &structs.ConfigEntryResponse{Entry: nil}) tt.setup(t, types) expectSnapCopy, err := copystructure.Copy(tt.expectSnap) @@ -692,6 +702,22 @@ func TestManager_SyncState_No_Notify(t *testing.T) { } + // update the mesh config entry + notifyCH <- cache.UpdateEvent{ + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + Err: nil, + } + + // at this point the snapshot should not be valid and not be sent + after = time.After(200 * time.Millisecond) + select { + case <-snapSent: + t.Fatal("snap should not be valid") + case <-after: + + } + // prepare to read a snapshot update as the next update should make the snapshot valid readEvent <- true diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index 98aafa262..cebf0b2e9 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -17,6 +17,9 @@ import ( type ConfigSnapshotUpstreams struct { Leaf *structs.IssuedCert + MeshConfig *structs.MeshConfigEntry + MeshConfigSet bool + // DiscoveryChain is a map of UpstreamID -> CompiledDiscoveryChain's, and // is used to determine what services could be targeted by this upstream. // We then instantiate watches for those targets. @@ -117,12 +120,10 @@ type configSnapshotConnectProxy struct { // intentions. 
Intentions structs.Intentions IntentionsSet bool - - MeshConfig *structs.MeshConfigEntry - MeshConfigSet bool } -func (c *configSnapshotConnectProxy) IsEmpty() bool { +// isEmpty is a test helper +func (c *configSnapshotConnectProxy) isEmpty() bool { if c == nil { return true } @@ -143,6 +144,9 @@ func (c *configSnapshotConnectProxy) IsEmpty() bool { } type configSnapshotTerminatingGateway struct { + MeshConfig *structs.MeshConfigEntry + MeshConfigSet bool + // WatchedServices is a map of service name to a cancel function. This cancel // function is tied to the watch of linked service instances for the given // id. If the linked services watch would indicate the removal of @@ -241,7 +245,8 @@ func (c *configSnapshotTerminatingGateway) ValidServices() []structs.ServiceName return out } -func (c *configSnapshotTerminatingGateway) IsEmpty() bool { +// isEmpty is a test helper +func (c *configSnapshotTerminatingGateway) isEmpty() bool { if c == nil { return true } @@ -257,7 +262,8 @@ func (c *configSnapshotTerminatingGateway) IsEmpty() bool { len(c.ServiceConfigs) == 0 && len(c.WatchedConfigs) == 0 && len(c.GatewayServices) == 0 && - len(c.HostnameServices) == 0 + len(c.HostnameServices) == 0 && + !c.MeshConfigSet } type configSnapshotMeshGateway struct { @@ -335,7 +341,8 @@ func (c *configSnapshotMeshGateway) GatewayKeys() []GatewayKey { return keys } -func (c *configSnapshotMeshGateway) IsEmpty() bool { +// isEmpty is a test helper +func (c *configSnapshotMeshGateway) isEmpty() bool { if c == nil { return true } @@ -382,7 +389,8 @@ type configSnapshotIngressGateway struct { Listeners map[IngressListenerKey]structs.IngressListener } -func (c *configSnapshotIngressGateway) IsEmpty() bool { +// isEmpty is a test helper +func (c *configSnapshotIngressGateway) isEmpty() bool { if c == nil { return true } @@ -390,7 +398,8 @@ func (c *configSnapshotIngressGateway) IsEmpty() bool { len(c.UpstreamsSet) == 0 && len(c.DiscoveryChain) == 0 && len(c.WatchedUpstreams) == 0 && - len(c.WatchedUpstreamEndpoints) == 0 + len(c.WatchedUpstreamEndpoints) == 0 && + !c.MeshConfigSet } type IngressListenerKey struct { @@ -451,10 +460,12 @@ func (s *ConfigSnapshot) Valid() bool { } return s.Roots != nil && s.ConnectProxy.Leaf != nil && - s.ConnectProxy.IntentionsSet + s.ConnectProxy.IntentionsSet && + s.ConnectProxy.MeshConfigSet case structs.ServiceKindTerminatingGateway: - return s.Roots != nil + return s.Roots != nil && + s.TerminatingGateway.MeshConfigSet case structs.ServiceKindMeshGateway: if s.ServiceMeta[structs.MetaWANFederationKey] == "1" { @@ -469,7 +480,8 @@ func (s *ConfigSnapshot) Valid() bool { return s.Roots != nil && s.IngressGateway.Leaf != nil && s.IngressGateway.GatewayConfigLoaded && - s.IngressGateway.HostsSet + s.IngressGateway.HostsSet && + s.IngressGateway.MeshConfigSet default: return false } @@ -519,3 +531,32 @@ func (s *ConfigSnapshot) Leaf() *structs.IssuedCert { return nil } } + +func (s *ConfigSnapshot) MeshConfig() *structs.MeshConfigEntry { + switch s.Kind { + case structs.ServiceKindConnectProxy: + return s.ConnectProxy.MeshConfig + case structs.ServiceKindIngressGateway: + return s.IngressGateway.MeshConfig + case structs.ServiceKindTerminatingGateway: + return s.TerminatingGateway.MeshConfig + default: + return nil + } +} + +func (s *ConfigSnapshot) MeshConfigTLSIncoming() *structs.MeshDirectionalTLSConfig { + mesh := s.MeshConfig() + if mesh == nil || mesh.TLS == nil { + return nil + } + return mesh.TLS.Incoming +} + +func (s *ConfigSnapshot) MeshConfigTLSOutgoing() 
*structs.MeshDirectionalTLSConfig { + mesh := s.MeshConfig() + if mesh == nil || mesh.TLS == nil { + return nil + } + return mesh.TLS.Outgoing +} diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 7e88c6eab..5a88c2880 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -504,6 +504,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { rootsWatchID: genVerifyRootsWatch("dc1"), leafWatchID: genVerifyLeafWatch("web", "dc1"), intentionsWatchID: genVerifyIntentionWatch("web", "dc1"), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), "upstream:" + pqUID.String(): genVerifyPreparedQueryWatch("query", "dc1"), fmt.Sprintf("discovery-chain:%s", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{ Name: "api", @@ -568,6 +569,10 @@ func TestState_WatchesAndUpdates(t *testing.T) { Result: ixnMatch, Err: nil, }, + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + }, { CorrelationID: fmt.Sprintf("discovery-chain:%s", apiUID.String()), Result: &structs.DiscoveryChainResponse{ @@ -628,7 +633,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.True(t, snap.Valid()) - require.True(t, snap.MeshGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.ConnectProxy.Leaf) @@ -643,6 +648,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch.Matches[0], snap.ConnectProxy.Intentions) + require.True(t, snap.ConnectProxy.MeshConfigSet) }, } @@ -657,7 +663,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.True(t, snap.Valid()) - require.True(t, snap.MeshGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.ConnectProxy.Leaf) @@ -740,7 +746,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "gateway without root is not valid") - require.True(t, snap.ConnectProxy.IsEmpty()) + require.True(t, snap.ConnectProxy.isEmpty()) }, }, { @@ -749,7 +755,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "gateway without services is valid") - require.True(t, snap.ConnectProxy.IsEmpty()) + require.True(t, snap.ConnectProxy.isEmpty()) require.Equal(t, indexedRoots, snap.Roots) require.Empty(t, snap.MeshGateway.WatchedServices) require.False(t, snap.MeshGateway.WatchedServicesSet) @@ -771,7 +777,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.True(t, snap.Valid(), "gateway with empty service list is valid") - require.True(t, snap.ConnectProxy.IsEmpty()) + require.True(t, snap.ConnectProxy.isEmpty()) require.Equal(t, indexedRoots, snap.Roots) require.Empty(t, snap.MeshGateway.WatchedServices) require.True(t, snap.MeshGateway.WatchedServicesSet) @@ -940,17 +946,22 @@ func TestState_WatchesAndUpdates(t *testing.T) { { requiredWatches: map[string]verifyWatchRequest{ rootsWatchID: genVerifyRootsWatch("dc1"), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), gatewayConfigWatchID: genVerifyConfigEntryWatch(structs.IngressGateway, "ingress-gateway", "dc1"), 
gatewayServicesWatchID: genVerifyGatewayServiceWatch("ingress-gateway", "dc1"), }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "gateway without root is not valid") - require.True(t, snap.IngressGateway.IsEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) }, }, { events: []cache.UpdateEvent{ rootWatchEvent(), + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + }, }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "gateway without config entry is not valid") @@ -1104,11 +1115,16 @@ func TestState_WatchesAndUpdates(t *testing.T) { { requiredWatches: map[string]verifyWatchRequest{ rootsWatchID: genVerifyRootsWatch("dc1"), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), gatewayConfigWatchID: genVerifyConfigEntryWatch(structs.IngressGateway, "ingress-gateway", "dc1"), gatewayServicesWatchID: genVerifyGatewayServiceWatch("ingress-gateway", "dc1"), }, events: []cache.UpdateEvent{ rootWatchEvent(), + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + }, ingressConfigWatchEvent(true, false), { CorrelationID: gatewayServicesWatchID, @@ -1179,11 +1195,16 @@ func TestState_WatchesAndUpdates(t *testing.T) { { requiredWatches: map[string]verifyWatchRequest{ rootsWatchID: genVerifyRootsWatch("dc1"), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), gatewayConfigWatchID: genVerifyConfigEntryWatch(structs.IngressGateway, "ingress-gateway", "dc1"), gatewayServicesWatchID: genVerifyGatewayServiceWatch("ingress-gateway", "dc1"), }, events: []cache.UpdateEvent{ rootWatchEvent(), + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + }, ingressConfigWatchEvent(false, true), { CorrelationID: gatewayServicesWatchID, @@ -1266,27 +1287,33 @@ func TestState_WatchesAndUpdates(t *testing.T) { stages: []verificationStage{ { requiredWatches: map[string]verifyWatchRequest{ - rootsWatchID: genVerifyRootsWatch("dc1"), + rootsWatchID: genVerifyRootsWatch("dc1"), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), gatewayServicesWatchID: genVerifyServiceSpecificRequest(gatewayServicesWatchID, "terminating-gateway", "", "dc1", false), }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "gateway without root is not valid") - require.True(t, snap.ConnectProxy.IsEmpty()) - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) + require.True(t, snap.ConnectProxy.isEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) }, }, { events: []cache.UpdateEvent{ rootWatchEvent(), + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + }, }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.True(t, snap.Valid(), "gateway without services is valid") - require.True(t, snap.ConnectProxy.IsEmpty()) - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.ConnectProxy.isEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.False(t, snap.TerminatingGateway.isEmpty()) + require.Nil(t, snap.TerminatingGateway.MeshConfig) require.Equal(t, indexedRoots, snap.Roots) }, }, @@ -1303,12 +1330,17 @@ func TestState_WatchesAndUpdates(t *testing.T) { stages: []verificationStage{ { requiredWatches: 
map[string]verifyWatchRequest{ - rootsWatchID: genVerifyRootsWatch("dc1"), + rootsWatchID: genVerifyRootsWatch("dc1"), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), gatewayServicesWatchID: genVerifyServiceSpecificRequest(gatewayServicesWatchID, "terminating-gateway", "", "dc1", false), }, events: []cache.UpdateEvent{ rootWatchEvent(), + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{}, + }, { CorrelationID: gatewayServicesWatchID, Result: &structs.IndexedGatewayServices{ @@ -1680,11 +1712,11 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "proxy without roots/leaf/intentions is not valid") - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) - require.False(t, snap.ConnectProxy.IsEmpty()) + require.False(t, snap.ConnectProxy.isEmpty()) expectUpstreams := map[UpstreamID]*structs.Upstream{ dbUID: { DestinationName: "db", @@ -1721,9 +1753,9 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.Leaf()) require.Equal(t, TestIntentions().Matches[0], snap.ConnectProxy.Intentions) - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) require.True(t, snap.ConnectProxy.MeshConfigSet) require.Nil(t, snap.ConnectProxy.MeshConfig) }, @@ -1766,9 +1798,9 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "proxy without roots/leaf/intentions is not valid") - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) // Centrally configured upstream defaults should be stored so that upstreams from intentions can inherit them require.Len(t, snap.ConnectProxy.UpstreamConfig, 1) @@ -1807,9 +1839,9 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.Leaf()) require.Equal(t, TestIntentions().Matches[0], snap.ConnectProxy.Intentions) - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) require.True(t, snap.ConnectProxy.MeshConfigSet) require.NotNil(t, snap.ConnectProxy.MeshConfig) }, @@ -2333,9 +2365,9 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "proxy without roots/leaf/intentions is not valid") - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + 
require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) // Centrally configured upstream defaults should be stored so that upstreams from intentions can inherit them require.Len(t, snap.ConnectProxy.UpstreamConfig, 2) @@ -2375,9 +2407,9 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.Leaf()) require.Equal(t, TestIntentions().Matches[0], snap.ConnectProxy.Intentions) - require.True(t, snap.MeshGateway.IsEmpty()) - require.True(t, snap.IngressGateway.IsEmpty()) - require.True(t, snap.TerminatingGateway.IsEmpty()) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) require.True(t, snap.ConnectProxy.MeshConfigSet) require.NotNil(t, snap.ConnectProxy.MeshConfig) }, diff --git a/agent/proxycfg/terminating_gateway.go b/agent/proxycfg/terminating_gateway.go index b08985b29..73b968272 100644 --- a/agent/proxycfg/terminating_gateway.go +++ b/agent/proxycfg/terminating_gateway.go @@ -28,6 +28,18 @@ func (s *handlerTerminatingGateway) initialize(ctx context.Context) (ConfigSnaps return snap, err } + // Get information about the entire service mesh. + err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{ + Kind: structs.MeshConfig, + Name: structs.MeshConfigMesh, + Datacenter: s.source.Datacenter, + QueryOptions: structs.QueryOptions{Token: s.token}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()), + }, meshConfigEntryID, s.ch) + if err != nil { + return snap, err + } + // Watch for the terminating-gateway's linked services err = s.cache.Notify(ctx, cachetype.GatewayServicesName, &structs.ServiceSpecificRequest{ Datacenter: s.source.Datacenter, @@ -70,6 +82,23 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u cache.Up } snap.Roots = roots + case u.CorrelationID == meshConfigEntryID: + resp, ok := u.Result.(*structs.ConfigEntryResponse) + if !ok { + return fmt.Errorf("invalid type for response: %T", u.Result) + } + + if resp.Entry != nil { + meshConf, ok := resp.Entry.(*structs.MeshConfigEntry) + if !ok { + return fmt.Errorf("invalid type for config entry: %T", resp.Entry) + } + snap.TerminatingGateway.MeshConfig = meshConf + } else { + snap.TerminatingGateway.MeshConfig = nil + } + snap.TerminatingGateway.MeshConfigSet = true + // Update watches based on the current list of services associated with the terminating-gateway case u.CorrelationID == gatewayServicesWatchID: services, ok := u.Result.(*structs.IndexedGatewayServices) diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index af3a33061..eb2ebfb0b 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -32,6 +32,7 @@ type TestCacheTypes struct { query *ControllableCacheType compiledChain *ControllableCacheType serviceHTTPChecks *ControllableCacheType + configEntry *ControllableCacheType } // NewTestCacheTypes creates a set of ControllableCacheTypes for all types that @@ -46,6 +47,7 @@ func NewTestCacheTypes(t testing.T) *TestCacheTypes { query: NewControllableCacheType(t), compiledChain: NewControllableCacheType(t), serviceHTTPChecks: NewControllableCacheType(t), + configEntry: NewControllableCacheType(t), } ct.query.blocking = false return ct @@ -62,6 +64,7 @@ func TestCacheWithTypes(t testing.T, types *TestCacheTypes) *cache.Cache { c.RegisterType(cachetype.PreparedQueryName, types.query) 
c.RegisterType(cachetype.CompiledDiscoveryChainName, types.compiledChain) c.RegisterType(cachetype.ServiceHTTPChecksName, types.serviceHTTPChecks) + c.RegisterType(cachetype.ConfigEntryName, types.configEntry) return c } diff --git a/agent/proxycfg/testing_connect_proxy.go b/agent/proxycfg/testing_connect_proxy.go index 35a8e6011..61db728c2 100644 --- a/agent/proxycfg/testing_connect_proxy.go +++ b/agent/proxycfg/testing_connect_proxy.go @@ -125,6 +125,12 @@ func TestConfigSnapshotDiscoveryChain( }, }, }, + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{ + Entry: nil, + }, + }, { CorrelationID: svcChecksWatchIDPrefix + webSN, Result: []structs.CheckType{}, diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go index e77b554ff..f8daf340f 100644 --- a/agent/proxycfg/upstreams.go +++ b/agent/proxycfg/upstreams.go @@ -35,6 +35,23 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u cache.Up } upstreamsSnapshot.Leaf = leaf + case u.CorrelationID == meshConfigEntryID: + resp, ok := u.Result.(*structs.ConfigEntryResponse) + if !ok { + return fmt.Errorf("invalid type for response: %T", u.Result) + } + + if resp.Entry != nil { + meshConf, ok := resp.Entry.(*structs.MeshConfigEntry) + if !ok { + return fmt.Errorf("invalid type for config entry: %T", resp.Entry) + } + upstreamsSnapshot.MeshConfig = meshConf + } else { + upstreamsSnapshot.MeshConfig = nil + } + upstreamsSnapshot.MeshConfigSet = true + case strings.HasPrefix(u.CorrelationID, "discovery-chain:"): resp, ok := u.Result.(*structs.DiscoveryChainResponse) if !ok { diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index 80ef0f98d..94014230d 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -240,37 +240,7 @@ func (e *IngressGatewayConfigEntry) validateServiceSDS(lis IngressListener, svc } func validateGatewayTLSConfig(tlsCfg GatewayTLSConfig) error { - if tlsCfg.TLSMinVersion != types.TLSVersionUnspecified { - if err := types.ValidateTLSVersion(tlsCfg.TLSMinVersion); err != nil { - return err - } - } - - if tlsCfg.TLSMaxVersion != types.TLSVersionUnspecified { - if err := types.ValidateTLSVersion(tlsCfg.TLSMaxVersion); err != nil { - return err - } - - if tlsCfg.TLSMinVersion != types.TLSVersionUnspecified { - if err, maxLessThanMin := tlsCfg.TLSMaxVersion.LessThan(tlsCfg.TLSMinVersion); err == nil && maxLessThanMin { - return fmt.Errorf("configuring max version %s less than the configured min version %s is invalid", tlsCfg.TLSMaxVersion, tlsCfg.TLSMinVersion) - } - } - } - - if len(tlsCfg.CipherSuites) != 0 { - if _, ok := types.TLSVersionsWithConfigurableCipherSuites[tlsCfg.TLSMinVersion]; !ok { - return fmt.Errorf("configuring CipherSuites is only applicable to conncetions negotiated with TLS 1.2 or earlier, TLSMinVersion is set to %s", tlsCfg.TLSMinVersion) - } - - // NOTE: it would be nice to emit a warning but not return an error from - // here if TLSMaxVersion is unspecified, TLS_AUTO or TLSv1_3 - if err := types.ValidateEnvoyCipherSuites(tlsCfg.CipherSuites); err != nil { - return err - } - } - - return nil + return validateTLSConfig(tlsCfg.TLSMinVersion, tlsCfg.TLSMaxVersion, tlsCfg.CipherSuites) } func (e *IngressGatewayConfigEntry) Validate() error { diff --git a/agent/structs/config_entry_mesh.go b/agent/structs/config_entry_mesh.go index 020b76c9c..eb9f34f43 100644 --- a/agent/structs/config_entry_mesh.go +++ b/agent/structs/config_entry_mesh.go @@ -5,6 +5,7 @@ import ( 
"fmt" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/types" ) type MeshConfigEntry struct { @@ -12,6 +13,8 @@ type MeshConfigEntry struct { // when enabled. TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` + TLS *MeshTLSConfig `json:",omitempty"` + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex @@ -25,6 +28,20 @@ type TransparentProxyMeshConfig struct { MeshDestinationsOnly bool `alias:"mesh_destinations_only"` } +type MeshTLSConfig struct { + Incoming *MeshDirectionalTLSConfig `json:",omitempty"` + Outgoing *MeshDirectionalTLSConfig `json:",omitempty"` +} + +type MeshDirectionalTLSConfig struct { + TLSMinVersion types.TLSVersion `json:",omitempty" alias:"tls_min_version"` + TLSMaxVersion types.TLSVersion `json:",omitempty" alias:"tls_max_version"` + + // Define a subset of cipher suites to restrict + // Only applicable to connections negotiated via TLS 1.2 or earlier + CipherSuites []types.TLSCipherSuite `json:",omitempty" alias:"cipher_suites"` +} + func (e *MeshConfigEntry) GetKind() string { return MeshConfig } @@ -57,10 +74,24 @@ func (e *MeshConfigEntry) Validate() error { if e == nil { return fmt.Errorf("config entry is nil") } + if err := validateConfigEntryMeta(e.Meta); err != nil { return err } + if e.TLS != nil { + if e.TLS.Incoming != nil { + if err := validateMeshDirectionalTLSConfig(e.TLS.Incoming); err != nil { + return fmt.Errorf("error in incoming TLS configuration: %v", err) + } + } + if e.TLS.Outgoing != nil { + if err := validateMeshDirectionalTLSConfig(e.TLS.Outgoing); err != nil { + return fmt.Errorf("error in outgoing TLS configuration: %v", err) + } + } + } + return e.validateEnterpriseMeta() } @@ -105,3 +136,48 @@ func (e *MeshConfigEntry) MarshalJSON() ([]byte, error) { } return json.Marshal(source) } + +func validateMeshDirectionalTLSConfig(cfg *MeshDirectionalTLSConfig) error { + if cfg == nil { + return nil + } + return validateTLSConfig(cfg.TLSMinVersion, cfg.TLSMaxVersion, cfg.CipherSuites) +} + +func validateTLSConfig( + tlsMinVersion types.TLSVersion, + tlsMaxVersion types.TLSVersion, + cipherSuites []types.TLSCipherSuite, +) error { + if tlsMinVersion != types.TLSVersionUnspecified { + if err := types.ValidateTLSVersion(tlsMinVersion); err != nil { + return err + } + } + + if tlsMaxVersion != types.TLSVersionUnspecified { + if err := types.ValidateTLSVersion(tlsMaxVersion); err != nil { + return err + } + + if tlsMinVersion != types.TLSVersionUnspecified { + if err, maxLessThanMin := tlsMaxVersion.LessThan(tlsMinVersion); err == nil && maxLessThanMin { + return fmt.Errorf("configuring max version %s less than the configured min version %s is invalid", tlsMaxVersion, tlsMinVersion) + } + } + } + + if len(cipherSuites) != 0 { + if _, ok := types.TLSVersionsWithConfigurableCipherSuites[tlsMinVersion]; !ok { + return fmt.Errorf("configuring CipherSuites is only applicable to connections negotiated with TLS 1.2 or earlier, TLSMinVersion is set to %s", tlsMinVersion) + } + + // NOTE: it would be nice to emit a warning but not return an error from + // here if TLSMaxVersion is unspecified, TLS_AUTO or TLSv1_3 + if err := types.ValidateEnvoyCipherSuites(cipherSuites); err != nil { + return err + } + } + + return nil +} diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index e4d581075..f125998b9 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -1675,6 +1675,24 @@ func 
TestDecodeConfigEntry(t *testing.T) { transparent_proxy { mesh_destinations_only = true } + tls { + incoming { + tls_min_version = "TLSv1_1" + tls_max_version = "TLSv1_2" + cipher_suites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + outgoing { + tls_min_version = "TLSv1_1" + tls_max_version = "TLSv1_2" + cipher_suites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + } `, camel: ` Kind = "mesh" @@ -1685,6 +1703,24 @@ func TestDecodeConfigEntry(t *testing.T) { TransparentProxy { MeshDestinationsOnly = true } + TLS { + Incoming { + TLSMinVersion = "TLSv1_1" + TLSMaxVersion = "TLSv1_2" + CipherSuites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + Outgoing { + TLSMinVersion = "TLSv1_1" + TLSMaxVersion = "TLSv1_2" + CipherSuites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + } `, expect: &MeshConfigEntry{ Meta: map[string]string{ @@ -1694,6 +1730,24 @@ func TestDecodeConfigEntry(t *testing.T) { TransparentProxy: TransparentProxyMeshConfig{ MeshDestinationsOnly: true, }, + TLS: &MeshTLSConfig{ + Incoming: &MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSv1_1, + TLSMaxVersion: types.TLSv1_2, + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + }, + }, + Outgoing: &MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSv1_1, + TLSMaxVersion: types.TLSv1_2, + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + }, + }, + }, }, }, { diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 716c320d3..94610a828 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -158,8 +158,8 @@ func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, // This size is an upper bound. clusters := make([]proto.Message, 0, len(cfgSnap.ConnectProxy.PassthroughUpstreams)+1) - if cfgSnap.ConnectProxy.MeshConfig == nil || - !cfgSnap.ConnectProxy.MeshConfig.TransparentProxy.MeshDestinationsOnly { + if meshConf := cfgSnap.MeshConfig(); meshConf == nil || + !meshConf.TransparentProxy.MeshDestinationsOnly { clusters = append(clusters, &envoy_cluster_v3.Cluster{ Name: OriginalDestinationClusterName, @@ -200,7 +200,11 @@ func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, Service: uid.Name, } - commonTLSContext := makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()) + commonTLSContext := makeCommonTLSContextFromLeaf( + cfgSnap, + cfgSnap.Leaf(), + makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()), + ) err := injectSANMatcher(commonTLSContext, spiffeID) if err != nil { return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err) @@ -567,7 +571,11 @@ func (s *ResourceGenerator) makeUpstreamClusterForPreparedQuery(upstream structs } // Enable TLS upstream with the configured client certificate. - commonTLSContext := makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()) + commonTLSContext := makeCommonTLSContextFromLeaf( + cfgSnap, + cfgSnap.Leaf(), + makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()), + ) err = injectSANMatcher(commonTLSContext, spiffeIDs...) 
if err != nil { return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err) @@ -745,7 +753,12 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( c.Http2ProtocolOptions = &envoy_core_v3.Http2ProtocolOptions{} } - commonTLSContext := makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()) + commonTLSContext := makeCommonTLSContextFromLeaf( + cfgSnap, + cfgSnap.Leaf(), + makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()), + ) + err = injectSANMatcher(commonTLSContext, spiffeIDs...) if err != nil { return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err) diff --git a/agent/xds/clusters_test.go b/agent/xds/clusters_test.go index 4b4de8d27..c8a1e98e5 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -13,11 +13,13 @@ import ( testinf "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/xds/proxysupport" "github.com/hashicorp/consul/agent/xds/xdscommon" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/types" ) func TestClustersFromSnapshot(t *testing.T) { @@ -36,6 +38,85 @@ func TestClustersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshot(t, nil, nil) }, }, + { + name: "connect-proxy-with-tls-outgoing-min-version-auto", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSVersionAuto, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "connect-proxy-with-tls-outgoing-min-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSv1_3, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "connect-proxy-with-tls-outgoing-max-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + TLSMaxVersion: types.TLSv1_2, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "connect-proxy-with-tls-outgoing-cipher-suites", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + }, + }, + }, + }, + }, + }, + }) + }, + }, { name: "custom-local-app", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -322,6 +403,66 @@ func TestClustersFromSnapshot(t *testing.T) { "default", nil, nil, nil) }, }, + { + name: "ingress-gateway-with-tls-outgoing-min-version", + 
create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSv1_3, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "ingress-gateway-with-tls-outgoing-max-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + TLSMaxVersion: types.TLSv1_2, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "ingress-gateway-with-tls-outgoing-cipher-suites", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + }, + }, + }, + }, + }, + }, + }) + }, + }, { name: "ingress-gateway-no-services", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 2a671c40d..bca152ad9 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/consul/agent/connect/ca" + "github.com/hashicorp/consul/types" envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" @@ -250,8 +251,8 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. 
}) // Add a catch-all filter chain that acts as a TCP proxy to destinations outside the mesh - if cfgSnap.ConnectProxy.MeshConfig == nil || - !cfgSnap.ConnectProxy.MeshConfig.TransparentProxy.MeshDestinationsOnly { + if meshConf := cfgSnap.MeshConfig(); meshConf == nil || + !meshConf.TransparentProxy.MeshDestinationsOnly { filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ clusterName: OriginalDestinationClusterName, @@ -749,7 +750,11 @@ func injectHTTPFilterOnFilterChains( func (s *ResourceGenerator) injectConnectTLSOnFilterChains(cfgSnap *proxycfg.ConfigSnapshot, listener *envoy_listener_v3.Listener) error { for idx := range listener.FilterChains { tlsContext := &envoy_tls_v3.DownstreamTlsContext{ - CommonTlsContext: makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()), + CommonTlsContext: makeCommonTLSContextFromLeaf( + cfgSnap, + cfgSnap.Leaf(), + makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSIncoming()), + ), RequireClientCertificate: &wrappers.BoolValue{Value: true}, } transportSocket, err := makeDownstreamTLSTransportSocket(tlsContext) @@ -1071,7 +1076,11 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway( protocol string, ) (*envoy_listener_v3.FilterChain, error) { tlsContext := &envoy_tls_v3.DownstreamTlsContext{ - CommonTlsContext: makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.TerminatingGateway.ServiceLeaves[service]), + CommonTlsContext: makeCommonTLSContextFromLeaf( + cfgSnap, + cfgSnap.TerminatingGateway.ServiceLeaves[service], + makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSIncoming()), + ), RequireClientCertificate: &wrappers.BoolValue{Value: true}, } transportSocket, err := makeDownstreamTLSTransportSocket(tlsContext) @@ -1549,11 +1558,11 @@ func makeEnvoyHTTPFilter(name string, cfg proto.Message) (*envoy_http_v3.HttpFil }, nil } -func makeCommonTLSContextFromLeafWithoutParams(cfgSnap *proxycfg.ConfigSnapshot, leaf *structs.IssuedCert) *envoy_tls_v3.CommonTlsContext { - return makeCommonTLSContextFromLeaf(cfgSnap, leaf, nil) -} - -func makeCommonTLSContextFromLeaf(cfgSnap *proxycfg.ConfigSnapshot, leaf *structs.IssuedCert, tlsParams *envoy_tls_v3.TlsParameters) *envoy_tls_v3.CommonTlsContext { +func makeCommonTLSContextFromLeaf( + cfgSnap *proxycfg.ConfigSnapshot, + leaf *structs.IssuedCert, + tlsParams *envoy_tls_v3.TlsParameters, +) *envoy_tls_v3.CommonTlsContext { // Concatenate all the root PEMs into one. if cfgSnap.Roots == nil { return nil @@ -1662,3 +1671,66 @@ func makeCommonTLSContextFromFiles(caFile, certFile, keyFile string) *envoy_tls_ return &ctx } + +func validateListenerTLSConfig(tlsMinVersion types.TLSVersion, cipherSuites []types.TLSCipherSuite) error { + // Validate. Configuring cipher suites is only applicable to connections negotiated + // via TLS 1.2 or earlier. Other cases shouldn't be possible as we validate them at + // input but be resilient to bugs later. 
+ if len(cipherSuites) != 0 { + if _, ok := tlsVersionsWithConfigurableCipherSuites[tlsMinVersion]; !ok { + return fmt.Errorf("configuring CipherSuites is only applicable to connections negotiated with TLS 1.2 or earlier, TLSMinVersion is set to %s in config", tlsMinVersion) + } + } + + return nil +} + +var tlsVersionsWithConfigurableCipherSuites = map[types.TLSVersion]struct{}{ + // Remove these two if Envoy ever sets TLS 1.3 as default minimum + types.TLSVersionUnspecified: {}, + types.TLSVersionAuto: {}, + + types.TLSv1_0: {}, + types.TLSv1_1: {}, + types.TLSv1_2: {}, +} + +func makeTLSParametersFromProxyTLSConfig(tlsConf *structs.MeshDirectionalTLSConfig) *envoy_tls_v3.TlsParameters { + if tlsConf == nil { + return &envoy_tls_v3.TlsParameters{} + } + + return makeTLSParametersFromTLSConfig(tlsConf.TLSMinVersion, tlsConf.TLSMaxVersion, tlsConf.CipherSuites) +} + +func makeTLSParametersFromTLSConfig( + tlsMinVersion types.TLSVersion, + tlsMaxVersion types.TLSVersion, + cipherSuites []types.TLSCipherSuite, +) *envoy_tls_v3.TlsParameters { + tlsParams := envoy_tls_v3.TlsParameters{} + + if tlsMinVersion != types.TLSVersionUnspecified { + if minVersion, ok := envoyTLSVersions[tlsMinVersion]; ok { + tlsParams.TlsMinimumProtocolVersion = minVersion + } + } + if tlsMaxVersion != types.TLSVersionUnspecified { + if maxVersion, ok := envoyTLSVersions[tlsMaxVersion]; ok { + tlsParams.TlsMaximumProtocolVersion = maxVersion + } + } + if len(cipherSuites) != 0 { + tlsParams.CipherSuites = types.MarshalEnvoyTLSCipherSuiteStrings(cipherSuites) + } + + return &tlsParams +} + +var envoyTLSVersions = map[types.TLSVersion]envoy_tls_v3.TlsParameters_TlsProtocol{ + types.TLSVersionAuto: envoy_tls_v3.TlsParameters_TLS_AUTO, + types.TLSv1_0: envoy_tls_v3.TlsParameters_TLSv1_0, + types.TLSv1_1: envoy_tls_v3.TlsParameters_TLSv1_1, + types.TLSv1_2: envoy_tls_v3.TlsParameters_TLSv1_2, + types.TLSv1_3: envoy_tls_v3.TlsParameters_TLSv1_3, +} diff --git a/agent/xds/listeners_ingress.go b/agent/xds/listeners_ingress.go index cd2023d2b..5261bdb50 100644 --- a/agent/xds/listeners_ingress.go +++ b/agent/xds/listeners_ingress.go @@ -214,23 +214,8 @@ func resolveListenerTLSConfig(gatewayTLSCfg *structs.GatewayTLSConfig, listenerC } } - var TLSVersionsWithConfigurableCipherSuites = map[types.TLSVersion]struct{}{ - // Remove these two if Envoy ever sets TLS 1.3 as default minimum - types.TLSVersionUnspecified: {}, - types.TLSVersionAuto: {}, - - types.TLSv1_0: {}, - types.TLSv1_1: {}, - types.TLSv1_2: {}, - } - - // Validate. Configuring cipher suites is only applicable to connections negotiated - // via TLS 1.2 or earlier. Other cases shouldn't be possible as we validate them at - // input but be resilient to bugs later. 
- if len(mergedCfg.CipherSuites) != 0 { - if _, ok := TLSVersionsWithConfigurableCipherSuites[mergedCfg.TLSMinVersion]; !ok { - return nil, fmt.Errorf("configuring CipherSuites is only applicable to connections negotiated with TLS 1.2 or earlier, TLSMinVersion is set to %s in listener or gateway config", mergedCfg.TLSMinVersion) - } + if err := validateListenerTLSConfig(mergedCfg.TLSMinVersion, mergedCfg.CipherSuites); err != nil { + return nil, err } return &mergedCfg, nil @@ -365,32 +350,8 @@ func makeSDSOverrideFilterChains(cfgSnap *proxycfg.ConfigSnapshot, return chains, nil } -var envoyTLSVersions = map[types.TLSVersion]envoy_tls_v3.TlsParameters_TlsProtocol{ - types.TLSVersionAuto: envoy_tls_v3.TlsParameters_TLS_AUTO, - types.TLSv1_0: envoy_tls_v3.TlsParameters_TLSv1_0, - types.TLSv1_1: envoy_tls_v3.TlsParameters_TLSv1_1, - types.TLSv1_2: envoy_tls_v3.TlsParameters_TLSv1_2, - types.TLSv1_3: envoy_tls_v3.TlsParameters_TLSv1_3, -} - func makeTLSParametersFromGatewayTLSConfig(tlsCfg structs.GatewayTLSConfig) *envoy_tls_v3.TlsParameters { - tlsParams := envoy_tls_v3.TlsParameters{} - - if tlsCfg.TLSMinVersion != types.TLSVersionUnspecified { - if minVersion, ok := envoyTLSVersions[tlsCfg.TLSMinVersion]; ok { - tlsParams.TlsMinimumProtocolVersion = minVersion - } - } - if tlsCfg.TLSMaxVersion != types.TLSVersionUnspecified { - if maxVersion, ok := envoyTLSVersions[tlsCfg.TLSMaxVersion]; ok { - tlsParams.TlsMaximumProtocolVersion = maxVersion - } - } - if len(tlsCfg.CipherSuites) != 0 { - tlsParams.CipherSuites = types.MarshalEnvoyTLSCipherSuiteStrings(tlsCfg.CipherSuites) - } - - return &tlsParams + return makeTLSParametersFromTLSConfig(tlsCfg.TLSMinVersion, tlsCfg.TLSMaxVersion, tlsCfg.CipherSuites) } func makeCommonTLSContextFromGatewayTLSConfig(tlsCfg structs.GatewayTLSConfig) *envoy_tls_v3.CommonTlsContext { diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index 71c5a09fe..d80cde7b1 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -43,6 +43,85 @@ func TestListenersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshot(t, nil, nil) }, }, + { + name: "connect-proxy-with-tls-outgoing-min-version-auto", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Outgoing: &structs.MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSVersionAuto, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "connect-proxy-with-tls-incoming-min-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Incoming: &structs.MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSv1_3, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "connect-proxy-with-tls-incoming-max-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Incoming: &structs.MeshDirectionalTLSConfig{ + TLSMaxVersion: types.TLSv1_2, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "connect-proxy-with-tls-incoming-cipher-suites", + create: func(t testinf.T) 
*proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Incoming: &structs.MeshDirectionalTLSConfig{ + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + }, + }, + }, + }, + }, + }, + }) + }, + }, { name: "listener-bind-address", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -477,6 +556,66 @@ func TestListenersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, nil, nil) }, }, + { + name: "terminating-gateway-with-tls-incoming-min-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Incoming: &structs.MeshDirectionalTLSConfig{ + TLSMinVersion: types.TLSv1_3, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "terminating-gateway-with-tls-incoming-max-version", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Incoming: &structs.MeshDirectionalTLSConfig{ + TLSMaxVersion: types.TLSv1_2, + }, + }, + }, + }, + }, + }) + }, + }, + { + name: "terminating-gateway-with-tls-incoming-cipher-suites", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, nil, []cache.UpdateEvent{ + { + CorrelationID: "mesh", + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TLS: &structs.MeshTLSConfig{ + Incoming: &structs.MeshDirectionalTLSConfig{ + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + }, + }, + }, + }, + }, + }, + }) + }, + }, { name: "terminating-gateway-no-services", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden new file mode 100644 index 000000000..3efce306b --- /dev/null +++ b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden @@ -0,0 +1,151 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + 
{ + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-max-version.envoy-1-20-x.golden b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-max-version.envoy-1-20-x.golden new file mode 100644 index 000000000..afef227a2 --- /dev/null +++ b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-max-version.envoy-1-20-x.golden @@ -0,0 +1,145 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": 
"type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + 
"circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden 
b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden new file mode 100644 index 000000000..2e3c9ea20 --- /dev/null +++ b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden @@ -0,0 +1,145 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { 
+ "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] 
+ } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version.envoy-1-20-x.golden b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version.envoy-1-20-x.golden new file mode 100644 index 000000000..9d8c283a4 --- /dev/null +++ b/agent/xds/testdata/clusters/connect-proxy-with-tls-outgoing-min-version.envoy-1-20-x.golden @@ -0,0 +1,145 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden b/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden new file mode 100644 index 000000000..6a64ad79b --- /dev/null +++ b/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-cipher-suites.envoy-1-20-x.golden @@ -0,0 +1,68 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-max-version.envoy-1-20-x.golden b/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-max-version.envoy-1-20-x.golden new file mode 100644 index 000000000..eb24656eb --- /dev/null +++ b/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-max-version.envoy-1-20-x.golden @@ -0,0 +1,65 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + 
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-min-version.envoy-1-20-x.golden b/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-min-version.envoy-1-20-x.golden new file mode 100644 index 000000000..1c29f4d25 --- /dev/null +++ b/agent/xds/testdata/clusters/ingress-gateway-with-tls-outgoing-min-version.envoy-1-20-x.golden @@ -0,0 +1,65 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + 
"ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-cipher-suites.envoy-1-20-x.golden b/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-cipher-suites.envoy-1-20-x.golden new file mode 100644 index 000000000..05ed17d4e --- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-cipher-suites.envoy-1-20-x.golden @@ -0,0 +1,122 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": 
"type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-max-version.envoy-1-20-x.golden b/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-max-version.envoy-1-20-x.golden new file mode 100644 index 000000000..8f157291c --- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-max-version.envoy-1-20-x.golden @@ -0,0 +1,119 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-min-version.envoy-1-20-x.golden b/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-min-version.envoy-1-20-x.golden new file mode 100644 index 000000000..1081c4431 --- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-with-tls-incoming-min-version.envoy-1-20-x.golden @@ -0,0 +1,119 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + 
"cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden b/agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden new file mode 100644 index 000000000..57d50f71c --- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-min-version-auto.envoy-1-20-x.golden @@ -0,0 +1,119 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.envoy-1-20-x.golden b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.envoy-1-20-x.golden new file mode 100644 index 000000000..7b8a7eb8f --- /dev/null +++ b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.envoy-1-20-x.golden @@ -0,0 +1,268 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api.default.default.dc1", + "cluster": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.cache.default.default.dc1", + "cluster": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICmjCCAkGgAwIBAgIQe1ZmC0rzRwer6jaH1YIUIjAKBggqhkjOPQQDAjCBuDEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg\nODE5ODAwNjg0MDM0MTM3ODkyNDYxNTA1MDk0NDU3OTU1MTQxNjEwHhcNMjAwNjE5\nMTU1MjAzWhcNMjEwNjE5MTU1MjAzWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH2aWaaa3fpQLBayheHiKlrH\n+z53m0frfGknKjOhOPVYDVHV8x0OE01negswVQbKHAtxPf1M8Zy+WbI9rK7Ua1mj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDf9CPBSUwwZvpeW73oJLTmgQE2\ntW1NKpL5t1uq9WFcqDArBgNVHSMEJDAigCCPPd/NxgZB0tq2M8pdVpPj3Cr79iTv\ni4/T1ysodfMb7zAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIFCjFZAoXq0s2ied2eIBv0i1KoW5\nIhCylnKFt6iHkyDeAiBBCByTcjHRgEQmqyPojQKoO584EFiczTub9aWdnf9tEw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEINsen3S8xzxMrKcRZIvxXzhKDn43Tw9ttqWEFU9TqS5hoAoGCCqGSM49\nAwEHoUQDQgAEfZpZpprd+lAsFrKF4eIqWsf7PnebR+t8aScqM6E49VgNUdXzHQ4T\nTWd6CzBVBsocC3E9/UzxnL5Zsj2srtRrWQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + 
"transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkOgAwIBAgIRAKF+qDJbaOULNL1TIatrsBowCgYIKoZIzj0EAwIwgbkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB\nIDE4Nzg3MDAwNjUzMDcxOTYzNTk1ODkwNTE1ODY1NjEzMDA2MTU0NDAeFw0yMDA2\nMTkxNTMxMzRaFw0yMTA2MTkxNTMxMzRaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu\nY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdQ8Igci5f7ZvvCVsxXt9\ntLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZbz/82EwPoS7Dqo3LTK4IuelOimoNNxuk\nkaOBxzCBxDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\nAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEILzTLkfJcdWQnTMKUcai/YJq\n0RqH1pjCqtY7SOU4gGOTMCsGA1UdIwQkMCKAIMa2vNcTEC5AGfHIYARJ/4sodX0o\nLzCj3lpw7BcEzPTcMC0GA1UdEQQmMCSCEXNlcnZlci5kYzEuY29uc3Vsgglsb2Nh\nbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgBZ/Z4GSLEc98WvT/qjTVCNTG\n1WNaAaesVbkRx+J0yl8CIQDAVoqY9ByA5vKHjnQrxWlc/JUtJz8wudg7e/OCRriP\nSg==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIN1v14FaNxgY4MgjDOOWthen8dgwB0lNMs9/j2TfrnxzoAoGCCqGSM49\nAwEHoUQDQgAEdQ8Igci5f7ZvvCVsxXt9tLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZ\nbz/82EwPoS7Dqo3LTK4IuelOimoNNxukkQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.web.default.default.dc1", + "cluster": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + 
"ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster" + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "terminating_gateway.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector" + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.envoy-1-20-x.golden b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.envoy-1-20-x.golden new file mode 100644 index 000000000..433a49902 --- /dev/null +++ b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.envoy-1-20-x.golden @@ -0,0 +1,256 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + 
"socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api.default.default.dc1", + "cluster": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + 
"typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.cache.default.default.dc1", + "cluster": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICmjCCAkGgAwIBAgIQe1ZmC0rzRwer6jaH1YIUIjAKBggqhkjOPQQDAjCBuDEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg\nODE5ODAwNjg0MDM0MTM3ODkyNDYxNTA1MDk0NDU3OTU1MTQxNjEwHhcNMjAwNjE5\nMTU1MjAzWhcNMjEwNjE5MTU1MjAzWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH2aWaaa3fpQLBayheHiKlrH\n+z53m0frfGknKjOhOPVYDVHV8x0OE01negswVQbKHAtxPf1M8Zy+WbI9rK7Ua1mj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDf9CPBSUwwZvpeW73oJLTmgQE2\ntW1NKpL5t1uq9WFcqDArBgNVHSMEJDAigCCPPd/NxgZB0tq2M8pdVpPj3Cr79iTv\ni4/T1ysodfMb7zAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIFCjFZAoXq0s2ied2eIBv0i1KoW5\nIhCylnKFt6iHkyDeAiBBCByTcjHRgEQmqyPojQKoO584EFiczTub9aWdnf9tEw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEINsen3S8xzxMrKcRZIvxXzhKDn43Tw9ttqWEFU9TqS5hoAoGCCqGSM49\nAwEHoUQDQgAEfZpZpprd+lAsFrKF4eIqWsf7PnebR+t8aScqM6E49VgNUdXzHQ4T\nTWd6CzBVBsocC3E9/UzxnL5Zsj2srtRrWQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkOgAwIBAgIRAKF+qDJbaOULNL1TIatrsBowCgYIKoZIzj0EAwIwgbkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB\nIDE4Nzg3MDAwNjUzMDcxOTYzNTk1ODkwNTE1ODY1NjEzMDA2MTU0NDAeFw0yMDA2\nMTkxNTMxMzRaFw0yMTA2MTkxNTMxMzRaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu\nY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdQ8Igci5f7ZvvCVsxXt9\ntLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZbz/82EwPoS7Dqo3LTK4IuelOimoNNxuk\nkaOBxzCBxDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\nAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEILzTLkfJcdWQnTMKUcai/YJq\n0RqH1pjCqtY7SOU4gGOTMCsGA1UdIwQkMCKAIMa2vNcTEC5AGfHIYARJ/4sodX0o\nLzCj3lpw7BcEzPTcMC0GA1UdEQQmMCSCEXNlcnZlci5kYzEuY29uc3Vsgglsb2Nh\nbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgBZ/Z4GSLEc98WvT/qjTVCNTG\n1WNaAaesVbkRx+J0yl8CIQDAVoqY9ByA5vKHjnQrxWlc/JUtJz8wudg7e/OCRriP\nSg==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIN1v14FaNxgY4MgjDOOWthen8dgwB0lNMs9/j2TfrnxzoAoGCCqGSM49\nAwEHoUQDQgAEdQ8Igci5f7ZvvCVsxXt9tLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZ\nbz/82EwPoS7Dqo3LTK4IuelOimoNNxukkQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.web.default.default.dc1", + "cluster": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + 
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster" + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "terminating_gateway.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector" + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.envoy-1-20-x.golden b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.envoy-1-20-x.golden new file mode 100644 index 000000000..74a08b900 --- /dev/null +++ b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.envoy-1-20-x.golden @@ -0,0 +1,256 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": 
"type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api.default.default.dc1", + "cluster": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + 
"cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.cache.default.default.dc1", + "cluster": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICmjCCAkGgAwIBAgIQe1ZmC0rzRwer6jaH1YIUIjAKBggqhkjOPQQDAjCBuDEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg\nODE5ODAwNjg0MDM0MTM3ODkyNDYxNTA1MDk0NDU3OTU1MTQxNjEwHhcNMjAwNjE5\nMTU1MjAzWhcNMjEwNjE5MTU1MjAzWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH2aWaaa3fpQLBayheHiKlrH\n+z53m0frfGknKjOhOPVYDVHV8x0OE01negswVQbKHAtxPf1M8Zy+WbI9rK7Ua1mj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDf9CPBSUwwZvpeW73oJLTmgQE2\ntW1NKpL5t1uq9WFcqDArBgNVHSMEJDAigCCPPd/NxgZB0tq2M8pdVpPj3Cr79iTv\ni4/T1ysodfMb7zAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIFCjFZAoXq0s2ied2eIBv0i1KoW5\nIhCylnKFt6iHkyDeAiBBCByTcjHRgEQmqyPojQKoO584EFiczTub9aWdnf9tEw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEINsen3S8xzxMrKcRZIvxXzhKDn43Tw9ttqWEFU9TqS5hoAoGCCqGSM49\nAwEHoUQDQgAEfZpZpprd+lAsFrKF4eIqWsf7PnebR+t8aScqM6E49VgNUdXzHQ4T\nTWd6CzBVBsocC3E9/UzxnL5Zsj2srtRrWQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } 
+ }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkOgAwIBAgIRAKF+qDJbaOULNL1TIatrsBowCgYIKoZIzj0EAwIwgbkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB\nIDE4Nzg3MDAwNjUzMDcxOTYzNTk1ODkwNTE1ODY1NjEzMDA2MTU0NDAeFw0yMDA2\nMTkxNTMxMzRaFw0yMTA2MTkxNTMxMzRaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu\nY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdQ8Igci5f7ZvvCVsxXt9\ntLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZbz/82EwPoS7Dqo3LTK4IuelOimoNNxuk\nkaOBxzCBxDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\nAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEILzTLkfJcdWQnTMKUcai/YJq\n0RqH1pjCqtY7SOU4gGOTMCsGA1UdIwQkMCKAIMa2vNcTEC5AGfHIYARJ/4sodX0o\nLzCj3lpw7BcEzPTcMC0GA1UdEQQmMCSCEXNlcnZlci5kYzEuY29uc3Vsgglsb2Nh\nbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgBZ/Z4GSLEc98WvT/qjTVCNTG\n1WNaAaesVbkRx+J0yl8CIQDAVoqY9ByA5vKHjnQrxWlc/JUtJz8wudg7e/OCRriP\nSg==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIN1v14FaNxgY4MgjDOOWthen8dgwB0lNMs9/j2TfrnxzoAoGCCqGSM49\nAwEHoUQDQgAEdQ8Igci5f7ZvvCVsxXt9tLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZ\nbz/82EwPoS7Dqo3LTK4IuelOimoNNxukkQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.web.default.default.dc1", + "cluster": 
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster" + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "terminating_gateway.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector" + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/api/config_entry.go b/api/config_entry.go index 91c407bb5..ace5894cb 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -244,9 +244,10 @@ type ProxyConfigEntry struct { Config map[string]interface{} `json:",omitempty"` MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose 
ExposeConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 + + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 } func (p *ProxyConfigEntry) GetKind() string { return p.Kind } diff --git a/api/config_entry_mesh.go b/api/config_entry_mesh.go index f58fabc17..30fab166c 100644 --- a/api/config_entry_mesh.go +++ b/api/config_entry_mesh.go @@ -17,6 +17,8 @@ type MeshConfigEntry struct { // in transparent mode. TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` + TLS *MeshTLSConfig `json:",omitempty"` + Meta map[string]string `json:",omitempty"` // CreateIndex is the Raft index this entry was created at. This is a @@ -33,6 +35,17 @@ type TransparentProxyMeshConfig struct { MeshDestinationsOnly bool `alias:"mesh_destinations_only"` } +type MeshTLSConfig struct { + Incoming *MeshDirectionalTLSConfig `json:",omitempty"` + Outgoing *MeshDirectionalTLSConfig `json:",omitempty"` +} + +type MeshDirectionalTLSConfig struct { + TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` + TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` + CipherSuites []string `json:",omitempty" alias:"cipher_suites"` +} + func (e *MeshConfigEntry) GetKind() string { return MeshConfig } func (e *MeshConfigEntry) GetName() string { return MeshConfigMesh } func (e *MeshConfigEntry) GetPartition() string { return e.Partition } diff --git a/api/config_entry_test.go b/api/config_entry_test.go index f072bea91..0f38f62cd 100644 --- a/api/config_entry_test.go +++ b/api/config_entry_test.go @@ -1260,6 +1260,24 @@ func TestDecodeConfigEntry(t *testing.T) { }, "TransparentProxy": { "MeshDestinationsOnly": true + }, + "TLS": { + "Incoming": { + "TLSMinVersion": "TLSv1_1", + "TLSMaxVersion": "TLSv1_2", + "CipherSuites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + }, + "Outgoing": { + "TLSMinVersion": "TLSv1_1", + "TLSMaxVersion": "TLSv1_2", + "CipherSuites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } } } `, @@ -1271,6 +1289,24 @@ func TestDecodeConfigEntry(t *testing.T) { TransparentProxy: TransparentProxyMeshConfig{ MeshDestinationsOnly: true, }, + TLS: &MeshTLSConfig{ + Incoming: &MeshDirectionalTLSConfig{ + TLSMinVersion: "TLSv1_1", + TLSMaxVersion: "TLSv1_2", + CipherSuites: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + }, + }, + Outgoing: &MeshDirectionalTLSConfig{ + TLSMinVersion: "TLSv1_1", + TLSMaxVersion: "TLSv1_2", + CipherSuites: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + }, + }, + }, }, }, } { diff --git a/command/config/write/config_write_test.go b/command/config/write/config_write_test.go index 3128233fe..a55253d48 100644 --- a/command/config/write/config_write_test.go +++ b/command/config/write/config_write_test.go @@ -2737,6 +2737,24 @@ func TestParseConfigEntry(t *testing.T) { transparent_proxy { mesh_destinations_only = true } + tls { + incoming { + tls_min_version = "TLSv1_1" + tls_max_version = "TLSv1_2" + cipher_suites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + outgoing { + tls_min_version = "TLSv1_1" + tls_max_version = "TLSv1_2" + cipher_suites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + } `, camel: ` Kind = "mesh" @@ -2747,6 +2765,24 @@ func 
TestParseConfigEntry(t *testing.T) { TransparentProxy { MeshDestinationsOnly = true } + TLS { + Incoming { + TLSMinVersion = "TLSv1_1" + TLSMaxVersion = "TLSv1_2" + CipherSuites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + Outgoing { + TLSMinVersion = "TLSv1_1" + TLSMaxVersion = "TLSv1_2" + CipherSuites = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } + } `, snakeJSON: ` { @@ -2757,6 +2793,24 @@ func TestParseConfigEntry(t *testing.T) { }, "transparent_proxy": { "mesh_destinations_only": true + }, + "tls": { + "incoming": { + "tls_min_version": "TLSv1_1", + "tls_max_version": "TLSv1_2", + "cipher_suites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + }, + "outgoing": { + "tls_min_version": "TLSv1_1", + "tls_max_version": "TLSv1_2", + "cipher_suites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } } } `, @@ -2769,6 +2823,24 @@ func TestParseConfigEntry(t *testing.T) { }, "TransparentProxy": { "MeshDestinationsOnly": true + }, + "TLS": { + "Incoming": { + "TLSMinVersion": "TLSv1_1", + "TLSMaxVersion": "TLSv1_2", + "CipherSuites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + }, + "Outgoing": { + "TLSMinVersion": "TLSv1_1", + "TLSMaxVersion": "TLSv1_2", + "CipherSuites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ] + } } } `, @@ -2780,6 +2852,24 @@ func TestParseConfigEntry(t *testing.T) { TransparentProxy: api.TransparentProxyMeshConfig{ MeshDestinationsOnly: true, }, + TLS: &api.MeshTLSConfig{ + Incoming: &api.MeshDirectionalTLSConfig{ + TLSMinVersion: "TLSv1_1", + TLSMaxVersion: "TLSv1_2", + CipherSuites: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + }, + }, + Outgoing: &api.MeshDirectionalTLSConfig{ + TLSMinVersion: "TLSv1_1", + TLSMaxVersion: "TLSv1_2", + CipherSuites: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + }, + }, + }, }, }, { diff --git a/website/content/docs/connect/config-entries/mesh.mdx b/website/content/docs/connect/config-entries/mesh.mdx index 7a633e056..a72a54aa0 100644 --- a/website/content/docs/connect/config-entries/mesh.mdx +++ b/website/content/docs/connect/config-entries/mesh.mdx @@ -17,6 +17,99 @@ Settings in this config entry apply across all namespaces and federated datacent ## Sample Configuration Entries +### Mesh-wide TLS Min Version + +Enforce that service mesh mTLS traffic uses TLS v1.2 or newer. + + + + + + +```hcl +Kind = "mesh" +TLS { + Incoming { + TLSMinVersion = "TLSv1_2" + } +} +``` + +```yaml +apiVersion: consul.hashicorp.com/v1alpha1 +kind: Mesh +metadata: + name: mesh +spec: + tls: + incoming: + tlsMinVersion: TLSv1_2 +``` + +```json +{ + "Kind": "mesh", + "TLS": { + "Incoming": { + "TLSMinVersion": "TLSv1_2" + } + } +} +``` + + + + + + +The `mesh` configuration entry can only be created in the `default` namespace and will apply to proxies across **all** namespaces. + + + +```hcl +Kind = "mesh" +Namespace = "default" # Can only be set to "default". 
+Partition = "default" + +TLS { + Incoming { + TLSMinVersion = "TLSv1_2" + } +} +``` + +```yaml +apiVersion: consul.hashicorp.com/v1alpha1 +kind: Mesh +metadata: + name: mesh + namespace: default +spec: + tls: + incoming: + tlsMinVersion: TLSv1_2 +``` + +```json +{ + "Kind": "mesh", + "Namespace": "default", + "Partition": "default", + "TLS": { + "Incoming": { + "TLSMinVersion": "TLSv1_2" + } + } +} +``` + + + + + + +Note that the Kubernetes example does not include a `partition` field. Configuration entries are applied on Kubernetes using [custom resource definitions (CRD)](/docs/k8s/crds), which can only be scoped to their own partition. + ### Mesh Destinations Only Only allow transparent proxies to dial addresses in the mesh. @@ -171,6 +264,101 @@ Note that the Kubernetes example does not include a `partition` field. Configura }, ], }, + { + name: 'TLS', + type: 'TLSConfig: ', + description: 'TLS configuration for the service mesh.', + children: [ + { + name: 'Incoming', + yaml: false, + type: 'TLSDirectionConfig: ', + description: `TLS configuration for inbound mTLS connections targeting + the public listener on \`connect-proxy\` and \`terminating-gateway\` + proxy kinds.`, + children: [ + { + name: 'TLSMinVersion', + yaml: false, + type: 'string: ""', + description: + "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", + }, + { + name: 'TLSMaxVersion', + yaml: false, + type: 'string: ""', + description: { + hcl: + "Set the default maximum TLS version supported. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.3 as a max version for incoming connections.", + yaml: + "Set the default maximum TLS version supported. Must be greater than or equal to `tls_min_version`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.3 as a max version for incoming connections.", + }, + }, + { + name: 'CipherSuites', + yaml: false, + type: 'array: ', + description: `Set the default list of TLS cipher suites + to support when negotiating connections using + TLS 1.2 or earlier. If unspecified, Envoy will use a + [default server cipher list](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites). + The list of supported cipher suites can seen in + [\`consul/types/tls.go\`](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169) + and is dependent on underlying support in Envoy. Future + releases of Envoy may remove currently-supported but + insecure cipher suites, and future releases of Consul + may add new supported cipher suites if any are added to + Envoy.`, + }, + ], + }, + { + name: 'Outgoing', + yaml: false, + type: 'TLSDirectionConfig: ', + description: `TLS configuration for outbound mTLS connections dialing upstreams + from \`connect-proxy\` and \`ingress-gateway\` + proxy kinds.`, + children: [ + { + name: 'TLSMinVersion', + yaml: false, + type: 'string: ""', + description: + "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. 
If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", + }, + { + name: 'TLSMaxVersion', + yaml: false, + type: 'string: ""', + description: { + hcl: + "Set the default maximum TLS version supported. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.2 as a max version for outgoing connections, but future Envoy releases [may change this to TLS 1.3](https://github.com/envoyproxy/envoy/issues/9300).", + yaml: + "Set the default maximum TLS version supported. Must be greater than or equal to `tls_min_version`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.2 as a max version for outgoing connections, but future Envoy releases [may change this to TLS 1.3](https://github.com/envoyproxy/envoy/issues/9300).", + }, + }, + { + name: 'CipherSuites', + yaml: false, + type: 'array: ', + description: `Set the default list of TLS cipher suites + to support when negotiating connections using + TLS 1.2 or earlier. If unspecified, Envoy will use a + [default server cipher list](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites). + The list of supported cipher suites can seen in + [\`consul/types/tls.go\`](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169) + and is dependent on underlying support in Envoy. Future + releases of Envoy may remove currently-supported but + insecure cipher suites, and future releases of Consul + may add new supported cipher suites if any are added to + Envoy.`, + }, + ], + }, + ], + }, ]} /> From 3b6fd762ae9b6a954438b252e5f6022208b7fbee Mon Sep 17 00:00:00 2001 From: Mike Morris Date: Wed, 30 Mar 2022 16:05:00 -0400 Subject: [PATCH 048/785] website(api-gateway): add common errors page (#12643) * Adding common errors page for API Gateway * website(api-gateway): add missing CRDs common error message * Update website/content/docs/api-gateway/common-errors.mdx Co-authored-by: Nathan Coleman * Update website/content/docs/api-gateway/common-errors.mdx Co-authored-by: Nathan Coleman * Update website/content/docs/api-gateway/common-errors.mdx Co-authored-by: Nathan Coleman * Update website/content/docs/api-gateway/common-errors.mdx * Additional page editing instructions and formating * Update website/content/docs/api-gateway/common-errors.mdx * Update website/content/docs/api-gateway/common-errors.mdx * Update website/content/docs/api-gateway/common-errors.mdx * Update website/content/docs/api-gateway/common-errors.mdx * Update website/content/docs/api-gateway/common-errors.mdx Co-authored-by: mrspanishviking * Apply suggestions from code review Co-authored-by: Jeff-Apple <79924108+Jeff-Apple@users.noreply.github.com> Co-authored-by: Nathan Coleman Co-authored-by: mrspanishviking --- .../docs/api-gateway/common-errors.mdx | 67 +++++++++++++++++++ website/data/docs-nav-data.json | 4 ++ 2 files changed, 71 insertions(+) create mode 100644 website/content/docs/api-gateway/common-errors.mdx diff --git a/website/content/docs/api-gateway/common-errors.mdx b/website/content/docs/api-gateway/common-errors.mdx new file mode 100644 index 000000000..d3ba51dca --- /dev/null +++ b/website/content/docs/api-gateway/common-errors.mdx @@ -0,0 +1,67 @@ +--- 
+layout: docs +page_title: Common Error Messages +--- + +# Common Error Messages + +Some of the errors messages commonly encountered during installation and operations of Consul API Gateway are listed below, along with suggested methods for resolving them. + +If the error message is not listed on this page, it may be listed on the main [Consul Common errors][consul-common-errors] page. If the error message is not listed on that page either, please consider following our general [Troubleshooting Guide][troubleshooting] or reach out to us in [Discuss](https://discuss.hashicorp.com/). + + + +### Helm installation failed: "no matches for kind" + +``` +Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest: [unable to recognize "": no matches for kind "GatewayClass" in version "gateway.networking.k8s.io/v1alpha2", unable to recognize "": no matches for kind "GatewayClassConfig" in version "api-gateway.consul.hashicorp.com/v1alpha1"] +``` +**Conditions:** +When this error occurs during the process of installing Consul API Gateway, it is usually caused by not having the required CRD files installed in Kubernetes prior to installing Consul API Gateway. + +**Impact:** +The installation process will typically fail after this error message is generated + +**Recommended Action:** +Install the required CRDs by using the command in Step 1 of the [Consul API Gateway installation instructions][install-instructions] and then retry installing Consul API Gateway. + + + + + +[consul-common-errors]: /docs/troubleshoot/common-errors +[troubleshooting]: https://learn.hashicorp.com/consul/day-2-operations/advanced-operations/troubleshooting +[install-instructions]: /docs/api-gateway/api-gateway-usage#installation \ No newline at end of file diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 2f5934a12..f6fc6bbd1 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -388,6 +388,10 @@ { "title": "Technical Specifications", "path": "api-gateway/tech-specs" + }, + { + "title": "Common Errors", + "path": "api-gateway/common-errors" } ] }, From 04ec4c2aa43d8f143c29773a2b99184da1729154 Mon Sep 17 00:00:00 2001 From: Bryce Kalow Date: Wed, 30 Mar 2022 16:16:26 -0500 Subject: [PATCH 049/785] website: redirect /api to /api-docs (#12660) --- website/content/api-docs/acl/auth-methods.mdx | 38 ++++---- .../content/api-docs/acl/binding-rules.mdx | 30 +++---- website/content/api-docs/acl/index.mdx | 60 ++++++------- website/content/api-docs/acl/legacy.mdx | 38 ++++---- website/content/api-docs/acl/policies.mdx | 38 ++++---- website/content/api-docs/acl/roles.mdx | 36 ++++---- website/content/api-docs/acl/tokens.mdx | 44 +++++----- website/content/api-docs/admin-partitions.mdx | 30 +++---- website/content/api-docs/agent/check.mdx | 42 ++++----- website/content/api-docs/agent/connect.mdx | 26 +++--- website/content/api-docs/agent/index.mdx | 66 +++++++------- website/content/api-docs/agent/service.mdx | 48 +++++----- website/content/api-docs/catalog.mdx | 66 +++++++------- website/content/api-docs/config.mdx | 24 ++--- website/content/api-docs/connect/ca.mdx | 18 ++-- website/content/api-docs/connect/index.mdx | 2 +- .../content/api-docs/connect/intentions.mdx | 62 ++++++------- website/content/api-docs/coordinate.mdx | 24 ++--- website/content/api-docs/discovery-chain.mdx | 6 +- website/content/api-docs/event.mdx | 12 +-- .../content/api-docs/features/consistency.mdx | 2 +- .../content/api-docs/features/filtering.mdx | 2 +- 
website/content/api-docs/health.mdx | 30 +++---- website/content/api-docs/kv.mdx | 22 ++--- website/content/api-docs/namespaces.mdx | 30 +++---- website/content/api-docs/operator/area.mdx | 42 ++++----- .../content/api-docs/operator/autopilot.mdx | 24 ++--- website/content/api-docs/operator/keyring.mdx | 24 ++--- website/content/api-docs/operator/license.mdx | 18 ++-- website/content/api-docs/operator/raft.mdx | 14 +-- website/content/api-docs/operator/segment.mdx | 6 +- website/content/api-docs/query.mdx | 42 ++++----- website/content/api-docs/session.mdx | 36 ++++---- website/content/api-docs/snapshot.mdx | 12 +-- website/content/api-docs/status.mdx | 12 +-- website/content/api-docs/txn.mdx | 12 +-- .../commands/acl/auth-method/create.mdx | 2 +- .../commands/acl/auth-method/delete.mdx | 2 +- .../commands/acl/auth-method/index.mdx | 2 +- .../content/commands/acl/auth-method/list.mdx | 2 +- .../content/commands/acl/auth-method/read.mdx | 2 +- .../commands/acl/auth-method/update.mdx | 2 +- .../commands/acl/binding-rule/create.mdx | 2 +- .../commands/acl/binding-rule/delete.mdx | 2 +- .../commands/acl/binding-rule/index.mdx | 2 +- .../commands/acl/binding-rule/list.mdx | 2 +- .../commands/acl/binding-rule/read.mdx | 2 +- .../commands/acl/binding-rule/update.mdx | 2 +- website/content/commands/acl/bootstrap.mdx | 2 +- .../content/commands/acl/policy/create.mdx | 2 +- .../content/commands/acl/policy/delete.mdx | 2 +- website/content/commands/acl/policy/index.mdx | 2 +- website/content/commands/acl/policy/list.mdx | 2 +- website/content/commands/acl/policy/read.mdx | 2 +- .../content/commands/acl/policy/update.mdx | 2 +- website/content/commands/acl/role/create.mdx | 2 +- website/content/commands/acl/role/delete.mdx | 2 +- website/content/commands/acl/role/index.mdx | 2 +- website/content/commands/acl/role/list.mdx | 2 +- website/content/commands/acl/role/read.mdx | 2 +- website/content/commands/acl/role/update.mdx | 2 +- .../content/commands/acl/set-agent-token.mdx | 2 +- website/content/commands/acl/token/clone.mdx | 2 +- website/content/commands/acl/token/create.mdx | 2 +- website/content/commands/acl/token/delete.mdx | 2 +- website/content/commands/acl/token/index.mdx | 2 +- website/content/commands/acl/token/list.mdx | 2 +- website/content/commands/acl/token/read.mdx | 2 +- website/content/commands/acl/token/update.mdx | 2 +- .../content/commands/acl/translate-rules.mdx | 2 +- .../content/commands/catalog/datacenters.mdx | 2 +- website/content/commands/catalog/index.mdx | 2 +- website/content/commands/catalog/nodes.mdx | 4 +- website/content/commands/catalog/services.mdx | 2 +- website/content/commands/config/delete.mdx | 2 +- website/content/commands/config/list.mdx | 2 +- website/content/commands/config/read.mdx | 2 +- website/content/commands/config/write.mdx | 2 +- website/content/commands/connect/ca.mdx | 6 +- website/content/commands/event.mdx | 2 +- website/content/commands/force-leave.mdx | 2 +- website/content/commands/intention/check.mdx | 2 +- website/content/commands/intention/create.mdx | 2 +- website/content/commands/intention/delete.mdx | 2 +- website/content/commands/intention/get.mdx | 2 +- website/content/commands/intention/index.mdx | 2 +- website/content/commands/intention/list.mdx | 2 +- website/content/commands/intention/match.mdx | 2 +- website/content/commands/join.mdx | 2 +- website/content/commands/keyring.mdx | 2 +- website/content/commands/kv/delete.mdx | 2 +- website/content/commands/kv/export.mdx | 2 +- website/content/commands/kv/get.mdx | 2 +- 
website/content/commands/kv/import.mdx | 2 +- website/content/commands/kv/index.mdx | 2 +- website/content/commands/kv/put.mdx | 2 +- website/content/commands/leave.mdx | 2 +- website/content/commands/license.mdx | 6 +- website/content/commands/login.mdx | 2 +- website/content/commands/logout.mdx | 2 +- website/content/commands/maint.mdx | 2 +- website/content/commands/members.mdx | 2 +- website/content/commands/namespace/create.mdx | 2 +- website/content/commands/namespace/delete.mdx | 2 +- website/content/commands/namespace/list.mdx | 2 +- website/content/commands/namespace/read.mdx | 2 +- website/content/commands/namespace/update.mdx | 2 +- website/content/commands/namespace/write.mdx | 2 +- website/content/commands/operator/area.mdx | 12 +-- .../content/commands/operator/autopilot.mdx | 6 +- website/content/commands/operator/index.mdx | 2 +- website/content/commands/operator/raft.mdx | 4 +- website/content/commands/reload.mdx | 2 +- website/content/commands/rtt.mdx | 2 +- .../content/commands/services/deregister.mdx | 2 +- .../content/commands/services/register.mdx | 6 +- website/content/commands/snapshot/agent.mdx | 6 +- website/content/commands/snapshot/index.mdx | 2 +- website/content/commands/snapshot/inspect.mdx | 2 +- website/content/commands/snapshot/restore.mdx | 4 +- website/content/commands/snapshot/save.mdx | 4 +- website/content/docs/agent/config-entries.mdx | 2 +- website/content/docs/agent/options.mdx | 40 ++++----- website/content/docs/agent/sentinel.mdx | 2 +- website/content/docs/agent/telemetry.mdx | 2 +- .../content/docs/architecture/consensus.mdx | 4 +- .../content/docs/architecture/coordinates.mdx | 10 +-- website/content/docs/connect/ca/consul.mdx | 6 +- website/content/docs/connect/ca/index.mdx | 12 +-- .../config-entries/ingress-gateway.mdx | 4 +- .../config-entries/service-resolver.mdx | 4 +- .../config-entries/terminating-gateway.mdx | 4 +- .../docs/connect/connect-internals.mdx | 10 +-- .../docs/connect/gateways/ingress-gateway.mdx | 6 +- .../wan-federation-via-mesh-gateways.mdx | 2 +- .../connect/gateways/terminating-gateway.mdx | 10 +-- .../docs/connect/intentions-legacy.mdx | 2 +- website/content/docs/connect/intentions.mdx | 2 +- .../connect/l7-traffic/discovery-chain.mdx | 8 +- website/content/docs/connect/native/go.mdx | 2 +- website/content/docs/connect/native/index.mdx | 26 +++--- .../docs/connect/proxies/integrate.mdx | 18 ++-- .../connect/proxies/managed-deprecated.mdx | 2 +- website/content/docs/discovery/checks.mdx | 6 +- website/content/docs/discovery/dns.mdx | 10 +-- .../content/docs/dynamic-app-config/kv.mdx | 2 +- .../content/docs/enterprise/namespaces.mdx | 2 +- website/content/docs/install/performance.mdx | 4 +- .../docs/k8s/connect/connect-ca-provider.mdx | 4 +- .../content/docs/k8s/installation/install.mdx | 2 +- .../docs/nia/installation/requirements.mdx | 2 +- .../acl/acl-federated-datacenters.mdx | 2 +- .../content/docs/security/acl/acl-legacy.mdx | 88 +++++++++---------- .../docs/security/acl/acl-migrate-tokens.mdx | 4 +- .../docs/security/acl/acl-policies.mdx | 4 +- .../content/docs/security/acl/acl-roles.mdx | 4 +- .../content/docs/security/acl/acl-rules.mdx | 56 ++++++------ .../content/docs/security/acl/acl-tokens.mdx | 8 +- .../docs/security/acl/auth-methods/index.mdx | 6 +- .../docs/security/acl/auth-methods/jwt.mdx | 2 +- .../security/acl/auth-methods/kubernetes.mdx | 14 +-- .../docs/security/acl/auth-methods/oidc.mdx | 2 +- website/content/docs/security/acl/index.mdx | 4 +- .../docs/upgrading/upgrade-specific.mdx | 10 +-- 
website/next.config.js | 6 -- website/redirects.next.js | 9 +- 166 files changed, 837 insertions(+), 838 deletions(-) diff --git a/website/content/api-docs/acl/auth-methods.mdx b/website/content/api-docs/acl/auth-methods.mdx index 2e662213b..d5fa84d86 100644 --- a/website/content/api-docs/acl/auth-methods.mdx +++ b/website/content/api-docs/acl/auth-methods.mdx @@ -25,9 +25,9 @@ This endpoint creates a new ACL auth method. | `PUT` | `/acl/auth-method` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -54,7 +54,7 @@ The corresponding CLI command is [`consul acl auth-method create`](/commands/acl - `MaxTokenTTL` `(duration: 0s)` - This specifies the maximum life of any token created by this auth method. When set it will initialize the - [`ExpirationTime`](/api/acl/tokens#expirationtime) field on all tokens + [`ExpirationTime`](/api-docs/acl/tokens#expirationtime) field on all tokens to a value of `Token.CreateTime + AuthMethod.MaxTokenTTL`. This field is not persisted beyond its initial use. Can be specified in the form of `"60s"` or `"5m"` (i.e., 60 seconds or 5 minutes, respectively). This value must be no @@ -153,9 +153,9 @@ auth method exists with the given name, a 404 is returned instead of a | `GET` | `/acl/auth-method/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -207,9 +207,9 @@ This endpoint updates an existing ACL auth method. | `PUT` | `/acl/auth-method/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -237,7 +237,7 @@ The corresponding CLI command is [`consul acl auth-method update`](/commands/acl - `MaxTokenTTL` `(duration: 0s)` - This specifies the maximum life of any token created by this auth method. When set it will initialize the - [`ExpirationTime`](/api/acl/tokens#expirationtime) field on all tokens + [`ExpirationTime`](/api-docs/acl/tokens#expirationtime) field on all tokens to a value of `Token.CreateTime + AuthMethod.MaxTokenTTL`. This field is not persisted beyond its initial use. Can be specified in the form of `"60s"` or `"5m"` (i.e., 60 seconds or 5 minutes, respectively). This value must be no @@ -329,8 +329,8 @@ $ curl --request PUT \ This endpoint deletes an ACL auth method. 
~> Deleting an auth method will also immediately delete all associated -[binding rules](/api/acl/binding-rules) as well as any -outstanding [tokens](/api/acl/tokens) created from this auth method. +[binding rules](/api-docs/acl/binding-rules) as well as any +outstanding [tokens](/api-docs/acl/tokens) created from this auth method. | Method | Path | Produces | | -------- | ------------------------ | ------------------ | @@ -340,9 +340,9 @@ Even though the return type is application/json, the value is either true or false indicating whether the delete succeeded. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -384,9 +384,9 @@ This endpoint lists all the ACL auth methods. | `GET` | `/acl/auth-methods` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/acl/binding-rules.mdx b/website/content/api-docs/acl/binding-rules.mdx index e979d3440..3a17106bd 100644 --- a/website/content/api-docs/acl/binding-rules.mdx +++ b/website/content/api-docs/acl/binding-rules.mdx @@ -25,9 +25,9 @@ This endpoint creates a new ACL binding rule. | `PUT` | `/acl/binding-rule` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -151,9 +151,9 @@ response. | `GET` | `/acl/binding-rule/:id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -203,9 +203,9 @@ This endpoint updates an existing ACL binding rule. | `PUT` | `/acl/binding-rule/:id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -335,9 +335,9 @@ Even though the return type is application/json, the value is either true or false indicating whether the delete succeeded. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -379,9 +379,9 @@ This endpoint lists all the ACL binding rules. | `GET` | `/acl/binding-rules` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/acl/index.mdx b/website/content/api-docs/acl/index.mdx index 3445d233d..081025ced 100644 --- a/website/content/api-docs/acl/index.mdx +++ b/website/content/api-docs/acl/index.mdx @@ -6,9 +6,9 @@ description: The /acl endpoints manage the Consul's ACL system. # ACL HTTP API --> **1.4.0+:** This API documentation is for Consul versions 1.4.0 and later. The documentation for the legacy ACL API is [here](/api/acl/legacy). +-> **1.4.0+:** This API documentation is for Consul versions 1.4.0 and later. The documentation for the legacy ACL API is [here](/api-docs/acl/legacy). -The `/acl` endpoints are used to manage ACL tokens and policies in Consul, [bootstrap the ACL system](#bootstrap-acls), [check ACL replication status](#check-acl-replication), and [translate rules](#translate-rules). There are additional pages for managing [tokens](/api/acl/tokens) and [policies](/api/acl/policies) with the `/acl` endpoints. +The `/acl` endpoints are used to manage ACL tokens and policies in Consul, [bootstrap the ACL system](#bootstrap-acls), [check ACL replication status](#check-acl-replication), and [translate rules](#translate-rules). There are additional pages for managing [tokens](/api-docs/acl/tokens) and [policies](/api-docs/acl/policies) with the `/acl` endpoints. For more information on how to setup ACLs, please check the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production). @@ -29,9 +29,9 @@ configuration files. | `PUT` | `/acl/bootstrap` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -96,9 +96,9 @@ for more details. 
| `GET` | `/acl/replication` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -157,17 +157,17 @@ $ curl \ - `ReplicatedIndex` - The last index that was successfully replicated. Which data the replicated index refers to depends on the replication type. For `legacy` replication this can be compared with the value of the `X-Consul-Index` header - returned by the [`/v1/acl/list`](/api/acl/legacy#list-acls) endpoint to + returned by the [`/v1/acl/list`](/api-docs/acl/legacy#list-acls) endpoint to determine if the replication process has gotten all available ACLs. When in either `tokens` or `policies` mode, this index can be compared with the value of the - `X-Consul-Index` header returned by the [`/v1/acl/policies`](/api/acl/policies#list-policies) + `X-Consul-Index` header returned by the [`/v1/acl/policies`](/api-docs/acl/policies#list-policies) endpoint to determine if the policy replication process has gotten all available ACL policies. Note that ACL replication is rate limited so the indexes may lag behind the primary datacenter. - `ReplicatedTokenIndex` - The last token index that was successfully replicated. This index can be compared with the value of the `X-Consul-Index` header returned - by the [`/v1/acl/tokens`](/api/acl/tokens#list-tokens) endpoint to determine + by the [`/v1/acl/tokens`](/api-docs/acl/tokens#list-tokens) endpoint to determine if the replication process has gotten all available ACL tokens. Note that ACL replication is rate limited so the indexes may lag behind the primary datacenter. @@ -199,9 +199,9 @@ migrations. | `POST` | `/acl/rules/translate` | `text/plain` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -241,16 +241,16 @@ This endpoint translates the legacy rules embedded within a legacy ACL into the syntax. It is intended to be used by operators managing Consul's ACLs and performing legacy token to new policy migrations. Note that this API requires the auto-generated Accessor ID of the legacy token. This ID can be retrieved using the -[`/v1/acl/token/self`](/api/acl/tokens#read-self-token) endpoint. +[`/v1/acl/token/self`](/api-docs/acl/tokens#read-self-token) endpoint. | Method | Path | Produces | | ------ | ----------------------------------- | ------------ | | `GET` | `/acl/rules/translate/:accessor_id` | `text/plain` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -284,9 +284,9 @@ Consul ACL token. | `POST` | `/acl/login` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -376,9 +376,9 @@ with the `X-Consul-Token` header or the `token` query parameter. | `POST` | `/acl/logout` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -414,9 +414,9 @@ URL from Consul to start an [OIDC login flow](/docs/security/acl/auth-methods/oi | `POST` | `/acl/oidc/auth-url` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -494,9 +494,9 @@ for a newly-created Consul ACL token. | `POST` | `/acl/oidc/callback` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/acl/legacy.mdx b/website/content/api-docs/acl/legacy.mdx index ab3bbb70f..c465f6d8e 100644 --- a/website/content/api-docs/acl/legacy.mdx +++ b/website/content/api-docs/acl/legacy.mdx @@ -10,7 +10,7 @@ description: >- -> **The legacy ACL system was deprecated in Consul 1.4.0 and removed in Consul 1.11.0.** It's _strongly_ recommended you do not build anything using the legacy system and use -the new ACL [Token](/api/acl/tokens) and [Policy](/api/acl/policies) APIs instead. +the new ACL [Token](/api-docs/acl/tokens) and [Policy](/api-docs/acl/policies) APIs instead. The legacy `/acl` endpoints to create, update, destroy, and query legacy ACL tokens in Consul. @@ -25,9 +25,9 @@ This endpoint makes a new ACL token. 
| `PUT` | `/acl/create` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -84,9 +84,9 @@ generating a new token ID, the `ID` field must be provided. | `PUT` | `/acl/update` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -138,9 +138,9 @@ Even though the return type is application/json, the value is either true or false, indicating whether the delete succeeded. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -175,9 +175,9 @@ This endpoint reads an ACL token with the given ID. | `GET` | `/acl/info/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -224,9 +224,9 @@ complex rule management. | `PUT` | `/acl/clone/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -263,9 +263,9 @@ This endpoint lists all the active ACL tokens. | `GET` | `/acl/list` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/acl/policies.mdx b/website/content/api-docs/acl/policies.mdx index f5ef0a38d..02db7380d 100644 --- a/website/content/api-docs/acl/policies.mdx +++ b/website/content/api-docs/acl/policies.mdx @@ -6,7 +6,7 @@ description: The /acl/policy endpoints manage Consul's ACL policies. # ACL Policy HTTP API --> **1.4.0+:** The APIs are available in Consul versions 1.4.0 and later. The documentation for the legacy ACL API is [here](/api/acl/legacy). +-> **1.4.0+:** The APIs are available in Consul versions 1.4.0 and later. The documentation for the legacy ACL API is [here](/api-docs/acl/legacy). The `/acl/policy` endpoints [create](#create-a-policy), [read](#read-a-policy), [update](#update-a-policy), [list](#list-policies) and @@ -24,9 +24,9 @@ This endpoint creates a new ACL policy. | `PUT` | `/acl/policy` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -99,9 +99,9 @@ This endpoint reads an ACL policy with the given ID. | `GET` | `/acl/policy/:id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -151,9 +151,9 @@ This endpoint reads an ACL policy with the given ID. | `GET` | `/acl/policy/name/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -203,9 +203,9 @@ This endpoint updates an existing ACL policy. | `PUT` | `/acl/policy/:id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -284,9 +284,9 @@ Even though the return type is application/json, the value is either true or false indicating whether the delete succeeded. 
The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -328,9 +328,9 @@ This endpoint lists all the ACL policies. | `GET` | `/acl/policies` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/acl/roles.mdx b/website/content/api-docs/acl/roles.mdx index 5b73c7325..f76ab9e31 100644 --- a/website/content/api-docs/acl/roles.mdx +++ b/website/content/api-docs/acl/roles.mdx @@ -23,9 +23,9 @@ This endpoint creates a new ACL role. | `PUT` | `/acl/role` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -165,9 +165,9 @@ given ID, a 404 is returned instead of a 200 response. | `GET` | `/acl/role/:id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -237,9 +237,9 @@ given name, a 404 is returned instead of a 200 response. | `GET` | `/acl/role/name/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -308,9 +308,9 @@ This endpoint updates an existing ACL role. | `PUT` | `/acl/role/:id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -426,9 +426,9 @@ Even though the return type is application/json, the value is either true or false indicating whether the delete succeeded. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -470,9 +470,9 @@ This endpoint lists all the ACL roles. | `GET` | `/acl/roles` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/acl/tokens.mdx b/website/content/api-docs/acl/tokens.mdx index 31456965d..b73acc50e 100644 --- a/website/content/api-docs/acl/tokens.mdx +++ b/website/content/api-docs/acl/tokens.mdx @@ -6,7 +6,7 @@ description: The /acl/token endpoints manage Consul's ACL Tokens. # ACL Token HTTP API --> **1.4.0+:** The APIs are available in Consul versions 1.4.0 and later. The documentation for the legacy ACL API is [here](/api/acl/legacy). +-> **1.4.0+:** The APIs are available in Consul versions 1.4.0 and later. The documentation for the legacy ACL API is [here](/api-docs/acl/legacy). The `/acl/token` endpoints [create](#create-a-token), [read](#read-a-token), [update](#update-a-token), [list](#list-tokens), [clone](#clone-a-token) and [delete](#delete-a-token) ACL tokens in Consul. @@ -23,9 +23,9 @@ This endpoint creates a new ACL token. | `PUT` | `/acl/token` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -166,9 +166,9 @@ This endpoint reads an ACL token with the given Accessor ID. | `GET` | `/acl/token/:AccessorID` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -235,9 +235,9 @@ specified with the `X-Consul-Token` header or the `token` query parameter. 
| `GET` | `/acl/token/self` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -290,9 +290,9 @@ This endpoint updates an existing ACL token. | `PUT` | `/acl/token/:AccessorID` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -440,9 +440,9 @@ This endpoint clones an existing ACL token. | `PUT` | `/acl/token/:AccessorID/clone` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -521,9 +521,9 @@ Even though the return type is application/json, the value is either true or false, indicating whether the delete succeeded. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -565,9 +565,9 @@ This endpoint lists all the ACL tokens. | `GET` | `/acl/tokens` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/admin-partitions.mdx b/website/content/api-docs/admin-partitions.mdx index 061187d6b..422223b4a 100644 --- a/website/content/api-docs/admin-partitions.mdx +++ b/website/content/api-docs/admin-partitions.mdx @@ -20,9 +20,9 @@ This endpoint creates a new Partition. | `PUT` | `/partition` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -76,9 +76,9 @@ This endpoint reads a Partition with the given name. | `GET` | `/partition/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -121,9 +121,9 @@ This endpoint updates a Partition description. | `PUT` | `/partition/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -185,9 +185,9 @@ This endpoint will return no data. Success or failure is indicated by the status code returned. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -230,9 +230,9 @@ This endpoint lists all the Partitions. | `GET` | `/partitions` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/agent/check.mdx b/website/content/api-docs/agent/check.mdx index 706d07369..e0b69258f 100644 --- a/website/content/api-docs/agent/check.mdx +++ b/website/content/api-docs/agent/check.mdx @@ -28,9 +28,9 @@ everything will be in sync within a few seconds. | `GET` | `/agent/checks` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -109,9 +109,9 @@ check and keeping the Catalog in sync. 
| `PUT` | `/agent/check/register` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -300,9 +300,9 @@ not exist, no action is taken. | `PUT` | `/agent/check/deregister/:check_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -338,9 +338,9 @@ This endpoint is used with a TTL type check to set the status of the check to | `PUT` | `/agent/check/pass/:check_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -379,9 +379,9 @@ This endpoint is used with a TTL type check to set the status of the check to | `PUT` | `/agent/check/warn/:check_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -419,9 +419,9 @@ This endpoint is used with a TTL type check to set the status of the check to | `PUT` | `/agent/check/fail/:check_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -459,9 +459,9 @@ to reset the TTL clock. | `PUT` | `/agent/check/update/:check_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/agent/connect.mdx b/website/content/api-docs/agent/connect.mdx index 8b7da249d..0570070e5 100644 --- a/website/content/api-docs/agent/connect.mdx +++ b/website/content/api-docs/agent/connect.mdx @@ -11,7 +11,7 @@ description: >- The `/agent/connect` endpoints interact with [Connect](/docs/connect) with agent-local operations. -These endpoints may mirror the [non-agent Connect endpoints](/api/connect) +These endpoints may mirror the [non-agent Connect endpoints](/api-docs/connect) in some cases. Almost all agent-local Connect endpoints perform local caching to optimize performance of Connect without having to make requests to the server. @@ -22,7 +22,7 @@ defined as _deny_ intentions during evaluation, as this endpoint is only suited for networking layer 4 (e.g. TCP) integration. For performance and reliability reasons it is desirable to implement intention enforcement by listing [intentions that match the -destination](/api/connect/intentions#list-matching-intentions) and representing +destination](/api-docs/connect/intentions#list-matching-intentions) and representing them in the native configuration of the proxy itself (such as RBAC for Envoy). This endpoint tests whether a connection attempt is authorized between @@ -43,9 +43,9 @@ connection attempt. | `POST` | `/agent/connect/authorize` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -105,7 +105,7 @@ This is used by [proxies](/docs/connect/proxies) or [native integrations](/docs/connect/native) to verify served client or server certificates are valid. -This is equivalent to the [non-Agent Connect endpoint](/api/connect), +This is equivalent to the [non-Agent Connect endpoint](/api-docs/connect), but the response of this request is cached locally at the agent. This allows for very fast response times and for fail open behavior if the server is unavailable. This endpoint should be used by proxies and native integrations. @@ -115,9 +115,9 @@ unavailable. This endpoint should be used by proxies and native integrations. | `GET` | `/agent/connect/ca/roots` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -165,7 +165,7 @@ connections and is also used as the client certificate for establishing outbound connections to other services. The agent generates a CSR locally and calls the -[CA sign API](/api/connect/ca) to sign it. The resulting certificate +[CA sign API](/api-docs/connect/ca) to sign it. The resulting certificate is cached and returned by this API until it is near expiry or the root certificates change. @@ -183,9 +183,9 @@ wait for certificate rotations. 
| `GET` | `/agent/connect/ca/leaf/:service` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/agent/index.mdx b/website/content/api-docs/agent/index.mdx index 50c013ee0..8a7b4a693 100644 --- a/website/content/api-docs/agent/index.mdx +++ b/website/content/api-docs/agent/index.mdx @@ -31,9 +31,9 @@ GitHub issue to discuss your use case. | `GET` | `/agent/host` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -220,9 +220,9 @@ by agent. The strongly consistent view of nodes is instead provided by | `GET` | `/agent/members` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -288,9 +288,9 @@ to change without notice or deprecation. | `GET` | `/agent/self` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -368,9 +368,9 @@ section on the agent options page for details on which options are supported. | `PUT` | `/agent/reload` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -401,9 +401,9 @@ restart. | `PUT` | `/agent/maintenance` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -462,9 +462,9 @@ endpoint. | `GET` | `/agent/metrics?format=prometheus` | `text/plain; version=0.0.4; charset=utf-8` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -584,9 +584,9 @@ This endpoint streams logs from the local agent until the connection is closed. | `GET` | `/agent/monitor` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -628,9 +628,9 @@ This endpoint instructs the agent to attempt to connect to a given address. | `PUT` | `/agent/join/:address` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -670,9 +670,9 @@ can affect cluster availability. | `PUT` | `/agent/leave` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -709,9 +709,9 @@ the list of members entirely. | `PUT` | `/agent/force-leave/:node?prune` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -785,9 +785,9 @@ The paths above correspond to the token names as found in the agent configuratio [`acl_replication_token`](/docs/agent/options#acl_replication_token_legacy). The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/agent/service.mdx b/website/content/api-docs/agent/service.mdx index 489556919..9254bfd05 100644 --- a/website/content/api-docs/agent/service.mdx +++ b/website/content/api-docs/agent/service.mdx @@ -30,9 +30,9 @@ everything will be in sync within a few seconds. | `GET` | `/agent/services` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -147,9 +147,9 @@ everything will be in sync within a few seconds. | `GET` | `/agent/service/:service_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -235,7 +235,7 @@ $ curl \ The response has the same structure as the [service definition](/docs/discovery/services) with one extra field `ContentHash` which contains the [hash-based blocking -query](/api/features/blocking#hash-based-blocking-queries) hash for the result. The +query](/api-docs/features/blocking#hash-based-blocking-queries) hash for the result. The same hash is also present in `X-Consul-ContentHash`. ## Get local service health @@ -253,9 +253,9 @@ the URL or use Mime Content negotiation by specifying a HTTP Header | `GET` | `/agent/health/service/name/:service_name?format=text` | `text/plain` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -294,7 +294,7 @@ Those endpoints might be useful for the following use-cases: ##### Note If you know the ID of service you want to target, it is recommended to use -[`/v1/agent/health/service/id/:service_id`](/api/agent/service#get-local-service-health-by-id) +[`/v1/agent/health/service/id/:service_id`](/api-docs/agent/service#get-local-service-health-by-id) so you have the result for the service only. When requesting `/v1/agent/health/service/name/:service_name`, the caller will receive the worst state of all services having the given name. @@ -430,7 +430,7 @@ Retrieve the health state of a specific service on the local agent by ID. | `GET` | `/agent/health/service/id/:service_id?format=text` | `text/plain` | The supported request parameters are the same as -[`/v1/agent/health/service/name/:service_name`](/api/agent/service#get-local-service-health). +[`/v1/agent/health/service/name/:service_name`](/api-docs/agent/service#get-local-service-health). 
### Sample Requests @@ -586,9 +586,9 @@ For "connect-proxy" kind services, the `service:write` ACL for the | `PUT` | `/agent/service/register` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -655,13 +655,13 @@ service definition keys for compatibility with the config file format. [Connect Structure](#connect-structure) section below for supported fields. - `Check` `(Check: nil)` - Specifies a check. Please see the - [check documentation](/api/agent/check) for more information about the + [check documentation](/api-docs/agent/check) for more information about the accepted fields. If you don't provide a name or id for the check then they will be generated. To provide a custom id and/or name set the `CheckID` and/or `Name` field. - `Checks` `(array: nil)` - Specifies a list of checks. Please see the - [check documentation](/api/agent/check) for more information about the + [check documentation](/api-docs/agent/check) for more information about the accepted fields. If you don't provide a name or id for the check then they will be generated. To provide a custom id and/or name set the `CheckID` and/or `Name` field. The automatically generated `Name` and `CheckID` depend @@ -671,7 +671,7 @@ service definition keys for compatibility with the config file format. - `EnableTagOverride` `(bool: false)` - Specifies to disable the anti-entropy feature for this service's tags. If `EnableTagOverride` is set to `true` then - external agents can update this service in the [catalog](/api/catalog) + external agents can update this service in the [catalog](/api-docs/catalog) and modify the tags. Subsequent local sync operations by this agent will ignore the updated tags. For instance, if an external agent modified both the tags and the port for this service and `EnableTagOverride` was set to `true` @@ -761,9 +761,9 @@ is an associated check, that is also deregistered. | `PUT` | `/agent/service/deregister/:service_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -803,9 +803,9 @@ will be automatically restored on agent restart. | `PUT` | `/agent/service/maintenance/:service_id` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/catalog.mdx b/website/content/api-docs/catalog.mdx index 64011347c..4b2e93033 100644 --- a/website/content/api-docs/catalog.mdx +++ b/website/content/api-docs/catalog.mdx @@ -16,7 +16,7 @@ API methods look similar. This endpoint is a low-level mechanism for registering or updating entries in the catalog. It is usually preferable to instead use the -[agent endpoints](/api/agent) for registration as they are simpler and +[agent endpoints](/api-docs/agent) for registration as they are simpler and perform [anti-entropy](/docs/architecture/anti-entropy). | Method | Path | Produces | @@ -24,9 +24,9 @@ perform [anti-entropy](/docs/architecture/anti-entropy). | `PUT` | `/catalog/register` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -57,7 +57,7 @@ The table below shows this endpoint's support for for service definition names for [compatibility with external DNS](/docs/discovery/services#service-and-tag-names-with-dns). The service `Tags`, `Address`, `Meta`, and `Port` fields are all optional. For more information about these fields and the implications of setting them, - see the [Service - Agent API](/api/agent/service) page + see the [Service - Agent API](/api-docs/agent/service) page as registering services differs between using this or the Services Agent endpoint. - `Check` `(Check: nil)` - Specifies to register a check. The register API @@ -167,7 +167,7 @@ $ curl \ This endpoint is a low-level mechanism for directly removing entries from the Catalog. It is usually preferable to instead use the -[agent endpoints](/api/agent) for deregistration as they are simpler and +[agent endpoints](/api-docs/agent) for deregistration as they are simpler and perform [anti-entropy](/docs/architecture/anti-entropy). | Method | Path | Produces | @@ -175,9 +175,9 @@ perform [anti-entropy](/docs/architecture/anti-entropy). | `PUT` | `/catalog/deregister` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -257,9 +257,9 @@ Consul servers are routable. | `GET` | `/catalog/datacenters` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -292,9 +292,9 @@ This endpoint and returns the nodes registered in a given datacenter. 
| `GET` | `/catalog/nodes` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -391,9 +391,9 @@ This endpoint returns the services registered in a given datacenter. | `GET` | `/catalog/services` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -449,9 +449,9 @@ This endpoint returns the nodes providing a service in a given datacenter. | `GET` | `/catalog/service/:service` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -592,7 +592,7 @@ $ curl \ service instance. This includes both the address as well as the port. - `ServiceKind` is the kind of service, usually "". See the Agent - [service registration API](/api/agent/service#kind) for more information. + [service registration API](/api-docs/agent/service#kind) for more information. - `ServiceProxy` is the proxy config as specified in [Connect Proxies](/docs/connect/proxies). @@ -664,7 +664,7 @@ so this endpoint may be used to filter only the Connect-capable endpoints. | `GET` | `/catalog/connect/:service` | `application/json` | Parameters and response format are the same as -[`/catalog/service/:service`](/api/catalog#list-nodes-for-service). +[`/catalog/service/:service`](/api-docs/catalog#list-nodes-for-service). ## Retrieve Map of Services for a Node @@ -677,9 +677,9 @@ This endpoint returns the node's registered services. | `GET` | `/catalog/node/:node` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -808,9 +808,9 @@ This endpoint returns the node's registered services. 
| `GET` | `/catalog/node-services/:node` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -944,9 +944,9 @@ This endpoint returns the services associated with an ingress gateway or termina | `GET` | `/catalog/gateway-services/:gateway` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -1057,7 +1057,7 @@ $ curl \ - `Service.Namespace` is the Consul Enterprise namespace of a service associated with the gateway - `GatewayKind` is the kind of service, will be one of "ingress-gateway" or "terminating-gateway". See the Agent - [service registration API](/api/agent/service#kind) for more information. + [service registration API](/api-docs/agent/service#kind) for more information. - `CAFile` is the path to a CA file the gateway will use for TLS origination to the associated service diff --git a/website/content/api-docs/config.mdx b/website/content/api-docs/config.mdx index 151af061f..27d665aa6 100644 --- a/website/content/api-docs/config.mdx +++ b/website/content/api-docs/config.mdx @@ -24,9 +24,9 @@ This endpoint creates or updates the given config entry. | `PUT` | `/config` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -95,9 +95,9 @@ This endpoint returns a specific config entry. | `GET` | `/config/:kind/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -168,9 +168,9 @@ This endpoint returns all config entries of the given kind. | `GET` | `/config/:kind` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -244,9 +244,9 @@ This endpoint deletes the given config entry. | `DELETE` | `/config/:kind/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/connect/ca.mdx b/website/content/api-docs/connect/ca.mdx index 90a49d8c6..86c729460 100644 --- a/website/content/api-docs/connect/ca.mdx +++ b/website/content/api-docs/connect/ca.mdx @@ -21,9 +21,9 @@ the cluster. | `GET` | `/connect/ca/roots` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -113,9 +113,9 @@ This endpoint returns the current CA configuration. | `GET` | `/connect/ca/configuration` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -158,9 +158,9 @@ new root certificate being used, the [Root Rotation](/docs/connect/ca#root-certi | `PUT` | `/connect/ca/configuration` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/connect/index.mdx b/website/content/api-docs/connect/index.mdx index 508c30b1d..4c5d209aa 100644 --- a/website/content/api-docs/connect/index.mdx +++ b/website/content/api-docs/connect/index.mdx @@ -13,7 +13,7 @@ The `/connect` endpoints provide access to intentions and the certificate authority. There are also Connect-related endpoints in the -[Agent](/api/agent) and [Catalog](/api/catalog) APIs. For example, +[Agent](/api-docs/agent) and [Catalog](/api-docs/catalog) APIs. For example, the API for requesting a TLS certificate for a service is part of the agent APIs. And the catalog API has an endpoint for finding all Connect-capable services in the catalog. 
diff --git a/website/content/api-docs/connect/intentions.mdx b/website/content/api-docs/connect/intentions.mdx index 2ffcc58bc..49b6ac660 100644 --- a/website/content/api-docs/connect/intentions.mdx +++ b/website/content/api-docs/connect/intentions.mdx @@ -36,9 +36,9 @@ be persisted using this endpoint and will require editing the enclosing | `PUT` | `/connect/intentions/exact` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -147,9 +147,9 @@ existing intention or delete it prior to creating a new one. | `POST` | `/connect/intentions` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -247,9 +247,9 @@ This endpoint updates an intention with the given values. | `PUT` | `/connect/intentions/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -301,9 +301,9 @@ This endpoint reads a specific intention by its unique source and destination. | `GET` | `/connect/intentions/exact` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -378,9 +378,9 @@ This endpoint reads a specific intention. | `GET` | `/connect/intentions/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -442,9 +442,9 @@ This endpoint lists all intentions. 
| `GET` | `/connect/intentions` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -533,9 +533,9 @@ This endpoint deletes a specific intention by its unique source and destination. | `DELETE` | `/connect/intentions/exact` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -593,9 +593,9 @@ This endpoint deletes a specific intention. | `DELETE` | `/connect/intentions/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -638,7 +638,7 @@ networking layer 4 (e.g. TCP) integration. For performance and reliability reasons it is desirable to implement intention enforcement by listing [intentions that match the -destination](/api/connect/intentions#list-matching-intentions) and representing +destination](/api-docs/connect/intentions#list-matching-intentions) and representing them in the native configuration of the proxy itself (such as RBAC for Envoy). This endpoint will work even if the destination service has @@ -650,9 +650,9 @@ does not contain any information about the intention itself. | `GET` | `/connect/intentions/check` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -715,9 +715,9 @@ The intentions in the response are in evaluation order. | `GET` | `/connect/intentions/match` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/coordinate.mdx b/website/content/api-docs/coordinate.mdx index 2e6c5fe2b..bbba8afaf 100644 --- a/website/content/api-docs/coordinate.mdx +++ b/website/content/api-docs/coordinate.mdx @@ -29,9 +29,9 @@ cluster. | `GET` | `/coordinate/datacenters` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -85,9 +85,9 @@ datacenter. | `GET` | `/coordinate/nodes` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -144,9 +144,9 @@ This endpoint returns the LAN network coordinates for the given node. | `GET` | `/coordinate/node/:node` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -202,9 +202,9 @@ datacenter. | `PUT` | `/coordinate/update` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/discovery-chain.mdx b/website/content/api-docs/discovery-chain.mdx index e7e2de6e5..29adab555 100644 --- a/website/content/api-docs/discovery-chain.mdx +++ b/website/content/api-docs/discovery-chain.mdx @@ -38,9 +38,9 @@ the `POST` method must be used, otherwise `GET` is sufficient.

The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/event.mdx b/website/content/api-docs/event.mdx index c9edbe47b..1d795997a 100644 --- a/website/content/api-docs/event.mdx +++ b/website/content/api-docs/event.mdx @@ -20,9 +20,9 @@ This endpoint triggers a new user event. | `PUT` | `/event/fire/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -100,9 +100,9 @@ nor do they make a promise of delivery. | `GET` | `/event/list` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/features/consistency.mdx b/website/content/api-docs/features/consistency.mdx index a50658069..71fd92ffb 100644 --- a/website/content/api-docs/features/consistency.mdx +++ b/website/content/api-docs/features/consistency.mdx @@ -42,7 +42,7 @@ should be provided on requests. It is an error to provide both. Note that some endpoints support a `cached` parameter which has some of the same semantics as `stale` but different trade offs. This behavior is described in -[agent caching feature documentation](/api/features/caching). +[agent caching feature documentation](/api-docs/features/caching). To support bounding the acceptable staleness of data, responses provide the `X-Consul-LastContact` header containing the time in milliseconds that a server diff --git a/website/content/api-docs/features/filtering.mdx b/website/content/api-docs/features/filtering.mdx index bcf86b9d8..b4a25b242 100644 --- a/website/content/api-docs/features/filtering.mdx +++ b/website/content/api-docs/features/filtering.mdx @@ -122,7 +122,7 @@ example, the following two expressions would be equivalent. Generally, only the main object is filtered. When filtering for an item within an array that is not at the top level, the entire array that contains the item will be returned. This is usually the outermost object of a response, -but in some cases such the [`/catalog/node/:node`](/api/catalog#list-services-for-node) +but in some cases such the [`/catalog/node/:node`](/api-docs/catalog#list-services-for-node) endpoint the filtering is performed on a object embedded within the results. 
### Performance diff --git a/website/content/api-docs/health.mdx b/website/content/api-docs/health.mdx index 1ae4c2701..6d8a1c677 100644 --- a/website/content/api-docs/health.mdx +++ b/website/content/api-docs/health.mdx @@ -25,9 +25,9 @@ This endpoint returns the checks specific to the node provided on the path. | `GET` | `/health/node/:node` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -122,9 +122,9 @@ path. | `GET` | `/health/checks/:service` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -214,9 +214,9 @@ incorporating the use of health checks. | `GET` | `/health/service/:service` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -422,7 +422,7 @@ so this endpoint may be used to filter only the Connect-capable endpoints. | `GET` | `/health/connect/:service` | `application/json` | Parameters and response format are the same as -[`/health/service/:service`](/api/health#list-nodes-for-service). +[`/health/service/:service`](/api-docs/health#list-nodes-for-service). ## List Service Instances for Ingress Gateways Associated with a Service @@ -438,10 +438,10 @@ gateway](/docs/connect/gateways/ingress-gateway) for a service in a given datace | `GET` | `/health/ingress/:service` | `application/json` | Parameters and response format are the same as -[`/health/service/:service`](/api/health#list-nodes-for-service). +[`/health/service/:service`](/api-docs/health#list-nodes-for-service). **Note** that unlike `/health/connect/:service` and `/health/service/:service` this -endpoint does not support the [streaming backend](/api/features/blocking#streaming-backend). +endpoint does not support the [streaming backend](/api-docs/features/blocking#streaming-backend). ## List Checks in State @@ -455,9 +455,9 @@ This endpoint returns the checks in the state provided on the path. | `GET` | `/health/state/:state` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/kv.mdx b/website/content/api-docs/kv.mdx index 029c3ae81..ba77717f4 100644 --- a/website/content/api-docs/kv.mdx +++ b/website/content/api-docs/kv.mdx @@ -19,7 +19,7 @@ replication between datacenters, please view the ~> Values in the KV store cannot be larger than 512kb. In order to perform atomic operations on multiple KV pairs (up to a limit of 64) -please consider using [transactions](/api/txn) instead. +please consider using [transactions](/api-docs/txn) instead. ## Read Key @@ -27,7 +27,7 @@ This endpoint returns the specified key. If no key exists at the given path, a 404 is returned instead of a 200 response. For multi-key reads (up to a limit of 64 KV operations) please consider using -[transactions](/api/txn) instead. +[transactions](/api-docs/txn) instead. If the [`recurse`](#recurse) or [`keys`](#keys) query parameters are `true`, this endpoint will return an array of keys. In this case, @@ -40,9 +40,9 @@ Refer to the [HTTP API documentation](/api-docs#results-filtered-by-acls) for mo | `GET` | `/kv/:key` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -172,9 +172,9 @@ Even though the return type is `application/json`, the value is either `true` or `false`, indicating whether the create/update succeeded. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -260,9 +260,9 @@ This endpoint deletes a single key or all keys sharing a prefix. | `DELETE` | `/kv/:key` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/namespaces.mdx b/website/content/api-docs/namespaces.mdx index c14c9a721..67fc5d6b3 100644 --- a/website/content/api-docs/namespaces.mdx +++ b/website/content/api-docs/namespaces.mdx @@ -20,9 +20,9 @@ This endpoint creates a new Namespace. | `PUT` | `/namespace` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -151,9 +151,9 @@ This endpoint reads a Namespace with the given name. | `GET` | `/namespace/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -222,9 +222,9 @@ This endpoint updates a Namespace. | `PUT` | `/namespace/:name` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -361,9 +361,9 @@ This endpoint will return no data. Success or failure is indicated by the status code returned. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -434,9 +434,9 @@ privileges of the ACL token used for the request. | `GET` | `/namespaces` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/operator/area.mdx b/website/content/api-docs/operator/area.mdx index 505057666..6f10fa2dd 100644 --- a/website/content/api-docs/operator/area.mdx +++ b/website/content/api-docs/operator/area.mdx @@ -36,9 +36,9 @@ successfully. | `POST` | `/operator/area` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -104,9 +104,9 @@ This endpoint lists all network areas. 
| `GET` | `/operator/area` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -149,9 +149,9 @@ This endpoint updates a network area to the given configuration. | `PUT` | `/operator/area/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -195,9 +195,9 @@ This endpoint lists a specific network area. | `GET` | `/operator/area/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -241,9 +241,9 @@ This endpoint deletes a specific network area. | `DELETE` | `/operator/area/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -279,9 +279,9 @@ area. | `PUT` | `/operator/area/:uuid/join` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -354,9 +354,9 @@ network area. | `GET` | `/operator/area/:uuid/members` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/operator/autopilot.mdx b/website/content/api-docs/operator/autopilot.mdx index 89ffb67a3..e1d050871 100644 --- a/website/content/api-docs/operator/autopilot.mdx +++ b/website/content/api-docs/operator/autopilot.mdx @@ -24,9 +24,9 @@ This endpoint retrieves its latest Autopilot configuration. | `GET` | `/operator/autopilot/configuration` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -80,9 +80,9 @@ This endpoint updates the Autopilot configuration of the cluster. | `PUT` | `/operator/autopilot/configuration` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -159,9 +159,9 @@ This endpoint queries the health of the autopilot status. | `GET` | `/operator/autopilot/health` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -264,9 +264,9 @@ This endpoint queries the health of the autopilot status. | `GET` | `/operator/autopilot/state` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/operator/keyring.mdx b/website/content/api-docs/operator/keyring.mdx index 3b3690101..83d1b89bd 100644 --- a/website/content/api-docs/operator/keyring.mdx +++ b/website/content/api-docs/operator/keyring.mdx @@ -26,9 +26,9 @@ read privileges. | `GET` | `/operator/keyring` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -113,9 +113,9 @@ This endpoint installs a new gossip encryption key into the cluster. | `POST` | `/operator/keyring` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -161,9 +161,9 @@ installed before this operation can succeed. | `PUT` | `/operator/keyring` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -209,9 +209,9 @@ may only be performed on keys which are not currently the primary key. | `DELETE` | `/operator/keyring` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/operator/license.mdx b/website/content/api-docs/operator/license.mdx index ff0c0b657..c2c9186fe 100644 --- a/website/content/api-docs/operator/license.mdx +++ b/website/content/api-docs/operator/license.mdx @@ -22,9 +22,9 @@ This endpoint gets information about the current license. | `GET` | `/operator/license` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -89,9 +89,9 @@ license contents as well as any warning messages regarding its validity. | `PUT` | `/operator/license` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -161,9 +161,9 @@ This endpoint resets the Consul license to the license included in the Enterpris | `DELETE` | `/operator/license` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/operator/raft.mdx b/website/content/api-docs/operator/raft.mdx index 5225208a3..8d1259454 100644 --- a/website/content/api-docs/operator/raft.mdx +++ b/website/content/api-docs/operator/raft.mdx @@ -23,9 +23,9 @@ This endpoint reads the current raft configuration. | `GET` | `/operator/raft/configuration` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -43,7 +43,7 @@ The table below shows this endpoint's support for Raft configuration from any of the Consul servers. Not setting this will choose the default consistency mode which will forward the request to the leader for processing but not re-confirm the server is still the leader before returning - results. See [default consistency](/api/features/consistency#default) for more details. + results. See [default consistency](/api-docs/features/consistency#default) for more details. ### Sample Request @@ -121,9 +121,9 @@ write privileges. | `DELETE` | `/operator/raft/peer` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/operator/segment.mdx b/website/content/api-docs/operator/segment.mdx index 713fae4e6..360a6f1b0 100644 --- a/website/content/api-docs/operator/segment.mdx +++ b/website/content/api-docs/operator/segment.mdx @@ -29,9 +29,9 @@ This endpoint lists all network areas. | `GET` | `/operator/segment` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/query.mdx b/website/content/api-docs/query.mdx index 2adaa152e..c91cdacdd 100644 --- a/website/content/api-docs/query.mdx +++ b/website/content/api-docs/query.mdx @@ -141,9 +141,9 @@ successfully. | `POST` | `/query` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -306,9 +306,9 @@ This endpoint returns a list of all prepared queries. | `GET` | `/query` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -369,9 +369,9 @@ given ID, an error is returned. | `PUT` | `/query/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -409,9 +409,9 @@ given ID, an error is returned. | `GET` | `/query/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -449,9 +449,9 @@ given ID, an error is returned. | `DELETE` | `/query/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). 
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -489,9 +489,9 @@ Refer to the [HTTP API documentation](/api-docs#results-filtered-by-acls) for mo | `GET` | `/query/:uuid/execute` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -618,9 +618,9 @@ interpolation. | `GET` | `/query/:uuid/explain` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/session.mdx b/website/content/api-docs/session.mdx index efaccbcc3..15415c0c4 100644 --- a/website/content/api-docs/session.mdx +++ b/website/content/api-docs/session.mdx @@ -18,9 +18,9 @@ node and may be associated with any number of checks. | `PUT` | `/session/create` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -127,9 +127,9 @@ either a literal `true` or `false`, indicating of whether the destroy was successful. The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -173,9 +173,9 @@ This endpoint returns the requested session information. | `GET` | `/session/info/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -237,9 +237,9 @@ This endpoint returns the active sessions for a given node. 
| `GET` | `/session/node/:node` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -301,9 +301,9 @@ This endpoint returns the list of active sessions. | `GET` | `/session/list` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -360,9 +360,9 @@ TTL, and it extends the expiration by the TTL. | `PUT` | `/session/renew/:uuid` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/snapshot.mdx b/website/content/api-docs/snapshot.mdx index 885cdd0a3..2e3419279 100644 --- a/website/content/api-docs/snapshot.mdx +++ b/website/content/api-docs/snapshot.mdx @@ -30,9 +30,9 @@ restore. | `GET` | `/snapshot` | `200 application/x-gzip` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -87,9 +87,9 @@ call to the `GET` method. | `PUT` | `/snapshot` | `200 text/plain (empty body)` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/status.mdx b/website/content/api-docs/status.mdx index c7cb3e557..8948a5a7f 100644 --- a/website/content/api-docs/status.mdx +++ b/website/content/api-docs/status.mdx @@ -23,9 +23,9 @@ running. 
| `GET` | `/status/leader` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -61,9 +61,9 @@ determining when a given server has successfully joined the cluster. | `GET` | `/status/peers` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | diff --git a/website/content/api-docs/txn.mdx b/website/content/api-docs/txn.mdx index bf5a3fdfa..2fa10bcb0 100644 --- a/website/content/api-docs/txn.mdx +++ b/website/content/api-docs/txn.mdx @@ -36,9 +36,9 @@ the leader via the Raft consensus protocol. | `PUT` | `/txn` | `application/json` | The table below shows this endpoint's support for -[blocking queries](/api/features/blocking), -[consistency modes](/api/features/consistency), -[agent caching](/api/features/caching), and +[blocking queries](/api-docs/features/blocking), +[consistency modes](/api-docs/features/consistency), +[agent caching](/api-docs/features/caching), and [required ACLs](/api#authentication). | Blocking Queries | Consistency Modes | Agent Caching | ACL Required | @@ -86,7 +86,7 @@ The table below shows this endpoint's support for - `Verb` `(string: )` - Specifies the type of operation to perform. - `Node` `(Node: )` - Specifies the node information to use - for the operation. See the [catalog endpoint](/api/catalog#parameters) for the fields in this object. Note the only the node can be specified here, not any services or checks - separate service or check operations must be used for those. + for the operation. See the [catalog endpoint](/api-docs/catalog#parameters) for the fields in this object. Note the only the node can be specified here, not any services or checks - separate service or check operations must be used for those. - `Service` operations have the following fields: @@ -96,14 +96,14 @@ The table below shows this endpoint's support for this service operation. - `Service` `(Service: )` - Specifies the service instance information to use - for the operation. See the [catalog endpoint](/api/catalog#parameters) for the fields in this object. + for the operation. See the [catalog endpoint](/api-docs/catalog#parameters) for the fields in this object. - `Check` operations have the following fields: - `Verb` `(string: )` - Specifies the type of operation to perform. - `Check` `(Service: )` - Specifies the check to use - for the operation. See the [catalog endpoint](/api/catalog#parameters) for the fields in this object. + for the operation. See the [catalog endpoint](/api-docs/catalog#parameters) for the fields in this object. Please see the table below for available verbs. 
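To make the shape of these transaction operations concrete, the sketch below shows one way a single `PUT /v1/txn` request combining a KV write and a node registration might be sent to a local agent at the default address. The key, node name, and address are placeholder values, and the KV `Value` field must be base64-encoded (`YmFy` is `bar`).

```shell-session
$ curl \
    --request PUT \
    --data '
[
  {
    "KV": {
      "Verb": "set",
      "Key": "config/app/example",
      "Value": "YmFy"
    }
  },
  {
    "Node": {
      "Verb": "set",
      "Node": {
        "Node": "example-node",
        "Address": "192.168.0.10"
      }
    }
  }
]' \
    http://127.0.0.1:8500/v1/txn
```

If any operation in the transaction fails, none of them are applied and the response reports the errors by operation index.
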
diff --git a/website/content/commands/acl/auth-method/create.mdx b/website/content/commands/acl/auth-method/create.mdx index a47477493..dfdce5d13 100644 --- a/website/content/commands/acl/auth-method/create.mdx +++ b/website/content/commands/acl/auth-method/create.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/acl/auth-method](/api-docs/acl/aut The `acl auth-method create` command creates new auth methods. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/auth-method/delete.mdx b/website/content/commands/acl/auth-method/delete.mdx index 3ba8324a4..a0865825b 100644 --- a/website/content/commands/acl/auth-method/delete.mdx +++ b/website/content/commands/acl/auth-method/delete.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[DELETE\] /v1/acl/auth-method/:name](/api-doc The `acl auth-method delete` command deletes an auth method. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/auth-method/index.mdx b/website/content/commands/acl/auth-method/index.mdx index 1ae28de78..db92f6f35 100644 --- a/website/content/commands/acl/auth-method/index.mdx +++ b/website/content/commands/acl/auth-method/index.mdx @@ -11,7 +11,7 @@ The `acl auth-method` command is used to manage Consul's ACL auth methods. It exposes commands for creating, updating, reading, deleting, and listing auth methods. This command is available in Consul 1.5.0 and newer. -ACL auth methods may also be managed via the [HTTP API](/api/acl/auth-methods). +ACL auth methods may also be managed via the [HTTP API](/api-docs/acl/auth-methods). -> **Note:** All of the example subcommands in this document will require a valid Consul token with the appropriate permissions. Either set the diff --git a/website/content/commands/acl/auth-method/list.mdx b/website/content/commands/acl/auth-method/list.mdx index 643e1c02c..93387e588 100644 --- a/website/content/commands/acl/auth-method/list.mdx +++ b/website/content/commands/acl/auth-method/list.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/auth-methods](/api-docs/acl/au The `acl auth-method list` command lists all auth methods. By default it will not show metadata. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/acl/auth-method/read.mdx b/website/content/commands/acl/auth-method/read.mdx index fbe7d055c..f9ec06619 100644 --- a/website/content/commands/acl/auth-method/read.mdx +++ b/website/content/commands/acl/auth-method/read.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/auth-method/:name](/api-docs/a The `acl auth-method read` command reads and displays an auth method's details. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/auth-method/update.mdx b/website/content/commands/acl/auth-method/update.mdx index c960a09d0..a7fae8dc3 100644 --- a/website/content/commands/acl/auth-method/update.mdx +++ b/website/content/commands/acl/auth-method/update.mdx @@ -15,7 +15,7 @@ provided to the command invocation. Therefore to update just one field, only the `-name` options and the option to modify must be provided. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/binding-rule/create.mdx b/website/content/commands/acl/binding-rule/create.mdx index df65da77a..9df0d2691 100644 --- a/website/content/commands/acl/binding-rule/create.mdx +++ b/website/content/commands/acl/binding-rule/create.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/acl/binding-rule](/api-docs/acl/bi The `acl binding-rule create` command creates new binding rules. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/binding-rule/delete.mdx b/website/content/commands/acl/binding-rule/delete.mdx index e59d533fd..0ef61278b 100644 --- a/website/content/commands/acl/binding-rule/delete.mdx +++ b/website/content/commands/acl/binding-rule/delete.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[DELETE\] /v1/acl/binding-rule/:id](/api-docs The `acl binding-rule delete` command deletes a binding rule. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/acl/binding-rule/index.mdx b/website/content/commands/acl/binding-rule/index.mdx index 440bd92e0..911ada43d 100644 --- a/website/content/commands/acl/binding-rule/index.mdx +++ b/website/content/commands/acl/binding-rule/index.mdx @@ -11,7 +11,7 @@ The `acl binding-rule` command is used to manage Consul's ACL binding rules. It exposes commands for creating, updating, reading, deleting, and listing binding rules. This command is available in Consul 1.5.0 and newer. -ACL binding rules may also be managed via the [HTTP API](/api/acl/binding-rules). +ACL binding rules may also be managed via the [HTTP API](/api-docs/acl/binding-rules). -> **Note:** All of the example subcommands in this document will require a valid Consul token with the appropriate permissions. Either set the diff --git a/website/content/commands/acl/binding-rule/list.mdx b/website/content/commands/acl/binding-rule/list.mdx index 1753f3f9b..322d647d2 100644 --- a/website/content/commands/acl/binding-rule/list.mdx +++ b/website/content/commands/acl/binding-rule/list.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/binding-rules](/api-docs/acl/b The `acl binding-rule list` command lists all binding rules. By default it will not show metadata. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/binding-rule/read.mdx b/website/content/commands/acl/binding-rule/read.mdx index 15b66acb6..1ac0e2c85 100644 --- a/website/content/commands/acl/binding-rule/read.mdx +++ b/website/content/commands/acl/binding-rule/read.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/binding-rule/:id](/api-docs/ac The `acl binding-rule read` command reads and displays a binding rules details. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/binding-rule/update.mdx b/website/content/commands/acl/binding-rule/update.mdx index 2fb1496ab..320979663 100644 --- a/website/content/commands/acl/binding-rule/update.mdx +++ b/website/content/commands/acl/binding-rule/update.mdx @@ -15,7 +15,7 @@ provided to the command invocation. Therefore to update just one field, only the `-id` option and the option to modify must be provided. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/acl/bootstrap.mdx b/website/content/commands/acl/bootstrap.mdx index 28592f0e7..534674267 100644 --- a/website/content/commands/acl/bootstrap.mdx +++ b/website/content/commands/acl/bootstrap.mdx @@ -15,7 +15,7 @@ will be disabled. If all tokens are lost and you need to bootstrap again you can [reset procedure](https://learn.hashicorp.com/consul/security-networking/acl-troubleshooting?utm_source=consul.io&utm_medium=docs#reset-the-acl-system). The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/policy/create.mdx b/website/content/commands/acl/policy/create.mdx index 8cdcd20b7..2cf29f87b 100644 --- a/website/content/commands/acl/policy/create.mdx +++ b/website/content/commands/acl/policy/create.mdx @@ -20,7 +20,7 @@ To load the value from a file prefix the value with an `@`. Any other values will be used directly. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/policy/delete.mdx b/website/content/commands/acl/policy/delete.mdx index c903545c7..9449a5edc 100644 --- a/website/content/commands/acl/policy/delete.mdx +++ b/website/content/commands/acl/policy/delete.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[DELETE\] /v1/acl/policy/:id](/api-docs/acl/p The `acl policy delete` command deletes a policy. Policies may be deleted by their ID or by name. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/policy/index.mdx b/website/content/commands/acl/policy/index.mdx index 79f4a136a..dcfdbdf31 100644 --- a/website/content/commands/acl/policy/index.mdx +++ b/website/content/commands/acl/policy/index.mdx @@ -11,7 +11,7 @@ The `acl policy` command is used to manage Consul's ACL policies. It exposes commands for creating, updating, reading, deleting, and listing policies. This command is available in Consul 1.4.0 and newer. -ACL policies may also be managed via the [HTTP API](/api/acl/policies). +ACL policies may also be managed via the [HTTP API](/api-docs/acl/policies). -> **Note:** All of the example subcommands in this document will require a valid Consul token with the appropriate permissions. 
Either set the diff --git a/website/content/commands/acl/policy/list.mdx b/website/content/commands/acl/policy/list.mdx index dd2a6606b..acb0bebae 100644 --- a/website/content/commands/acl/policy/list.mdx +++ b/website/content/commands/acl/policy/list.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/policies](/api-docs/acl/polici The `acl policy list` command lists all policies. By default it will not show metadata. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/policy/read.mdx b/website/content/commands/acl/policy/read.mdx index 0f45b18cb..119a71c1d 100644 --- a/website/content/commands/acl/policy/read.mdx +++ b/website/content/commands/acl/policy/read.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoints: [\[GET\] /v1/acl/policy/:id](/api-docs/acl/pol The `acl policy read` command reads and displays a policies details. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/policy/update.mdx b/website/content/commands/acl/policy/update.mdx index 4beeec210..c3b23278e 100644 --- a/website/content/commands/acl/policy/update.mdx +++ b/website/content/commands/acl/policy/update.mdx @@ -16,7 +16,7 @@ policies requires both the `-id` and `-name` as the new name cannot yet be used policy. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/role/create.mdx b/website/content/commands/acl/role/create.mdx index 37b3a9a00..fea5919b8 100644 --- a/website/content/commands/acl/role/create.mdx +++ b/website/content/commands/acl/role/create.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/acl/role](/api-docs/acl/roles#crea The `acl role create` command creates new roles. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/role/delete.mdx b/website/content/commands/acl/role/delete.mdx index 3f37cf51e..d0b9823d2 100644 --- a/website/content/commands/acl/role/delete.mdx +++ b/website/content/commands/acl/role/delete.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[DELETE\] /v1/acl/role/:id](/api-docs/acl/rol The `acl role delete` command deletes a role. Roles may be deleted by their ID or by name. 
The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/role/index.mdx b/website/content/commands/acl/role/index.mdx index 422c514af..b14cd6f63 100644 --- a/website/content/commands/acl/role/index.mdx +++ b/website/content/commands/acl/role/index.mdx @@ -11,7 +11,7 @@ The `acl role` command is used to manage Consul's ACL roles. It exposes commands for creating, updating, reading, deleting, and listing roles. This command is available in Consul 1.5.0 and newer. -ACL roles may also be managed via the [HTTP API](/api/acl/roles). +ACL roles may also be managed via the [HTTP API](/api-docs/acl/roles). -> **Note:** All of the example subcommands in this document will require a valid Consul token with the appropriate permissions. Either set the diff --git a/website/content/commands/acl/role/list.mdx b/website/content/commands/acl/role/list.mdx index 9ba9115ce..dd973c630 100644 --- a/website/content/commands/acl/role/list.mdx +++ b/website/content/commands/acl/role/list.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/roles](/api-docs/acl/roles#lis The `acl role list` command lists all roles. By default it will not show metadata. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/role/read.mdx b/website/content/commands/acl/role/read.mdx index b28aa6bfd..ef45e636c 100644 --- a/website/content/commands/acl/role/read.mdx +++ b/website/content/commands/acl/role/read.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoints: [\[GET\] /v1/acl/role/:id](/api-docs/acl/roles The `acl role read` command reads and displays a roles details. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/role/update.mdx b/website/content/commands/acl/role/update.mdx index 17469059c..52133e117 100644 --- a/website/content/commands/acl/role/update.mdx +++ b/website/content/commands/acl/role/update.mdx @@ -16,7 +16,7 @@ modify must be provided. Note that renaming roles requires both the `-id` and `-name` as the new name cannot yet be used to lookup the role. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/acl/set-agent-token.mdx b/website/content/commands/acl/set-agent-token.mdx index af142a694..201e8b6ed 100644 --- a/website/content/commands/acl/set-agent-token.mdx +++ b/website/content/commands/acl/set-agent-token.mdx @@ -17,7 +17,7 @@ is `true`, so tokens will need to be updated again if that option is `false` and the agent is restarted. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/token/clone.mdx b/website/content/commands/acl/token/clone.mdx index 2b7b3ca9b..7dafeb34e 100644 --- a/website/content/commands/acl/token/clone.mdx +++ b/website/content/commands/acl/token/clone.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/acl/token/:AccessorID/clone](/api- The `acl token clone` command clones an existing token. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/token/create.mdx b/website/content/commands/acl/token/create.mdx index b9e2e0ae1..8690e5b76 100644 --- a/website/content/commands/acl/token/create.mdx +++ b/website/content/commands/acl/token/create.mdx @@ -14,7 +14,7 @@ either the `-policy-id` or the `-policy-name` options. When specifying policies may use a unique prefix of the UUID as a shortcut for specifying the entire UUID. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/token/delete.mdx b/website/content/commands/acl/token/delete.mdx index 20545e319..eee3e5db6 100644 --- a/website/content/commands/acl/token/delete.mdx +++ b/website/content/commands/acl/token/delete.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[DELETE\] /v1/acl/token/:AccessorID](/api-doc The `acl token delete` command deletes a token. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/token/index.mdx b/website/content/commands/acl/token/index.mdx index 2198eb572..9c3e497d7 100644 --- a/website/content/commands/acl/token/index.mdx +++ b/website/content/commands/acl/token/index.mdx @@ -11,7 +11,7 @@ The `acl token` command is used to manage Consul's ACL tokens. It exposes commands for creating, updating, reading, deleting, and listing tokens. 
This command is available in Consul 1.4.0 and newer. -ACL tokens may also be managed via the [HTTP API](/api/acl/tokens). +ACL tokens may also be managed via the [HTTP API](/api-docs/acl/tokens). -> **Note:** All of the example subcommands in this document will require a valid Consul token with the appropriate permissions. Either set the diff --git a/website/content/commands/acl/token/list.mdx b/website/content/commands/acl/token/list.mdx index 0439dc7ed..f3a64c1ca 100644 --- a/website/content/commands/acl/token/list.mdx +++ b/website/content/commands/acl/token/list.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/tokens](/api-docs/acl/tokens#l The `acl token list` command lists all tokens. By default it will not show metadata. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/token/read.mdx b/website/content/commands/acl/token/read.mdx index 6d3aefadb..e249e1b38 100644 --- a/website/content/commands/acl/token/read.mdx +++ b/website/content/commands/acl/token/read.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/token/:AccessorID](/api-docs/a The `acl token read` command reads and displays a token details. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/token/update.mdx b/website/content/commands/acl/token/update.mdx index ca9c04e1f..b7c2b5bbc 100644 --- a/website/content/commands/acl/token/update.mdx +++ b/website/content/commands/acl/token/update.mdx @@ -13,7 +13,7 @@ The `acl token update` command will update a token. Some parts of the token like token is local to the datacenter cannot be changed. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/acl/translate-rules.mdx b/website/content/commands/acl/translate-rules.mdx index 6501b56e0..1255de367 100644 --- a/website/content/commands/acl/translate-rules.mdx +++ b/website/content/commands/acl/translate-rules.mdx @@ -15,7 +15,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/acl/rules/translate/:accessor_id]( This command translates the legacy ACL rule syntax into the new syntax. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/catalog/datacenters.mdx b/website/content/commands/catalog/datacenters.mdx index 8d329b786..1f823d1d1 100644 --- a/website/content/commands/catalog/datacenters.mdx +++ b/website/content/commands/catalog/datacenters.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/catalog/datacenters](/api-docs/cat The `catalog datacenters` command prints all known datacenters. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/catalog/index.mdx b/website/content/commands/catalog/index.mdx index c95bae537..63b213a6c 100644 --- a/website/content/commands/catalog/index.mdx +++ b/website/content/commands/catalog/index.mdx @@ -11,7 +11,7 @@ The `catalog` command is used to interact with Consul's catalog via the command line. It exposes top-level commands for reading and filtering data from the registry. -The catalog is also accessible via the [HTTP API](/api/catalog). +The catalog is also accessible via the [HTTP API](/api-docs/catalog). ## Basic Examples diff --git a/website/content/commands/catalog/nodes.mdx b/website/content/commands/catalog/nodes.mdx index 386948d29..7537ce1ef 100644 --- a/website/content/commands/catalog/nodes.mdx +++ b/website/content/commands/catalog/nodes.mdx @@ -14,7 +14,7 @@ It can also query for nodes that match a particular metadata or provide a particular service. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -84,7 +84,7 @@ Usage: `consul catalog nodes [options]` - `-filter=` - Expression to use for filtering the results. Can be passed via stdin by using `-` for the value or from a file by passing `@`. - See the [`/catalog/nodes` API documentation](/api/catalog#filtering) for a + See the [`/catalog/nodes` API documentation](/api-docs/catalog#filtering) for a description of what is filterable. #### Enterprise Options diff --git a/website/content/commands/catalog/services.mdx b/website/content/commands/catalog/services.mdx index 8c414653c..d4653f04f 100644 --- a/website/content/commands/catalog/services.mdx +++ b/website/content/commands/catalog/services.mdx @@ -14,7 +14,7 @@ for services that match particular metadata or list the services that a particular node provides. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/config/delete.mdx b/website/content/commands/config/delete.mdx index a869d17aa..bee966700 100644 --- a/website/content/commands/config/delete.mdx +++ b/website/content/commands/config/delete.mdx @@ -14,7 +14,7 @@ kind and name. 
See the [configuration entries docs](/docs/agent/config-entries) for more details about configuration entries. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required1 | diff --git a/website/content/commands/config/list.mdx b/website/content/commands/config/list.mdx index a2e5a7c49..8ceec02b6 100644 --- a/website/content/commands/config/list.mdx +++ b/website/content/commands/config/list.mdx @@ -14,7 +14,7 @@ See the [configuration entries docs](/docs/agent/config-entries) for more details about configuration entries. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required1 | diff --git a/website/content/commands/config/read.mdx b/website/content/commands/config/read.mdx index 3df4009b3..b57f74de9 100644 --- a/website/content/commands/config/read.mdx +++ b/website/content/commands/config/read.mdx @@ -15,7 +15,7 @@ kind and name and outputs its JSON representation. See the details about configuration entries. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required1 | diff --git a/website/content/commands/config/write.mdx b/website/content/commands/config/write.mdx index 214b92a24..01c0b7d06 100644 --- a/website/content/commands/config/write.mdx +++ b/website/content/commands/config/write.mdx @@ -14,7 +14,7 @@ See the [configuration entries docs](/docs/agent/config-entries) for more details about configuration entries. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required1 | diff --git a/website/content/commands/connect/ca.mdx b/website/content/commands/connect/ca.mdx index 3b037ea4a..e00be9f94 100644 --- a/website/content/commands/connect/ca.mdx +++ b/website/content/commands/connect/ca.mdx @@ -43,7 +43,7 @@ Subcommands: This command displays the current CA configuration. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -78,7 +78,7 @@ being used, the [Root Rotation](/docs/connect/ca#root-certificate-rotation) proc will be triggered. The table below shows this command's [required ACLs](/api#authentication). 
Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -99,7 +99,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/connect/ca/configuration](/api-doc - `-config-file` - (required) Specifies a JSON-formatted file to use for the new configuration. The format of this config file matches the request payload documented in the - [Update CA Configuration API](/api/connect/ca#update-ca-configuration). + [Update CA Configuration API](/api-docs/connect/ca#update-ca-configuration). - `-force-without-cross-signing` `(bool: )` - Indicates that the CA change should be forced to complete even if the current CA doesn't support cross diff --git a/website/content/commands/event.mdx b/website/content/commands/event.mdx index 96edc4d34..ec85dba6c 100644 --- a/website/content/commands/event.mdx +++ b/website/content/commands/event.mdx @@ -38,7 +38,7 @@ parameters of the event, but the payload should be kept very small (< 100 bytes). Specifying too large of an event will return an error. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/force-leave.mdx b/website/content/commands/force-leave.mdx index f923b746f..399bf412b 100644 --- a/website/content/commands/force-leave.mdx +++ b/website/content/commands/force-leave.mdx @@ -33,7 +33,7 @@ if the agent returns after transitioning to the "left" state, but before it is r from the member list, then it will rejoin the cluster. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/intention/check.mdx b/website/content/commands/intention/check.mdx index a8641b653..02b6abb73 100644 --- a/website/content/commands/intention/check.mdx +++ b/website/content/commands/intention/check.mdx @@ -24,7 +24,7 @@ defined as _deny_ intentions during evaluation, as this endpoint is only suited for networking layer 4 (e.g. TCP) integration. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/intention/create.mdx b/website/content/commands/intention/create.mdx index 37b70b92b..4b3cd5d43 100644 --- a/website/content/commands/intention/create.mdx +++ b/website/content/commands/intention/create.mdx @@ -18,7 +18,7 @@ Corresponding HTTP API Endpoint: [\[POST\] /v1/connect/intentions](/api-docs/con The `intention create` command creates or updates an L4 intention. 
The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/intention/delete.mdx b/website/content/commands/intention/delete.mdx index d2b58545e..474065332 100644 --- a/website/content/commands/intention/delete.mdx +++ b/website/content/commands/intention/delete.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoints: [\[DELETE\] /v1/connect/intentions/exact](/api The `intention delete` command deletes a matching intention. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/intention/get.mdx b/website/content/commands/intention/get.mdx index b1252a1b4..f61ab5e93 100644 --- a/website/content/commands/intention/get.mdx +++ b/website/content/commands/intention/get.mdx @@ -17,7 +17,7 @@ Consul 1.9.0. Intentions no longer need IDs when represented as entries. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/intention/index.mdx b/website/content/commands/intention/index.mdx index 8ba1dd996..b907d144b 100644 --- a/website/content/commands/intention/index.mdx +++ b/website/content/commands/intention/index.mdx @@ -15,7 +15,7 @@ This command is available in Consul 1.2 and later. Intentions are managed primarily via [`service-intentions`](/docs/connect/config-entries/service-intentions) config entries after Consul 1.9. Intentions may also be managed via the [HTTP -API](/api/connect/intentions). +API](/api-docs/connect/intentions). ~> **Deprecated** - This command is deprecated in Consul 1.9.0 in favor of using the [config entry CLI command](/commands/config/write). To create an diff --git a/website/content/commands/intention/list.mdx b/website/content/commands/intention/list.mdx index 03dd93248..004914a73 100644 --- a/website/content/commands/intention/list.mdx +++ b/website/content/commands/intention/list.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/connect/intentions](/api-docs/conn The `intention list` command shows all intentions including ID and precedence. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/intention/match.mdx b/website/content/commands/intention/match.mdx index 49694551a..b7d456c4f 100644 --- a/website/content/commands/intention/match.mdx +++ b/website/content/commands/intention/match.mdx @@ -17,7 +17,7 @@ The [check](/commands/intention/check) command can be used to check whether an L4 connection would be authorized between any two services. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/join.mdx b/website/content/commands/join.mdx index 2186e67e8..85125d75f 100644 --- a/website/content/commands/join.mdx +++ b/website/content/commands/join.mdx @@ -23,7 +23,7 @@ An agent which is already part of a cluster may join an agent in a different cluster, causing the two clusters to be merged into a single cluster. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/keyring.mdx b/website/content/commands/keyring.mdx index 0dbcb98b4..50494a66d 100644 --- a/website/content/commands/keyring.mdx +++ b/website/content/commands/keyring.mdx @@ -30,7 +30,7 @@ are no errors. If any node fails to reply or reports failure, the exit code will be 1. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required1 | diff --git a/website/content/commands/kv/delete.mdx b/website/content/commands/kv/delete.mdx index 6789fa810..8fe1f5bb1 100644 --- a/website/content/commands/kv/delete.mdx +++ b/website/content/commands/kv/delete.mdx @@ -13,7 +13,7 @@ The `kv delete` command removes the value from Consul's KV store at the given path. If no key exists at the path, no action is taken. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/kv/export.mdx b/website/content/commands/kv/export.mdx index 97f675966..b117de87c 100644 --- a/website/content/commands/kv/export.mdx +++ b/website/content/commands/kv/export.mdx @@ -13,7 +13,7 @@ stdout. This can be used with the command "consul kv import" to move entire trees between Consul clusters. The table below shows this command's [required ACLs](/api#authentication). 
Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/kv/get.mdx b/website/content/commands/kv/get.mdx index 044e557bb..44e2c8bc8 100644 --- a/website/content/commands/kv/get.mdx +++ b/website/content/commands/kv/get.mdx @@ -21,7 +21,7 @@ Consul clusters. Alternatively, the [transaction API](/api-docs/txn) provides support for performing up to 64 KV operations atomically. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/kv/import.mdx b/website/content/commands/kv/import.mdx index ab9acf229..f8ef0b670 100644 --- a/website/content/commands/kv/import.mdx +++ b/website/content/commands/kv/import.mdx @@ -11,7 +11,7 @@ The `kv import` command is used to import KV pairs from the JSON representation generated by the `kv export` command. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/kv/index.mdx b/website/content/commands/kv/index.mdx index b3c525ff8..49bfcc6b4 100644 --- a/website/content/commands/kv/index.mdx +++ b/website/content/commands/kv/index.mdx @@ -13,7 +13,7 @@ and deleting from the store. This command is available in Consul 0.7.1 and later. The KV store is also accessible via the -[HTTP API](/api/kv). +[HTTP API](/api-docs/kv). ## Usage diff --git a/website/content/commands/kv/put.mdx b/website/content/commands/kv/put.mdx index 8769d726f..f8c9dd137 100644 --- a/website/content/commands/kv/put.mdx +++ b/website/content/commands/kv/put.mdx @@ -17,7 +17,7 @@ The `kv put` command writes the data to the given path in the KV store. 64 KV operations atomically. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/leave.mdx b/website/content/commands/leave.mdx index 527a617bf..a987338b8 100644 --- a/website/content/commands/leave.mdx +++ b/website/content/commands/leave.mdx @@ -26,7 +26,7 @@ Running `consul leave` on a server explicitly will reduce the quorum size. Even This means you could end up with just one server that is still able to commit writes because quorum is only 1, but those writes might be lost if that server fails before more are added. The table below shows this command's [required ACLs](/api#authentication). 
Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/license.mdx b/website/content/commands/license.mdx index e8073e844..7b19228d4 100644 --- a/website/content/commands/license.mdx +++ b/website/content/commands/license.mdx @@ -129,7 +129,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/operator/license](/api-docs/operat This command sets the Consul Enterprise license. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -169,7 +169,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/operator/license](/api-docs/operat This command gets the Consul Enterprise license. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -214,7 +214,7 @@ Resets license for the datacenter to the one builtin in Consul binary, if it is If the builtin license is invalid, the current one stays active. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/login.mdx b/website/content/commands/login.mdx index ff04c51a4..715ba71c5 100644 --- a/website/content/commands/login.mdx +++ b/website/content/commands/login.mdx @@ -18,7 +18,7 @@ command `consul logout` should be used to destroy any tokens created this way to avoid a resource leak. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/logout.mdx b/website/content/commands/logout.mdx index 4c0af4a97..67c39910a 100644 --- a/website/content/commands/logout.mdx +++ b/website/content/commands/logout.mdx @@ -16,7 +16,7 @@ The `logout` command will destroy the provided token if it was created from `consul login`. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | diff --git a/website/content/commands/maint.mdx b/website/content/commands/maint.mdx index 221e7e67d..2bd327b34 100644 --- a/website/content/commands/maint.mdx +++ b/website/content/commands/maint.mdx @@ -22,7 +22,7 @@ critical status against a service, and deactivated by deregistering the health check. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/members.mdx b/website/content/commands/members.mdx index 98a1ac58b..fe0c3120c 100644 --- a/website/content/commands/members.mdx +++ b/website/content/commands/members.mdx @@ -22,7 +22,7 @@ reconnect with failed nodes for a certain amount of time in the case that the failure is actually just a network partition. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/namespace/create.mdx b/website/content/commands/namespace/create.mdx index ccadc6204..6fd8e083c 100644 --- a/website/content/commands/namespace/create.mdx +++ b/website/content/commands/namespace/create.mdx @@ -15,7 +15,7 @@ This `namespace create` command creates a namespaces using the CLI parameters pr This was added in Consul Enterprise 1.7.2. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/namespace/delete.mdx b/website/content/commands/namespace/delete.mdx index 95fd4b9e9..5854b585f 100644 --- a/website/content/commands/namespace/delete.mdx +++ b/website/content/commands/namespace/delete.mdx @@ -15,7 +15,7 @@ This `namespace delete` command deletes a namespace. This was added in Consul En ACLs are enabled then this command will require a token with `operator:write` privileges. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/namespace/list.mdx b/website/content/commands/namespace/list.mdx index 1ce45328c..ede6da945 100644 --- a/website/content/commands/namespace/list.mdx +++ b/website/content/commands/namespace/list.mdx @@ -17,7 +17,7 @@ within the target namespaces. The results will be filtered based on the ACL toke see a partial list. The table below shows this command's [required ACLs](/api#authentication). 
Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/namespace/read.mdx b/website/content/commands/namespace/read.mdx index b008977b7..d194a7ff6 100644 --- a/website/content/commands/namespace/read.mdx +++ b/website/content/commands/namespace/read.mdx @@ -16,7 +16,7 @@ ACLs are enabled then this command will require a token with `operator:read` pri within the target namespace. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/namespace/update.mdx b/website/content/commands/namespace/update.mdx index bcbb571b4..e74dbc66e 100644 --- a/website/content/commands/namespace/update.mdx +++ b/website/content/commands/namespace/update.mdx @@ -15,7 +15,7 @@ This `namespace update` command updates a namespaces using the CLI parameters pr This was added in Consul Enterprise 1.7.2. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/namespace/write.mdx b/website/content/commands/namespace/write.mdx index 1fe3e5d69..ddcc04348 100644 --- a/website/content/commands/namespace/write.mdx +++ b/website/content/commands/namespace/write.mdx @@ -14,7 +14,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/namespace/:name](/api-docs/namespa This `namespace write` command creates or updates a namespace's configuration from its full definition. This was added in Consul Enterprise 1.7.0. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/operator/area.mdx b/website/content/commands/operator/area.mdx index 17f2881f5..ae61c6b31 100644 --- a/website/content/commands/operator/area.mdx +++ b/website/content/commands/operator/area.mdx @@ -52,7 +52,7 @@ Corresponding HTTP API Endpoint: [\[POST\] /v1/operator/area](/api-docs/operator This command creates a new network area. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | @@ -94,7 +94,7 @@ Corresponding HTTP API Endpoint: [\[DELETE\] /v1/operator/area/:uuid](/api-docs/ This command deletes an existing network area. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -133,7 +133,7 @@ This command joins Consul servers into an existing network area by address, such an IP or hostname with an optional port. Multiple addresses may be given. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -177,7 +177,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/operator/area](/api-docs/operator/ This command lists all network areas. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -216,7 +216,7 @@ This command displays Consul server nodes present in a network area, or all areas if no area is specified. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -280,7 +280,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/operator/area/:uuid](/api-docs/ope This command updates the configuration of network area. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/operator/autopilot.mdx b/website/content/commands/operator/autopilot.mdx index ccd395b35..e78ade6a7 100644 --- a/website/content/commands/operator/autopilot.mdx +++ b/website/content/commands/operator/autopilot.mdx @@ -33,7 +33,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/operator/autopilot/configuration]( This command displays the current autopilot configuration. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. 
| ACL Required | @@ -68,7 +68,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/operator/autopilot/configuration]( Modifies the current Autopilot configuration. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -126,7 +126,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/operator/autopilot/state](/api-doc This command displays the current autopilot state. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/operator/index.mdx b/website/content/commands/operator/index.mdx index 0e0b46019..067632298 100644 --- a/website/content/commands/operator/index.mdx +++ b/website/content/commands/operator/index.mdx @@ -22,7 +22,7 @@ if required, so this can be run from any Consul node in a cluster. See the See the [Outage Recovery](https://learn.hashicorp.com/consul/day-2-operations/outage) guide for some examples of how this command is used. For an API to perform these operations programmatically, -please see the documentation for the [Operator](/api/operator) +please see the documentation for the [Operator](/api-docs/operator) endpoint. ## Usage diff --git a/website/content/commands/operator/raft.mdx b/website/content/commands/operator/raft.mdx index 2f10cac74..6c0e1f22d 100644 --- a/website/content/commands/operator/raft.mdx +++ b/website/content/commands/operator/raft.mdx @@ -34,7 +34,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/status/peers](/api-docs/status#lis This command displays the current Raft peer configuration. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -86,7 +86,7 @@ clean up by simply running instead of this command. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/reload.mdx b/website/content/commands/reload.mdx index def20e377..40d234a15 100644 --- a/website/content/commands/reload.mdx +++ b/website/content/commands/reload.mdx @@ -26,7 +26,7 @@ Not all configuration options are reloadable. See the section on the agent options page for details on which options are supported. The table below shows this command's [required ACLs](/api#authentication). 
Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/rtt.mdx b/website/content/commands/rtt.mdx index 4f7fd6e3a..3f8a1f819 100644 --- a/website/content/commands/rtt.mdx +++ b/website/content/commands/rtt.mdx @@ -18,7 +18,7 @@ See the [Network Coordinates](/docs/architecture/coordinates) internals guide for more information on how these coordinates are computed. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/services/deregister.mdx b/website/content/commands/services/deregister.mdx index 7b3afb066..e61456b69 100644 --- a/website/content/commands/services/deregister.mdx +++ b/website/content/commands/services/deregister.mdx @@ -21,7 +21,7 @@ deregister. See [Service Definition](/docs/discovery/services) for more information about registering services generally. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | diff --git a/website/content/commands/services/register.mdx b/website/content/commands/services/register.mdx index 22a960325..7c0b04446 100644 --- a/website/content/commands/services/register.mdx +++ b/website/content/commands/services/register.mdx @@ -20,10 +20,10 @@ in the Consul agent configuration directory and issuing a [reload](/commands/reload). This approach is easiest for configuration management systems that other systems that have access to the configuration directory. Clients may also use the -[HTTP API](/api/agent/service) directly. +[HTTP API](/api-docs/agent/service) directly. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -44,7 +44,7 @@ This command returns after registration succeeds. It must be paired with a deregistration command or API call to remove the service. To ensure that services are properly deregistered, it is **highly recommended** that a check is created with the -[`DeregisterCriticalServiceAfter`](/api/agent/check#deregistercriticalserviceafter) +[`DeregisterCriticalServiceAfter`](/api-docs/agent/check#deregistercriticalserviceafter) configuration set. This will ensure that even if deregistration failed for any reason, the agent will automatically deregister the service instance after it is unhealthy for the specified period of time. 
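The hunk above ends on the recommendation to pair registrations with `DeregisterCriticalServiceAfter`. As a hedged aside (the service name, port, and timeout below are invented for illustration, not taken from the patch), a registration using that setting might look like:

```shell-session
$ cat <<'EOF' > web.json
{
  "service": {
    "name": "web",
    "port": 8080,
    "check": {
      "tcp": "localhost:8080",
      "interval": "10s",
      "deregister_critical_service_after": "90m"
    }
  }
}
EOF
$ consul services register web.json
Registered service: web
```

If the check stays critical for the full 90 minutes, the agent removes the instance on its own, which is exactly the failure mode the paragraph above is guarding against.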
diff --git a/website/content/commands/snapshot/agent.mdx b/website/content/commands/snapshot/agent.mdx index 2be5a849b..f70525389 100644 --- a/website/content/commands/snapshot/agent.mdx +++ b/website/content/commands/snapshot/agent.mdx @@ -49,7 +49,7 @@ remote storage. Snapshots can be restored using the [`consul snapshot restore`](/commands/snapshot/restore) command, or -the [HTTP API](/api/snapshot). +the [HTTP API](/api-docs/snapshot). ## ACL permissions @@ -372,7 +372,7 @@ leader election or service registration: $ consul snapshot agent -interval=0 ``` -Please see the [HTTP API](/api/snapshot) documentation for +Please see the [HTTP API](/api-docs/snapshot) documentation for more details about snapshot internals. ## Licensing @@ -391,4 +391,4 @@ then the order of precedence is as follows: The ability to load licenses from the configuration or environment was added in v1.10.0, v1.9.7 and v1.8.13. See the [licensing documentation](/docs/enterprise/license/overview) for -more information about Consul Enterprise license management. \ No newline at end of file +more information about Consul Enterprise license management. diff --git a/website/content/commands/snapshot/index.mdx b/website/content/commands/snapshot/index.mdx index 6892b07e3..8c872642a 100644 --- a/website/content/commands/snapshot/index.mdx +++ b/website/content/commands/snapshot/index.mdx @@ -12,7 +12,7 @@ state of the Consul servers for disaster recovery. These are atomic, point-in-ti snapshots which include key/value entries, service catalog, prepared queries, sessions, and ACLs. This command is available in Consul 0.7.1 and later. -Snapshots are also accessible via the [HTTP API](/api/snapshot). +Snapshots are also accessible via the [HTTP API](/api-docs/snapshot). ## Usage diff --git a/website/content/commands/snapshot/inspect.mdx b/website/content/commands/snapshot/inspect.mdx index 9db0bac0c..3a5e0b32d 100644 --- a/website/content/commands/snapshot/inspect.mdx +++ b/website/content/commands/snapshot/inspect.mdx @@ -112,7 +112,7 @@ $ consul snapshot inspect -kvdetails -kvdepth 3 -kvfilter vault/core backup.snap Total 5.9KB ``` -Please see the [HTTP API](/api/snapshot) documentation for +Please see the [HTTP API](/api-docs/snapshot) documentation for more details about snapshot internals. To inspect an internal snapshot directly from a Consul server data directory: diff --git a/website/content/commands/snapshot/restore.mdx b/website/content/commands/snapshot/restore.mdx index bc7d68fa1..1657033d1 100644 --- a/website/content/commands/snapshot/restore.mdx +++ b/website/content/commands/snapshot/restore.mdx @@ -20,7 +20,7 @@ intended to be used when recovering from a disaster, restoring into a fresh cluster of Consul servers. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -46,5 +46,5 @@ $ consul snapshot restore backup.snap Restored snapshot ``` -Please see the [HTTP API](/api/snapshot) documentation for +Please see the [HTTP API](/api-docs/snapshot) documentation for more details about snapshot internals. 
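Several of the snapshot hunks above point readers at the `/v1/snapshot` HTTP API. As a rough sketch only (assuming a local agent on 127.0.0.1:8500 and a token with the required ACLs already in the environment), the CLI and API forms are interchangeable:

```shell-session
# CLI round trip
$ consul snapshot save backup.snap
$ consul snapshot restore backup.snap

# Equivalent HTTP API calls
$ curl -s -o backup.snap http://127.0.0.1:8500/v1/snapshot
$ curl -s -X PUT --data-binary @backup.snap http://127.0.0.1:8500/v1/snapshot
```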
diff --git a/website/content/commands/snapshot/save.mdx b/website/content/commands/snapshot/save.mdx index 38b9a2708..f2c2afede 100644 --- a/website/content/commands/snapshot/save.mdx +++ b/website/content/commands/snapshot/save.mdx @@ -28,7 +28,7 @@ the context of the server process. If you're using Systemd to manage your Consul processes, then adding `Environment=TMPDIR=/path/to/dir` to your Consul unit file will work. The table below shows this command's [required ACLs](/api#authentication). Configuration of -[blocking queries](/api/features/blocking) and [agent caching](/api/features/caching) +[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) are not supported from commands, but may be from the corresponding HTTP endpoint. | ACL Required | @@ -73,5 +73,5 @@ This is useful for situations where a cluster is in a degraded state and no leader is available. To target a specific server for a snapshot, you can run the `consul snapshot save` command on that specific server. -Please see the [HTTP API](/api/snapshot) documentation for +Please see the [HTTP API](/api-docs/snapshot) documentation for more details about snapshot internals. diff --git a/website/content/docs/agent/config-entries.mdx b/website/content/docs/agent/config-entries.mdx index 7a9dfa75d..d477db6b2 100644 --- a/website/content/docs/agent/config-entries.mdx +++ b/website/content/docs/agent/config-entries.mdx @@ -55,7 +55,7 @@ See [Kubernetes Custom Resource Definitions](/docs/k8s/crds). ## Managing Configuration Entries Outside Of Kubernetes Configuration entries outside of Kubernetes should be managed with the Consul -[CLI](/commands/config) or [API](/api/config). Additionally, as a +[CLI](/commands/config) or [API](/api-docs/config). Additionally, as a convenience for initial cluster bootstrapping, configuration entries can be specified in the Consul servers agent's [configuration files](/docs/agent/options#config_entries_bootstrap) diff --git a/website/content/docs/agent/options.mdx b/website/content/docs/agent/options.mdx index b35e964ff..2b1c1b867 100644 --- a/website/content/docs/agent/options.mdx +++ b/website/content/docs/agent/options.mdx @@ -709,7 +709,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `enable_token_replication` ((#acl_enable_token_replication)) - By default secondary Consul datacenters will perform replication of only ACL policies and roles. Setting this configuration will will enable ACL token replication and - allow for the creation of both [local tokens](/api/acl/tokens#local) and + allow for the creation of both [local tokens](/api-docs/acl/tokens#local) and [auth methods](/docs/security/acl/auth-methods) in connected secondary datacenters. ~> **Warning:** When enabling ACL token replication on the secondary datacenter, @@ -761,7 +761,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `agent_recovery` ((#acl_tokens_agent_recovery)) - This is available in Consul 1.11 and later. In prior versions, use [`acl.tokens.agent_master`](#acl_tokens_agent_master). - Used to access [agent endpoints](/api/agent) that require agent read or write privileges, + Used to access [agent endpoints](/api-docs/agent) that require agent read or write privileges, or node read privileges, even if Consul servers aren't present to validate any tokens. This should only be used by operators during outages, regular ACL tokens should normally be used by applications. 
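The `agent_recovery` hunk above describes a token intended for agent and node-read endpoints during an outage. A minimal sketch, assuming a hypothetical `$AGENT_RECOVERY_TOKEN` variable holds that token:

```shell-session
# Hit an agent-local endpoint directly; serf membership data does not
# require a round trip to the servers.
$ curl -s -H "X-Consul-Token: $AGENT_RECOVERY_TOKEN" \
    http://127.0.0.1:8500/v1/agent/members
```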
@@ -771,7 +771,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `replication` ((#acl_tokens_replication)) - The ACL token used to authorize secondary datacenters with the primary datacenter for replication - operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. + operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. ~> **Warning:** When enabling ACL token replication on the secondary datacenter, policies and roles already present in the secondary datacenter will be lost. For @@ -828,7 +828,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `acl_agent_master_token` ((#acl_agent_master_token_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.tokens.agent_master`](#acl_tokens_agent_master) - field instead.** Used to access [agent endpoints](/api/agent) that + field instead.** Used to access [agent endpoints](/api-docs/agent) that require agent read or write privileges, or node read privileges, even if Consul servers aren't present to validate any tokens. This should only be used by operators during outages, regular ACL tokens should normally be used by applications. This @@ -859,7 +859,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." using this ACL replication using this token to retrieve and replicate the ACLs to the non-authoritative local datacenter. In Consul 0.9.1 and later you can enable ACL replication using [`acl.enable_token_replication`](#acl_enable_token_replication) and then - set the token later using the [agent token API](/api/agent#update-acl-tokens) + set the token later using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. If the `acl_replication_token` is set in the config, it will automatically set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility. @@ -1164,7 +1164,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `claim_assertions` (Defaults to `[]`) List of assertions about the mapped claims required to authorize the incoming RPC request. The syntax uses [github.com/hashicorp/go-bexpr](https://github.com/hashicorp/go-bexpr) which is shared with the - [API filtering feature](/api/features/filtering). For example, the following + [API filtering feature](/api-docs/features/filtering). For example, the following configurations when combined will ensure that the JWT `sub` matches the node name requested by the client. 
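The replication-token hunks above lean on the agent token API. A sketch of the two common ways to install that token on a server, with the token value left as a placeholder:

```shell-session
# CLI
$ consul acl set-agent-token replication "<replication-token>"

# HTTP API equivalent
$ curl -s -X PUT -H "X-Consul-Token: $CONSUL_HTTP_TOKEN" \
    -d '{"Token": "<replication-token>"}' \
    http://127.0.0.1:8500/v1/agent/token/replication
```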
@@ -1319,12 +1319,12 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `ca_provider` ((#connect_ca_provider)) Controls which CA provider to use for Connect's CA. Currently only the `aws-pca`, `consul`, and `vault` providers are supported. This is only used when initially bootstrapping the cluster. For an existing cluster, - use the [Update CA Configuration Endpoint](/api/connect/ca#update-ca-configuration). + use the [Update CA Configuration Endpoint](/api-docs/connect/ca#update-ca-configuration). - `ca_config` ((#connect_ca_config)) An object which allows setting different config options based on the CA provider chosen. This is only used when initially bootstrapping the cluster. For an existing cluster, use the [Update CA Configuration - Endpoint](/api/connect/ca#update-ca-configuration). + Endpoint](/api-docs/connect/ca#update-ca-configuration). The following providers are supported: @@ -1598,12 +1598,12 @@ There are also a number of common configuration options supported by all provide in seconds, default value is 600, ie: 10 minutes. - `use_cache` ((#dns_use_cache)) - When set to true, DNS resolution will - use the agent cache described in [agent caching](/api/features/caching). + use the agent cache described in [agent caching](/api-docs/features/caching). This setting affects all service and prepared queries DNS requests. Implies [`allow_stale`](#allow_stale) - `cache_max_age` ((#dns_cache_max_age)) - When [use_cache](#dns_use_cache) is enabled, the agent will attempt to re-fetch the result from the servers if - the cached value is older than this duration. See: [agent caching](/api/features/caching). + the cached value is older than this duration. See: [agent caching](/api-docs/features/caching). **Note** that unlike the `max-age` HTTP header, a value of 0 for this field is equivalent to "no max age". To get a fresh value from the cache use a very small value @@ -1622,7 +1622,7 @@ There are also a number of common configuration options supported by all provide - `enable_acl_replication` **Deprecated in Consul 1.11. Use the [`acl.enable_token_replication`](#acl_enable_token_replication) field instead.** When set on a Consul server, enables ACL replication without having to set the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication - and then introduce the token using the [agent token API](/api/agent#update-acl-tokens) on each server. + and then introduce the token using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. See [`acl_replication_token`](#acl_replication_token) for more details. ~> **Warning:** When enabling ACL token replication on the secondary datacenter, @@ -1801,7 +1801,7 @@ There are also a number of common configuration options supported by all provide - `allow_write_http_from` This object is a list of networks in CIDR notation (eg "127.0.0.0/8") that are allowed to call the agent write endpoints. It defaults to an empty list, which means all networks are allowed. This is used to make the agent read-only, except for select ip ranges. - To block write calls from anywhere, use `[ "255.255.255.255/32" ]`. - To only allow write calls from localhost, use `[ "127.0.0.0/8" ]` - To only allow specific IPs, use `[ "10.0.0.1/32", "10.0.0.2/32" ]` - - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't be using [agent caching](/api/features/caching) to answer the request. Even when the url parameter is provided. 
+ - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't be using [agent caching](/api-docs/features/caching) to answer the request. Even when the url parameter is provided. - `max_header_bytes` This setting controls the maximum number of bytes the consul http server will read parsing the request header's keys and values, including the request line. It does not limit the size of the request body. If zero, or negative, http.DefaultMaxHeaderBytes is used, which equates to 1 Megabyte. @@ -1820,8 +1820,8 @@ There are also a number of common configuration options supported by all provide - `rpc_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single source IP address is allowed to open to a single server. It affects both clients connections and other server connections. In general Consul clients multiplex many RPC calls over a single TCP connection so this can typically be kept low. It needs to be more than one though since servers open at least one additional connection for raft RPC, possibly more for WAN federation when using network areas, and snapshot requests from clients run over a separate TCP conn. A reasonably low limit significantly reduces the ability of an unauthenticated attacker to consume unbounded resources by holding open many connections. You may need to increase this if WAN federated servers connect via proxies or NAT gateways or similar causing many legitimate connections from a single source IP. Default value is `100` which is designed to be extremely conservative to limit issues with certain deployment patterns. Most deployments can probably reduce this safely. 100 connections on modern server hardware should not cause a significant impact on resource usage from an unauthenticated attacker though. - `rpc_rate` - Configures the RPC rate limiter on Consul _clients_ by setting the maximum request rate that this agent is allowed to make for RPC requests to Consul servers, in requests per second. Defaults to infinite, which disables rate limiting. - `rpc_max_burst` - The size of the token bucket used to recharge the RPC rate limiter on Consul _clients_. Defaults to 1000 tokens, and each token is good for a single RPC call to a Consul server. See https://en.wikipedia.org/wiki/Token_bucket for more details about how token bucket rate limiters operate. - - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. - - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. 
+ - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api-docs/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. + - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api-docs/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. - `log_file` Equivalent to the [`-log-file` command-line flag](#_log_file). @@ -2238,7 +2238,7 @@ There are also a number of common configuration options supported by all provide 2 times the interval of scrape of Prometheus, but you might also put a very high retention time such as a few days (for instance 744h to enable retention to 31 days). Fetching the metrics using prometheus can then be performed using the - [`/v1/agent/metrics?format=prometheus`](/api/agent#view-metrics) endpoint. + [`/v1/agent/metrics?format=prometheus`](/api-docs/agent#view-metrics) endpoint. The format is compatible natively with prometheus. When running in this mode, it is recommended to also enable the option [`disable_hostname`](#telemetry-disable_hostname) to avoid having prefixed metrics with hostname. Consul does not use the default @@ -2288,11 +2288,11 @@ There are also a number of common configuration options supported by all provide The following endpoints translate addresses: - - [`/v1/catalog/nodes`](/api/catalog#list-nodes) - - [`/v1/catalog/node/`](/api/catalog#retrieve-map-of-services-for-a-node) - - [`/v1/catalog/service/`](/api/catalog#list-nodes-for-service) - - [`/v1/health/service/`](/api/health#list-nodes-for-service) - - [`/v1/query//execute`](/api/query#execute-prepared-query) + - [`/v1/catalog/nodes`](/api-docs/catalog#list-nodes) + - [`/v1/catalog/node/`](/api-docs/catalog#retrieve-map-of-services-for-a-node) + - [`/v1/catalog/service/`](/api-docs/catalog#list-nodes-for-service) + - [`/v1/health/service/`](/api-docs/health#list-nodes-for-service) + - [`/v1/query//execute`](/api-docs/query#execute-prepared-query) - `ui` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.enabled`](#ui_config_enabled) field instead.** Equivalent to the [`-ui`](#_ui) command-line flag. 
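The telemetry hunk above references the Prometheus-format metrics endpoint. A quick local check (assuming the default HTTP address and a non-zero `prometheus_retention_time`) looks like:

```shell-session
$ curl -s "http://127.0.0.1:8500/v1/agent/metrics?format=prometheus" | head
```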
diff --git a/website/content/docs/agent/sentinel.mdx b/website/content/docs/agent/sentinel.mdx index b86e12942..c25da5293 100644 --- a/website/content/docs/agent/sentinel.mdx +++ b/website/content/docs/agent/sentinel.mdx @@ -54,7 +54,7 @@ Consul passes some context as variables into Sentinel, which are available to us | ------------- | -------- | ---------------------- | | `key` | `string` | Key being written | | `value` | `string` | Value being written | -| `flags` | `uint64` | [Flags](/api/kv#flags) | +| `flags` | `uint64` | [Flags](/api-docs/kv#flags) | ## Sentinel Examples diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 1c02cebf0..e17b75396 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -36,7 +36,7 @@ it can be aggregated and flushed to Graphite or any other metrics store. For a configuration example for Telegraf, review the [Monitoring with Telegraf tutorial](https://learn.hashicorp.com/tutorials/consul/monitor-health-telegraf?utm_source=consul.io&utm_medium=docs). This -information can also be viewed with the [metrics endpoint](/api/agent#view-metrics) in JSON +information can also be viewed with the [metrics endpoint](/api-docs/agent#view-metrics) in JSON format or using [Prometheus](https://prometheus.io/) format. diff --git a/website/content/docs/architecture/consensus.mdx b/website/content/docs/architecture/consensus.mdx index 9a172a54b..361d2336e 100644 --- a/website/content/docs/architecture/consensus.mdx +++ b/website/content/docs/architecture/consensus.mdx @@ -73,7 +73,7 @@ _committed_, it can be _applied_ to a finite state machine. The finite state mac is application specific; in Consul's case, we use [MemDB](https://github.com/hashicorp/go-memdb) to maintain cluster state. Consul's writes block until it is both _committed_ and _applied_. This achieves read after write semantics -when used with the [consistent](/api/features/consistency#consistent) mode for queries. +when used with the [consistent](/api-docs/features/consistency#consistent) mode for queries. Obviously, it would be undesirable to allow a replicated log to grow in an unbounded fashion. Raft provides a mechanism by which the current state is snapshotted and the @@ -168,7 +168,7 @@ The three read modes are: a cluster that is unavailable will still be able to respond. For more documentation about using these various modes, see the -[HTTP API](/api/features/consistency). +[HTTP API](/api-docs/features/consistency). ## Deployment Table ((#deployment_table)) diff --git a/website/content/docs/architecture/coordinates.mdx b/website/content/docs/architecture/coordinates.mdx index 082273fef..1e642b31b 100644 --- a/website/content/docs/architecture/coordinates.mdx +++ b/website/content/docs/architecture/coordinates.mdx @@ -25,15 +25,15 @@ Network coordinates manifest in several ways inside Consul: - The [`consul rtt`](/commands/rtt) command can be used to query for the network round trip time between any two nodes. -- The [Catalog endpoints](/api/catalog) and - [Health endpoints](/api/health) can sort the results of queries based +- The [Catalog endpoints](/api-docs/catalog) and + [Health endpoints](/api-docs/health) can sort the results of queries based on the network round trip time from a given node using a "?near=" parameter. 
-- [Prepared queries](/api/query) can automatically fail over services +- [Prepared queries](/api-docs/query) can automatically fail over services to other Consul datacenters based on network round trip times. See the [Geo Failover](https://learn.hashicorp.com/tutorials/consul/automate-geo-failover) for some examples. -- The [Coordinate endpoint](/api/coordinate) exposes raw network +- The [Coordinate endpoint](/api-docs/coordinate) exposes raw network coordinates for use in other applications. Consul uses Serf to manage two different gossip pools, one for the LAN with members @@ -46,7 +46,7 @@ LAN coordinates, and WAN coordinates only make sense with other WAN coordinates. Computing the estimated network round trip time between any two nodes is simple once you have their coordinates. Here's a sample coordinate, as returned from the -[Coordinate endpoint](/api/coordinate). +[Coordinate endpoint](/api-docs/coordinate). diff --git a/website/content/docs/connect/ca/consul.mdx b/website/content/docs/connect/ca/consul.mdx index 2c7b08d74..1094ffd25 100644 --- a/website/content/docs/connect/ca/consul.mdx +++ b/website/content/docs/connect/ca/consul.mdx @@ -60,7 +60,7 @@ configuration file. [SPIFFE SVID signing certificate](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and the URI in the SAN must match the cluster identifier created at bootstrap with the ".consul" TLD. The cluster identifier can be found - using the [CA List Roots endpoint](/api/connect/ca#list-ca-root-certificates). + using the [CA List Roots endpoint](/api-docs/connect/ca#list-ca-root-certificates). @include 'http_api_connect_ca_common_options.mdx' @@ -72,7 +72,7 @@ the Consul CA provider to use a specific private key and root certificate. This is particularly useful if you have an external PKI system that doesn't currently integrate with Consul directly. -To view the current CA configuration, use the [Get CA Configuration endpoint](/api/connect/ca#get-ca-configuration): +To view the current CA configuration, use the [Get CA Configuration endpoint](/api-docs/connect/ca#get-ca-configuration): ```shell-session $ curl localhost:8500/v1/connect/ca/configuration @@ -93,7 +93,7 @@ been generated (as seen above in the roots list). There are two ways to have the Consul CA use a custom private key and root certificate: either through the `ca_config` section of the [Agent configuration](/docs/agent/options#connect_ca_config) (which can only be used during the cluster's -initial bootstrap) or through the [Update CA Configuration endpoint](/api/connect/ca#update-ca-configuration). +initial bootstrap) or through the [Update CA Configuration endpoint](/api-docs/connect/ca#update-ca-configuration). Currently Consul requires that root certificates are valid [SPIFFE SVID Signing certificates](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and that the URI encoded in the SAN is the cluster identifier created at bootstrap with the ".consul" TLD. In this diff --git a/website/content/docs/connect/ca/index.mdx b/website/content/docs/connect/ca/index.mdx index d4caf7ed5..dc035b4e4 100644 --- a/website/content/docs/connect/ca/index.mdx +++ b/website/content/docs/connect/ca/index.mdx @@ -49,7 +49,7 @@ state. For the initial bootstrap, the CA provider can be configured through the [Agent configuration](/docs/agent/options#connect_ca_config). After initialization, the CA can only be updated through the -[Update CA Configuration API endpoint](/api/connect/ca#update-ca-configuration). 
+[Update CA Configuration API endpoint](/api-docs/connect/ca#update-ca-configuration). If a CA is already initialized, any changes to the CA configuration in the agent configuration file (including removing the configuration completely) will have no effect. @@ -61,7 +61,7 @@ be generated automatically. ## Viewing Root Certificates Root certificates can be queried with the -[list CA Roots endpoint](/api/connect/ca#list-ca-root-certificates). +[list CA Roots endpoint](/api-docs/connect/ca#list-ca-root-certificates). With this endpoint, you can see the list of currently trusted root certificates. When a cluster first initializes, this will only list one trusted root. Multiple roots may appear as part of @@ -96,7 +96,7 @@ $ curl http://localhost:8500/v1/connect/ca/roots ## CA Configuration After initialization, the CA provider configuration can be viewed with the -[Get CA Configuration API endpoint](/api/connect/ca#get-ca-configuration). +[Get CA Configuration API endpoint](/api-docs/connect/ca#get-ca-configuration). Consul will filter sensitive values from this endpoint depending on the provider in use, so the configuration may not be complete. @@ -114,7 +114,7 @@ $ curl http://localhost:8500/v1/connect/ca/configuration ``` The CA provider can be reconfigured using the -[Update CA Configuration API endpoint](/api/connect/ca#update-ca-configuration). +[Update CA Configuration API endpoint](/api-docs/connect/ca#update-ca-configuration). Specific options for reconfiguration can be found in the specific CA provider documentation in the sidebar to the left. @@ -147,7 +147,7 @@ certificate or CA provider has been set up, the new root becomes the active one and is immediately used for signing any new incoming certificate requests. If we check the [list CA roots -endpoint](/api/connect/ca#list-ca-root-certificates) after updating the +endpoint](/api-docs/connect/ca#list-ca-root-certificates) after updating the configuration with a new root certificate, we can see both the old and new root certificates are present, and the currently active root has an intermediate certificate which has been generated and cross-signed automatically by the old @@ -244,6 +244,6 @@ to each cluster and can be looked up by examining the `TrustDomain` field in the [List CA Roots](/api-docs/connect/ca#list-ca-root-certificates) endpoint. The contents of the generated cert and private key files from the above step should then be used with -the [Update CA Configuration](/api/connect/ca#update-ca-configuration) endpoint. Once the CA configuration is +the [Update CA Configuration](/api-docs/connect/ca#update-ca-configuration) endpoint. Once the CA configuration is updated on the primary datacenter, all secondary datacenters will pick up the changes and regenerate their intermediate and leaf certificates, after which any new requests that require certificate verification will succeed. diff --git a/website/content/docs/connect/config-entries/ingress-gateway.mdx b/website/content/docs/connect/config-entries/ingress-gateway.mdx index 918177e4c..78773188d 100644 --- a/website/content/docs/connect/config-entries/ingress-gateway.mdx +++ b/website/content/docs/connect/config-entries/ingress-gateway.mdx @@ -937,7 +937,7 @@ You can specify the following parameters to configure ingress gateway configurat enterprise: true, description: 'Specifies the namespace in which the configuration entry will apply. The value must match the namespace in which the gateway is registered.' 
+ - ' If omitted, the namespace will be inherited from the `ns` request parameter (refer to the [`config` API endpoint documentation](/api/config#ns)).' + + ' If omitted, the namespace will be inherited from the `ns` request parameter (refer to the [`config` API endpoint documentation](/api-docs/config#ns)).' + ' or will default to the `default` namespace.', yaml: false, }, @@ -954,7 +954,7 @@ You can specify the following parameters to configure ingress gateway configurat enterprise: true, description: 'Specifies the admin partition in which the configuration will apply. The value must match the partition in which the gateway is registered.' + - ' If omitted, the partition will be inherited from the request (refer to the [`config` API endpoint documentation](/api/config)).' + + ' If omitted, the partition will be inherited from the request (refer to the [`config` API endpoint documentation](/api-docs/config)).' + ' See [Admin Partitions](/docs/enterprise/admin-partitions) for additional information.', yaml: false, }, diff --git a/website/content/docs/connect/config-entries/service-resolver.mdx b/website/content/docs/connect/config-entries/service-resolver.mdx index da4f24e0c..056c7ba02 100644 --- a/website/content/docs/connect/config-entries/service-resolver.mdx +++ b/website/content/docs/connect/config-entries/service-resolver.mdx @@ -405,10 +405,10 @@ spec: { name: 'Filter', type: 'string: ""', - description: `The [filter expression](/api/features/filtering) to be used for selecting + description: `The [filter expression](/api-docs/features/filtering) to be used for selecting instances of the requested service. If empty all healthy instances are returned. This expression can filter on the same selectors as the - [Health API endpoint](/api/health#filtering-2).`, + [Health API endpoint](/api-docs/health#filtering-2).`, }, { name: 'OnlyPassing', diff --git a/website/content/docs/connect/config-entries/terminating-gateway.mdx b/website/content/docs/connect/config-entries/terminating-gateway.mdx index c5efc511a..8da3b20e6 100644 --- a/website/content/docs/connect/config-entries/terminating-gateway.mdx +++ b/website/content/docs/connect/config-entries/terminating-gateway.mdx @@ -568,7 +568,7 @@ spec: enterprise: true, description: 'Specifies the namespace to which the configuration entry will apply. This must match the namespace in which the gateway is registered.' + - ' If omitted, the namespace will be inherited from [the request](/api/config#ns)' + + ' If omitted, the namespace will be inherited from [the request](/api-docs/config#ns)' + ' or will default to the `default` namespace.', yaml: false, }, @@ -578,7 +578,7 @@ spec: enterprise: true, description: 'Specifies the admin partition to which the configuration entry will apply. This must match the partition in which the gateway is registered.' + - ' If omitted, the partition will be inherited from [the request](/api/config)' + + ' If omitted, the partition will be inherited from [the request](/api-docs/config)' + ' or will default to the `default` partition.', yaml: false, }, diff --git a/website/content/docs/connect/connect-internals.mdx b/website/content/docs/connect/connect-internals.mdx index c55ab2942..7e49c9936 100644 --- a/website/content/docs/connect/connect-internals.mdx +++ b/website/content/docs/connect/connect-internals.mdx @@ -30,14 +30,14 @@ This enables Connect services to establish and accept connections with other SPIFFE-compliant systems. 
The client service verifies the destination service certificate -against the [public CA bundle](/api/connect/ca#list-ca-root-certificates). +against the [public CA bundle](/api-docs/connect/ca#list-ca-root-certificates). This is very similar to a typical HTTPS web browser connection. In addition to this, the client provides its own client certificate to show its identity to the destination service. If the connection handshake succeeds, the connection is encrypted and authorized. The destination service verifies the client certificate against the [public CA -bundle](/api/connect/ca#list-ca-root-certificates). After verifying the +bundle](/api-docs/connect/ca#list-ca-root-certificates). After verifying the certificate, the next step depends upon the configured application protocol of the destination service. TCP (L4) services must authorize incoming _connections_ against the configured set of Consul [intentions](/docs/connect/intentions), @@ -54,15 +54,15 @@ and can be extended to support any system by adding additional CA providers. All APIs required for Connect typically respond in microseconds and impose minimal overhead to existing services. To ensure this, Connect-related API calls are all made to the local Consul agent over a loopback interface, and all [agent -Connect endpoints](/api/agent/connect) implement local caching, background +Connect endpoints](/api-docs/agent/connect) implement local caching, background updating, and support blocking queries. Most API calls operate on purely local in-memory data. ## Agent Caching and Performance To enable fast responses on endpoints such as the [agent Connect -API](/api/agent/connect), the Consul agent locally caches most Connect-related -data and sets up background [blocking queries](/api/features/blocking) against +API](/api-docs/agent/connect), the Consul agent locally caches most Connect-related +data and sets up background [blocking queries](/api-docs/features/blocking) against the server to update the cache in the background. This allows most API calls such as retrieving certificates or authorizing connections to use in-memory data and respond very quickly. diff --git a/website/content/docs/connect/gateways/ingress-gateway.mdx b/website/content/docs/connect/gateways/ingress-gateway.mdx index 50c796532..9f181850e 100644 --- a/website/content/docs/connect/gateways/ingress-gateway.mdx +++ b/website/content/docs/connect/gateways/ingress-gateway.mdx @@ -13,7 +13,7 @@ description: >- Ingress gateways enable connectivity within your organizational network from services outside the Consul service mesh to services in the mesh. An ingress gateway is a type of proxy and must be registered as a service in Consul, with the -[kind](/api/agent/service#kind) set to "ingress-gateway". They are an +[kind](/api-docs/agent/service#kind) set to "ingress-gateway". They are an entrypoint for outside traffic and allow you to define what services should be exposed and on what port. You configure an ingress gateway by defining a set of [listeners](/docs/connect/config-entries/ingress-gateway#listeners) that each map @@ -53,7 +53,7 @@ review the [ingress gateway tutorial](https://learn.hashicorp.com/tutorials/cons ## Ingress Gateway Configuration Ingress gateways are configured in service definitions and registered with Consul like other services, with two exceptions. -The first is that the [kind](/api/agent/service#kind) must be "ingress-gateway". Second, +The first is that the [kind](/api-docs/agent/service#kind) must be "ingress-gateway". 
Second, the ingress gateway service definition may contain a `Proxy.Config` entry just like a Connect proxy service, to define opaque configuration parameters useful for the actual proxy software. For Envoy there are some supported [gateway options](/docs/connect/proxies/envoy#gateway-options) as well as @@ -279,4 +279,4 @@ Listeners = [ Separate certificates may be loaded per listener or per-service with hostname (SNI) switching. See the [Config Entry -reference](/docs/connect/config-entries/ingress-gateway) for more details. \ No newline at end of file +reference](/docs/connect/config-entries/ingress-gateway) for more details. diff --git a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx index 69399111b..aac107b2e 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx @@ -179,5 +179,5 @@ expected result: their _local_ ip addresses and are listed as `alive`. - Ensure any API request that activates datacenter request forwarding. such as - [`/v1/catalog/services?dc=`](/api/catalog#dc-1) + [`/v1/catalog/services?dc=`](/api-docs/catalog#dc-1) succeeds. diff --git a/website/content/docs/connect/gateways/terminating-gateway.mdx b/website/content/docs/connect/gateways/terminating-gateway.mdx index 31aeddc41..81e18d19f 100644 --- a/website/content/docs/connect/gateways/terminating-gateway.mdx +++ b/website/content/docs/connect/gateways/terminating-gateway.mdx @@ -73,7 +73,7 @@ Connect proxies that send upstream traffic through a gateway aren't affected when you deploy terminating gateways. If you are using non-Envoy proxies as Connect proxies they will continue to work for traffic directed at services linked to a terminating gateway as long as they discover upstreams with the -[/health/connect](/api/health#list-nodes-for-connect-capable-service) endpoint. +[/health/connect](/api-docs/health#list-nodes-for-connect-capable-service) endpoint. ## Running and Using a Terminating Gateway @@ -83,7 +83,7 @@ services outside the mesh, review the [terminating gateway tutorial](https://lea ## Terminating Gateway Configuration Terminating gateways are configured in service definitions and registered with Consul like other services, with two exceptions. -The first is that the [kind](/api/agent/service#kind) must be "terminating-gateway". Second, +The first is that the [kind](/api-docs/agent/service#kind) must be "terminating-gateway". Second, the terminating gateway service definition may contain a `Proxy.Config` entry just like a Connect proxy service, to define opaque configuration parameters useful for the actual proxy software. For Envoy there are some supported [gateway options](/docs/connect/proxies/envoy#gateway-options) as well as @@ -98,7 +98,7 @@ must also provide `agent:read` for its node's name in order to discover the agen Linking services to a terminating gateway is done with a `terminating-gateway` [configuration entry](/docs/connect/config-entries/terminating-gateway). This config entry can be applied via the -[CLI](/commands/config/write) or [API](/api/config#apply-configuration). +[CLI](/commands/config/write) or [API](/api-docs/config#apply-configuration). Gateways with the same name in Consul's service catalog are configured with a single configuration entry. 
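The terminating-gateway hunk above describes linking services through a `terminating-gateway` configuration entry applied with the CLI or the config API. A minimal sketch, with the gateway and service names invented for illustration:

```shell-session
$ cat <<'EOF' > us-west-gateway.hcl
Kind = "terminating-gateway"
Name = "us-west-gateway"
Services = [
  {
    Name = "legacy-billing"
  }
]
EOF
$ consul config write us-west-gateway.hcl
Config entry written: terminating-gateway/us-west-gateway
```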
This means that additional gateway instances registered with the same name will determine their routing based on the existing configuration entry. @@ -113,12 +113,12 @@ sets of services within their datacenter then the terminating gateways **must** The services that the terminating gateway will proxy for must be registered with Consul, even the services outside the mesh. They must also be registered in the same Consul datacenter as the terminating gateway. Otherwise the terminating gateway will not be able to discover the services' addresses. These services can be registered with a local Consul agent. -If there is no agent present, the services can be registered [directly in the catalog](/api/catalog#register-entity) +If there is no agent present, the services can be registered [directly in the catalog](/api-docs/catalog#register-entity) by sending the registration request to a client or server agent on a different host. All services registered in the Consul catalog must be associated with a node, even when their node is not managed by a Consul client agent. All agent-less services with the same address can be registered under the same node name and address. -However, ensure that the [node name](/api/catalog#node) for external services registered directly in the catalog +However, ensure that the [node name](/api-docs/catalog#node) for external services registered directly in the catalog does not match the node name of any Consul client agent node. If the node name overlaps with the node name of a Consul client agent, Consul's [anti-entropy sync](/docs/architecture/anti-entropy) will delete the services registered via the `/catalog/register` HTTP API endpoint. diff --git a/website/content/docs/connect/intentions-legacy.mdx b/website/content/docs/connect/intentions-legacy.mdx index 240f0aea8..1939db196 100644 --- a/website/content/docs/connect/intentions-legacy.mdx +++ b/website/content/docs/connect/intentions-legacy.mdx @@ -107,7 +107,7 @@ top to bottom, with larger numbers being evaluated first. | Exact | `*` | `*` | `*` | 2 | | `*` | `*` | `*` | `*` | 1 | -The precedence value can be read from the [API](/api/connect/intentions) +The precedence value can be read from the [API](/api-docs/connect/intentions) after an intention is created. Precedence cannot be manually overridden today. This is a feature that will be added in a later version of Consul. diff --git a/website/content/docs/connect/intentions.mdx b/website/content/docs/connect/intentions.mdx index 9f3ea2c42..e76db2511 100644 --- a/website/content/docs/connect/intentions.mdx +++ b/website/content/docs/connect/intentions.mdx @@ -43,7 +43,7 @@ At any given point in time, between any pair of services **only one intention controls authorization**. This may be either an L4 intention or an L7 intention, but at any given point in time only one of those applies. -The [intention match API](/api/connect/intentions#list-matching-intentions) +The [intention match API](/api-docs/connect/intentions#list-matching-intentions) should be periodically called to retrieve all relevant intentions for the target destination. 
After verifying the TLS client certificate, the cached intentions should be consulted for each incoming connection/request to diff --git a/website/content/docs/connect/l7-traffic/discovery-chain.mdx b/website/content/docs/connect/l7-traffic/discovery-chain.mdx index 41a32a9b5..dc9594e61 100644 --- a/website/content/docs/connect/l7-traffic/discovery-chain.mdx +++ b/website/content/docs/connect/l7-traffic/discovery-chain.mdx @@ -11,7 +11,7 @@ description: >- -> **1.6.0+:** This feature is available in Consul versions 1.6.0 and newer. -~> This topic is part of a [low-level API](/api/discovery-chain) +~> This topic is part of a [low-level API](/api-docs/discovery-chain) primarily targeted at developers building external [Connect proxy integrations](/docs/connect/proxies/integrate). @@ -74,7 +74,7 @@ discovery chain, we first compile them into a form more directly usable by the layers responsible for configuring Connect sidecar proxies. You can interact with the compiler directly using the [discovery-chain -API](/api/discovery-chain). +API](/api-docs/discovery-chain). ### Compilation Parameters @@ -102,7 +102,7 @@ The response is a single wrapped `CompiledDiscoveryChain` field: The chain encodes a digraph of [nodes](#discoverygraphnode) and [targets](#discoverytarget). Nodes are the compiled representation of various discovery chain stages and targets are instructions on how to use the [health -API](/api/health#list-nodes-for-connect-capable-service) to retrieve +API](/api-docs/health#list-nodes-for-connect-capable-service) to retrieve relevant service instance lists. You should traverse the nodes starting with [`StartNode`](#startnode). The @@ -219,7 +219,7 @@ A single node in the compiled discovery chain. definition for this target. - `Filter` `(string: "")` - The - [filter expression](/api/features/filtering) to be used for selecting + [filter expression](/api-docs/features/filtering) to be used for selecting instances of the requested service. If empty all healthy instances are returned. diff --git a/website/content/docs/connect/native/go.mdx b/website/content/docs/connect/native/go.mdx index 879bb9ea6..43016a851 100644 --- a/website/content/docs/connect/native/go.mdx +++ b/website/content/docs/connect/native/go.mdx @@ -179,7 +179,7 @@ the following specific limitations: - `.service[.].consul` to discover a healthy service instance for a given service. - `.query[.].consul` to discover an instance via - [Prepared Query](/api/query). + [Prepared Query](/api-docs/query). - The top-level domain _must_ be `.consul` even if your cluster has a custom `domain` configured for its DNS interface. This might be relaxed in the future. diff --git a/website/content/docs/connect/native/index.mdx b/website/content/docs/connect/native/index.mdx index e90d833b9..57b7e461d 100644 --- a/website/content/docs/connect/native/index.mdx +++ b/website/content/docs/connect/native/index.mdx @@ -37,9 +37,9 @@ to integrate with Connect. ## Overview The primary work involved in natively integrating with Connect is -[acquiring the proper TLS certificate](/api/agent/connect#service-leaf-certificate), -[verifying TLS certificates](/api/agent/connect#certificate-authority-ca-roots), -and [authorizing inbound connections or requests](/api/connect/intentions#list-matching-intentions). 
+[acquiring the proper TLS certificate](/api-docs/agent/connect#service-leaf-certificate), +[verifying TLS certificates](/api-docs/agent/connect#certificate-authority-ca-roots), +and [authorizing inbound connections or requests](/api-docs/connect/intentions#list-matching-intentions). All of this is done using the Consul HTTP APIs linked above. @@ -50,7 +50,7 @@ an API call to verify the incoming client certificate. ![Native Integration Overview](/img/connect-native-overview.png) -> **Note:** This diagram depicts the simpler networking layer 4 (e.g. TCP) [integration -mechanism](/api/agent/connect#authorize). +mechanism](/api-docs/agent/connect#authorize). Details on the steps are below: @@ -62,21 +62,21 @@ Details on the steps are below: - **Mutual TLS** - As a client, connect to the discovered service address over normal TLS. As part of the TLS connection, provide the - [service certificate](/api/agent/connect#service-leaf-certificate) + [service certificate](/api-docs/agent/connect#service-leaf-certificate) as the client certificate. Verify the remote certificate against the - [public CA roots](/api/agent/connect#certificate-authority-ca-roots). + [public CA roots](/api-docs/agent/connect#certificate-authority-ca-roots). As a client, if the connection is established then you've established a Connect-based connection and there are no further steps! - **Authorization** - As a server accepting connections, verify the client certificate against the [public CA - roots](/api/agent/connect#certificate-authority-ca-roots). After verifying + roots](/api-docs/agent/connect#certificate-authority-ca-roots). After verifying the certificate, parse some basic fields from it and use those to determine if the connection should be allowed. How this is done is dependent on the level of integration desired: - **Simple integration (TCP-only)** - Call the [authorizing - API](/api/agent/connect#authorize) against the local agent. If this returns + API](/api-docs/agent/connect#authorize) against the local agent. If this returns successfully, complete the TLS handshake and establish the connection. If authorization fails, close the connection. @@ -89,7 +89,7 @@ Details on the steps are below: - **Complete integration** - Like how the calls to acquire the leaf certificate and CA roots are expected to be done out of band and reused, so should the [intention match - API](/api/connect/intentions#list-matching-intentions). With all of the + API](/api-docs/connect/intentions#list-matching-intentions). With all of the relevant intentions cached for the destination, all enforcement operations can be done entirely by the service without calling any Consul APIs in the connection or request path. If the service is networking layer 7 (e.g. @@ -104,10 +104,10 @@ so that new connections are not disrupted. This can be done through Consul blocking queries (HTTP long polling) or through periodic polling. The API calls for -[acquiring a leaf TLS certificate](/api/agent/connect#service-leaf-certificate) -and [reading CA roots](/api/agent/connect#certificate-authority-ca-roots) +[acquiring a leaf TLS certificate](/api-docs/agent/connect#service-leaf-certificate) +and [reading CA roots](/api-docs/agent/connect#certificate-authority-ca-roots) both support -[blocking queries](/api/features/blocking). By using blocking +[blocking queries](/api-docs/features/blocking). By using blocking queries, an application can efficiently wait for an updated value. 
For example, the leaf certificate API will block until the certificate is near expiration or the signing certificates have changed and will issue and return a new @@ -116,7 +116,7 @@ certificate. In some languages, using blocking queries may not be simple. In that case, we still recommend using the blocking query parameters but with a very short `timeout` value set. Doing this is documented with -[blocking queries](/api/features/blocking). The low timeout will +[blocking queries](/api-docs/features/blocking). The low timeout will ensure the API responds quickly. We recommend that applications poll the certificate endpoints frequently, such as multiple times per minute. diff --git a/website/content/docs/connect/proxies/integrate.mdx b/website/content/docs/connect/proxies/integrate.mdx index bdf59a1af..6b1f64612 100644 --- a/website/content/docs/connect/proxies/integrate.mdx +++ b/website/content/docs/connect/proxies/integrate.mdx @@ -57,15 +57,15 @@ transport layer. -> **Note:** Some features, such as (local) rate limiting or max connections, are expected to be proxy-level configurations enforced separately when authorization calls are made. Proxies can enforce the configurations based on information about request rates and other states that should already be available. -The proxy can authorize the connection by either calling the [`/v1/agent/connect/authorize`](/api/agent/connect) API endpoint or by querying the [intention match API](/api/connect/intentions#list-matching-intentions) endpoint. +The proxy can authorize the connection by either calling the [`/v1/agent/connect/authorize`](/api-docs/agent/connect) API endpoint or by querying the [intention match API](/api-docs/connect/intentions#list-matching-intentions) endpoint. -The [`/v1/agent/connect/authorize`](/api/agent/connect) endpoint should be called in the connection path for each received connection. +The [`/v1/agent/connect/authorize`](/api-docs/agent/connect) endpoint should be called in the connection path for each received connection. If the local Consul agent is down or unresponsive, the success rate of new connections will be compromised. The agent uses locally-cached data to authorize the connection and typically responds in microseconds. As a result, the TLS handshake typically spans microseconds. ~> **Note:** This endpoint is only suitable for L4 (e.g., TCP) integration. The endpoint always treats intentions with `Permissions` defined (i.e., L7 criteria) as `deny` intentions during evaluation. -The proxy can query the [intention match API](/api/connect/intentions#list-matching-intentions) endpoint on startup to retrieve a list of intentions that match the proxy destination. The matches should be stored in the native filter configuration of the proxy, such as RBAC for Envoy. +The proxy can query the [intention match API](/api-docs/connect/intentions#list-matching-intentions) endpoint on startup to retrieve a list of intentions that match the proxy destination. The matches should be stored in the native filter configuration of the proxy, such as RBAC for Envoy. For performance and reliability reasons, querying the intention match API endpoint is the recommended method for implementing intention enforcement. The cached intentions should be consulted for each incoming connection (L4) or request (L7) to determine if the connection or request should be accepted or rejected. @@ -90,7 +90,7 @@ background. The leaf, roots, and intentions should be updated in the background by the proxy. 
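For illustration, a minimal sketch of such a background blocking-query loop using the Go API client (`github.com/hashicorp/consul/api`); the service name `web` and the five-minute wait are placeholder assumptions rather than values required by the API.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to the local agent; CONSUL_HTTP_ADDR and CONSUL_HTTP_TOKEN are honored.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	var lastIndex uint64
	for {
		// Blocking query: returns when the leaf certificate for the assumed
		// service "web" changes, or when the wait time elapses.
		leaf, meta, err := client.Agent().ConnectCALeaf("web", &api.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  5 * time.Minute,
		})
		if err != nil {
			time.Sleep(5 * time.Second) // back off briefly instead of spinning
			continue
		}
		if meta.LastIndex != lastIndex {
			lastIndex = meta.LastIndex
			// A new or re-issued certificate is available; hand it to the TLS stack here.
			log.Printf("leaf cert for %s valid until %s", leaf.Service, leaf.ValidBefore)
		}
	}
}
```

The same loop shape applies to the CA roots and intention match endpoints, with only the client call changing.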
The leaf cert, root cert, and intentions endpoints support [blocking -queries](/api/features/blocking), which should be used to get near-immediate +queries](/api-docs/features/blocking), which should be used to get near-immediate updates for root key rotations, new leaf certs before expiry, and intention changes. @@ -111,7 +111,7 @@ endpoint for a service and provide a client certificate from the ## Configuration Discovery -The [`/v1/agent/service/:service_id`](/api/agent/service#get-service-configuration) +The [`/v1/agent/service/:service_id`](/api-docs/agent/service#get-service-configuration) API endpoint enables any proxy to discover proxy configurations registered with a local service. This endpoint supports hash-based blocking, which enables long-polling for changes to the registration/configuration. Any changes to the registration/config will result in the new config being returned immediately. @@ -119,11 +119,11 @@ result in the new config being returned immediately. Refer to the [built-in proxy](/docs/connect/proxies/built-in) for an example implementation. Using the Go SDK, the proxy calls the HTTP "pull" API via the `watch` package: [`consul/connect/proxy/config.go`]. The [discovery chain] for each upstream service should be fetched from the -[`/v1/discovery-chain/:service_id`](/api/discovery-chain#read-compiled-discovery-chain) +[`/v1/discovery-chain/:service_id`](/api-docs/discovery-chain#read-compiled-discovery-chain) API endpoint. This will return a compiled graph of configurations a sidecar needs for a particular upstream service. If you are only implementing L4 support in your proxy, set the -[`OverrideProtocol`](/api/discovery-chain#overrideprotocol) value to `tcp` when +[`OverrideProtocol`](/api-docs/discovery-chain#overrideprotocol) value to `tcp` when fetching the discovery chain so that L7 features, such as HTTP routing rules, are not returned. @@ -140,7 +140,7 @@ documentation for details about supported configuration parameters. ### Service Discovery -Proxies can use Consul's [service discovery API](`/v1/health/connect/:service_id`) to return all available, Connect-capable endpoints for a given service. This endpoint supports a `cached` query parameter, which uses [agent caching](/api/features/caching) to improve +Proxies can use Consul's [service discovery API](`/v1/health/connect/:service_id`) to return all available, Connect-capable endpoints for a given service. This endpoint supports a `cached` query parameter, which uses [agent caching](/api-docs/features/caching) to improve performance. The API package provides a [`UseCache`] query option to leverage caching. In addition to performance improvements, using the cache makes the mesh more resilient to Consul server outages. This is because the mesh "fails static" with the last known set of service instances still used, rather than errors on new connections. @@ -152,7 +152,7 @@ proxies are likely to find it easier to integrate by pulling the set of endpoints and maintaining it in local memory using blocking queries. Upstreams may be defined with the Prepared Query target type. These upstreams -should use Consul's [prepared query](/api/query) API to determine a list of upstream endpoints for the service. Note that the `PreparedQuery` API does not support blocking, so proxies choosing to populate endpoints in memory will need to poll the endpoint at a suitable and, ideally, configurable frequency. +should use Consul's [prepared query](/api-docs/query) API to determine a list of upstream endpoints for the service. 
Note that the `PreparedQuery` API does not support blocking, so proxies choosing to populate endpoints in memory will need to poll the endpoint at a suitable and, ideally, configurable frequency. -> **Long-term support for [`service-resolver`](/docs/connect/config-entries/service-resolver) configuration entries**. The `service-resolver` configuration will completely replace prepared queries in future versions of Consul. In some instances, however, prepared queries are still used. diff --git a/website/content/docs/connect/proxies/managed-deprecated.mdx b/website/content/docs/connect/proxies/managed-deprecated.mdx index a1d918772..6b97e006d 100644 --- a/website/content/docs/connect/proxies/managed-deprecated.mdx +++ b/website/content/docs/connect/proxies/managed-deprecated.mdx @@ -177,7 +177,7 @@ reference](/docs/connect/configuration). ### Prepared Query Upstreams The upstream destination may also be a -[prepared query](/api/query). +[prepared query](/api-docs/query). This allows complex service discovery behavior such as connecting to the nearest neighbor or filtering by tags. diff --git a/website/content/docs/discovery/checks.mdx b/website/content/docs/discovery/checks.mdx index bd1617795..61fec921a 100644 --- a/website/content/docs/discovery/checks.mdx +++ b/website/content/docs/discovery/checks.mdx @@ -88,9 +88,9 @@ There are several different kinds of checks: dead man's switch, relies on the application to directly report its health. For example, a healthy app can periodically `PUT` a status update to the HTTP endpoint; if the app fails, the TTL will expire and the health check enters a critical state. - The endpoints used to update health information for a given check are: [pass](/api/agent/check#ttl-check-pass), - [warn](/api/agent/check#ttl-check-warn), [fail](/api/agent/check#ttl-check-fail), - and [update](/api/agent/check#ttl-check-update). TTL checks also persist their + The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass), + [warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail), + and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their last known status to disk. This allows the Consul agent to restore the last known status of the check across restarts. Persisted check status is valid through the end of the TTL from the time of the last check. diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index 47807e62f..5023b7b12 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -335,11 +335,11 @@ The `datacenter` is optional, and if not provided, the datacenter of this Consul agent is assumed. The `query or name` is the ID or given name of an existing -[Prepared Query](/api/query). These behave like standard service +[Prepared Query](/api-docs/query). These behave like standard service queries but provide a much richer set of features, such as filtering by multiple tags and automatically failing over to look for services in remote datacenters if no healthy nodes are available in the local datacenter. Consul 0.6.4 and later also -added support for [prepared query templates](/api/query#prepared-query-templates) +added support for [prepared query templates](/api-docs/query#prepared-query-templates) which can match names using a prefix match, allowing one template to apply to potentially many services. @@ -369,7 +369,7 @@ applications. 
This endpoint currently only finds services within the same datacenter and doesn't support tags. This DNS interface will be expanded over time. If you need more complex behavior, please use the -[catalog API](/api/catalog). +[catalog API](/api-docs/catalog). ### Service Virtual IP Lookups @@ -400,7 +400,7 @@ endpoints for the given `service`. This endpoint currently only finds services within the same datacenter and doesn't support tags. This DNS interface will be expanded over time. If you need more complex behavior, please use the -[catalog API](/api/catalog). +[catalog API](/api-docs/catalog). ### UDP Based DNS Queries @@ -508,7 +508,7 @@ DNS lookups and required policies when ACLs are enabled: | ------------------------------------------------------------------------------ | -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `*.node.consul` | [Node](#node-lookups) | Allow resolving DNS requests for the target node (i.e., `.node.consul`) | [`node:read`](/docs/security/acl/acl-rules#node-rules) | | `*.service.consul`, `*.connect.consul`, `*.ingress.consul`, `*.virtual.consul` | [Service: standard](#service-lookups) | Allow resolving DNS requests for target service (e.g., `.service.consul`) instances running on ACL-authorized nodes | [`service:read`](/docs/security/acl/acl-rules#service-rules), [`node:read`](/docs/security/acl/acl-rules#node-rules) | -| `*.query.consul` | [Service: prepared query](#prepared-query-lookups) | Allow resolving DNS requests for [service instances specified](/api/query#service-1) by the target prepared query (i.e., `.query.consul`) running on ACL-authorized nodes | [`query:read`](/docs/security/acl/acl-rules#prepared-query-rules), [`service:read`](/docs/security/acl/acl-rules#service-rules), [`node:read`](/docs/security/acl/acl-rules#node-rules) | +| `*.query.consul` | [Service: prepared query](#prepared-query-lookups) | Allow resolving DNS requests for [service instances specified](/api-docs/query#service-1) by the target prepared query (i.e., `.query.consul`) running on ACL-authorized nodes | [`query:read`](/docs/security/acl/acl-rules#prepared-query-rules), [`service:read`](/docs/security/acl/acl-rules#service-rules), [`node:read`](/docs/security/acl/acl-rules#node-rules) | For guidance on how to configure an appropriate token for DNS, refer to the securing Consul with ACLs guides for: diff --git a/website/content/docs/dynamic-app-config/kv.mdx b/website/content/docs/dynamic-app-config/kv.mdx index 2e9a7b94c..501abf027 100644 --- a/website/content/docs/dynamic-app-config/kv.mdx +++ b/website/content/docs/dynamic-app-config/kv.mdx @@ -28,7 +28,7 @@ Learn. ## Accessing the KV store The KV store can be accessed by the [consul kv CLI -subcommands](/commands/kv), [HTTP API](/api/kv), and Consul UI. +subcommands](/commands/kv), [HTTP API](/api-docs/kv), and Consul UI. To restrict access, enable and configure [ACLs](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production). 
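For illustration, a minimal sketch of a KV write and read through the HTTP API using the Go client (`github.com/hashicorp/consul/api`); the key and value are placeholders chosen for the example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Write a key (placeholder key and value for the example).
	pair := &api.KVPair{Key: "app/config/max-conns", Value: []byte("50")}
	if _, err := kv.Put(pair, nil); err != nil {
		log.Fatal(err)
	}

	// Read it back; Get returns nil when the key does not exist.
	got, _, err := kv.Get("app/config/max-conns", nil)
	if err != nil {
		log.Fatal(err)
	}
	if got != nil {
		fmt.Printf("%s = %s\n", got.Key, got.Value)
	}
}
```

The `consul kv put` and `consul kv get` subcommands perform the same operations from the command line.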
Once the ACL system has been bootstrapped, users and services, will need a diff --git a/website/content/docs/enterprise/namespaces.mdx b/website/content/docs/enterprise/namespaces.mdx index ed90fd303..34bca5862 100644 --- a/website/content/docs/enterprise/namespaces.mdx +++ b/website/content/docs/enterprise/namespaces.mdx @@ -24,7 +24,7 @@ For more information on how to use namespaces with Consul Enterprise please revi ## Namespace Definition -Namespaces are managed exclusively through the [HTTP API](/api/namespaces) and the [Consul CLI](/commands/namespace). +Namespaces are managed exclusively through the [HTTP API](/api-docs/namespaces) and the [Consul CLI](/commands/namespace). The HTTP API accepts only JSON formatted definitions while the CLI will parse either JSON or HCL. An example namespace definition looks like the following: diff --git a/website/content/docs/install/performance.mdx b/website/content/docs/install/performance.mdx index a2d30e369..ca9c0e538 100644 --- a/website/content/docs/install/performance.mdx +++ b/website/content/docs/install/performance.mdx @@ -118,7 +118,7 @@ Here are some general recommendations: respect them. - In other applications that perform high volumes of reads against Consul, consider using the - [stale consistency mode](/api/features/consistency#stale) available to allow reads to scale + [stale consistency mode](/api-docs/features/consistency#stale) available to allow reads to scale across all the servers and not just be forwarded to the leader. - In Consul 0.9.3 and later, a new [`limits`](/docs/agent/options#limits) configuration is @@ -158,7 +158,7 @@ RAM NEEDED = number of keys * average key size * 2-3x Since writes must be synced to disk (persistent storage) on a quorum of servers before they are committed, deploying a disk with high write throughput (or an SSD) will enhance performance on the write side. ([Documentation](/docs/agent/options#_data_dir)) -For a **read-heavy** workload, configure all Consul server agents with the `allow_stale` DNS option, or query the API with the `stale` [consistency mode](/api/features/consistency). By default, all queries made to the server are RPC forwarded to and serviced by the leader. By enabling stale reads, any server will respond to any query, thereby reducing overhead on the leader. Typically, the stale response is `100ms` or less from consistent mode but it drastically improves performance and reduces latency under high load. +For a **read-heavy** workload, configure all Consul server agents with the `allow_stale` DNS option, or query the API with the `stale` [consistency mode](/api-docs/features/consistency). By default, all queries made to the server are RPC forwarded to and serviced by the leader. By enabling stale reads, any server will respond to any query, thereby reducing overhead on the leader. Typically, the stale response is `100ms` or less from consistent mode but it drastically improves performance and reduces latency under high load. If the leader server is out of memory or the disk is full, the server eventually stops responding, loses its election and cannot move past its last commit time. However, by configuring `max_stale` and setting it to a large value, Consul will continue to respond to queries during such outage scenarios. ([max_stale documentation](/docs/agent/options#max_stale)). 
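For illustration, a sketch of a stale read using the Go API client; the service name `web` is a placeholder, and `AllowStale` corresponds to the `stale` consistency mode described above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// AllowStale lets any server answer the query instead of forwarding it to
	// the leader, trading a small amount of staleness for read scalability.
	entries, meta, err := client.Health().Service("web", "", true, &api.QueryOptions{
		AllowStale: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d healthy instances (answering server last contacted the leader %s ago)\n",
		len(entries), meta.LastContact)
}
```

The `LastContact` metadata reports how long ago the answering server heard from the leader, which callers can use to decide whether the result is fresh enough.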
diff --git a/website/content/docs/k8s/connect/connect-ca-provider.mdx b/website/content/docs/k8s/connect/connect-ca-provider.mdx index d5a824270..4fb45c82c 100644 --- a/website/content/docs/k8s/connect/connect-ca-provider.mdx +++ b/website/content/docs/k8s/connect/connect-ca-provider.mdx @@ -8,7 +8,7 @@ description: Configuring a Connect CA Provider ~> **NOTE:** The instructions below should only be used for initially bootstrapping a cluster with **Consul K8s 0.38.0+.** To update the Connect CA provider on an existing cluster or to update any properties, such as tokens, of the CA provider, -please use the [Update CA Configuration Endpoint](/api/connect/ca#update-ca-configuration). +please use the [Update CA Configuration Endpoint](/api-docs/connect/ca#update-ca-configuration). Consul has support for different certificate authority (CA) providers to be used with the Consul Service Mesh. Please see [Connect Certificate Management](/docs/connect/ca) for the information on the providers @@ -193,7 +193,7 @@ The [`ca_config`] and [`ca_provider`] options defined in the Consul agent configuration are only used when initially bootstrapping the cluster. Once the cluster is running, subsequent changes to the [`ca_provider`] config are **ignored**–even if `consul reload` is run or the servers are restarted. -To update any settings under these keys, you must use Consul's [Update CA Configuration](/api/connect/ca#update-ca-configuration) API or the [`consul connect ca set-config`](/commands/connect/ca#set-config) command. +To update any settings under these keys, you must use Consul's [Update CA Configuration](/api-docs/connect/ca#update-ca-configuration) API or the [`consul connect ca set-config`](/commands/connect/ca#set-config) command. #### Renewing Vault Token diff --git a/website/content/docs/k8s/installation/install.mdx b/website/content/docs/k8s/installation/install.mdx index e128ccef0..f437467cf 100644 --- a/website/content/docs/k8s/installation/install.mdx +++ b/website/content/docs/k8s/installation/install.mdx @@ -303,7 +303,7 @@ The Consul HTTP API should be accessed by communicating to the local agent running on the same node. While technically any listening agent (client or server) can respond to the HTTP API, communicating with the local agent has important caching behavior, and allows you to use the simpler -[`/agent` endpoints for services and checks](/api/agent). +[`/agent` endpoints for services and checks](/api-docs/agent). For Consul installed via the Helm chart, a client agent is installed on each Kubernetes node. This is explained in the [architecture](/docs/k8s/installation/install#client-agents) diff --git a/website/content/docs/nia/installation/requirements.mdx b/website/content/docs/nia/installation/requirements.mdx index f8ae5831e..ee30a49f4 100644 --- a/website/content/docs/nia/installation/requirements.mdx +++ b/website/content/docs/nia/installation/requirements.mdx @@ -33,7 +33,7 @@ For information on compatible Consul versions, refer to the [Consul compatibilit The Consul agent must be running in order to dynamically update network devices. To run the local Consul agent, you can run Consul in development mode which can be started with `consul agent -dev` for simplicity. For more details on running Consul agent, refer to the [Getting Started: Run the Consul Agent Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-agent?in=consul/getting-started). -When running a Consul agent with CTS in production, we suggest to keep a few considerations in mind. 
CTS uses [blocking queries](/api/features/blocking) to monitor task dependencies, like changes to registered services. This results in multiple long running TCP connections between CTS and the agent to poll changes for each dependency. Monitoring a high number of services may quickly hit the default Consul agent connection limits. +When running a Consul agent with CTS in production, we suggest to keep a few considerations in mind. CTS uses [blocking queries](/api-docs/features/blocking) to monitor task dependencies, like changes to registered services. This results in multiple long running TCP connections between CTS and the agent to poll changes for each dependency. Monitoring a high number of services may quickly hit the default Consul agent connection limits. There are 2 ways to fix this issue. The first and recommended fix is to use HTTP/2 (requires HTTPS) to communicate between CTS and the Consul agent. When using HTTP/2 only a single connection is made and reused for all communications. See the [Consul Configuration section](/docs/nia/configuration#consul) for more. The other option is to configure [`limits.http_max_conns_per_client`](/docs/agent/options#http_max_conns_per_client) for the agent to a reasonable value proportional to the number of services monitored by CTS. diff --git a/website/content/docs/security/acl/acl-federated-datacenters.mdx b/website/content/docs/security/acl/acl-federated-datacenters.mdx index e13627d17..045680ec2 100644 --- a/website/content/docs/security/acl/acl-federated-datacenters.mdx +++ b/website/content/docs/security/acl/acl-federated-datacenters.mdx @@ -67,7 +67,7 @@ acl = { ~> **Warning:** Note that most enterprise deployments have security requirements that prevent specifying tokens in configuration files. The `enable_token_persistence` flag is also set in the configuration example so that the token is stored to disk in the agent's -[data directory](/docs/agent/options#_data_dir). Any future changes to the token that are made through the [API](/api/agent#update-acl-tokens) will +[data directory](/docs/agent/options#_data_dir). Any future changes to the token that are made through the [API](/api-docs/agent#update-acl-tokens) will be persisted to the same location, and the value in the config file will be ignored. The ACL agent token can also be set using the [`consul acl set-agent-token`](/commands/acl/set-agent-token) CLI as shown below. diff --git a/website/content/docs/security/acl/acl-legacy.mdx b/website/content/docs/security/acl/acl-legacy.mdx index 629fa0b1e..ea58bef0d 100644 --- a/website/content/docs/security/acl/acl-legacy.mdx +++ b/website/content/docs/security/acl/acl-legacy.mdx @@ -72,18 +72,18 @@ key_prefix "foo" { policy = "write" } The "old" API endpoints below continue to work for backwards compatibility but will continue to create or show only "legacy" tokens that can't take full advantage of the new ACL system improvements. They are documented fully under -[Legacy Tokens](/api/acl/legacy). +[Legacy Tokens](/api-docs/acl/legacy). 
-- [`PUT /acl/create` - Create Legacy Token](/api/acl/legacy#create-acl-token) -- [`PUT /acl/update` - Update Legacy Token](/api/acl/legacy#update-acl-token) -- [`PUT /acl/destroy/:uuid` - Delete Legacy Token](/api/acl/legacy#delete-acl-token) -- [`GET /acl/info/:uuid` - Read Legacy Token](/api/acl/legacy#read-acl-token) -- [`PUT /acl/clone/:uuid` - Clone Legacy Token](/api/acl/legacy#clone-acl-token) -- [`GET /acl/list` - List Legacy Tokens](/api/acl/legacy#list-acls) +- [`PUT /acl/create` - Create Legacy Token](/api-docs/acl/legacy#create-acl-token) +- [`PUT /acl/update` - Update Legacy Token](/api-docs/acl/legacy#update-acl-token) +- [`PUT /acl/destroy/:uuid` - Delete Legacy Token](/api-docs/acl/legacy#delete-acl-token) +- [`GET /acl/info/:uuid` - Read Legacy Token](/api-docs/acl/legacy#read-acl-token) +- [`PUT /acl/clone/:uuid` - Clone Legacy Token](/api-docs/acl/legacy#clone-acl-token) +- [`GET /acl/list` - List Legacy Tokens](/api-docs/acl/legacy#list-acls) The new ACL system includes new API endpoints to manage -the [ACL System](/api-docs/acl), [Tokens](/api/acl/tokens) -and [Policies](/api/acl/policies). +the [ACL System](/api-docs/acl), [Tokens](/api-docs/acl/tokens) +and [Policies](/api-docs/acl/policies). # Legacy ACL System @@ -139,28 +139,28 @@ rules: | Policy | Scope | | -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`agent`](#agent-rules) | Utility operations in the [Agent API](/api/agent), other than service and check registration | -| [`event`](#event-rules) | Listing and firing events in the [Event API](/api/event) | -| [`key`](#key-value-rules) | Key/value store operations in the [KV Store API](/api/kv) | -| [`keyring`](#keyring-rules) | Keyring operations in the [Keyring API](/api/operator/keyring) | -| [`node`](#node-rules) | Node-level catalog operations in the [Catalog API](/api/catalog), [Health API](/api/health), [Prepared Query API](/api/query), [Network Coordinate API](/api/coordinate), and [Agent API](/api/agent) | -| [`operator`](#operator-rules) | Cluster-level operations in the [Operator API](/api/operator), other than the [Keyring API](/api/operator/keyring) | -| [`query`](#prepared-query-rules) | Prepared query operations in the [Prepared Query API](/api/query) | -| [`service`](#service-rules) | Service-level catalog operations in the [Catalog API](/api/catalog), [Health API](/api/health), [Prepared Query API](/api/query), and [Agent API](/api/agent) | -| [`session`](#session-rules) | Session operations in the [Session API](/api/session) | +| [`agent`](#agent-rules) | Utility operations in the [Agent API](/api-docs/agent), other than service and check registration | +| [`event`](#event-rules) | Listing and firing events in the [Event API](/api-docs/event) | +| [`key`](#key-value-rules) | Key/value store operations in the [KV Store API](/api-docs/kv) | +| [`keyring`](#keyring-rules) | Keyring operations in the [Keyring API](/api-docs/operator/keyring) | +| [`node`](#node-rules) | Node-level catalog operations in the [Catalog API](/api-docs/catalog), [Health API](/api-docs/health), [Prepared Query API](/api-docs/query), [Network Coordinate API](/api-docs/coordinate), and [Agent API](/api-docs/agent) | +| [`operator`](#operator-rules) | Cluster-level operations in the [Operator API](/api-docs/operator), other than the [Keyring API](/api-docs/operator/keyring) | +| 
[`query`](#prepared-query-rules) | Prepared query operations in the [Prepared Query API](/api-docs/query) | +| [`service`](#service-rules) | Service-level catalog operations in the [Catalog API](/api-docs/catalog), [Health API](/api-docs/health), [Prepared Query API](/api-docs/query), and [Agent API](/api-docs/agent) | +| [`session`](#session-rules) | Session operations in the [Session API](/api-docs/session) | Since Consul snapshots actually contain ACL tokens, the -[Snapshot API](/api/snapshot) requires a management token for snapshot operations +[Snapshot API](/api-docs/snapshot) requires a management token for snapshot operations and does not use a special policy. The following resources are not covered by ACL policies: -1. The [Status API](/api/status) is used by servers when bootstrapping and exposes +1. The [Status API](/api-docs/status) is used by servers when bootstrapping and exposes basic IP and port information about the servers, and does not allow modification of any state. 2. The datacenter listing operation of the - [Catalog API](/api/catalog#list-datacenters) similarly exposes the names of known + [Catalog API](/api-docs/catalog#list-datacenters) similarly exposes the names of known Consul datacenters, and does not allow modification of any state. Constructing rules from these policies is covered in detail in the @@ -212,13 +212,13 @@ system, or accessing Consul in special situations: | Special Token | Servers | Clients | Purpose | | ----------------------------------------------------------------------------- | ---------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api/agent) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | +| [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api-docs/agent) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | | [`acl_agent_token`](/docs/agent/options#acl_agent_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | | [`acl_master_token`](/docs/agent/options#acl_master_token_legacy) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see the [Bootstrapping ACLs](#bootstrapping-acls) section for more details | | [`acl_token`](/docs/agent/options#acl_token_legacy) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery on agents | In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the -[/v1/agent/token API](/api/agent#update-acl-tokens). 
+[/v1/agent/token API](/api-docs/agent#update-acl-tokens). #### ACL Agent Master Token @@ -234,13 +234,13 @@ node "" { ``` In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the -[/v1/agent/token API](/api/agent#update-acl-tokens). +[/v1/agent/token API](/api-docs/agent#update-acl-tokens). #### ACL Agent Token The [`acl_agent_token`](/docs/agent/options#acl_agent_token) is a special token that is used for an agent's internal operations. It isn't used directly for any user-initiated operations like the [`acl_token`](/docs/agent/options#acl_token), though if the `acl_agent_token` isn't configured the `acl_token` will be used. The ACL agent token is used for the following operations by the agent: -1. Updating the agent's node entry using the [Catalog API](/api/catalog), including updating its node metadata, tagged addresses, and network coordinates +1. Updating the agent's node entry using the [Catalog API](/api-docs/catalog), including updating its node metadata, tagged addresses, and network coordinates 2. Performing [anti-entropy](/docs/architecture/anti-entropy) syncing, in particular reading the node metadata and services registered with the catalog 3. Reading and writing the special `_rexec` section of the KV store when executing [`consul exec`](/commands/exec) commands @@ -261,7 +261,7 @@ key "_rexec" { The `service` policy needs `read` access for any services that can be registered on the agent. If [remote exec is disabled](/docs/agent/options#disable_remote_exec), the default, then the `key` policy can be omitted. In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the -[/v1/agent/token API](/api/agent#update-acl-tokens). +[/v1/agent/token API](/api-docs/agent#update-acl-tokens). ## Bootstrapping ACLs @@ -556,7 +556,7 @@ supplied, the [`acl_token`](/docs/agent/options#acl_token) will be used for the instead of being left empty which would normally invoke the anonymous token. In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the -[/v1/agent/token API](/api/agent#update-acl-tokens). +[/v1/agent/token API](/api-docs/agent#update-acl-tokens). This behaves very similarly to the anonymous token, but can be configured differently on each agent, if desired. For example, this allows more fine grained control of what DNS requests a @@ -702,7 +702,7 @@ or the `CONSUL_HTTP_TOKEN` environment variable. #### Agent Rules -The `agent` policy controls access to the utility operations in the [Agent API](/api/agent), +The `agent` policy controls access to the utility operations in the [Agent API](/api-docs/agent), such as join and leave. All of the catalog-related operations are covered by the [`node`](#node-rules) and [`service`](#service-rules) policies instead. @@ -725,14 +725,14 @@ the example above, the rules allow read-only access to any node name with the em read-write access to any node name that starts with "foo", and deny all access to any node name that starts with "bar". -Since [Agent API](/api/agent) utility operations may be required before an agent is joined to +Since [Agent API](/api-docs/agent) utility operations may be required before an agent is joined to a cluster, or during an outage of the Consul servers or ACL datacenter, a special token may be configured with [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token) to allow write access to these operations even if no ACL resolution capability is available. 
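For illustration only, a sketch of setting the agent token programmatically with the Go API client; the `UpdateAgentACLToken` method name and the environment variable used here are assumptions based on the current client library rather than anything defined by the legacy system itself.

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	// DefaultConfig reads CONSUL_HTTP_ADDR and CONSUL_HTTP_TOKEN from the
	// environment; the request must be authorized to change agent tokens.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Assumed helper for PUT /v1/agent/token/agent: sets the token the agent
	// uses for its own internal operations. The token value comes from an
	// environment variable chosen for this example.
	agentToken := os.Getenv("EXAMPLE_AGENT_TOKEN")
	if _, err := client.Agent().UpdateAgentACLToken(agentToken, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("agent token updated")
}
```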
#### Event Rules -The `event` policy controls access to event operations in the [Event API](/api/event), such as +The `event` policy controls access to event operations in the [Event API](/api-docs/event), such as firing events and listing events. Event rules look like this: @@ -757,7 +757,7 @@ give agents a token with access to this event prefix, in addition to configuring #### Key/Value Rules -The `key` policy controls access to key/value store operations in the [KV API](/api/kv). Key +The `key` policy controls access to key/value store operations in the [KV API](/api-docs/kv). Key rules look like this: ```hcl @@ -781,7 +781,7 @@ starts with "bar". Consul 1.0 introduces a new `list` policy for keys that is only enforced when opted in via the boolean config param "acl_enable_key_list_policy". `list` controls access to recursively list entries and keys, and enables more fine grained policies. With "acl_enable_key_list_policy", -recursive reads via [the KV API](/api/kv#recurse) with an invalid token result in a 403. Example: +recursive reads via [the KV API](/api-docs/kv#recurse) with an invalid token result in a 403. Example: ```hcl key "" { @@ -825,7 +825,7 @@ For more detailed information, see the [Consul Sentinel documentation](/docs/age #### Keyring Rules The `keyring` policy controls access to keyring operations in the -[Keyring API](/api/operator/keyring). +[Keyring API](/api-docs/operator/keyring). Keyring rules look like this: @@ -838,8 +838,8 @@ dispositions. In the example above, the keyring may be read and updated. #### Node Rules -The `node` policy controls node-level registration and read access to the [Catalog API](/api/catalog), -service discovery with the [Health API](/api/health), and filters results in [Agent API](/api/agent) +The `node` policy controls node-level registration and read access to the [Catalog API](/api-docs/catalog), +service discovery with the [Health API](/api-docs/health), and filters results in [Agent API](/api-docs/agent) operations like fetching the list of cluster members. Node rules look like this: @@ -874,7 +874,7 @@ When reading from the catalog or retrieving information from the health endpoint used to filter the results of the query. This allows for configurations where a token has access to a given service name, but only on an allowed subset of node names. -Node rules come into play when using the [Agent API](/api/agent) to register node-level +Node rules come into play when using the [Agent API](/api-docs/agent) to register node-level checks. The agent will check tokens locally as a check is registered, and Consul also performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which may require an ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens @@ -897,7 +897,7 @@ script checks. #### Operator Rules The `operator` policy controls access to cluster-level operations in the -[Operator API](/api/operator), other than the [Keyring API](/api/operator/keyring). +[Operator API](/api-docs/operator), other than the [Keyring API](/api-docs/operator/keyring). Operator rules look like this: @@ -912,7 +912,7 @@ diagnostic purposes but not make any changes. #### Prepared Query Rules The `query` policy controls access to create, update, and delete prepared queries in the -[Prepared Query API](/api/query). Executing queries is subject to `node` and `service` +[Prepared Query API](/api-docs/query). Executing queries is subject to `node` and `service` policies, as will be explained below. 
Query rules look like this: @@ -955,7 +955,7 @@ here, with examples: that is used and known by many clients to provide geo-failover behavior for a database. -- [Template queries](/api/query#prepared-query-templates) +- [Template queries](/api-docs/query#prepared-query-templates) queries work like static queries with a `Name` defined, except that a catch-all template with an empty `Name` requires an ACL token that can write to any query prefix. @@ -1002,8 +1002,8 @@ These differences are outlined in the table below: #### Service Rules -The `service` policy controls service-level registration and read access to the [Catalog API](/api/catalog) -and service discovery with the [Health API](/api/health). +The `service` policy controls service-level registration and read access to the [Catalog API](/api-docs/catalog) +and service discovery with the [Health API](/api-docs/health). Service rules look like this: @@ -1031,7 +1031,7 @@ given service, then the DNS interface will return no records when queried for it When reading from the catalog or retrieving information from the health endpoints, service rules are used to filter the results of the query. -Service rules come into play when using the [Agent API](/api/agent) to register services or +Service rules come into play when using the [Agent API](/api-docs/agent) to register services or checks. The agent will check tokens locally as a service or check is registered, and Consul also performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which may require an ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens @@ -1058,7 +1058,7 @@ set to `true` in order to enable script checks. #### Session Rules -The `session` policy controls access to [Session API](/api/session) operations. +The `session` policy controls access to [Session API](/api-docs/session) operations. Session rules look like this: @@ -1099,7 +1099,7 @@ configuration on the servers in the non-authoritative datacenters. In Consul 0.9.1 and later you can enable ACL replication using [`enable_acl_replication`](/docs/agent/options#enable_acl_replication) and then set the token later using the -[agent token API](/api/agent#update-acl-tokens) on each server. This can +[agent token API](/api-docs/agent#update-acl-tokens) on each server. This can also be used to rotate the token without restarting the Consul servers. With replication enabled, the servers will maintain a replica of the authoritative diff --git a/website/content/docs/security/acl/acl-migrate-tokens.mdx b/website/content/docs/security/acl/acl-migrate-tokens.mdx index fc1a8e522..36387783c 100644 --- a/website/content/docs/security/acl/acl-migrate-tokens.mdx +++ b/website/content/docs/security/acl/acl-migrate-tokens.mdx @@ -59,7 +59,7 @@ blank `AccessorID`. In addition, it is assumed that all clients that might _create_ ACL tokens (e.g. Vault's Consul secrets engine) have been updated to use the [new ACL -APIs](/api/acl/tokens). +APIs](/api-docs/acl/tokens). Specifically if you are using Vault's Consul secrets engine you need to be running Vault 1.0.0 or higher, _and_ you must update all roles defined in Vault @@ -135,7 +135,7 @@ necessary access for existing clients; this is up to the operator to ensure. #### Update via API -Use the [`PUT /v1/acl/token/:AccessorID`](/api/acl/tokens#update-a-token) +Use the [`PUT /v1/acl/token/:AccessorID`](/api-docs/acl/tokens#update-a-token) endpoint. Specifically, ensure that the `Rules` field is omitted or empty. 
Empty `Rules` indicates that this is now treated as a new token. diff --git a/website/content/docs/security/acl/acl-policies.mdx b/website/content/docs/security/acl/acl-policies.mdx index 485b1a8f9..1a72774cf 100644 --- a/website/content/docs/security/acl/acl-policies.mdx +++ b/website/content/docs/security/acl/acl-policies.mdx @@ -318,7 +318,7 @@ You can can define several attributes that attach additional metadata and specif ### HTTP API Endpoint -The endpoint takes data formatted in HCL or JSON. Refer to the [ACL HTTP API endpoint documentation](/api/acl/acl) for details about the API. +The endpoint takes data formatted in HCL or JSON. Refer to the [ACL HTTP API endpoint documentation](/api-docs/acl/acl) for details about the API. The following example adds a set of rules to a policy called `my-app-policy`. The policy defines access to the `key` resource (Consul K/V). The rules are formatted in HCL, but they are wrapped in JSON so that the data can be sent using cURL: @@ -540,4 +540,4 @@ session_prefix "" { } ``` - \ No newline at end of file + diff --git a/website/content/docs/security/acl/acl-roles.mdx b/website/content/docs/security/acl/acl-roles.mdx index c5bd0661b..1b16e37e2 100644 --- a/website/content/docs/security/acl/acl-roles.mdx +++ b/website/content/docs/security/acl/acl-roles.mdx @@ -106,7 +106,7 @@ Use the following syntax to define a service identity: - `ServiceIdentities.ServiceName`: String value that specifies the name of the service you want to associate with the policy. - `ServiceIdentitites.Datacenters`: Array that specifies the names of datacenters in which the service identity applies. This field is optional. -Refer to the the [API documentation for roles](/api/acl/roles#sample-payload) for additional information and examples. +Refer to the the [API documentation for roles](/api-docs/acl/roles#sample-payload) for additional information and examples. -> **Scope for Namespace and Admin Partition** - In Consul Enterprise, service identities inherit the namespace or admin partition scope of the corresponding ACL token or role. @@ -280,7 +280,7 @@ NodeIdentities = { - `NodeIdentities.NodeName`: String value that specifies the name of the node you want to associate with the policy. - `NodeIdentitites.Datacenters`: Array that specifies the names of datacenters in which the node identity applies. This field is optional. -Refer to the the [API documentation for roles](/api/acl/roles#sample-payload) for additional information and examples. +Refer to the the [API documentation for roles](/api-docs/acl/roles#sample-payload) for additional information and examples. -> **Consul Enterprise Namespacing** - Node Identities can only be applied to tokens and roles in the `default` namespace. The generated policy rules allow for `service:read` permissions on all services in all namespaces. 
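For illustration, a sketch of creating a role carrying a service identity and a node identity with the Go API client (`github.com/hashicorp/consul/api`); the role, service, node, and datacenter names are placeholders.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// DefaultConfig honors CONSUL_HTTP_ADDR and CONSUL_HTTP_TOKEN; the token
	// needs acl:write to create roles.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder names throughout; the fields mirror the ServiceIdentities
	// and NodeIdentities payloads described above.
	role, _, err := client.ACL().RoleCreate(&api.ACLRole{
		Name:        "example-role",
		Description: "role for the web service and the node it runs on",
		ServiceIdentities: []*api.ACLServiceIdentity{
			{ServiceName: "web", Datacenters: []string{"dc1"}},
		},
		NodeIdentities: []*api.ACLNodeIdentity{
			{NodeName: "node-1", Datacenter: "dc1"},
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created role %s (%s)", role.Name, role.ID)
}
```

The same structures can be attached directly to a token via the token endpoints if a standalone role is not needed.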
diff --git a/website/content/docs/security/acl/acl-rules.mdx b/website/content/docs/security/acl/acl-rules.mdx index ddce746cc..28fe09a70 100644 --- a/website/content/docs/security/acl/acl-rules.mdx +++ b/website/content/docs/security/acl/acl-rules.mdx @@ -15,25 +15,25 @@ The following table provides an overview of the resources you can use to create | Resource | Description | Labels | | ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | -| `acl` | Controls access to ACL operations in the [ACL API](/api/acl/acl).
See [ACL Resource Rules](#acl-resource-rules) for details. | No | +| `acl` | Controls access to ACL operations in the [ACL API](/api-docs/acl/acl).
See [ACL Resource Rules](#acl-resource-rules) for details. | No | | `partition`
`partition_prefix` | Controls access to one or more admin partitions.
See [Admin Partition Rules](#admin-partition-rules) for details. | Yes | -| `agent`
`agent_prefix` | Controls access to the utility operations in the [Agent API](/api/agent), such as `join` and `leave`.
See [Agent Rules](#agent-rules) for details. | Yes | -| `event`
`event_prefix` | Controls access to event operations in the [Event API](/api/event), such as firing and listing events.
See [Event Rules](#event-rules) for details. | Yes | -| `key`
`key_prefix`   | Controls access to key/value store operations in the [KV API](/api/kv).
Can also use the `list` access level when setting the policy disposition.
Has additional value options in Consul Enterprise for integrating with [Sentinel](https://docs.hashicorp.com/sentinel/consul).
See [Key/Value Rules](#key-value-rules) for details. | Yes | -| `keyring`       | Controls access to keyring operations in the [Keyring API](/api/keyring).
See [Keyring Rules](#keyring-rules) for details. | No | +| `agent`
`agent_prefix` | Controls access to the utility operations in the [Agent API](/api-docs/agent), such as `join` and `leave`.
See [Agent Rules](#agent-rules) for details. | Yes | +| `event`
`event_prefix` | Controls access to event operations in the [Event API](/api-docs/event), such as firing and listing events.
See [Event Rules](#event-rules) for details. | Yes | +| `key`
`key_prefix`   | Controls access to key/value store operations in the [KV API](/api-docs/kv).
Can also use the `list` access level when setting the policy disposition.
Has additional value options in Consul Enterprise for integrating with [Sentinel](https://docs.hashicorp.com/sentinel/consul).
See [Key/Value Rules](#key-value-rules) for details. | Yes | +| `keyring`       | Controls access to keyring operations in the [Keyring API](/api-docs/keyring).
See [Keyring Rules](#keyring-rules) for details. | No | | `mesh`       | Provides operator-level permissions for resources in the admin partition, such as ingress gateways or mesh proxy defaults. See [Mesh Rules](#mesh-rules) for details. | No | | `namespace`
`namespace_prefix` | Controls access to one or more namespaces.
See [Namespace Rules](#namespace-rules) for details. | Yes | -| `node`
`node_prefix`   | Controls access to node-level operations in the [Catalog API](/api/catalog), [Health API](/api/health), [Prepared Query API](/api/query), [Network Coordinate API](/api/coordinate), and [Agent API](/api/agent)
See [Node Rules](#node-rules) for details. | Yes | -| `operator`       | Controls access to cluster-level operations available in the [Operator API](/api/operator) excluding keyring API endpoints.
See [Operator Rules](#operator-rules) for details. | No | -| `query`
`query_prefix` | Controls access to create, update, and delete prepared queries in the [Prepared Query API](/api/query). Access to the [node](#node-rules) and [service](#service-rules) must also be granted.
See [Prepared Query Rules](#prepared-query-rules) for details. | Yes | -| `service`
`service_prefix` | Controls service-level operations in the [Catalog API](/api/catalog), [Health API](/api/health), [Intentions API](/api/connect/intentions), [Prepared Query API](/api/query), and [Agent API](/api/agent).
See [Service Rules](#node-rules) for details. | Yes | -| `session`
`session_prefix` | Controls access to operations in the [Session API](/api/session).
See [Session Rules](#session-rules) for details. | Yes | +| `node`
`node_prefix`   | Controls access to node-level operations in the [Catalog API](/api-docs/catalog), [Health API](/api-docs/health), [Prepared Query API](/api-docs/query), [Network Coordinate API](/api-docs/coordinate), and [Agent API](/api-docs/agent)
See [Node Rules](#node-rules) for details. | Yes | +| `operator`       | Controls access to cluster-level operations available in the [Operator API](/api-docs/operator) excluding keyring API endpoints.
See [Operator Rules](#operator-rules) for details. | No | +| `query`
`query_prefix` | Controls access to create, update, and delete prepared queries in the [Prepared Query API](/api-docs/query). Access to the [node](#node-rules) and [service](#service-rules) must also be granted.
See [Prepared Query Rules](#prepared-query-rules) for details. | Yes | +| `service`
`service_prefix` | Controls service-level operations in the [Catalog API](/api-docs/catalog), [Health API](/api-docs/health), [Intentions API](/api-docs/connect/intentions), [Prepared Query API](/api-docs/query), and [Agent API](/api-docs/agent).
See [Service Rules](#node-rules) for details. | Yes | +| `session`
`session_prefix` | Controls access to operations in the [Session API](/api-docs/session).
See [Session Rules](#session-rules) for details. | Yes | The following resources are not covered by ACL policies: -- The [Status API](/api/status) is used by servers when bootstrapping and exposes basic IP and port information about the servers, and does not allow modification of any state. -- The datacenter listing operation of the [Catalog API](/api/catalog#list-datacenters) similarly exposes the names of known Consul datacenters, and does not allow modification of any state. -- The [connect CA roots endpoint](/api/connect/ca#list-ca-root-certificates) exposes just the public TLS certificate which other systems can use to verify the TLS connection with Consul. +- The [Status API](/api-docs/status) is used by servers when bootstrapping and exposes basic IP and port information about the servers, and does not allow modification of any state. +- The datacenter listing operation of the [Catalog API](/api-docs/catalog#list-datacenters) similarly exposes the names of known Consul datacenters, and does not allow modification of any state. +- The [connect CA roots endpoint](/api-docs/connect/ca#list-ca-root-certificates) exposes just the public TLS certificate which other systems can use to verify the TLS connection with Consul. -> **Consul Enterprise Namespace** - In addition to directly-linked policies, roles, and service identities, Consul Enterprise enables ACL policies and roles to be defined in the [Namespaces definition](/docs/enterprise/namespaces#namespace-definition) (Consul Enterprise 1.7.0+). @@ -179,7 +179,7 @@ partition_prefix "ex-" { ## Agent Rules -The `agent` and `agent_prefix` resources control access to the utility operations in the [Agent API](/api/agent), +The `agent` and `agent_prefix` resources control access to the utility operations in the [Agent API](/api-docs/agent), such as join and leave. All of the catalog-related operations are covered by the [`node` or `node_prefix`](#node-rules) and [`service` or `service_prefix`](#service-rules) policies instead. @@ -225,14 +225,14 @@ allow read-only access to any node name by using the empty prefix, read-write ac the node with the _exact_ name `foo`, and denies all access to any node name that starts with `bar`. -Since [Agent API](/api/agent) utility operations may be required before an agent is joined to +Since [Agent API](/api-docs/agent) utility operations may be required before an agent is joined to a cluster, or during an outage of the Consul servers or ACL datacenter, a special token may be configured with [`acl.tokens.agent_recovery`](/docs/agent/options#acl_tokens_agent_recovery) to allow write access to these operations even if no ACL resolution capability is available. ## Event Rules -The `event` and `event_prefix` resources control access to event operations in the [Event API](/api/event), such as +The `event` and `event_prefix` resources control access to event operations in the [Event API](/api-docs/event), such as firing events and listing events. @@ -276,7 +276,7 @@ give agents a token with access to this event prefix, in addition to configuring ## Key/Value Rules -The `key` and `key_prefix` resources control access to key/value store operations in the [KV API](/api/kv). +The `key` and `key_prefix` resources control access to key/value store operations in the [KV API](/api-docs/kv). 
@@ -320,7 +320,7 @@ to any key name with the empty prefix rule, allow read-write access to the "foo" ### List Policy for Keys -Enable the `list` policy disposition (Consul 1.0+) by setting the `acl.enable_key_list_policy` parameter to `true`. The disposition provides recursive access to `key` entries. Refer to the [KV API](/api/kv#recurse) documentation for additional information. In the following example, `key` resources that start with `bar` are listed. +Enable the `list` policy disposition (Consul 1.0+) by setting the `acl.enable_key_list_policy` parameter to `true`. The disposition provides recursive access to `key` entries. Refer to the [KV API](/api-docs/kv#recurse) documentation for additional information. In the following example, `key` resources that start with `bar` are listed. @@ -385,7 +385,7 @@ For more detailed information, see the [Consul Sentinel documentation](/docs/age ## Keyring Rules -The `keyring` resource controls access to keyring operations in the [Keyring API](/api/operator/keyring). Only one keyring policy is allowed per rule set. The value is set to one of the policy dispositions, but may be read and updated. +The `keyring` resource controls access to keyring operations in the [Keyring API](/api-docs/operator/keyring). Only one keyring policy is allowed per rule set. The value is set to one of the policy dispositions, but may be read and updated. @@ -592,9 +592,9 @@ specific namespace are prevented from accessing resources in another namespace. The `node` and `node_prefix` resources control access to the following API behaviors: -- node-level registration and read access to the [Catalog API](/api/catalog) -- service discovery with the [Health API](/api/health) -- filtering results in [Agent API](/api/agent) operations, such as fetching the list of cluster members. +- node-level registration and read access to the [Catalog API](/api-docs/catalog) +- service discovery with the [Health API](/api-docs/health) +- filtering results in [Agent API](/api-docs/agent) operations, such as fetching the list of cluster members. You can use resource labels to scope the rule to a specific resource or set of resources. @@ -659,7 +659,7 @@ These actions may required an ACL token to complete. Use the following methods t ## Operator Rules The `operator` resource controls access to cluster-level operations in the -[Operator API](/api/operator), other than the [Keyring API](/api/operator/keyring). +[Operator API](/api-docs/operator), other than the [Keyring API](/api-docs/operator/keyring). Only one operator rule allowed per rule set. In the following example, the token may be used to query the operator endpoints for diagnostic purposes but it will not make changes. @@ -684,7 +684,7 @@ operator = "read" ## Prepared Query Rules The `query` and `query_prefix` resources control access to create, update, and delete prepared queries in the -[Prepared Query API](/api/query). Specify the resource label in query rules to determine the scope of the rule. +[Prepared Query API](/api-docs/query). Specify the resource label in query rules to determine the scope of the rule. The resource label in the following example is empty. As a result, the rules allow read-only access to query resources with any name. The rules also grant read-write access to the query named `foo`, which allows control of the query namespace to be delegated based on ACLs: @@ -745,7 +745,7 @@ here, with examples: that is used and known by many clients to provide geo-failover behavior for a database. 
-- [Template queries](/api/query#prepared-query-templates) +- [Template queries](/api-docs/query#prepared-query-templates) queries work like static queries with a `Name` defined, except that a catch-all template with an empty `Name` requires an ACL token that can write to any query prefix. @@ -792,7 +792,7 @@ These differences are outlined in the table below: ## Service Rules -The `service` and `service_prefix` resources control service-level registration and read access to the [Catalog API](/api/catalog) and service discovery with the [Health API](/api/health). +The `service` and `service_prefix` resources control service-level registration and read access to the [Catalog API](/api-docs/catalog) and service discovery with the [Health API](/api-docs/health). Specify the resource label in service rules to set the scope of the rule. The resource label in the following example is empty. As a result, the rules allow read-only access to any service name with the empty prefix. The rules also allow read-write access to the `app` service and deny all access to the `admin` service: @@ -841,7 +841,7 @@ given service, then the DNS interface will return no records when queried for it When reading from the catalog or retrieving information from the health endpoints, service rules are used to filter the results of the query. -Service rules come into play when using the [Agent API](/api/agent) to register services or +Service rules come into play when using the [Agent API](/api-docs/agent) to register services or checks. The agent will check tokens locally as a service or check is registered, and Consul also performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which may require an ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens @@ -900,7 +900,7 @@ for more information about managing intentions access with service rules. ## Session Rules -The `session` and `session_prefix` resources controls access to [Session API](/api/session) operations. +The `session` and `session_prefix` resources controls access to [Session API](/api-docs/session) operations. Specify the resource label in session rules to set the scope of the rule. The resource label in the following example is empty. As a result, the rules allow read-only access to all sessions. diff --git a/website/content/docs/security/acl/acl-tokens.mdx b/website/content/docs/security/acl/acl-tokens.mdx index 66d886ecb..89999008b 100644 --- a/website/content/docs/security/acl/acl-tokens.mdx +++ b/website/content/docs/security/acl/acl-tokens.mdx @@ -160,22 +160,22 @@ system or accessing Consul under specific conditions. The following table descri | Token | Servers | Clients | Description | | ------------------------------------------------------------------------------------ | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`acl.tokens.agent_recovery`](/docs/agent/options#acl_tokens_agent_recovery) | `OPTIONAL` | `OPTIONAL` | Enables access to the [Agent API](/api/agent) when remote bearer token resolution fails.
Used for setting up the cluster and performing initial join operations.
See [ACL Agent Recovery Token](#acl-agent-recovery-token) for details. | +| [`acl.tokens.agent_recovery`](/docs/agent/options#acl_tokens_agent_recovery) | `OPTIONAL` | `OPTIONAL` | Enables access to the [Agent API](/api-docs/agent) when remote bearer token resolution fails.
Used for setting up the cluster and performing initial join operations.
See [ACL Agent Recovery Token](#acl-agent-recovery-token) for details. | | [`acl.tokens.agent`](/docs/agent/options#acl_tokens_agent) | `OPTIONAL` | `OPTIONAL` | Used for internal agent operations. See [ACL Agent Token](#acl-agent-token) for details. | | [`acl.tokens.initial_management`](/docs/agent/options#acl_tokens_initial_management) | `OPTIONAL` | `N/A` | Used to bootstrap the ACL system. See [Initial Management Token](#initial-management-token). | | [`acl.tokens.default`](/docs/agent/options#acl_tokens_default) | `OPTIONAL` | `OPTIONAL` | Specifies a default token to use for client requests if no token is supplied. This is commonly configured with read-only access to services to enable DNS service discovery on agents. | -All reserved tokens except the `initial_management` token can be created or updated using the [/v1/agent/token API](/api/agent#update-acl-tokens). +All reserved tokens except the `initial_management` token can be created or updated using the [/v1/agent/token API](/api-docs/agent#update-acl-tokens). ### Snapshot Tokens -Snapshots are artifacts created with the [snapshot API](/api/snapshot) for backup and recovery purposes. Snapshots contain ACL tokens and require, and interacting with them requires a token with `write` privileges. +Snapshots are artifacts created with the [snapshot API](/api-docs/snapshot) for backup and recovery purposes. Snapshots contain ACL tokens and require, and interacting with them requires a token with `write` privileges. ### ACL Agent Token The [`acl.tokens.agent`](/docs/agent/options#acl_tokens_agent) is a special token that is used for an agent's internal operations. It isn't used directly for any user-initiated operations like the [`acl.tokens.default`](/docs/agent/options#acl_tokens_default), though if the `acl.tokens.agent` isn't configured the `acl.tokens.default` will be used. The ACL agent token is used for the following operations by the agent: -1. Updating the agent's node entry using the [Catalog API](/api/catalog), including updating its node metadata, tagged addresses, and network coordinates +1. Updating the agent's node entry using the [Catalog API](/api-docs/catalog), including updating its node metadata, tagged addresses, and network coordinates 2. Performing [anti-entropy](/docs/internals/anti-entropy) syncing, in particular reading the node metadata and services registered with the catalog 3. Reading and writing the special `_rexec` section of the KV store when executing [`consul exec`](/commands/exec) commands diff --git a/website/content/docs/security/acl/auth-methods/index.mdx b/website/content/docs/security/acl/auth-methods/index.mdx index 6f3385a03..c1e29d504 100644 --- a/website/content/docs/security/acl/auth-methods/index.mdx +++ b/website/content/docs/security/acl/auth-methods/index.mdx @@ -49,7 +49,7 @@ using the API or command line before they can be used by applications. details about how to authenticate application credentials. Successful validation of application credentials will return a set of trusted identity attributes (such as a username). These can be managed with the `consul acl auth-method` subcommands or the corresponding [API - endpoints](/api/acl/auth-methods). The specific details of + endpoints](/api-docs/acl/auth-methods). The specific details of configuration are type dependent and described in their own documentation pages. @@ -57,7 +57,7 @@ using the API or command line before they can be used by applications. 
how to translate trusted identity attributes from each auth method into privileges assigned to the ACL token that is created. These can be managed with the `consul acl binding-rule` subcommands or the corresponding [API - endpoints](/api/acl/binding-rules). + endpoints](/api-docs/acl/binding-rules). -> **Note** - To configure auth methods in any connected secondary datacenter, [ACL token replication](/docs/agent/options#acl_enable_token_replication) @@ -83,7 +83,7 @@ Each binding rule is composed of two portions: - **Selector** - A logical query that must match the trusted identity attributes for the binding rule to be applicable to a given login attempt. The syntax uses github.com/hashicorp/go-bexpr which is shared with the [API - filtering feature](/api/features/filtering). For example: + filtering feature](/api-docs/features/filtering). For example: `"serviceaccount.namespace==default and serviceaccount.name!=vault"` - **Bind Type and Name** - A binding rule can bind a token to a diff --git a/website/content/docs/security/acl/auth-methods/jwt.mdx b/website/content/docs/security/acl/auth-methods/jwt.mdx index d3d8130c6..60fe15891 100644 --- a/website/content/docs/security/acl/auth-methods/jwt.mdx +++ b/website/content/docs/security/acl/auth-methods/jwt.mdx @@ -28,7 +28,7 @@ processing of the claims data in the JWT. ## Config Parameters -The following auth method [`Config`](/api/acl/auth-methods#config) +The following auth method [`Config`](/api-docs/acl/auth-methods#config) parameters are required to properly configure an auth method of type `jwt`: diff --git a/website/content/docs/security/acl/auth-methods/kubernetes.mdx b/website/content/docs/security/acl/auth-methods/kubernetes.mdx index e1490ba8e..8d84d0ca1 100644 --- a/website/content/docs/security/acl/auth-methods/kubernetes.mdx +++ b/website/content/docs/security/acl/auth-methods/kubernetes.mdx @@ -21,7 +21,7 @@ documentation](/docs/security/acl/auth-methods). ## Config Parameters -The following auth method [`Config`](/api/acl/auth-methods#config) +The following auth method [`Config`](/api-docs/acl/auth-methods#config) parameters are required to properly configure an auth method of type `kubernetes`: @@ -37,27 +37,27 @@ parameters are required to properly configure an auth method of type validate application JWTs during login. - `MapNamespaces` `(bool: )` - - **Deprecated in Consul 1.8.0 in favor of [namespace rules](/api/acl/auth-methods#namespacerules).** + **Deprecated in Consul 1.8.0 in favor of [namespace rules](/api-docs/acl/auth-methods#namespacerules).** Indicates whether the auth method should attempt to map the Kubernetes namespace to a Consul namespace instead of creating tokens in the auth methods own namespace. Note that mapping namespaces requires the auth method to reside within the `default` namespace. - Deprecated in Consul 1.8.0 in favor of [namespace rules](/api/acl/auth-methods#namespacerules). + Deprecated in Consul 1.8.0 in favor of [namespace rules](/api-docs/acl/auth-methods#namespacerules). - `ConsulNamespacePrefix` `(string: )` - - **Deprecated in Consul 1.8.0 in favor of [namespace rules](/api/acl/auth-methods#namespacerules).** + **Deprecated in Consul 1.8.0 in favor of [namespace rules](/api-docs/acl/auth-methods#namespacerules).** When `MapNamespaces` is enabled, this value will be prefixed to the Kubernetes namespace to determine the Consul namespace to create the new token within. - Deprecated in Consul 1.8.0 in favor of [namespace rules](/api/acl/auth-methods#namespacerules). 
+ Deprecated in Consul 1.8.0 in favor of [namespace rules](/api-docs/acl/auth-methods#namespacerules). - `ConsulNamespaceOverrides` `(map: )` - - **Deprecated in Consul 1.8.0 in favor of [namespace rules](/api/acl/auth-methods#namespacerules).** + **Deprecated in Consul 1.8.0 in favor of [namespace rules](/api-docs/acl/auth-methods#namespacerules).** This field is a mapping of Kubernetes namespace names to Consul namespace names. If a Kubernetes namespace is present within this map, the value will be used without adding the `ConsulNamespacePrefix`. If the value in the map is `""` then the auth methods namespace will be used instead of attempting to determine an alternate namespace. - Deprecated in Consul 1.8.0 in favor of [namespace rules](/api/acl/auth-methods#namespacerules). + Deprecated in Consul 1.8.0 in favor of [namespace rules](/api-docs/acl/auth-methods#namespacerules). ### Sample Config diff --git a/website/content/docs/security/acl/auth-methods/oidc.mdx b/website/content/docs/security/acl/auth-methods/oidc.mdx index 5c9605831..07ca08a89 100644 --- a/website/content/docs/security/acl/auth-methods/oidc.mdx +++ b/website/content/docs/security/acl/auth-methods/oidc.mdx @@ -34,7 +34,7 @@ processing of the claims data in the JWT. ## Config Parameters -The following auth method [`Config`](/api/acl/auth-methods#config) +The following auth method [`Config`](/api-docs/acl/auth-methods#config) parameters are required to properly configure an auth method of type `oidc`: diff --git a/website/content/docs/security/acl/index.mdx b/website/content/docs/security/acl/index.mdx index 197d63250..d0676d14d 100644 --- a/website/content/docs/security/acl/index.mdx +++ b/website/content/docs/security/acl/index.mdx @@ -76,7 +76,7 @@ Service identities enable you to quickly construct policies for services, rather Refer to the following topics for additional information about service identities: - [Service Identities](/docs/security/acl/acl-roles#service-identities) -- [API documentation for roles](/api/acl/roles#sample-payload) +- [API documentation for roles](/api-docs/acl/roles#sample-payload) ## Node Identities @@ -87,4 +87,4 @@ Node identities enable you to quickly construct policies for nodes, rather than Refer to the following topics for additional information about node identities: - [Node Identities](/docs/security/acl/acl-roles#node-identities) -- [API documentation for roles](/api/acl/roles#sample-payload) +- [API documentation for roles](/api-docs/acl/roles#sample-payload) diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index 096ecba58..e7284dd38 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -403,7 +403,7 @@ will not upgrade more than one batch per second so on a cluster with 10,000 tokens, this may take several minutes. While this is happening both old and new ACLs will work correctly with the -caveat that new ACL [Token APIs](/api/acl/tokens) may not return an +caveat that new ACL [Token APIs](/api-docs/acl/tokens) may not return an accessor ID for legacy tokens that are not yet migrated. #### Migrating Existing ACLs @@ -417,8 +417,8 @@ API so existing integrations that create tokens (e.g. Vault) will continue to work. The "legacy" tokens generated though will not be able to take advantage of new policy features. 
It's recommended that you complete migration of all tokens as soon as possible after upgrade, as well as updating any integrations to work -with the the new ACL [Token](/api/acl/tokens) and -[Policy](/api/acl/policies) APIs. +with the the new ACL [Token](/api-docs/acl/tokens) and +[Policy](/api-docs/acl/policies) APIs. More complete details on how to upgrade "legacy" tokens is available [here](/docs/security/acl/acl-migrate-tokens). @@ -718,7 +718,7 @@ directly returning one of Consul's internal data structures. This configuration structure has been moved under `DebugConfig`, and is documents as for debugging use and subject to change, and a small set of elements of `Config` have been maintained and documented. See [Read -Configuration](/api/agent#read-configuration) endpoint documentation for +Configuration](/api-docs/agent#read-configuration) endpoint documentation for details. #### Deprecated `configtest` Command Removed @@ -950,7 +950,7 @@ to upgrade all agents to a newer version of Consul before upgrading to Consul #### Prepared Query Changes Consul version 0.7 adds a feature which allows prepared queries to store a -[`Near` parameter](/api/query#near) in the query definition +[`Near` parameter](/api-docs/query#near) in the query definition itself. This feature enables using the distance sorting features of prepared queries without explicitly providing the node to sort near in requests, but requires the agent servicing a request to send additional information about diff --git a/website/next.config.js b/website/next.config.js index 0ddf76822..f90501d0d 100644 --- a/website/next.config.js +++ b/website/next.config.js @@ -10,12 +10,6 @@ module.exports = withHashicorp({ transpileModules: ['@hashicorp/flight-icons'], })({ svgo: { plugins: [{ removeViewBox: false }] }, - rewrites: () => [ - { - source: '/api/:path*', - destination: '/api-docs/:path*', - }, - ], redirects: () => redirects, // Note: These are meant to be public, it's not a mistake that they are here env: { diff --git a/website/redirects.next.js b/website/redirects.next.js index 484d912a1..3517dc5e3 100644 --- a/website/redirects.next.js +++ b/website/redirects.next.js @@ -357,10 +357,10 @@ module.exports = [ }, { source: '/docs/agent/http/:path*', - destination: '/api/:path*', + destination: '/api-docs/:path*', permanent: true, }, - { source: '/docs/agent/http', destination: '/api', permanent: true }, + { source: '/docs/agent/http', destination: '/api-docs', permanent: true }, // CLI Redirects { source: '/docs/commands', destination: '/commands', permanent: true }, { @@ -1254,4 +1254,9 @@ module.exports = [ destination: '/docs/releases/release-notes/v1_9_0', permanent: true, }, + { + source: '/api/:path*', + destination: '/api-docs/:path*', + permanent: true, + }, ] From aae6d8080dcf63fc252fb342524d863c0f3ce3df Mon Sep 17 00:00:00 2001 From: Paul Glass Date: Thu, 31 Mar 2022 10:18:48 -0500 Subject: [PATCH 050/785] Add IAM Auth Method (#12583) This adds an aws-iam auth method type which supports authenticating to Consul using AWS IAM identities. Co-authored-by: R.B. 
Boyer <4903+rboyer@users.noreply.github.com> --- .changelog/12583.txt | 3 + agent/consul/acl_authmethod.go | 1 + agent/consul/authmethod/awsauth/aws.go | 193 ++++++++++ agent/consul/authmethod/awsauth/aws_test.go | 342 +++++++++++++++++ command/login/aws.go | 148 ++++++++ command/login/login.go | 41 ++- command/login/login_test.go | 291 +++++++++++++-- go.mod | 2 +- internal/iamauth/README.md | 2 + internal/iamauth/auth.go | 311 ++++++++++++++++ internal/iamauth/auth_test.go | 123 +++++++ internal/iamauth/config.go | 69 ++++ internal/iamauth/config_test.go | 150 ++++++++ internal/iamauth/iamauthtest/testing.go | 187 ++++++++++ internal/iamauth/responses/arn.go | 94 +++++ internal/iamauth/responses/responses.go | 92 +++++ internal/iamauth/responses/responses_test.go | 157 ++++++++ internal/iamauth/responsestest/testing.go | 81 +++++ internal/iamauth/token.go | 343 +++++++++++++++++ internal/iamauth/token_test.go | 364 +++++++++++++++++++ internal/iamauth/util.go | 158 ++++++++ lib/glob.go | 24 ++ lib/glob_test.go | 37 ++ 23 files changed, 3165 insertions(+), 48 deletions(-) create mode 100644 .changelog/12583.txt create mode 100644 agent/consul/authmethod/awsauth/aws.go create mode 100644 agent/consul/authmethod/awsauth/aws_test.go create mode 100644 command/login/aws.go create mode 100644 internal/iamauth/README.md create mode 100644 internal/iamauth/auth.go create mode 100644 internal/iamauth/auth_test.go create mode 100644 internal/iamauth/config.go create mode 100644 internal/iamauth/config_test.go create mode 100644 internal/iamauth/iamauthtest/testing.go create mode 100644 internal/iamauth/responses/arn.go create mode 100644 internal/iamauth/responses/responses.go create mode 100644 internal/iamauth/responses/responses_test.go create mode 100644 internal/iamauth/responsestest/testing.go create mode 100644 internal/iamauth/token.go create mode 100644 internal/iamauth/token_test.go create mode 100644 internal/iamauth/util.go create mode 100644 lib/glob.go create mode 100644 lib/glob_test.go diff --git a/.changelog/12583.txt b/.changelog/12583.txt new file mode 100644 index 000000000..4b5dad9c0 --- /dev/null +++ b/.changelog/12583.txt @@ -0,0 +1,3 @@ +```release-note:feature +acl: Added an AWS IAM auth method that allows authenticating to Consul using AWS IAM identities +``` diff --git a/agent/consul/acl_authmethod.go b/agent/consul/acl_authmethod.go index 2e973c6a1..b901ce131 100644 --- a/agent/consul/acl_authmethod.go +++ b/agent/consul/acl_authmethod.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-bexpr" // register these as a builtin auth method + _ "github.com/hashicorp/consul/agent/consul/authmethod/awsauth" _ "github.com/hashicorp/consul/agent/consul/authmethod/kubeauth" _ "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" ) diff --git a/agent/consul/authmethod/awsauth/aws.go b/agent/consul/authmethod/awsauth/aws.go new file mode 100644 index 000000000..32320e3f7 --- /dev/null +++ b/agent/consul/authmethod/awsauth/aws.go @@ -0,0 +1,193 @@ +package awsauth + +import ( + "context" + "fmt" + + "github.com/hashicorp/consul/agent/consul/authmethod" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/iamauth" + "github.com/hashicorp/go-hclog" +) + +const ( + authMethodType string = "aws-iam" + + IAMServerIDHeaderName string = "X-Consul-IAM-ServerID" + GetEntityMethodHeader string = "X-Consul-IAM-GetEntity-Method" + GetEntityURLHeader string = "X-Consul-IAM-GetEntity-URL" + GetEntityHeadersHeader string = "X-Consul-IAM-GetEntity-Headers" 
+ GetEntityBodyHeader string = "X-Consul-IAM-GetEntity-Body" +) + +func init() { + // register this as an available auth method type + authmethod.Register(authMethodType, func(logger hclog.Logger, method *structs.ACLAuthMethod) (authmethod.Validator, error) { + v, err := NewValidator(logger, method) + if err != nil { + return nil, err + } + return v, nil + }) +} + +type Config struct { + // BoundIAMPrincipalARNs are the trusted AWS IAM principal ARNs that are permitted + // to login to the auth method. These can be the exact ARNs or wildcards. Wildcards + // are only supported if EnableIAMEntityDetails is true. + BoundIAMPrincipalARNs []string `json:",omitempty"` + + // EnableIAMEntityDetails will fetch the IAM User or IAM Role details to include + // in binding rules. Required if wildcard principal ARNs are used. + EnableIAMEntityDetails bool `json:",omitempty"` + + // IAMEntityTags are the specific IAM User or IAM Role tags to include as selectable + // fields in the binding rule attributes. Requires EnableIAMEntityDetails = true. + IAMEntityTags []string `json:",omitempty"` + + // ServerIDHeaderValue adds a X-Consul-IAM-ServerID header to each AWS API request. + // This helps protect against replay attacks. + ServerIDHeaderValue string `json:",omitempty"` + + // MaxRetries is the maximum number of retries on AWS API requests for recoverable errors. + MaxRetries int `json:",omitempty"` + // IAMEndpoint is the AWS IAM endpoint where iam:GetRole or iam:GetUser requests will be sent. + // Note that the Host header in a signed request cannot be changed. + IAMEndpoint string `json:",omitempty"` + // STSEndpoint is the AWS STS endpoint where sts:GetCallerIdentity requests will be sent. + // Note that the Host header in a signed request cannot be changed. + STSEndpoint string `json:",omitempty"` + // STSRegion is the region for the AWS STS service. This should only be set if STSEndpoint + // is set, and must match the region of the STSEndpoint. + STSRegion string `json:",omitempty"` + + // AllowedSTSHeaderValues is a list of additional allowed headers on the sts:GetCallerIdentity + // request in the bearer token. A default list of necessary headers is allowed in any case. 
+ AllowedSTSHeaderValues []string `json:",omitempty"` +} + +func (c *Config) convertForLibrary() *iamauth.Config { + return &iamauth.Config{ + BoundIAMPrincipalARNs: c.BoundIAMPrincipalARNs, + EnableIAMEntityDetails: c.EnableIAMEntityDetails, + IAMEntityTags: c.IAMEntityTags, + ServerIDHeaderValue: c.ServerIDHeaderValue, + MaxRetries: c.MaxRetries, + IAMEndpoint: c.IAMEndpoint, + STSEndpoint: c.STSEndpoint, + STSRegion: c.STSRegion, + AllowedSTSHeaderValues: c.AllowedSTSHeaderValues, + + ServerIDHeaderName: IAMServerIDHeaderName, + GetEntityMethodHeader: GetEntityMethodHeader, + GetEntityURLHeader: GetEntityURLHeader, + GetEntityHeadersHeader: GetEntityHeadersHeader, + GetEntityBodyHeader: GetEntityBodyHeader, + } +} + +type Validator struct { + name string + config *iamauth.Config + logger hclog.Logger + + auth *iamauth.Authenticator +} + +func NewValidator(logger hclog.Logger, method *structs.ACLAuthMethod) (*Validator, error) { + if method.Type != authMethodType { + return nil, fmt.Errorf("%q is not an AWS IAM auth method", method.Name) + } + + var config Config + if err := authmethod.ParseConfig(method.Config, &config); err != nil { + return nil, err + } + iamConfig := config.convertForLibrary() + + auth, err := iamauth.NewAuthenticator(iamConfig, logger) + if err != nil { + return nil, err + } + + return &Validator{ + name: method.Name, + config: iamConfig, + logger: logger, + auth: auth, + }, nil +} + +// Name implements authmethod.Validator. +func (v *Validator) Name() string { return v.name } + +// Stop implements authmethod.Validator. +func (v *Validator) Stop() {} + +// ValidateLogin implements authmethod.Validator. +func (v *Validator) ValidateLogin(ctx context.Context, loginToken string) (*authmethod.Identity, error) { + details, err := v.auth.ValidateLogin(ctx, loginToken) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "entity_name": details.EntityName, + "entity_id": details.EntityId, + "account_id": details.AccountId, + } + fields := &awsSelectableFields{ + EntityName: details.EntityName, + EntityId: details.EntityId, + AccountId: details.AccountId, + } + + if v.config.EnableIAMEntityDetails { + vars["entity_path"] = details.EntityPath + fields.EntityPath = details.EntityPath + fields.EntityTags = map[string]string{} + for _, tag := range v.config.IAMEntityTags { + vars["entity_tags."+tag] = details.EntityTags[tag] + fields.EntityTags[tag] = details.EntityTags[tag] + } + } + + result := &authmethod.Identity{ + SelectableFields: fields, + ProjectedVars: vars, + EnterpriseMeta: nil, + } + return result, nil + +} + +func (v *Validator) NewIdentity() *authmethod.Identity { + fields := &awsSelectableFields{ + EntityTags: map[string]string{}, + } + vars := map[string]string{ + "entity_name": "", + "entity_id": "", + "account_id": "", + } + if v.config.EnableIAMEntityDetails { + vars["entity_path"] = "" + for _, tag := range v.config.IAMEntityTags { + vars["entity_tags."+tag] = "" + fields.EntityTags[tag] = "" + } + } + return &authmethod.Identity{ + SelectableFields: fields, + ProjectedVars: vars, + } +} + +type awsSelectableFields struct { + EntityName string `bexpr:"entity_name"` + EntityId string `bexpr:"entity_id"` + AccountId string `bexpr:"account_id"` + + EntityPath string `bexpr:"entity_path"` + EntityTags map[string]string `bexpr:"entity_tags"` +} diff --git a/agent/consul/authmethod/awsauth/aws_test.go b/agent/consul/authmethod/awsauth/aws_test.go new file mode 100644 index 000000000..8ee507692 --- /dev/null +++ 
b/agent/consul/authmethod/awsauth/aws_test.go @@ -0,0 +1,342 @@ +package awsauth + +import ( + "context" + "encoding/json" + "fmt" + "net/http/httptest" + "testing" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/hashicorp/consul/agent/consul/authmethod" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/iamauth" + "github.com/hashicorp/consul/internal/iamauth/iamauthtest" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestNewValidator(t *testing.T) { + f := iamauthtest.MakeFixture() + expConfig := &iamauth.Config{ + BoundIAMPrincipalARNs: []string{f.AssumedRoleARN}, + EnableIAMEntityDetails: true, + IAMEntityTags: []string{"tag-1"}, + ServerIDHeaderValue: "x-some-header", + MaxRetries: 3, + IAMEndpoint: "iam-endpoint", + STSEndpoint: "sts-endpoint", + STSRegion: "sts-region", + AllowedSTSHeaderValues: []string{"header-value"}, + ServerIDHeaderName: "X-Consul-IAM-ServerID", + GetEntityMethodHeader: "X-Consul-IAM-GetEntity-Method", + GetEntityURLHeader: "X-Consul-IAM-GetEntity-URL", + GetEntityHeadersHeader: "X-Consul-IAM-GetEntity-Headers", + GetEntityBodyHeader: "X-Consul-IAM-GetEntity-Body", + } + + type AM = *structs.ACLAuthMethod + // Create the auth method, with an optional modification function. + makeMethod := func(modifyFn func(AM)) AM { + config := map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.AssumedRoleARN}, + "EnableIAMEntityDetails": true, + "IAMEntityTags": []string{"tag-1"}, + "ServerIDHeaderValue": "x-some-header", + "MaxRetries": 3, + "IAMEndpoint": "iam-endpoint", + "STSEndpoint": "sts-endpoint", + "STSRegion": "sts-region", + "AllowedSTSHeaderValues": []string{"header-value"}, + } + + m := &structs.ACLAuthMethod{ + Name: "test-iam", + Type: "aws-iam", + Description: "aws iam auth", + Config: config, + } + if modifyFn != nil { + modifyFn(m) + } + return m + } + + cases := map[string]struct { + ok bool + modifyFn func(AM) + }{ + "success": {true, nil}, + "wrong type": {false, func(m AM) { m.Type = "not-iam" }}, + "extra config": {false, func(m AM) { m.Config["extraField"] = "123" }}, + "wrong config value type": {false, func(m AM) { m.Config["MaxRetries"] = []string{"1"} }}, + "missing bound principals": {false, func(m AM) { delete(m.Config, "BoundIAMPrincipalARNs") }}, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + v, err := NewValidator(nil, makeMethod(c.modifyFn)) + if c.ok { + require.NoError(t, err) + require.NotNil(t, v) + require.Equal(t, "test-iam", v.name) + require.NotNil(t, v.auth) + require.Equal(t, expConfig, v.config) + } else { + require.Error(t, err) + require.Nil(t, v) + } + }) + } +} + +func TestValidateLogin(t *testing.T) { + f := iamauthtest.MakeFixture() + + cases := map[string]struct { + server *iamauthtest.Server + token string + config map[string]interface{} + expVars map[string]string + expFields []string + expError string + }{ + "success - role login": { + server: f.ServerForRole, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.CanonicalRoleARN}, + }, + expVars: map[string]string{ + "entity_id": f.EntityID, + "entity_name": f.RoleName, + "account_id": f.AccountID, + }, + expFields: []string{ + fmt.Sprintf(`entity_id == %q`, f.EntityID), + fmt.Sprintf(`entity_name == %q`, f.RoleName), + fmt.Sprintf(`account_id == %q`, f.AccountID), + }, + }, + "success - user login": { + server: f.ServerForUser, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.UserARN}, + }, + expVars: 
map[string]string{ + "entity_id": f.EntityID, + "entity_name": f.UserName, + "account_id": f.AccountID, + }, + expFields: []string{ + fmt.Sprintf(`entity_id == %q`, f.EntityID), + fmt.Sprintf(`entity_name == %q`, f.UserName), + fmt.Sprintf(`account_id == %q`, f.AccountID), + }, + }, + "success - role login with entity details": { + server: f.ServerForUser, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.UserARN}, + "EnableIAMEntityDetails": true, + }, + expVars: map[string]string{ + "entity_id": f.EntityID, + "entity_name": f.UserName, + "account_id": f.AccountID, + "entity_path": f.UserPath, + }, + expFields: []string{ + fmt.Sprintf(`entity_id == %q`, f.EntityID), + fmt.Sprintf(`entity_name == %q`, f.UserName), + fmt.Sprintf(`account_id == %q`, f.AccountID), + fmt.Sprintf(`entity_path == %q`, f.UserPath), + }, + }, + "success - user login with entity details": { + server: f.ServerForUser, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.UserARN}, + "EnableIAMEntityDetails": true, + }, + expVars: map[string]string{ + "entity_id": f.EntityID, + "entity_name": f.UserName, + "account_id": f.AccountID, + "entity_path": f.UserPath, + }, + expFields: []string{ + fmt.Sprintf(`entity_id == %q`, f.EntityID), + fmt.Sprintf(`entity_name == %q`, f.UserName), + fmt.Sprintf(`account_id == %q`, f.AccountID), + fmt.Sprintf(`entity_path == %q`, f.UserPath), + }, + }, + "invalid token": { + server: f.ServerForUser, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.UserARN}, + }, + token: `invalid`, + expError: "invalid token", + }, + "empty json token": { + server: f.ServerForUser, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.UserARN}, + }, + token: `{}`, + expError: "invalid token", + }, + "empty json fields in token": { + server: f.ServerForUser, + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": []string{f.UserARN}, + }, + token: `{"iam_http_request_method": "", +"iam_request_body": "", +"iam_request_headers": "", +"iam_request_url": "" +}`, + expError: "invalid token", + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + v, _, token := setup(t, c.config, c.server) + if c.token != "" { + token = c.token + } + id, err := v.ValidateLogin(context.Background(), token) + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + require.Nil(t, id) + } else { + require.NoError(t, err) + authmethod.RequireIdentityMatch(t, id, c.expVars, c.expFields...) 
+ } + }) + } +} + +func setup(t *testing.T, config map[string]interface{}, server *iamauthtest.Server) (*Validator, *httptest.Server, string) { + t.Helper() + + fakeAws := iamauthtest.NewTestServer(t, server) + + config["STSEndpoint"] = fakeAws.URL + "/sts" + config["STSRegion"] = "fake-region" + config["IAMEndpoint"] = fakeAws.URL + "/iam" + + method := &structs.ACLAuthMethod{ + Name: "test-method", + Type: "aws-iam", + Config: config, + } + nullLogger := hclog.NewNullLogger() + v, err := NewValidator(nullLogger, method) + require.NoError(t, err) + + // Generate the login token + tokenData, err := iamauth.GenerateLoginData(&iamauth.LoginInput{ + Creds: credentials.NewStaticCredentials("fake", "fake", ""), + IncludeIAMEntity: v.config.EnableIAMEntityDetails, + STSEndpoint: v.config.STSEndpoint, + STSRegion: v.config.STSRegion, + Logger: nullLogger, + ServerIDHeaderValue: v.config.ServerIDHeaderValue, + ServerIDHeaderName: v.config.ServerIDHeaderName, + GetEntityMethodHeader: v.config.GetEntityMethodHeader, + GetEntityURLHeader: v.config.GetEntityURLHeader, + GetEntityHeadersHeader: v.config.GetEntityHeadersHeader, + GetEntityBodyHeader: v.config.GetEntityBodyHeader, + }) + require.NoError(t, err) + + token, err := json.Marshal(tokenData) + require.NoError(t, err) + return v, fakeAws, string(token) +} + +func TestNewIdentity(t *testing.T) { + principals := []string{"arn:aws:sts::1234567890:assumed-role/my-role/some-session"} + cases := map[string]struct { + config map[string]interface{} + expVars map[string]string + expFilters []string + }{ + "entity details disabled": { + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": principals, + }, + expVars: map[string]string{ + "entity_name": "", + "entity_id": "", + "account_id": "", + }, + expFilters: []string{ + `entity_name == ""`, + `entity_id == ""`, + `account_id == ""`, + }, + }, + "entity details enabled": { + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": principals, + "EnableIAMEntityDetails": true, + }, + expVars: map[string]string{ + "entity_name": "", + "entity_id": "", + "account_id": "", + "entity_path": "", + }, + expFilters: []string{ + `entity_name == ""`, + `entity_id == ""`, + `account_id == ""`, + `entity_path == ""`, + }, + }, + "entity tags": { + config: map[string]interface{}{ + "BoundIAMPrincipalARNs": principals, + "EnableIAMEntityDetails": true, + "IAMEntityTags": []string{ + "test_tag", + "test_tag_2", + }, + }, + expVars: map[string]string{ + "entity_name": "", + "entity_id": "", + "account_id": "", + "entity_path": "", + "entity_tags.test_tag": "", + "entity_tags.test_tag_2": "", + }, + expFilters: []string{ + `entity_name == ""`, + `entity_id == ""`, + `account_id == ""`, + `entity_path == ""`, + `entity_tags.test_tag == ""`, + `entity_tags.test_tag_2 == ""`, + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + method := &structs.ACLAuthMethod{ + Name: "test-method", + Type: "aws-iam", + Config: c.config, + } + nullLogger := hclog.NewNullLogger() + v, err := NewValidator(nullLogger, method) + require.NoError(t, err) + + id := v.NewIdentity() + authmethod.RequireIdentityMatch(t, id, c.expVars, c.expFilters...) 
+ }) + } +} diff --git a/command/login/aws.go b/command/login/aws.go new file mode 100644 index 000000000..bae90c943 --- /dev/null +++ b/command/login/aws.go @@ -0,0 +1,148 @@ +package login + +import ( + "encoding/json" + "flag" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + + "github.com/hashicorp/consul/agent/consul/authmethod/awsauth" + "github.com/hashicorp/consul/internal/iamauth" + "github.com/hashicorp/go-hclog" +) + +type AWSLogin struct { + autoBearerToken bool + includeEntity bool + stsEndpoint string + region string + serverIDHeaderValue string + accessKeyId string + secretAccessKey string + sessionToken string +} + +func (a *AWSLogin) flags() *flag.FlagSet { + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.BoolVar(&a.autoBearerToken, "aws-auto-bearer-token", false, + "Construct a bearer token and login to the AWS IAM auth method. This requires AWS credentials. "+ + "AWS credentials are automatically discovered from standard sources supported by the Go SDK for "+ + "AWS. Alternatively, explicit credentials can be passed using the -aws-acesss-key-id and "+ + "-aws-secret-access-key flags. [aws-iam only]") + + fs.BoolVar(&a.includeEntity, "aws-include-entity", false, + "Include a signed request to get the IAM role or IAM user in the bearer token. [aws-iam only]") + + fs.StringVar(&a.stsEndpoint, "aws-sts-endpoint", "", + "URL for AWS STS API calls. [aws-iam only]") + + fs.StringVar(&a.region, "aws-region", "", + "Region for AWS API calls. If set, should match the region of -aws-sts-endpoint. "+ + "If not provided, the region will be discovered from standard sources, such as "+ + "the AWS_REGION environment variable. [aws-iam only]") + + fs.StringVar(&a.serverIDHeaderValue, "aws-server-id-header-value", "", + "If set, an X-Consul-IAM-ServerID header is included in signed AWS API request(s) that form "+ + "the bearer token. This value must match the server-side configured value for the auth method "+ + "in order to login. This is optional and helps protect against replay attacks. [aws-iam only]") + + fs.StringVar(&a.accessKeyId, "aws-access-key-id", "", + "AWS access key id to use. Requires -aws-secret-access-key if specified. [aws-iam only]") + + fs.StringVar(&a.secretAccessKey, "aws-secret-access-key", "", + "AWS secret access key to use. Requires -aws-access-key-id if specified. [aws-iam only]") + + fs.StringVar(&a.sessionToken, "aws-session-token", "", + "AWS session token to use. Requires -aws-access-key-id and -aws-secret-access-key if "+ + "specified. [aws-iam only]") + return fs +} + +// checkFlags validates flags for the aws-iam auth method. +func (a *AWSLogin) checkFlags() error { + if !a.autoBearerToken { + if a.includeEntity || a.stsEndpoint != "" || a.region != "" || a.serverIDHeaderValue != "" || + a.accessKeyId != "" || a.secretAccessKey != "" || a.sessionToken != "" { + return fmt.Errorf("Missing '-aws-auto-bearer-token' flag") + } + } + if a.accessKeyId != "" && a.secretAccessKey == "" { + return fmt.Errorf("Missing '-aws-secret-access-key' flag") + } + if a.secretAccessKey != "" && a.accessKeyId == "" { + return fmt.Errorf("Missing '-aws-access-key-id' flag") + } + if a.sessionToken != "" && (a.accessKeyId == "" || a.secretAccessKey == "") { + return fmt.Errorf("Missing '-aws-access-key-id' and '-aws-secret-access-key' flags") + } + return nil +} + +// createAWSBearerToken generates a bearer token string for the AWS IAM auth method. 
+// It will discover AWS credentials which are used to sign AWS API requests. +// Alternatively, static credentials can be passed as flags. +// +// The bearer token contains a signed sts:GetCallerIdentity request. +// If aws-include-entity is specified, a signed iam:GetRole or iam:GetUser request is +// also included. The AWS credentials are used to retrieve the current user's role +// or user name for the iam:GetRole or iam:GetUser request. +func (a *AWSLogin) createAWSBearerToken() (string, error) { + cfg := aws.Config{ + Endpoint: aws.String(a.stsEndpoint), + Region: aws.String(a.region), + // More detailed error message to help debug credential discovery. + CredentialsChainVerboseErrors: aws.Bool(true), + } + + if a.accessKeyId != "" { + // Use creds from flags. + cfg.Credentials = credentials.NewStaticCredentials( + a.accessKeyId, a.secretAccessKey, a.sessionToken, + ) + } + + // Session loads creds from standard sources (env vars, file, EC2 metadata, ...) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: cfg, + // Allow loading from config files by default: + // ~/.aws/config or AWS_CONFIG_FILE + // ~/.aws/credentials or AWS_SHARED_CREDENTIALS_FILE + SharedConfigState: session.SharedConfigEnable, + }) + if err != nil { + return "", err + } + if sess.Config.Region == nil || *sess.Config.Region == "" { + return "", fmt.Errorf("AWS region not found") + } + if sess.Config.Credentials == nil { + return "", fmt.Errorf("AWS credentials not found") + } + creds := sess.Config.Credentials + + loginData, err := iamauth.GenerateLoginData(&iamauth.LoginInput{ + Creds: creds, + IncludeIAMEntity: a.includeEntity, + STSEndpoint: a.stsEndpoint, + STSRegion: a.region, + Logger: hclog.New(nil), + ServerIDHeaderValue: a.serverIDHeaderValue, + ServerIDHeaderName: awsauth.IAMServerIDHeaderName, + GetEntityMethodHeader: awsauth.GetEntityMethodHeader, + GetEntityURLHeader: awsauth.GetEntityURLHeader, + GetEntityHeadersHeader: awsauth.GetEntityHeadersHeader, + GetEntityBodyHeader: awsauth.GetEntityBodyHeader, + }) + if err != nil { + return "", err + } + + loginDataJson, err := json.Marshal(loginData) + if err != nil { + return "", err + } + return string(loginDataJson), err +} diff --git a/command/login/login.go b/command/login/login.go index ded0958f9..a8f58556a 100644 --- a/command/login/login.go +++ b/command/login/login.go @@ -36,6 +36,8 @@ type cmd struct { tokenSinkFile string meta map[string]string + aws AWSLogin + enterpriseCmd } @@ -57,10 +59,10 @@ func (c *cmd) init() { c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta", "Metadata to set on the token, formatted as key=value. 
This flag "+ "may be specified multiple times to set multiple meta fields.") - c.initEnterpriseFlags() c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.aws.flags()) flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) flags.Merge(c.flags, c.http.MultiTenancyFlags()) @@ -89,21 +91,38 @@ func (c *cmd) Run(args []string) int { } func (c *cmd) bearerTokenLogin() int { - if c.bearerTokenFile == "" { - c.UI.Error(fmt.Sprintf("Missing required '-bearer-token-file' flag")) - return 1 - } - - data, err := ioutil.ReadFile(c.bearerTokenFile) - if err != nil { + if err := c.aws.checkFlags(); err != nil { c.UI.Error(err.Error()) return 1 } - c.bearerToken = strings.TrimSpace(string(data)) - if c.bearerToken == "" { - c.UI.Error(fmt.Sprintf("No bearer token found in %s", c.bearerTokenFile)) + if c.aws.autoBearerToken { + if c.bearerTokenFile != "" { + c.UI.Error("Cannot use '-bearer-token-file' flag with '-aws-auto-bearer-token'") + return 1 + } + + if token, err := c.aws.createAWSBearerToken(); err != nil { + c.UI.Error(fmt.Sprintf("Error with aws-iam auth method: %s", err)) + return 1 + } else { + c.bearerToken = token + } + } else if c.bearerTokenFile == "" { + c.UI.Error("Missing required '-bearer-token-file' flag") return 1 + } else { + data, err := ioutil.ReadFile(c.bearerTokenFile) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.bearerToken = strings.TrimSpace(string(data)) + + if c.bearerToken == "" { + c.UI.Error(fmt.Sprintf("No bearer token found in %s", c.bearerTokenFile)) + return 1 + } } // Ensure that we don't try to use a token when performing a login diff --git a/command/login/login_test.go b/command/login/login_test.go index 3d730548d..7eba6a403 100644 --- a/command/login/login_test.go +++ b/command/login/login_test.go @@ -1,6 +1,7 @@ package login import ( + "fmt" "io/ioutil" "os" "path/filepath" @@ -18,6 +19,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" + "github.com/hashicorp/consul/internal/iamauth/iamauthtest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" ) @@ -39,18 +41,7 @@ func TestLoginCommand(t *testing.T) { testDir := testutil.TempDir(t, "acl") - a := agent.NewTestAgent(t, ` - primary_datacenter = "dc1" - acl { - enabled = true - tokens { - initial_management = "root" - } - }`) - - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - + a := newTestAgent(t) client := a.Client() t.Run("method is required", func(t *testing.T) { @@ -102,6 +93,81 @@ func TestLoginCommand(t *testing.T) { require.Contains(t, ui.ErrorWriter.String(), "Missing required '-bearer-token-file' flag") }) + t.Run("bearer-token-file disallowed with aws-auto-bearer-token", func(t *testing.T) { + defer os.Remove(tokenSinkFile) + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-token=root", + "-method=test", + "-token-sink-file", tokenSinkFile, + "-bearer-token-file", "none.txt", + "-aws-auto-bearer-token", + } + + code := cmd.Run(args) + require.Equal(t, code, 1, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Cannot use '-bearer-token-file' flag with '-aws-auto-bearer-token'") + }) + + t.Run("aws flags require aws-auto-bearer-token", func(t *testing.T) { + defer os.Remove(tokenSinkFile) + + baseArgs := []string{ + "-http-addr=" + a.HTTPAddr(), + "-token=root", + "-method=test", + "-token-sink-file", tokenSinkFile, + } + 
+ for _, extraArgs := range [][]string{ + {"-aws-include-entity"}, + {"-aws-sts-endpoint", "some-endpoint"}, + {"-aws-region", "some-region"}, + {"-aws-server-id-header-value", "some-value"}, + {"-aws-access-key-id", "some-key"}, + {"-aws-secret-access-key", "some-secret"}, + {"-aws-session-token", "some-token"}, + } { + ui := cli.NewMockUi() + code := New(ui).Run(append(baseArgs, extraArgs...)) + require.Equal(t, code, 1, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing '-aws-auto-bearer-token' flag") + } + }) + + t.Run("aws-access-key-id and aws-secret-access-key require each other", func(t *testing.T) { + defer os.Remove(tokenSinkFile) + + baseArgs := []string{ + "-http-addr=" + a.HTTPAddr(), + "-token=root", + "-method=test", + "-token-sink-file", tokenSinkFile, + "-aws-auto-bearer-token", + } + + ui := cli.NewMockUi() + code := New(ui).Run(append(baseArgs, "-aws-access-key-id", "some-key")) + require.Equal(t, code, 1, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing '-aws-secret-access-key' flag") + + ui = cli.NewMockUi() + code = New(ui).Run(append(baseArgs, "-aws-secret-access-key", "some-key")) + require.Equal(t, code, 1, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing '-aws-access-key-id' flag") + + ui = cli.NewMockUi() + code = New(ui).Run(append(baseArgs, "-aws-session-token", "some-token")) + require.Equal(t, code, 1, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), + "Missing '-aws-access-key-id' and '-aws-secret-access-key' flags") + + }) + bearerTokenFile := filepath.Join(testDir, "bearer.token") t.Run("bearer-token-file is empty", func(t *testing.T) { @@ -236,18 +302,7 @@ func TestLoginCommand_k8s(t *testing.T) { testDir := testutil.TempDir(t, "acl") - a := agent.NewTestAgent(t, ` - primary_datacenter = "dc1" - acl { - enabled = true - tokens { - initial_management = "root" - } - }`) - - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - + a := newTestAgent(t) client := a.Client() tokenSinkFile := filepath.Join(testDir, "test.token") @@ -334,18 +389,7 @@ func TestLoginCommand_jwt(t *testing.T) { testDir := testutil.TempDir(t, "acl") - a := agent.NewTestAgent(t, ` - primary_datacenter = "dc1" - acl { - enabled = true - tokens { - initial_management = "root" - } - }`) - - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - + a := newTestAgent(t) client := a.Client() tokenSinkFile := filepath.Join(testDir, "test.token") @@ -470,3 +514,178 @@ func TestLoginCommand_jwt(t *testing.T) { }) } } + +func TestLoginCommand_aws_iam(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + // Formats an HIL template for a BindName, and the expected value for entity tags. 
+ // Input: string{"a", "b"}, []string{"1", "2"} + // Return: "${entity_tags.a}-${entity_tags.b}", "1-2" + entityTagsBind := func(keys, values []string) (string, string) { + parts := []string{} + for _, k := range keys { + parts = append(parts, fmt.Sprintf("${entity_tags.%s}", k)) + } + return strings.Join(parts, "-"), strings.Join(values, "-") + } + + f := iamauthtest.MakeFixture() + roleTagsBindName, roleTagsBindValue := entityTagsBind(f.RoleTagKeys(), f.RoleTagValues()) + userTagsBindName, userTagsBindValue := entityTagsBind(f.UserTagKeys(), f.UserTagValues()) + + cases := map[string]struct { + awsServer *iamauthtest.Server + cmdArgs []string + config map[string]interface{} + bindingRule *api.ACLBindingRule + expServiceIdentity *api.ACLServiceIdentity + }{ + "success - login with role": { + awsServer: f.ServerForRole, + cmdArgs: []string{"-aws-auto-bearer-token"}, + config: map[string]interface{}{ + // Test that an assumed-role arn is translated to the canonical role arn. + "BoundIAMPrincipalARNs": []string{f.CanonicalRoleARN}, + }, + bindingRule: &api.ACLBindingRule{ + BindType: api.BindingRuleBindTypeService, + BindName: "${entity_name}-${entity_id}-${account_id}", + Selector: fmt.Sprintf(`entity_name==%q and entity_id==%q and account_id==%q`, + f.RoleName, f.EntityID, f.AccountID), + }, + expServiceIdentity: &api.ACLServiceIdentity{ + ServiceName: fmt.Sprintf("%s-%s-%s", f.RoleName, strings.ToLower(f.EntityID), f.AccountID), + }, + }, + "success - login with role and entity details enabled": { + awsServer: f.ServerForRole, + cmdArgs: []string{"-aws-auto-bearer-token", "-aws-include-entity"}, + config: map[string]interface{}{ + // Test that we can login with full user path. + "BoundIAMPrincipalARNs": []string{f.RoleARN}, + "EnableIAMEntityDetails": true, + }, + bindingRule: &api.ACLBindingRule{ + BindType: api.BindingRuleBindTypeService, + // TODO: Path cannot be used as service name if it contains a '/' + BindName: "${entity_name}", + Selector: fmt.Sprintf(`entity_name==%q and entity_path==%q`, f.RoleName, f.RolePath), + }, + expServiceIdentity: &api.ACLServiceIdentity{ServiceName: f.RoleName}, + }, + "success - login with role and role tags": { + awsServer: f.ServerForRole, + cmdArgs: []string{"-aws-auto-bearer-token", "-aws-include-entity"}, + config: map[string]interface{}{ + // Test that we can login with a wildcard. + "BoundIAMPrincipalARNs": []string{f.RoleARNWildcard}, + "EnableIAMEntityDetails": true, + "IAMEntityTags": f.RoleTagKeys(), + }, + bindingRule: &api.ACLBindingRule{ + BindType: api.BindingRuleBindTypeService, + BindName: roleTagsBindName, + Selector: fmt.Sprintf(`entity_name==%q and entity_path==%q`, f.RoleName, f.RolePath), + }, + expServiceIdentity: &api.ACLServiceIdentity{ServiceName: roleTagsBindValue}, + }, + "success - login with user and user tags": { + awsServer: f.ServerForUser, + cmdArgs: []string{"-aws-auto-bearer-token", "-aws-include-entity"}, + config: map[string]interface{}{ + // Test that we can login with a wildcard. 
+ "BoundIAMPrincipalARNs": []string{f.UserARNWildcard}, + "EnableIAMEntityDetails": true, + "IAMEntityTags": f.UserTagKeys(), + }, + bindingRule: &api.ACLBindingRule{ + BindType: api.BindingRuleBindTypeService, + BindName: "${entity_name}-" + userTagsBindName, + Selector: fmt.Sprintf(`entity_name==%q and entity_path==%q`, f.UserName, f.UserPath), + }, + expServiceIdentity: &api.ACLServiceIdentity{ + ServiceName: fmt.Sprintf("%s-%s", f.UserName, userTagsBindValue), + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + a := newTestAgent(t) + client := a.Client() + + fakeAws := iamauthtest.NewTestServer(t, c.awsServer) + + c.config["STSEndpoint"] = fakeAws.URL + "/sts" + c.config["IAMEndpoint"] = fakeAws.URL + "/iam" + + _, _, err := client.ACL().AuthMethodCreate( + &api.ACLAuthMethod{ + Name: "iam-test", + Type: "aws-iam", + Config: c.config, + }, + &api.WriteOptions{Token: "root"}, + ) + require.NoError(t, err) + + c.bindingRule.AuthMethod = "iam-test" + _, _, err = client.ACL().BindingRuleCreate( + c.bindingRule, + &api.WriteOptions{Token: "root"}, + ) + require.NoError(t, err) + + testDir := testutil.TempDir(t, "acl") + tokenSinkFile := filepath.Join(testDir, "test.token") + t.Cleanup(func() { _ = os.Remove(tokenSinkFile) }) + + ui := cli.NewMockUi() + cmd := New(ui) + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-token=root", + "-method=iam-test", + "-token-sink-file", tokenSinkFile, + "-aws-sts-endpoint", fakeAws.URL + "/sts", + "-aws-region", "fake-region", + "-aws-access-key-id", "fake-key-id", + "-aws-secret-access-key", "fake-secret-key", + } + args = append(args, c.cmdArgs...) + code := cmd.Run(args) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + + raw, err := ioutil.ReadFile(tokenSinkFile) + require.NoError(t, err) + + token := strings.TrimSpace(string(raw)) + require.Len(t, token, 36, "must be a valid uid: %s", token) + + // Validate correct BindName was interpolated. + tokenRead, _, err := client.ACL().TokenReadSelf(&api.QueryOptions{Token: token}) + require.NoError(t, err) + require.Len(t, tokenRead.ServiceIdentities, 1) + require.Equal(t, c.expServiceIdentity, tokenRead.ServiceIdentities[0]) + + }) + } +} + +func newTestAgent(t *testing.T) *agent.TestAgent { + a := agent.NewTestAgent(t, ` + primary_datacenter = "dc1" + acl { + enabled = true + tokens { + initial_management = "root" + } + }`) + t.Cleanup(func() { _ = a.Shutdown() }) + testrpc.WaitForLeader(t, a.RPC, "dc1") + return a +} diff --git a/go.mod b/go.mod index 456623d40..e99a098ba 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/hashicorp/go-memdb v1.3.2 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-raftchunking v0.6.2 - github.com/hashicorp/go-retryablehttp v0.6.7 // indirect + github.com/hashicorp/go-retryablehttp v0.6.7 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.2 diff --git a/internal/iamauth/README.md b/internal/iamauth/README.md new file mode 100644 index 000000000..a9880a355 --- /dev/null +++ b/internal/iamauth/README.md @@ -0,0 +1,2 @@ +This is an internal package to house the AWS IAM auth method utilities for potential +future extraction from Consul. 
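A minimal usage sketch of the server-side API introduced in this patch (the bound
ARN and the loginToken value are placeholders, and the package is internal, so the
snippet only compiles from within the Consul module):

    package example

    import (
        "context"
        "fmt"

        "github.com/hashicorp/consul/internal/iamauth"
        "github.com/hashicorp/go-hclog"
    )

    func validate(ctx context.Context, loginToken string) error {
        conf := &iamauth.Config{
            // Placeholder principal; in practice this comes from the auth method config.
            BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/my-role"},
        }
        auth, err := iamauth.NewAuthenticator(conf, hclog.New(nil))
        if err != nil {
            return err // invalid Config (see Config.Validate)
        }
        // loginToken is the JSON bearer token built on the client side,
        // e.g. by GenerateLoginData (see token_test.go for example tokens).
        identity, err := auth.ValidateLogin(ctx, loginToken)
        if err != nil {
            return err // caller is not a trusted IAM principal
        }
        fmt.Println(identity.EntityName, identity.EntityId, identity.AccountId)
        return nil
    }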
diff --git a/internal/iamauth/auth.go b/internal/iamauth/auth.go new file mode 100644 index 000000000..aaf6bc657 --- /dev/null +++ b/internal/iamauth/auth.go @@ -0,0 +1,311 @@ +package iamauth + +import ( + "context" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strings" + "time" + + "github.com/hashicorp/consul/internal/iamauth/responses" + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" +) + +const ( + // Retry configuration + retryWaitMin = 500 * time.Millisecond + retryWaitMax = 30 * time.Second +) + +type Authenticator struct { + config *Config + logger hclog.Logger +} + +type IdentityDetails struct { + EntityName string + EntityId string + AccountId string + + EntityPath string + EntityTags map[string]string +} + +func NewAuthenticator(config *Config, logger hclog.Logger) (*Authenticator, error) { + if err := config.Validate(); err != nil { + return nil, err + } + return &Authenticator{ + config: config, + logger: logger, + }, nil +} + +// ValidateLogin determines if the identity in the loginToken is permitted to login. +// If so, it returns details about the identity. Otherwise, an error is returned. +func (a *Authenticator) ValidateLogin(ctx context.Context, loginToken string) (*IdentityDetails, error) { + token, err := NewBearerToken(loginToken, a.config) + if err != nil { + return nil, err + } + + req, err := token.GetCallerIdentityRequest() + if err != nil { + return nil, err + } + + if a.config.ServerIDHeaderValue != "" { + err := validateHeaderValue(req.Header, a.config.ServerIDHeaderName, a.config.ServerIDHeaderValue) + if err != nil { + return nil, err + } + } + + callerIdentity, err := a.submitCallerIdentityRequest(ctx, req) + if err != nil { + return nil, err + } + a.logger.Debug("iamauth login attempt", "arn", callerIdentity.Arn) + + entity, err := responses.ParseArn(callerIdentity.Arn) + if err != nil { + return nil, err + } + + identityDetails := &IdentityDetails{ + EntityName: entity.FriendlyName, + // This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID" + // (in the case of an IAM user). + EntityId: strings.Split(callerIdentity.UserId, ":")[0], + AccountId: callerIdentity.Account, + } + clientArn := entity.CanonicalArn() + + // Fetch the IAM Role or IAM User, if configured. + // This requires the token to contain a signed iam:GetRole or iam:GetUser request. + if a.config.EnableIAMEntityDetails { + iamReq, err := token.GetEntityRequest() + if err != nil { + return nil, err + } + + if a.config.ServerIDHeaderValue != "" { + err := validateHeaderValue(iamReq.Header, a.config.ServerIDHeaderName, a.config.ServerIDHeaderValue) + if err != nil { + return nil, err + } + } + + iamEntityDetails, err := a.submitGetIAMEntityRequest(ctx, iamReq, token.entityRequestType) + if err != nil { + return nil, err + } + + // Only the CallerIdentity response is a guarantee of the client's identity. + // The role/user details must have a unique id match to the CallerIdentity before use. 
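+		// For example, an assumed-role caller identity has a UserId of the form
+		// "<role-unique-id>:<session-name>"; the "<role-unique-id>" extracted above
+		// must equal the RoleId returned by iam:GetRole (and likewise the UserId
+		// returned by iam:GetUser for IAM users).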
+ if iamEntityDetails.EntityId() != identityDetails.EntityId { + return nil, fmt.Errorf("unique id mismatch in login token") + } + + // Use the full ARN with path from the Role/User details + clientArn = iamEntityDetails.EntityArn() + identityDetails.EntityPath = iamEntityDetails.EntityPath() + identityDetails.EntityTags = iamEntityDetails.EntityTags() + } + + if err := a.validateIdentity(clientArn); err != nil { + return nil, err + } + return identityDetails, nil +} + +// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1321-L1361 +func (a *Authenticator) validateIdentity(clientArn string) error { + if stringslice.Contains(a.config.BoundIAMPrincipalARNs, clientArn) { + // Matches one of BoundIAMPrincipalARNs, so it is trusted + return nil + } + if a.config.EnableIAMEntityDetails { + for _, principalArn := range a.config.BoundIAMPrincipalARNs { + if strings.HasSuffix(principalArn, "*") && lib.GlobbedStringsMatch(principalArn, clientArn) { + // Wildcard match, so it is trusted + return nil + } + } + } + return fmt.Errorf("IAM principal %s is not trusted", clientArn) +} + +func (a *Authenticator) submitCallerIdentityRequest(ctx context.Context, req *http.Request) (*responses.GetCallerIdentityResult, error) { + responseBody, err := a.submitRequest(ctx, req) + if err != nil { + return nil, err + } + callerIdentityResponse, err := parseGetCallerIdentityResponse(responseBody) + if err != nil { + return nil, fmt.Errorf("error parsing STS response") + } + + if n := len(callerIdentityResponse.GetCallerIdentityResult); n != 1 { + return nil, fmt.Errorf("received %d identities in STS response but expected 1", n) + } + return &callerIdentityResponse.GetCallerIdentityResult[0], nil +} + +func (a *Authenticator) submitGetIAMEntityRequest(ctx context.Context, req *http.Request, reqType string) (responses.IAMEntity, error) { + responseBody, err := a.submitRequest(ctx, req) + if err != nil { + return nil, err + } + iamResponse, err := parseGetIAMEntityResponse(responseBody, reqType) + if err != nil { + return nil, fmt.Errorf("error parsing IAM response: %s", err) + } + return iamResponse, nil + +} + +// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1636 +func (a *Authenticator) submitRequest(ctx context.Context, req *http.Request) (string, error) { + retryableReq, err := retryablehttp.FromRequest(req) + if err != nil { + return "", err + } + retryableReq = retryableReq.WithContext(ctx) + client := cleanhttp.DefaultClient() + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + retryingClient := &retryablehttp.Client{ + HTTPClient: client, + RetryWaitMin: retryWaitMin, + RetryWaitMax: retryWaitMax, + RetryMax: a.config.MaxRetries, + CheckRetry: retryablehttp.DefaultRetryPolicy, + Backoff: retryablehttp.DefaultBackoff, + } + + response, err := retryingClient.Do(retryableReq) + if err != nil { + return "", fmt.Errorf("error making request: %w", err) + } + if response != nil { + defer response.Body.Close() + } + // Validate that the response type is XML + if ct := response.Header.Get("Content-Type"); ct != "text/xml" { + return "", fmt.Errorf("response body is invalid") + } + + // we check for status code afterwards to also print out response body + responseBody, err := ioutil.ReadAll(response.Body) + if err != nil { + return "", err + } + if response.StatusCode != 200 { + return "", fmt.Errorf("received 
error code %d: %s", response.StatusCode, string(responseBody)) + } + return string(responseBody), nil + +} + +// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1625-L1634 +func parseGetCallerIdentityResponse(response string) (responses.GetCallerIdentityResponse, error) { + result := responses.GetCallerIdentityResponse{} + response = strings.TrimSpace(response) + if !strings.HasPrefix(response, " 2 { + return fmt.Errorf("found multiple SignedHeaders components") + } + signedHeaders := string(matches[1]) + return ensureHeaderIsSigned(signedHeaders, headerName) + } + // NOTE: If we support GET requests, then we need to parse the X-Amz-SignedHeaders + // argument out of the query string and search in there for the header value + return fmt.Errorf("missing Authorization header") +} + +func ensureHeaderIsSigned(signedHeaders, headerToSign string) error { + // Not doing a constant time compare here, the values aren't secret + for _, header := range strings.Split(signedHeaders, ";") { + if header == strings.ToLower(headerToSign) { + return nil + } + } + return fmt.Errorf("header wasn't signed") +} diff --git a/internal/iamauth/auth_test.go b/internal/iamauth/auth_test.go new file mode 100644 index 000000000..736c3203a --- /dev/null +++ b/internal/iamauth/auth_test.go @@ -0,0 +1,123 @@ +package iamauth + +import ( + "context" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/hashicorp/consul/internal/iamauth/iamauthtest" + "github.com/hashicorp/consul/internal/iamauth/responsestest" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestValidateLogin(t *testing.T) { + f := iamauthtest.MakeFixture() + + var ( + serverForRoleMismatchedIds = &iamauthtest.Server{ + GetCallerIdentityResponse: f.ServerForRole.GetCallerIdentityResponse, + GetRoleResponse: responsestest.MakeGetRoleResponse(f.RoleARN, "AAAAsomenonmatchingid"), + } + serverForUserMismatchedIds = &iamauthtest.Server{ + GetCallerIdentityResponse: f.ServerForUser.GetCallerIdentityResponse, + GetUserResponse: responsestest.MakeGetUserResponse(f.UserARN, "AAAAsomenonmatchingid"), + } + ) + + cases := map[string]struct { + config *Config + server *iamauthtest.Server + expIdent *IdentityDetails + expError string + }{ + "no bound principals": { + expError: "not trusted", + server: f.ServerForRole, + config: &Config{}, + }, + "no matching principal": { + expError: "not trusted", + server: f.ServerForUser, + config: &Config{ + BoundIAMPrincipalARNs: []string{ + "arn:aws:iam::1234567890:user/some-other-role", + "arn:aws:iam::1234567890:user/some-other-user", + }, + }, + }, + "mismatched server id header": { + expError: `expected "some-non-matching-value" but got "server.id.example.com"`, + server: f.ServerForRole, + config: &Config{ + BoundIAMPrincipalARNs: []string{f.CanonicalRoleARN}, + ServerIDHeaderValue: "some-non-matching-value", + ServerIDHeaderName: "X-Test-ServerID", + }, + }, + "role unique id mismatch": { + expError: "unique id mismatch in login token", + // The RoleId in the GetRole response must match the UserId in the GetCallerIdentity response + // during login. If not, the RoleId cannot be used. 
+ server: serverForRoleMismatchedIds, + config: &Config{ + BoundIAMPrincipalARNs: []string{f.RoleARN}, + EnableIAMEntityDetails: true, + }, + }, + "user unique id mismatch": { + expError: "unique id mismatch in login token", + server: serverForUserMismatchedIds, + config: &Config{ + BoundIAMPrincipalARNs: []string{f.UserARN}, + EnableIAMEntityDetails: true, + }, + }, + } + logger := hclog.New(nil) + for name, c := range cases { + t.Run(name, func(t *testing.T) { + fakeAws := iamauthtest.NewTestServer(t, c.server) + + c.config.STSEndpoint = fakeAws.URL + "/sts" + c.config.IAMEndpoint = fakeAws.URL + "/iam" + setTestHeaderNames(c.config) + + // This bypasses NewAuthenticator, which bypasses config.Validate(). + auth := &Authenticator{config: c.config, logger: logger} + + loginInput := &LoginInput{ + Creds: credentials.NewStaticCredentials("fake", "fake", ""), + IncludeIAMEntity: c.config.EnableIAMEntityDetails, + STSEndpoint: c.config.STSEndpoint, + STSRegion: "fake-region", + Logger: logger, + ServerIDHeaderValue: "server.id.example.com", + } + setLoginInputHeaderNames(loginInput) + loginData, err := GenerateLoginData(loginInput) + require.NoError(t, err) + loginBytes, err := json.Marshal(loginData) + require.NoError(t, err) + + ident, err := auth.ValidateLogin(context.Background(), string(loginBytes)) + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + require.Nil(t, ident) + } else { + require.NoError(t, err) + require.Equal(t, c.expIdent, ident) + } + }) + } +} + +func setLoginInputHeaderNames(in *LoginInput) { + in.ServerIDHeaderName = "X-Test-ServerID" + in.GetEntityMethodHeader = "X-Test-Method" + in.GetEntityURLHeader = "X-Test-URL" + in.GetEntityHeadersHeader = "X-Test-Headers" + in.GetEntityBodyHeader = "X-Test-Body" +} diff --git a/internal/iamauth/config.go b/internal/iamauth/config.go new file mode 100644 index 000000000..a8a6b61d5 --- /dev/null +++ b/internal/iamauth/config.go @@ -0,0 +1,69 @@ +package iamauth + +import ( + "fmt" + "strings" + + awsArn "github.com/aws/aws-sdk-go/aws/arn" +) + +type Config struct { + BoundIAMPrincipalARNs []string + EnableIAMEntityDetails bool + IAMEntityTags []string + ServerIDHeaderValue string + MaxRetries int + IAMEndpoint string + STSEndpoint string + STSRegion string + AllowedSTSHeaderValues []string + + // Customizable header names + ServerIDHeaderName string + GetEntityMethodHeader string + GetEntityURLHeader string + GetEntityHeadersHeader string + GetEntityBodyHeader string +} + +func (c *Config) Validate() error { + if len(c.BoundIAMPrincipalARNs) == 0 { + return fmt.Errorf("BoundIAMPrincipalARNs is required and must have at least 1 entry") + } + + for _, arn := range c.BoundIAMPrincipalARNs { + if n := strings.Count(arn, "*"); n > 0 { + if !c.EnableIAMEntityDetails { + return fmt.Errorf("Must set EnableIAMEntityDetails=true to use wildcards in BoundIAMPrincipalARNs") + } + if n != 1 || !strings.HasSuffix(arn, "*") { + return fmt.Errorf("Only one wildcard is allowed at the end of the bound IAM principal ARN") + } + } + + if parsed, err := awsArn.Parse(arn); err != nil { + return fmt.Errorf("Invalid principal ARN: %q", arn) + } else if parsed.Service != "iam" && parsed.Service != "sts" { + return fmt.Errorf("Invalid principal ARN: %q", arn) + } + } + + if len(c.IAMEntityTags) > 0 && !c.EnableIAMEntityDetails { + return fmt.Errorf("Must set EnableIAMEntityDetails=true to use IAMUserTags") + } + + // If server id header checking is enabled, we need the header name. 
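+	// For example, ServerIDHeaderValue = "consul.test.example.com" is rejected
+	// unless a ServerIDHeaderName such as "X-Test-ServerID" is also set (see the
+	// "server id header value requires service id header name" test case).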
+ if c.ServerIDHeaderValue != "" && c.ServerIDHeaderName == "" { + return fmt.Errorf("Must set ServerIDHeaderName to use a server ID value") + } + + if c.EnableIAMEntityDetails && (c.GetEntityBodyHeader == "" || + c.GetEntityHeadersHeader == "" || + c.GetEntityMethodHeader == "" || + c.GetEntityURLHeader == "") { + return fmt.Errorf("Must set all of GetEntityMethodHeader, GetEntityURLHeader, " + + "GetEntityHeadersHeader, and GetEntityBodyHeader when EnableIAMEntityDetails=true") + } + + return nil +} diff --git a/internal/iamauth/config_test.go b/internal/iamauth/config_test.go new file mode 100644 index 000000000..d23dc992a --- /dev/null +++ b/internal/iamauth/config_test.go @@ -0,0 +1,150 @@ +package iamauth + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConfigValidate(t *testing.T) { + principalArn := "arn:aws:iam::000000000000:role/my-role" + + cases := map[string]struct { + expError string + configs []Config + + includeHeaderNames bool + }{ + "bound iam principals are required": { + expError: "BoundIAMPrincipalARNs is required and must have at least 1 entry", + configs: []Config{ + {BoundIAMPrincipalARNs: nil}, + {BoundIAMPrincipalARNs: []string{}}, + }, + }, + "entity tags require entity details": { + expError: "Must set EnableIAMEntityDetails=true to use IAMUserTags", + configs: []Config{ + { + BoundIAMPrincipalARNs: []string{principalArn}, + EnableIAMEntityDetails: false, + IAMEntityTags: []string{"some-tag"}, + }, + }, + }, + "entity details require all entity header names": { + expError: "Must set all of GetEntityMethodHeader, GetEntityURLHeader, " + + "GetEntityHeadersHeader, and GetEntityBodyHeader when EnableIAMEntityDetails=true", + configs: []Config{ + { + BoundIAMPrincipalARNs: []string{principalArn}, + EnableIAMEntityDetails: true, + }, + { + BoundIAMPrincipalARNs: []string{principalArn}, + EnableIAMEntityDetails: true, + GetEntityBodyHeader: "X-Test-Header", + }, + { + BoundIAMPrincipalARNs: []string{principalArn}, + EnableIAMEntityDetails: true, + GetEntityHeadersHeader: "X-Test-Header", + }, + { + BoundIAMPrincipalARNs: []string{principalArn}, + EnableIAMEntityDetails: true, + GetEntityURLHeader: "X-Test-Header", + }, + { + BoundIAMPrincipalARNs: []string{principalArn}, + EnableIAMEntityDetails: true, + GetEntityMethodHeader: "X-Test-Header", + }, + }, + }, + "wildcard principals require entity details": { + expError: "Must set EnableIAMEntityDetails=true to use wildcards in BoundIAMPrincipalARNs", + configs: []Config{ + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*"}}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/path/*"}}, + }, + }, + "only one wildcard suffix is allowed": { + expError: "Only one wildcard is allowed at the end of the bound IAM principal ARN", + configs: []Config{ + { + BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/**"}, + EnableIAMEntityDetails: true, + }, + { + BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*/*"}, + EnableIAMEntityDetails: true, + }, + { + BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*/path"}, + EnableIAMEntityDetails: true, + }, + { + BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*/path/*"}, + EnableIAMEntityDetails: true, + }, + }, + }, + "invalid principal arns are disallowed": { + expError: fmt.Sprintf("Invalid principal ARN"), + configs: []Config{ + {BoundIAMPrincipalARNs: []string{""}}, + {BoundIAMPrincipalARNs: []string{" "}}, + {BoundIAMPrincipalARNs: []string{"*"}, 
EnableIAMEntityDetails: true}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam:role/my-role"}}, + }, + }, + "valid principal arns are allowed": { + includeHeaderNames: true, + configs: []Config{ + {BoundIAMPrincipalARNs: []string{"arn:aws:sts::000000000000:assumed-role/my-role/some-session-name"}}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:user/my-user"}}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/my-role"}}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:*"}, EnableIAMEntityDetails: true}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*"}, EnableIAMEntityDetails: true}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/path/*"}, EnableIAMEntityDetails: true}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:user/*"}, EnableIAMEntityDetails: true}, + {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:user/path/*"}, EnableIAMEntityDetails: true}, + }, + }, + "server id header value requires service id header name": { + expError: "Must set ServerIDHeaderName to use a server ID value", + configs: []Config{ + { + BoundIAMPrincipalARNs: []string{principalArn}, + ServerIDHeaderValue: "consul.test.example.com", + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + for _, conf := range c.configs { + if c.includeHeaderNames { + setTestHeaderNames(&conf) + } + err := conf.Validate() + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + } else { + require.NoError(t, err) + } + } + }) + } +} + +func setTestHeaderNames(conf *Config) { + conf.GetEntityMethodHeader = "X-Test-Method" + conf.GetEntityURLHeader = "X-Test-URL" + conf.GetEntityHeadersHeader = "X-Test-Headers" + conf.GetEntityBodyHeader = "X-Test-Body" +} diff --git a/internal/iamauth/iamauthtest/testing.go b/internal/iamauth/iamauthtest/testing.go new file mode 100644 index 000000000..4cb8519a9 --- /dev/null +++ b/internal/iamauth/iamauthtest/testing.go @@ -0,0 +1,187 @@ +package iamauthtest + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "sort" + "strings" + "testing" + + "github.com/hashicorp/consul/internal/iamauth/responses" + "github.com/hashicorp/consul/internal/iamauth/responsestest" +) + +// NewTestServer returns a fake AWS API server for local tests: +// It supports the following paths: +// /sts returns STS API responses +// /iam returns IAM API responses +func NewTestServer(t *testing.T, s *Server) *httptest.Server { + server := httptest.NewUnstartedServer(s) + t.Cleanup(server.Close) + server.Start() + return server +} + +// Server contains configuration for the fake AWS API server. 
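+// Requests under /sts are answered with GetCallerIdentityResponse; requests
+// under /iam are dispatched on the Action form field ("GetRole" or "GetUser")
+// to GetRoleResponse or GetUserResponse. All other requests return an error.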
+type Server struct { + GetCallerIdentityResponse responses.GetCallerIdentityResponse + GetRoleResponse responses.GetRoleResponse + GetUserResponse responses.GetUserResponse +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + writeError(w, http.StatusBadRequest, r) + return + } + + switch { + case strings.HasPrefix(r.URL.Path, "/sts"): + writeXML(w, s.GetCallerIdentityResponse) + case strings.HasPrefix(r.URL.Path, "/iam"): + if bodyBytes, err := io.ReadAll(r.Body); err == nil { + body := string(bodyBytes) + switch { + case strings.Contains(body, "Action=GetRole"): + writeXML(w, s.GetRoleResponse) + return + case strings.Contains(body, "Action=GetUser"): + writeXML(w, s.GetUserResponse) + return + } + } + writeError(w, http.StatusBadRequest, r) + default: + writeError(w, http.StatusNotFound, r) + } +} + +func writeXML(w http.ResponseWriter, val interface{}) { + str, err := xml.MarshalIndent(val, "", " ") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, err.Error()) + return + } + w.Header().Add("Content-Type", "text/xml") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(str)) +} + +func writeError(w http.ResponseWriter, code int, r *http.Request) { + w.WriteHeader(code) + msg := fmt.Sprintf("%s %s", r.Method, r.URL) + fmt.Fprintf(w, ` + + Fake AWS Server Error: %s + +`, msg) +} + +type Fixture struct { + AssumedRoleARN string + CanonicalRoleARN string + RoleARN string + RoleARNWildcard string + RoleName string + RolePath string + RoleTags map[string]string + + EntityID string + EntityIDWithSession string + AccountID string + + UserARN string + UserARNWildcard string + UserName string + UserPath string + UserTags map[string]string + + ServerForRole *Server + ServerForUser *Server +} + +func MakeFixture() Fixture { + f := Fixture{ + AssumedRoleARN: "arn:aws:sts::1234567890:assumed-role/my-role/some-session", + CanonicalRoleARN: "arn:aws:iam::1234567890:role/my-role", + RoleARN: "arn:aws:iam::1234567890:role/some/path/my-role", + RoleARNWildcard: "arn:aws:iam::1234567890:role/some/path/*", + RoleName: "my-role", + RolePath: "some/path", + RoleTags: map[string]string{ + "service-name": "my-service", + "env": "my-env", + }, + + EntityID: "AAAsomeuniqueid", + EntityIDWithSession: "AAAsomeuniqueid:some-session", + AccountID: "1234567890", + + UserARN: "arn:aws:iam::1234567890:user/my-user", + UserARNWildcard: "arn:aws:iam::1234567890:user/*", + UserName: "my-user", + UserPath: "", + UserTags: map[string]string{"user-group": "my-group"}, + } + + f.ServerForRole = &Server{ + GetCallerIdentityResponse: responsestest.MakeGetCallerIdentityResponse( + f.AssumedRoleARN, f.EntityIDWithSession, f.AccountID, + ), + GetRoleResponse: responsestest.MakeGetRoleResponse( + f.RoleARN, f.EntityID, toTags(f.RoleTags)..., + ), + } + + f.ServerForUser = &Server{ + GetCallerIdentityResponse: responsestest.MakeGetCallerIdentityResponse( + f.UserARN, f.EntityID, f.AccountID, + ), + GetUserResponse: responsestest.MakeGetUserResponse( + f.UserARN, f.EntityID, toTags(f.UserTags)..., + ), + } + + return f +} + +func (f *Fixture) RoleTagKeys() []string { return keys(f.RoleTags) } +func (f *Fixture) UserTagKeys() []string { return keys(f.UserTags) } +func (f *Fixture) RoleTagValues() []string { return values(f.RoleTags) } +func (f *Fixture) UserTagValues() []string { return values(f.UserTags) } + +// toTags converts the map to a slice of responses.Tag +func toTags(tags map[string]string) []responses.Tag { + result := []responses.Tag{} + 
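+	// (Map iteration order is not deterministic; keys() and values() below are
+	// used when a stable, key-sorted order is needed.)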
for k, v := range tags { + result = append(result, responses.Tag{ + Key: k, + Value: v, + }) + } + return result + +} + +// keys returns the keys in sorted order +func keys(tags map[string]string) []string { + result := []string{} + for k := range tags { + result = append(result, k) + } + sort.Strings(result) + return result +} + +// values returns values in tags, ordered by sorted keys +func values(tags map[string]string) []string { + result := []string{} + for _, k := range keys(tags) { // ensures sorted by key + result = append(result, tags[k]) + } + return result +} diff --git a/internal/iamauth/responses/arn.go b/internal/iamauth/responses/arn.go new file mode 100644 index 000000000..ea5e541d3 --- /dev/null +++ b/internal/iamauth/responses/arn.go @@ -0,0 +1,94 @@ +package responses + +import ( + "fmt" + "strings" +) + +// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1722-L1744 +type ParsedArn struct { + Partition string + AccountNumber string + Type string + Path string + FriendlyName string + SessionInfo string +} + +// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1482-L1530 +// However, instance profiles are not support in Consul. +func ParseArn(iamArn string) (*ParsedArn, error) { + // iamArn should look like one of the following: + // 1. arn:aws:iam:::/ + // 2. arn:aws:sts:::assumed-role// + // if we get something like 2, then we want to transform that back to what + // most people would expect, which is arn:aws:iam:::role/ + var entity ParsedArn + fullParts := strings.Split(iamArn, ":") + if len(fullParts) != 6 { + return nil, fmt.Errorf("unrecognized arn: contains %d colon-separated parts, expected 6", len(fullParts)) + } + if fullParts[0] != "arn" { + return nil, fmt.Errorf("unrecognized arn: does not begin with \"arn:\"") + } + // normally aws, but could be aws-cn or aws-us-gov + entity.Partition = fullParts[1] + if entity.Partition == "" { + return nil, fmt.Errorf("unrecognized arn: %q is missing the partition", iamArn) + } + if fullParts[2] != "iam" && fullParts[2] != "sts" { + return nil, fmt.Errorf("unrecognized service: %v, not one of iam or sts", fullParts[2]) + } + // fullParts[3] is the region, which doesn't matter for AWS IAM entities + entity.AccountNumber = fullParts[4] + if entity.AccountNumber == "" { + return nil, fmt.Errorf("unrecognized arn: %q is missing the account number", iamArn) + } + // fullParts[5] would now be something like user/ or assumed-role// + parts := strings.Split(fullParts[5], "/") + if len(parts) < 2 { + return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 2 slash-separated parts", fullParts[5]) + } + entity.Type = parts[0] + entity.Path = strings.Join(parts[1:len(parts)-1], "/") + entity.FriendlyName = parts[len(parts)-1] + // now, entity.FriendlyName should either be or + switch entity.Type { + case "assumed-role": + // Check for three parts for assumed role ARNs + if len(parts) < 3 { + return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 3 slash-separated parts", fullParts[5]) + } + // Assumed roles don't have paths and have a slightly different format + // parts[2] is + entity.Path = "" + entity.FriendlyName = parts[1] + entity.SessionInfo = parts[2] + case "user": + case "role": + // case "instance-profile": + default: + return nil, fmt.Errorf("unrecognized principal type: %q", entity.Type) + } + + if entity.FriendlyName == "" { + return nil, fmt.Errorf("unrecognized 
arn: %q is missing the resource name", iamArn) + } + + return &entity, nil +} + +// CanonicalArn returns the canonical ARN for referring to an IAM entity +func (p *ParsedArn) CanonicalArn() string { + entityType := p.Type + // canonicalize "assumed-role" into "role" + if entityType == "assumed-role" { + entityType = "role" + } + // Annoyingly, the assumed-role entity type doesn't have the Path of the role which was assumed + // So, we "canonicalize" it by just completely dropping the path. The other option would be to + // make an AWS API call to look up the role by FriendlyName, which introduces more complexity to + // code and test, and it also breaks backwards compatibility in an area where we would really want + // it + return fmt.Sprintf("arn:%s:iam::%s:%s/%s", p.Partition, p.AccountNumber, entityType, p.FriendlyName) +} diff --git a/internal/iamauth/responses/responses.go b/internal/iamauth/responses/responses.go new file mode 100644 index 000000000..e050b7734 --- /dev/null +++ b/internal/iamauth/responses/responses.go @@ -0,0 +1,92 @@ +package responses + +import "encoding/xml" + +type GetCallerIdentityResponse struct { + XMLName xml.Name `xml:"GetCallerIdentityResponse"` + GetCallerIdentityResult []GetCallerIdentityResult `xml:"GetCallerIdentityResult"` + ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"` +} + +type GetCallerIdentityResult struct { + Arn string `xml:"Arn"` + UserId string `xml:"UserId"` + Account string `xml:"Account"` +} + +type ResponseMetadata struct { + RequestId string `xml:"RequestId"` +} + +// IAMEntity is an interface for getting details from an IAM Role or User. +type IAMEntity interface { + EntityPath() string + EntityArn() string + EntityName() string + EntityId() string + EntityTags() map[string]string +} + +var _ IAMEntity = (*Role)(nil) +var _ IAMEntity = (*User)(nil) + +type GetRoleResponse struct { + XMLName xml.Name `xml:"GetRoleResponse"` + GetRoleResult []GetRoleResult `xml:"GetRoleResult"` + ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"` +} + +type GetRoleResult struct { + Role Role `xml:"Role"` +} + +type Role struct { + Arn string `xml:"Arn"` + Path string `xml:"Path"` + RoleId string `xml:"RoleId"` + RoleName string `xml:"RoleName"` + Tags []Tag `xml:"Tags"` +} + +func (r *Role) EntityPath() string { return r.Path } +func (r *Role) EntityArn() string { return r.Arn } +func (r *Role) EntityName() string { return r.RoleName } +func (r *Role) EntityId() string { return r.RoleId } +func (r *Role) EntityTags() map[string]string { return tagsToMap(r.Tags) } + +type GetUserResponse struct { + XMLName xml.Name `xml:"GetUserResponse"` + GetUserResult []GetUserResult `xml:"GetUserResult"` + ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"` +} + +type GetUserResult struct { + User User `xml:"User"` +} + +type User struct { + Arn string `xml:"Arn"` + Path string `xml:"Path"` + UserId string `xml:"UserId"` + UserName string `xml:"UserName"` + Tags []Tag `xml:"Tags"` +} + +func (u *User) EntityPath() string { return u.Path } +func (u *User) EntityArn() string { return u.Arn } +func (u *User) EntityName() string { return u.UserName } +func (u *User) EntityId() string { return u.UserId } +func (u *User) EntityTags() map[string]string { return tagsToMap(u.Tags) } + +type Tag struct { + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +func tagsToMap(tags []Tag) map[string]string { + result := map[string]string{} + for _, tag := range tags { + result[tag.Key] = tag.Value + } + return result +} diff --git 
a/internal/iamauth/responses/responses_test.go b/internal/iamauth/responses/responses_test.go new file mode 100644 index 000000000..df4a9c1e3 --- /dev/null +++ b/internal/iamauth/responses/responses_test.go @@ -0,0 +1,157 @@ +package responses + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseArn(t *testing.T) { + cases := map[string]struct { + arn string + expArn *ParsedArn + }{ + "assumed-role": { + arn: "arn:aws:sts::000000000000:assumed-role/my-role/session-name", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "assumed-role", + Path: "", + FriendlyName: "my-role", + SessionInfo: "session-name", + }, + }, + "role": { + arn: "arn:aws:iam::000000000000:role/my-role", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "role", + Path: "", + FriendlyName: "my-role", + SessionInfo: "", + }, + }, + "user": { + arn: "arn:aws:iam::000000000000:user/my-user", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "user", + Path: "", + FriendlyName: "my-user", + SessionInfo: "", + }, + }, + "role with path": { + arn: "arn:aws:iam::000000000000:role/path/my-role", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "role", + Path: "path", + FriendlyName: "my-role", + SessionInfo: "", + }, + }, + "role with path 2": { + arn: "arn:aws:iam::000000000000:role/path/to/my-role", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "role", + Path: "path/to", + FriendlyName: "my-role", + SessionInfo: "", + }, + }, + "role with path 3": { + arn: "arn:aws:iam::000000000000:role/some/path/to/my-role", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "role", + Path: "some/path/to", + FriendlyName: "my-role", + SessionInfo: "", + }, + }, + "user with path": { + arn: "arn:aws:iam::000000000000:user/path/my-user", + expArn: &ParsedArn{ + Partition: "aws", + AccountNumber: "000000000000", + Type: "user", + Path: "path", + FriendlyName: "my-user", + SessionInfo: "", + }, + }, + + // Invalid cases + "empty string": {arn: ""}, + "wildcard": {arn: "*"}, + "missing prefix": {arn: ":aws:sts::000000000000:assumed-role/my-role/session-name"}, + "missing partition": {arn: "arn::sts::000000000000:assumed-role/my-role/session-name"}, + "missing service": {arn: "arn:aws:::000000000000:assumed-role/my-role/session-name"}, + "missing separator": {arn: "arn:aws:sts:000000000000:assumed-role/my-role/session-name"}, + "missing account id": {arn: "arn:aws:sts:::assumed-role/my-role/session-name"}, + "missing resource": {arn: "arn:aws:sts::000000000000:"}, + "assumed-role missing parts": {arn: "arn:aws:sts::000000000000:assumed-role/my-role"}, + "role missing parts": {arn: "arn:aws:sts::000000000000:role"}, + "role missing parts 2": {arn: "arn:aws:sts::000000000000:role/"}, + "user missing parts": {arn: "arn:aws:sts::000000000000:user"}, + "user missing parts 2": {arn: "arn:aws:sts::000000000000:user/"}, + "unsupported service": {arn: "arn:aws:ecs:us-east-1:000000000000:task/my-task/00000000000000000000000000000000"}, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + parsed, err := ParseArn(c.arn) + if c.expArn != nil { + require.NoError(t, err) + require.Equal(t, c.expArn, parsed) + } else { + require.Error(t, err) + require.Nil(t, parsed) + } + }) + } +} + +func TestCanonicalArn(t *testing.T) { + cases := map[string]struct { + arn string + expArn string + }{ + "assumed-role arn": { + 
arn: "arn:aws:sts::000000000000:assumed-role/my-role/session-name", + expArn: "arn:aws:iam::000000000000:role/my-role", + }, + "role arn": { + arn: "arn:aws:iam::000000000000:role/my-role", + expArn: "arn:aws:iam::000000000000:role/my-role", + }, + "role arn with path": { + arn: "arn:aws:iam::000000000000:role/path/to/my-role", + expArn: "arn:aws:iam::000000000000:role/my-role", + }, + "user arn": { + arn: "arn:aws:iam::000000000000:user/my-user", + expArn: "arn:aws:iam::000000000000:user/my-user", + }, + "user arn with path": { + arn: "arn:aws:iam::000000000000:user/path/to/my-user", + expArn: "arn:aws:iam::000000000000:user/my-user", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + parsed, err := ParseArn(c.arn) + require.NoError(t, err) + require.Equal(t, c.expArn, parsed.CanonicalArn()) + }) + } +} diff --git a/internal/iamauth/responsestest/testing.go b/internal/iamauth/responsestest/testing.go new file mode 100644 index 000000000..683308677 --- /dev/null +++ b/internal/iamauth/responsestest/testing.go @@ -0,0 +1,81 @@ +package responsestest + +import ( + "strings" + + "github.com/hashicorp/consul/internal/iamauth/responses" +) + +func MakeGetCallerIdentityResponse(arn, userId, accountId string) responses.GetCallerIdentityResponse { + // Sanity check the UserId for unit tests. + parsed := parseArn(arn) + switch parsed.Type { + case "assumed-role": + if !strings.Contains(userId, ":") { + panic("UserId for assumed-role in GetCallerIdentity response must be ':'") + } + default: + if strings.Contains(userId, ":") { + panic("UserId in GetCallerIdentity must not contain ':'") + } + } + + return responses.GetCallerIdentityResponse{ + GetCallerIdentityResult: []responses.GetCallerIdentityResult{ + { + Arn: arn, + UserId: userId, + Account: accountId, + }, + }, + } +} + +func MakeGetRoleResponse(arn, id string, tags ...responses.Tag) responses.GetRoleResponse { + if strings.Contains(id, ":") { + panic("RoleId in GetRole response must not contain ':'") + } + parsed := parseArn(arn) + return responses.GetRoleResponse{ + GetRoleResult: []responses.GetRoleResult{ + { + Role: responses.Role{ + Arn: arn, + Path: parsed.Path, + RoleId: id, + RoleName: parsed.FriendlyName, + Tags: tags, + }, + }, + }, + } +} + +func MakeGetUserResponse(arn, id string, tags ...responses.Tag) responses.GetUserResponse { + if strings.Contains(id, ":") { + panic("UserId in GetUser resposne must not contain ':'") + } + parsed := parseArn(arn) + return responses.GetUserResponse{ + GetUserResult: []responses.GetUserResult{ + { + User: responses.User{ + Arn: arn, + Path: parsed.Path, + UserId: id, + UserName: parsed.FriendlyName, + Tags: tags, + }, + }, + }, + } +} + +func parseArn(arn string) *responses.ParsedArn { + parsed, err := responses.ParseArn(arn) + if err != nil { + // For testing, just fail immediately. 
+ panic(err) + } + return parsed +} diff --git a/internal/iamauth/token.go b/internal/iamauth/token.go new file mode 100644 index 000000000..91994b510 --- /dev/null +++ b/internal/iamauth/token.go @@ -0,0 +1,343 @@ +package iamauth + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/textproto" + "net/url" + "strings" + + "github.com/hashicorp/consul/lib/stringslice" +) + +const ( + amzHeaderPrefix = "X-Amz-" + defaultIAMEndpoint = "https://iam.amazonaws.com" + defaultSTSEndpoint = "https://sts.amazonaws.com" +) + +var defaultAllowedSTSRequestHeaders = []string{ + "X-Amz-Algorithm", + "X-Amz-Content-Sha256", + "X-Amz-Credential", + "X-Amz-Date", + "X-Amz-Security-Token", + "X-Amz-Signature", + "X-Amz-SignedHeaders", +} + +// BearerToken is a login "token" for an IAM auth method. It is a signed +// sts:GetCallerIdentity request in JSON format. Optionally, it can include a +// signed embedded iam:GetRole or iam:GetUser request in the headers. +type BearerToken struct { + config *Config + + getCallerIdentityMethod string + getCallerIdentityURL string + getCallerIdentityHeader http.Header + getCallerIdentityBody string + + getIAMEntityMethod string + getIAMEntityURL string + getIAMEntityHeader http.Header + getIAMEntityBody string + + entityRequestType string + parsedCallerIdentityURL *url.URL + parsedIAMEntityURL *url.URL +} + +var _ json.Unmarshaler = (*BearerToken)(nil) + +func NewBearerToken(loginToken string, config *Config) (*BearerToken, error) { + token := &BearerToken{config: config} + if err := json.Unmarshal([]byte(loginToken), &token); err != nil { + return nil, fmt.Errorf("invalid token: %s", err) + } + + if err := token.validate(); err != nil { + return nil, err + } + + if config.EnableIAMEntityDetails { + method, err := token.getHeader(token.config.GetEntityMethodHeader) + if err != nil { + return nil, err + } + + rawUrl, err := token.getHeader(token.config.GetEntityURLHeader) + if err != nil { + return nil, err + } + + headerJson, err := token.getHeader(token.config.GetEntityHeadersHeader) + if err != nil { + return nil, err + } + + var header http.Header + if err := json.Unmarshal([]byte(headerJson), &header); err != nil { + return nil, err + } + + body, err := token.getHeader(token.config.GetEntityBodyHeader) + if err != nil { + return nil, err + } + + parsedUrl, err := parseUrl(rawUrl) + if err != nil { + return nil, err + } + + token.getIAMEntityMethod = method + token.getIAMEntityBody = body + token.getIAMEntityURL = rawUrl + token.getIAMEntityHeader = header + token.parsedIAMEntityURL = parsedUrl + + reqType, err := token.validateIAMEntityBody() + if err != nil { + return nil, err + } + token.entityRequestType = reqType + } + return token, nil +} + +// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1178 +func (t *BearerToken) validate() error { + if t.getCallerIdentityMethod != "POST" { + return fmt.Errorf("iam_http_request_method must be POST") + } + if err := t.validateGetCallerIdentityBody(); err != nil { + return err + } + if err := t.validateAllowedSTSHeaderValues(); err != nil { + return err + } + return nil +} + +// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1439 +func (t *BearerToken) validateGetCallerIdentityBody() error { + allowedValues := url.Values{ + "Action": []string{"GetCallerIdentity"}, + // Will assume for now that future versions don't change + // the semantics + "Version": nil, // 
any value is allowed + } + if _, err := parseRequestBody(t.getCallerIdentityBody, allowedValues); err != nil { + return fmt.Errorf("iam_request_body error: %s", err) + } + + return nil +} + +func (t *BearerToken) validateIAMEntityBody() (string, error) { + allowedValues := url.Values{ + "Action": []string{"GetRole", "GetUser"}, + "RoleName": nil, // any value is allowed + "UserName": nil, + "Version": nil, + } + body, err := parseRequestBody(t.getIAMEntityBody, allowedValues) + if err != nil { + return "", fmt.Errorf("iam_request_headers[%s] error: %s", t.config.GetEntityBodyHeader, err) + } + + // Disallow GetRole+UserName and GetUser+RoleName. + action := body["Action"][0] + _, hasRoleName := body["RoleName"] + _, hasUserName := body["UserName"] + if action == "GetUser" && hasUserName && !hasRoleName { + return action, nil + } else if action == "GetRole" && hasRoleName && !hasUserName { + return action, nil + } + return "", fmt.Errorf("iam_request_headers[%q] error: invalid request body %q", t.config.GetEntityBodyHeader, t.getIAMEntityBody) +} + +// parseRequestBody parses the AWS STS or IAM request body, such as 'Action=GetRole&RoleName=my-role'. +// It returns the parsed values, or an error if there are unexpected fields based on allowedValues. +// +// A key-value pair in the body is allowed if: +// - It is a single value (i.e. no bodies like 'Action=1&Action=2') +// - allowedValues[key] is an empty slice or nil (any value is allowed for the key) +// - allowedValues[key] is non-empty and contains the exact value +// This always requires an 'Action' field is present and non-empty. +func parseRequestBody(body string, allowedValues url.Values) (url.Values, error) { + qs, err := url.ParseQuery(body) + if err != nil { + return nil, err + } + + // Action field is always required. + if _, ok := qs["Action"]; !ok || len(qs["Action"]) == 0 || qs["Action"][0] == "" { + return nil, fmt.Errorf(`missing field "Action"`) + } + + // Ensure the body does not have extra fields and each + // field in the body matches the allowed values. + for k, v := range qs { + exp, ok := allowedValues[k] + if k != "Action" && !ok { + return nil, fmt.Errorf("unexpected field %q", k) + } + + if len(exp) == 0 { + // empty indicates any value is okay + continue + } else if len(v) != 1 || !stringslice.Contains(exp, v[0]) { + return nil, fmt.Errorf("unexpected value %s=%v", k, v) + } + } + + return qs, nil +} + +// https://github.com/hashicorp/vault/blob/861454e0ed1390d67ddaf1a53c1798e5e291728c/builtin/credential/aws/path_config_client.go#L349 +func (t *BearerToken) validateAllowedSTSHeaderValues() error { + for k := range t.getCallerIdentityHeader { + h := textproto.CanonicalMIMEHeaderKey(k) + if strings.HasPrefix(h, amzHeaderPrefix) && + !stringslice.Contains(defaultAllowedSTSRequestHeaders, h) && + !stringslice.Contains(t.config.AllowedSTSHeaderValues, h) { + return fmt.Errorf("invalid request header: %s", h) + } + } + return nil +} + +// UnmarshalJSON unmarshals the bearer token details which contains an HTTP +// request (a signed sts:GetCallerIdentity request). 
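+// The token is a JSON object whose URL, headers, and body fields are
+// base64-encoded, e.g.:
+//
+//	{
+//	  "iam_http_request_method": "POST",
+//	  "iam_request_url":         "<base64 of the request URL>",
+//	  "iam_request_headers":     "<base64 of a JSON-encoded http.Header>",
+//	  "iam_request_body":        "<base64 of the form-encoded body>"
+//	}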
+func (t *BearerToken) UnmarshalJSON(data []byte) error { + var rawData struct { + Method string `json:"iam_http_request_method"` + UrlBase64 string `json:"iam_request_url"` + HeadersBase64 string `json:"iam_request_headers"` + BodyBase64 string `json:"iam_request_body"` + } + + if err := json.Unmarshal(data, &rawData); err != nil { + return err + } + + rawUrl, err := base64.StdEncoding.DecodeString(rawData.UrlBase64) + if err != nil { + return err + } + + headersJson, err := base64.StdEncoding.DecodeString(rawData.HeadersBase64) + if err != nil { + return err + } + + var headers http.Header + // This is a JSON-string in JSON + if err := json.Unmarshal(headersJson, &headers); err != nil { + return err + } + + body, err := base64.StdEncoding.DecodeString(rawData.BodyBase64) + if err != nil { + return err + } + + t.getCallerIdentityMethod = rawData.Method + t.getCallerIdentityBody = string(body) + t.getCallerIdentityHeader = headers + t.getCallerIdentityURL = string(rawUrl) + + parsedUrl, err := parseUrl(t.getCallerIdentityURL) + if err != nil { + return err + } + t.parsedCallerIdentityURL = parsedUrl + return nil +} + +func parseUrl(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + // url.Parse doesn't error on empty string + if u == nil || u.Scheme == "" || u.Host == "" || u.Path == "" { + return nil, fmt.Errorf("url is invalid: %q", s) + } + return u, nil +} + +// GetCallerIdentityRequest returns the sts:GetCallerIdentity request decoded +// from the bearer token. +func (t *BearerToken) GetCallerIdentityRequest() (*http.Request, error) { + // NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy + // The protection against this is that this method will only call the endpoint specified in the + // client config (defaulting to sts.amazonaws.com), so it would require an admin to override + // the endpoint to talk to alternate web addresses + endpoint := defaultSTSEndpoint + if t.config.STSEndpoint != "" { + endpoint = t.config.STSEndpoint + } + + return buildHttpRequest( + t.getCallerIdentityMethod, + endpoint, + t.parsedCallerIdentityURL, + t.getCallerIdentityBody, + t.getCallerIdentityHeader, + ) +} + +// GetEntityRequest returns the iam:GetUser or iam:GetRole request from the request details, +// if present, embedded in the headers of the sts:GetCallerIdentity request. +func (t *BearerToken) GetEntityRequest() (*http.Request, error) { + endpoint := defaultIAMEndpoint + if t.config.IAMEndpoint != "" { + endpoint = t.config.IAMEndpoint + } + + return buildHttpRequest( + t.getIAMEntityMethod, + endpoint, + t.parsedIAMEntityURL, + t.getIAMEntityBody, + t.getIAMEntityHeader, + ) +} + +// getHeader returns the header from s.GetCallerIdentityHeader, or an error if +// the header is not found or is not a single value. +func (t *BearerToken) getHeader(name string) (string, error) { + values := t.getCallerIdentityHeader.Values(name) + if len(values) == 0 { + return "", fmt.Errorf("missing header %q", name) + } + if len(values) != 1 { + return "", fmt.Errorf("invalid value for header %q (expected 1 item)", name) + } + return values[0], nil +} + +// buildHttpRequest returns an HTTP request from the given details. +// This supports sending to a custom endpoint, but always preserves the +// Host header and URI path, which are signed and cannot be modified. +// There's a deeper explanation of this in the Vault source code. 
+// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1569 +func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*http.Request, error) { + targetUrl := fmt.Sprintf("%s%s", endpoint, parsedUrl.RequestURI()) + request, err := http.NewRequest(method, targetUrl, strings.NewReader(body)) + if err != nil { + return nil, err + } + request.Host = parsedUrl.Host + for k, vals := range headers { + for _, val := range vals { + request.Header.Add(k, val) + } + } + return request, nil +} diff --git a/internal/iamauth/token_test.go b/internal/iamauth/token_test.go new file mode 100644 index 000000000..4de7ba715 --- /dev/null +++ b/internal/iamauth/token_test.go @@ -0,0 +1,364 @@ +package iamauth + +import ( + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewBearerToken(t *testing.T) { + cases := map[string]struct { + tokenStr string + config Config + expToken BearerToken + expError string + }{ + "valid token": { + tokenStr: validBearerTokenJson, + expToken: validBearerTokenParsed, + }, + "valid token with role": { + tokenStr: validBearerTokenWithRoleJson, + config: Config{ + EnableIAMEntityDetails: true, + GetEntityMethodHeader: "X-Consul-IAM-GetEntity-Method", + GetEntityURLHeader: "X-Consul-IAM-GetEntity-URL", + GetEntityHeadersHeader: "X-Consul-IAM-GetEntity-Headers", + GetEntityBodyHeader: "X-Consul-IAM-GetEntity-Body", + }, + expToken: validBearerTokenWithRoleParsed, + }, + + "empty json": { + tokenStr: `{}`, + expError: "unexpected end of JSON input", + }, + "missing iam_request_method field": { + tokenStr: tokenJsonMissingMethodField, + expError: "iam_http_request_method must be POST", + }, + "missing iam_request_url field": { + tokenStr: tokenJsonMissingUrlField, + expError: "url is invalid", + }, + "missing iam_request_headers field": { + tokenStr: tokenJsonMissingHeadersField, + expError: "unexpected end of JSON input", + }, + "missing iam_request_body field": { + tokenStr: tokenJsonMissingBodyField, + expError: "iam_request_body error", + }, + "invalid json": { + tokenStr: `{`, + expError: "unexpected end of JSON input", + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + token, err := NewBearerToken(c.tokenStr, &c.config) + t.Logf("token = %+v", token) + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + require.Nil(t, token) + } else { + require.NoError(t, err) + c.expToken.config = &c.config + require.Equal(t, &c.expToken, token) + } + }) + } +} + +func TestParseRequestBody(t *testing.T) { + cases := map[string]struct { + body string + allowedValues url.Values + expValues url.Values + expError string + }{ + "one allowed field": { + body: "Action=GetCallerIdentity&Version=1234", + allowedValues: url.Values{"Version": []string{"1234"}}, + expValues: url.Values{ + "Action": []string{"GetCallerIdentity"}, + "Version": []string{"1234"}, + }, + }, + "many allowed fields": { + body: "Action=GetRole&RoleName=my-role&Version=1234", + allowedValues: url.Values{ + "Action": []string{"GetUser", "GetRole"}, + "UserName": nil, + "RoleName": nil, + "Version": nil, + }, + expValues: url.Values{ + "Action": []string{"GetRole"}, + "RoleName": []string{"my-role"}, + "Version": []string{"1234"}, + }, + }, + "action only": { + body: "Action=GetRole", + allowedValues: nil, + expValues: url.Values{"Action": []string{"GetRole"}}, + }, + + "empty body": { + expValues: url.Values{}, + 
expError: `missing field "Action"`, + }, + "disallowed field": { + body: "Action=GetRole&Version=1234&Extra=Abc", + allowedValues: url.Values{"Action": nil, "Version": nil}, + expError: `unexpected field "Extra"`, + }, + "mismatched action": { + body: "Action=GetRole", + allowedValues: url.Values{"Action": []string{"GetUser"}}, + expError: `unexpected value Action=[GetRole]`, + }, + "mismatched field": { + body: "Action=GetRole&Extra=1234", + allowedValues: url.Values{"Action": nil, "Extra": []string{"abc"}}, + expError: `unexpected value Extra=[1234]`, + }, + "multi-valued field": { + body: "Action=GetRole&Action=GetUser", + allowedValues: url.Values{"Action": []string{"GetRole", "GetUser"}}, + // only one value is allowed. + expError: `unexpected value Action=[GetRole GetUser]`, + }, + "empty action": { + body: "Action=", + allowedValues: nil, + expError: `missing field "Action"`, + }, + "missing action": { + body: "Version=1234", + allowedValues: url.Values{"Action": []string{"GetRole"}}, + expError: `missing field "Action"`, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + values, err := parseRequestBody(c.body, c.allowedValues) + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + require.Nil(t, values) + } else { + require.NoError(t, err) + require.Equal(t, c.expValues, values) + } + }) + } +} + +func TestValidateGetCallerIdentityBody(t *testing.T) { + cases := map[string]struct { + body string + expError string + }{ + "valid": {"Action=GetCallerIdentity&Version=1234", ""}, + "valid 2": {"Action=GetCallerIdentity", ""}, + "empty action": { + "Action=", + `iam_request_body error: missing field "Action"`, + }, + "invalid action": { + "Action=GetRole", + `iam_request_body error: unexpected value Action=[GetRole]`, + }, + "missing action": { + "Version=1234", + `iam_request_body error: missing field "Action"`, + }, + "empty": { + "", + `iam_request_body error: missing field "Action"`, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + token := &BearerToken{getCallerIdentityBody: c.body} + err := token.validateGetCallerIdentityBody() + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateIAMEntityBody(t *testing.T) { + cases := map[string]struct { + body string + expReqType string + expError string + }{ + "valid role": { + body: "Action=GetRole&RoleName=my-role&Version=1234", + expReqType: "GetRole", + }, + "valid role without version": { + body: "Action=GetRole&RoleName=my-role", + expReqType: "GetRole", + }, + "valid user": { + body: "Action=GetUser&UserName=my-role&Version=1234", + expReqType: "GetUser", + }, + "valid user without version": { + body: "Action=GetUser&UserName=my-role", + expReqType: "GetUser", + }, + + "invalid action": { + body: "Action=GetCallerIdentity", + expError: `unexpected value Action=[GetCallerIdentity]`, + }, + "role missing action": { + body: "RoleName=my-role&Version=1234", + expError: `missing field "Action"`, + }, + "user missing action": { + body: "UserName=my-role&Version=1234", + expError: `missing field "Action"`, + }, + "empty": { + body: "", + expError: `missing field "Action"`, + }, + "empty action": { + body: "Action=", + expError: `missing field "Action"`, + }, + "role with user name": { + body: "Action=GetRole&UserName=my-role&Version=1234", + expError: `invalid request body`, + }, + "user with role name": { + body: 
"Action=GetUser&RoleName=my-role&Version=1234", + expError: `invalid request body`, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + token := &BearerToken{ + config: &Config{}, + getIAMEntityBody: c.body, + } + reqType, err := token.validateIAMEntityBody() + if c.expError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.expError) + require.Equal(t, "", reqType) + } else { + require.NoError(t, err) + require.Equal(t, c.expReqType, reqType) + } + }) + } +} + +var ( + validBearerTokenJson = `{ + "iam_http_request_method":"POST", + "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", + "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==", + "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" +}` + + validBearerTokenParsed = BearerToken{ + getCallerIdentityMethod: "POST", + getCallerIdentityURL: "https://sts.amazonaws.com/", + getCallerIdentityHeader: http.Header{ + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=fake/20220322/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-security-token, Signature=efc320b972d07b38b65eb24256805e03149da586d804f8c6364ce98debe080b1"}, + "Content-Length": []string{"43"}, + "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, + "User-Agent": []string{"aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"}, + "X-Amz-Date": []string{"20220322T211103Z"}, + "X-Amz-Security-Token": []string{"fake"}, + }, + getCallerIdentityBody: "Action=GetCallerIdentity&Version=2011-06-15", + parsedCallerIdentityURL: &url.URL{ + Scheme: "https", + Host: "sts.amazonaws.com", + Path: "/", + }, + } + + validBearerTokenWithRoleJson = 
`{"iam_http_request_method":"POST","iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==","iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLWtleS1pZC8yMDIyMDMyMi9mYWtlLXJlZ2lvbi9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1jb25zdWwtaWFtLWdldGVudGl0eS1ib2R5O3gtY29uc3VsLWlhbS1nZXRlbnRpdHktaGVhZGVyczt4LWNvbnN1bC1pYW0tZ2V0ZW50aXR5LW1ldGhvZDt4LWNvbnN1bC1pYW0tZ2V0ZW50aXR5LXVybCwgU2lnbmF0dXJlPTU2MWFjMzFiNWFkMDFjMTI0YzU0YzE2OGY3NmVhNmJmZDY0NWI4ZWM1MzQ1ZjgzNTc3MjljOWFhMGI0NzEzMzciXSwiQ29udGVudC1MZW5ndGgiOlsiNDMiXSwiQ29udGVudC1UeXBlIjpbImFwcGxpY2F0aW9uL3gtd3d3LWZvcm0tdXJsZW5jb2RlZDsgY2hhcnNldD11dGYtOCJdLCJVc2VyLUFnZW50IjpbImF3cy1zZGstZ28vMS40Mi4zNCAoZ28xLjE3LjU7IGRhcndpbjsgYW1kNjQpIl0sIlgtQW16LURhdGUiOlsiMjAyMjAzMjJUMjI1NzQyWiJdLCJYLUNvbnN1bC1JYW0tR2V0ZW50aXR5LUJvZHkiOlsiQWN0aW9uPUdldFJvbGVcdTAwMjZSb2xlTmFtZT1teS1yb2xlXHUwMDI2VmVyc2lvbj0yMDEwLTA1LTA4Il0sIlgtQ29uc3VsLUlhbS1HZXRlbnRpdHktSGVhZGVycyI6WyJ7XCJBdXRob3JpemF0aW9uXCI6W1wiQVdTNC1ITUFDLVNIQTI1NiBDcmVkZW50aWFsPWZha2Uta2V5LWlkLzIwMjIwMzIyL3VzLWVhc3QtMS9pYW0vYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGUsIFNpZ25hdHVyZT1hYTJhMTlkMGEzMDVkNzRiYmQwMDk3NzZiY2E4ODBlNTNjZmE5OTFlNDgzZTQwMzk0NzE4MWE0MWNjNDgyOTQwXCJdLFwiQ29udGVudC1MZW5ndGhcIjpbXCI1MFwiXSxcIkNvbnRlbnQtVHlwZVwiOltcImFwcGxpY2F0aW9uL3gtd3d3LWZvcm0tdXJsZW5jb2RlZDsgY2hhcnNldD11dGYtOFwiXSxcIlVzZXItQWdlbnRcIjpbXCJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KVwiXSxcIlgtQW16LURhdGVcIjpbXCIyMDIyMDMyMlQyMjU3NDJaXCJdfSJdLCJYLUNvbnN1bC1JYW0tR2V0ZW50aXR5LU1ldGhvZCI6WyJQT1NUIl0sIlgtQ29uc3VsLUlhbS1HZXRlbnRpdHktVXJsIjpbImh0dHBzOi8vaWFtLmFtYXpvbmF3cy5jb20vIl19","iam_request_url":"aHR0cDovLzEyNy4wLjAuMTo2MzY5Ni9zdHMv"}` + + validBearerTokenWithRoleParsed = BearerToken{ + getCallerIdentityMethod: "POST", + getCallerIdentityURL: "http://127.0.0.1:63696/sts/", + getCallerIdentityHeader: http.Header{ + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=fake-key-id/20220322/fake-region/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-consul-iam-getentity-body;x-consul-iam-getentity-headers;x-consul-iam-getentity-method;x-consul-iam-getentity-url, Signature=561ac31b5ad01c124c54c168f76ea6bfd645b8ec5345f8357729c9aa0b471337"}, + "Content-Length": []string{"43"}, + "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, + "User-Agent": []string{"aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"}, + "X-Amz-Date": []string{"20220322T225742Z"}, + "X-Consul-Iam-Getentity-Body": []string{"Action=GetRole&RoleName=my-role&Version=2010-05-08"}, + "X-Consul-Iam-Getentity-Headers": []string{`{"Authorization":["AWS4-HMAC-SHA256 Credential=fake-key-id/20220322/us-east-1/iam/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aa2a19d0a305d74bbd009776bca880e53cfa991e483e403947181a41cc482940"],"Content-Length":["50"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"],"X-Amz-Date":["20220322T225742Z"]}`}, + "X-Consul-Iam-Getentity-Method": []string{"POST"}, + "X-Consul-Iam-Getentity-Url": []string{"https://iam.amazonaws.com/"}, + }, + getCallerIdentityBody: "Action=GetCallerIdentity&Version=2011-06-15", + + // Fields parsed from headers above + getIAMEntityMethod: "POST", + getIAMEntityURL: "https://iam.amazonaws.com/", + getIAMEntityHeader: http.Header{ + "Authorization": 
[]string{"AWS4-HMAC-SHA256 Credential=fake-key-id/20220322/us-east-1/iam/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aa2a19d0a305d74bbd009776bca880e53cfa991e483e403947181a41cc482940"}, + "Content-Length": []string{"50"}, + "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, + "User-Agent": []string{"aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"}, + "X-Amz-Date": []string{"20220322T225742Z"}, + }, + getIAMEntityBody: "Action=GetRole&RoleName=my-role&Version=2010-05-08", + entityRequestType: "GetRole", + + parsedCallerIdentityURL: &url.URL{ + Scheme: "http", + Host: "127.0.0.1:63696", + Path: "/sts/", + }, + parsedIAMEntityURL: &url.URL{ + Scheme: "https", + Host: "iam.amazonaws.com", + Path: "/", + }, + } + + tokenJsonMissingMethodField = `{ + "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", + "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==", + "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" +}` + + tokenJsonMissingBodyField = `{ + "iam_http_request_method":"POST", + "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==", + "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" +}` + + tokenJsonMissingHeadersField = `{ + "iam_http_request_method":"POST", + "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", + "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" +}` + + tokenJsonMissingUrlField = `{ + "iam_http_request_method":"POST", + "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", + "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==" +}` +) diff --git a/internal/iamauth/util.go b/internal/iamauth/util.go new file mode 100644 index 000000000..bfd5f22d7 --- 
/dev/null +++ b/internal/iamauth/util.go @@ -0,0 +1,158 @@ +package iamauth + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/consul/internal/iamauth/responses" + "github.com/hashicorp/go-hclog" +) + +type LoginInput struct { + Creds *credentials.Credentials + IncludeIAMEntity bool + STSEndpoint string + STSRegion string + + Logger hclog.Logger + + ServerIDHeaderValue string + // Customizable header names + ServerIDHeaderName string + GetEntityMethodHeader string + GetEntityURLHeader string + GetEntityHeadersHeader string + GetEntityBodyHeader string +} + +// GenerateLoginData populates the necessary data to send for the bearer token. +// https://github.com/hashicorp/go-secure-stdlib/blob/main/awsutil/generate_credentials.go#L232-L301 +func GenerateLoginData(in *LoginInput) (map[string]interface{}, error) { + cfg := aws.Config{ + Credentials: in.Creds, + Region: aws.String(in.STSRegion), + } + if in.STSEndpoint != "" { + cfg.Endpoint = aws.String(in.STSEndpoint) + } else { + cfg.EndpointResolver = endpoints.ResolverFunc(stsSigningResolver) + } + + stsSession, err := session.NewSessionWithOptions(session.Options{Config: cfg}) + if err != nil { + return nil, err + } + + svc := sts.New(stsSession) + stsRequest, _ := svc.GetCallerIdentityRequest(nil) + + // Include the iam:GetRole or iam:GetUser request in headers. + if in.IncludeIAMEntity { + entityRequest, err := formatSignedEntityRequest(svc, in) + if err != nil { + return nil, err + } + + headersJson, err := json.Marshal(entityRequest.HTTPRequest.Header) + if err != nil { + return nil, err + } + requestBody, err := ioutil.ReadAll(entityRequest.HTTPRequest.Body) + if err != nil { + return nil, err + } + + stsRequest.HTTPRequest.Header.Add(in.GetEntityMethodHeader, entityRequest.HTTPRequest.Method) + stsRequest.HTTPRequest.Header.Add(in.GetEntityURLHeader, entityRequest.HTTPRequest.URL.String()) + stsRequest.HTTPRequest.Header.Add(in.GetEntityHeadersHeader, string(headersJson)) + stsRequest.HTTPRequest.Header.Add(in.GetEntityBodyHeader, string(requestBody)) + } + + // Inject the required auth header value, if supplied, and then sign the request including that header + if in.ServerIDHeaderValue != "" { + stsRequest.HTTPRequest.Header.Add(in.ServerIDHeaderName, in.ServerIDHeaderValue) + } + + stsRequest.Sign() + + // Now extract out the relevant parts of the request + headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header) + if err != nil { + return nil, err + } + requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body) + if err != nil { + return nil, err + } + + return map[string]interface{}{ + "iam_http_request_method": stsRequest.HTTPRequest.Method, + "iam_request_url": base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String())), + "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson), + "iam_request_body": base64.StdEncoding.EncodeToString(requestBody), + }, nil +} + +// STS is a really weird service that used to only have global endpoints but now has regional endpoints as well. +// For backwards compatibility, even if you request a region other than us-east-1, it'll still sign for us-east-1. 
+// See, e.g., https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code +// So we have to shim in this EndpointResolver to force it to sign for the right region +func stsSigningResolver(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { + defaultEndpoint, err := endpoints.DefaultResolver().EndpointFor(service, region, optFns...) + if err != nil { + return defaultEndpoint, err + } + defaultEndpoint.SigningRegion = region + return defaultEndpoint, nil +} + +func formatSignedEntityRequest(svc *sts.STS, in *LoginInput) (*request.Request, error) { + // We need to retrieve the IAM user or role for the iam:GetRole or iam:GetUser request. + // GetCallerIdentity returns this and requires no permissions. + resp, err := svc.GetCallerIdentity(nil) + if err != nil { + return nil, err + } + + arn, err := responses.ParseArn(*resp.Arn) + if err != nil { + return nil, err + } + + iamSession, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{ + Credentials: svc.Config.Credentials, + }, + }) + if err != nil { + return nil, err + } + iamSvc := iam.New(iamSession) + + var req *request.Request + switch arn.Type { + case "role", "assumed-role": + req, _ = iamSvc.GetRoleRequest(&iam.GetRoleInput{RoleName: &arn.FriendlyName}) + case "user": + req, _ = iamSvc.GetUserRequest(&iam.GetUserInput{UserName: &arn.FriendlyName}) + default: + return nil, fmt.Errorf("entity %s is not an IAM role or IAM user", arn.Type) + } + + // Inject the required auth header value, if supplied, and then sign the request including that header + if in.ServerIDHeaderValue != "" { + req.HTTPRequest.Header.Add(in.ServerIDHeaderName, in.ServerIDHeaderValue) + } + + req.Sign() + return req, nil +} diff --git a/lib/glob.go b/lib/glob.go new file mode 100644 index 000000000..969e3ab25 --- /dev/null +++ b/lib/glob.go @@ -0,0 +1,24 @@ +package lib + +import "strings" + +// GlobbedStringsMatch compares item to val with support for a leading and/or +// trailing wildcard '*' in item. 
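+// A '*' at both ends matches any val containing the inner substring, a leading
+// '*' alone matches any val ending with the remainder, a trailing '*' alone
+// matches any val starting with the remainder, and an item with no wildcard
+// (or shorter than two characters, such as "*" itself) must equal val exactly.
+// For example, GlobbedStringsMatch("*test", "a test") and
+// GlobbedStringsMatch("test*", "testsss") both return true.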
+func GlobbedStringsMatch(item, val string) bool { + if len(item) < 2 { + return val == item + } + + hasPrefix := strings.HasPrefix(item, "*") + hasSuffix := strings.HasSuffix(item, "*") + + if hasPrefix && hasSuffix { + return strings.Contains(val, item[1:len(item)-1]) + } else if hasPrefix { + return strings.HasSuffix(val, item[1:]) + } else if hasSuffix { + return strings.HasPrefix(val, item[:len(item)-1]) + } + + return val == item +} diff --git a/lib/glob_test.go b/lib/glob_test.go new file mode 100644 index 000000000..6c29f5ef1 --- /dev/null +++ b/lib/glob_test.go @@ -0,0 +1,37 @@ +package lib + +import "testing" + +func TestGlobbedStringsMatch(t *testing.T) { + tests := []struct { + item string + val string + expect bool + }{ + {"", "", true}, + {"*", "*", true}, + {"**", "**", true}, + {"*t", "t", true}, + {"*t", "test", true}, + {"t*", "test", true}, + {"*test", "test", true}, + {"*test", "a test", true}, + {"test", "a test", false}, + {"*test", "tests", false}, + {"test*", "test", true}, + {"test*", "testsss", true}, + {"test**", "testsss", false}, + {"test**", "test*", true}, + {"**test", "*test", true}, + {"TEST", "test", false}, + {"test", "test", true}, + } + + for _, tt := range tests { + actual := GlobbedStringsMatch(tt.item, tt.val) + + if actual != tt.expect { + t.Fatalf("Bad testcase %#v, expected %t, got %t", tt, tt.expect, actual) + } + } +} From 8b184197b3104107db6dabc9026bc0e68e81b46f Mon Sep 17 00:00:00 2001 From: FFMMM Date: Thu, 31 Mar 2022 10:49:37 -0700 Subject: [PATCH 051/785] polish rpc.service.call metric behavior (#12624) --- agent/config/builder.go | 52 +++++++--- agent/config/builder_test.go | 51 ++++++++++ agent/config/runtime_test.go | 4 +- agent/metrics_test.go | 117 +++++++++++++++++++++- agent/rpc/middleware/interceptors.go | 33 +++--- agent/rpc/middleware/interceptors_test.go | 15 ++- agent/setup.go | 2 - 7 files changed, 230 insertions(+), 44 deletions(-) diff --git a/agent/config/builder.go b/agent/config/builder.go index d9686254b..d5d3bbfb4 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -32,6 +32,7 @@ import ( "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" "github.com/hashicorp/consul/agent/dns" + "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/ipaddr" @@ -640,21 +641,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) { } // Parse the metric filters - var telemetryAllowedPrefixes, telemetryBlockedPrefixes []string - for _, rule := range c.Telemetry.PrefixFilter { - if rule == "" { - b.warn("Cannot have empty filter rule in prefix_filter") - continue - } - switch rule[0] { - case '+': - telemetryAllowedPrefixes = append(telemetryAllowedPrefixes, rule[1:]) - case '-': - telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, rule[1:]) - default: - b.warn("Filter rule must begin with either '+' or '-': %q", rule) - } - } + telemetryAllowedPrefixes, telemetryBlockedPrefixes := b.parsePrefixFilter(&c.Telemetry) // raft performance scaling performanceRaftMultiplier := intVal(c.Performance.RaftMultiplier) @@ -2588,3 +2575,38 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error return c, nil } + +func (b *builder) parsePrefixFilter(telemetry *Telemetry) ([]string, []string) { + var telemetryAllowedPrefixes, telemetryBlockedPrefixes []string + + // TODO(FFMMM): Once one twelve style RPC metrics get out of Beta, 
don't remove them by default. + operatorPassedOneTwelveRPCMetric := false + oneTwelveRPCMetric := *telemetry.MetricsPrefix + "." + strings.Join(middleware.OneTwelveRPCSummary[0].Name, ".") + + for _, rule := range telemetry.PrefixFilter { + if rule == "" { + b.warn("Cannot have empty filter rule in prefix_filter") + continue + } + switch rule[0] { + case '+': + if rule[1:] == oneTwelveRPCMetric { + operatorPassedOneTwelveRPCMetric = true + } + telemetryAllowedPrefixes = append(telemetryAllowedPrefixes, rule[1:]) + case '-': + if rule[1:] == oneTwelveRPCMetric { + operatorPassedOneTwelveRPCMetric = true + } + telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, rule[1:]) + default: + b.warn("Filter rule must begin with either '+' or '-': %q", rule) + } + } + + if !operatorPassedOneTwelveRPCMetric { + telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, oneTwelveRPCMetric) + } + + return telemetryAllowedPrefixes, telemetryBlockedPrefixes +} diff --git a/agent/config/builder_test.go b/agent/config/builder_test.go index 58fd922fc..1bd6d8653 100644 --- a/agent/config/builder_test.go +++ b/agent/config/builder_test.go @@ -384,3 +384,54 @@ func TestBuilder_tlsCipherSuites(t *testing.T) { require.Contains(t, b.err.Error(), invalidCipherSuites) require.Contains(t, b.err.Error(), "cipher suites are not configurable") } + +func TestBuilder_parsePrefixFilter(t *testing.T) { + t.Run("Check that 1.12 rpc metrics are parsed correctly.", func(t *testing.T) { + type testCase struct { + name string + metricsPrefix string + prefixFilter []string + expectedAllowedPrefix []string + expectedBlockedPrefix []string + } + + var testCases = []testCase{ + { + name: "no prefix filter", + metricsPrefix: "somePrefix", + prefixFilter: []string{}, + expectedAllowedPrefix: nil, + expectedBlockedPrefix: []string{"somePrefix.rpc.server.call"}, + }, + { + name: "operator enables 1.12 rpc metrics", + metricsPrefix: "somePrefix", + prefixFilter: []string{"+somePrefix.rpc.server.call"}, + expectedAllowedPrefix: []string{"somePrefix.rpc.server.call"}, + expectedBlockedPrefix: nil, + }, + { + name: "operator enables 1.12 rpc metrics", + metricsPrefix: "somePrefix", + prefixFilter: []string{"-somePrefix.rpc.server.call"}, + expectedAllowedPrefix: nil, + expectedBlockedPrefix: []string{"somePrefix.rpc.server.call"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + b := builder{} + telemetry := &Telemetry{ + MetricsPrefix: &tc.metricsPrefix, + PrefixFilter: tc.prefixFilter, + } + + allowedPrefix, blockedPrefix := b.parsePrefixFilter(telemetry) + + require.Equal(t, tc.expectedAllowedPrefix, allowedPrefix) + require.Equal(t, tc.expectedBlockedPrefix, blockedPrefix) + }) + } + }) +} diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index d74650c07..ab0798342 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2326,7 +2326,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { expected: func(rt *RuntimeConfig) { rt.DataDir = dataDir rt.Telemetry.AllowedPrefixes = []string{"foo"} - rt.Telemetry.BlockedPrefixes = []string{"bar"} + rt.Telemetry.BlockedPrefixes = []string{"bar", "consul.rpc.server.call"} }, expectedWarnings: []string{`Filter rule must begin with either '+' or '-': "nix"`}, }) @@ -6285,7 +6285,7 @@ func TestLoad_FullConfig(t *testing.T) { DogstatsdTags: []string{"3N81zSUB", "Xtj8AnXZ"}, FilterDefault: true, AllowedPrefixes: []string{"oJotS8XJ"}, - BlockedPrefixes: []string{"cazlEhGn"}, + BlockedPrefixes: []string{"cazlEhGn", 
"ftO6DySn.rpc.server.call"}, MetricsPrefix: "ftO6DySn", StatsdAddr: "drce87cy", StatsiteAddr: "HpFwKB8R", diff --git a/agent/metrics_test.go b/agent/metrics_test.go index b530eda25..5bbc7de23 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/tlsutil" @@ -43,6 +44,82 @@ func assertMetricExists(t *testing.T, respRec *httptest.ResponseRecorder, metric } } +// assertMetricExistsWithLabels looks in the prometheus metrics reponse for the metric name and all the labels. eg: +// new_rpc_metrics_rpc_server_call{errored="false",method="Status.Ping",request_type="unknown",rpc_type="net/rpc"} +func assertMetricExistsWithLabels(t *testing.T, respRec *httptest.ResponseRecorder, metric string, labelNames []string) { + if respRec.Body.String() == "" { + t.Fatalf("Response body is empty.") + } + + if !strings.Contains(respRec.Body.String(), metric) { + t.Fatalf("Could not find the metric \"%s\" in the /v1/agent/metrics response", metric) + } + + foundAllLabels := false + metrics := respRec.Body.String() + for _, line := range strings.Split(metrics, "\n") { + // skip help lines + if len(line) < 1 || line[0] == '#' { + continue + } + + if strings.Contains(line, metric) { + hasAllLabels := true + for _, labelName := range labelNames { + if !strings.Contains(line, labelName) { + hasAllLabels = false + break + } + } + + if hasAllLabels { + foundAllLabels = true + + // done! + break + } + } + } + + if !foundAllLabels { + t.Fatalf("Could not verify that all named labels \"%s\" exist for the metric \"%s\" in the /v1/agent/metrics response", strings.Join(labelNames, ", "), metric) + } +} + +func assertLabelWithValueForMetricExistsNTime(t *testing.T, respRec *httptest.ResponseRecorder, metric string, label string, labelValue string, occurrences int) { + if respRec.Body.String() == "" { + t.Fatalf("Response body is empty.") + } + + if !strings.Contains(respRec.Body.String(), metric) { + t.Fatalf("Could not find the metric \"%s\" in the /v1/agent/metrics response", metric) + } + + metrics := respRec.Body.String() + // don't look at _sum or _count or other aggregates + metricTarget := metric + "{" + // eg method="Status.Ping" + labelWithValueTarget := label + "=" + "\"" + labelValue + "\"" + + matchesFound := 0 + for _, line := range strings.Split(metrics, "\n") { + // skip help lines + if len(line) < 1 || line[0] == '#' { + continue + } + + if strings.Contains(line, metricTarget) { + if strings.Contains(line, labelWithValueTarget) { + matchesFound++ + } + } + } + + if matchesFound < occurrences { + t.Fatalf("Only found metric \"%s\" %d times. Wanted %d times.", metric, matchesFound, occurrences) + } +} + func assertMetricExistsWithValue(t *testing.T, respRec *httptest.ResponseRecorder, metric string, value string) { if respRec.Body.String() == "" { t.Fatalf("Response body is empty.") @@ -66,13 +143,13 @@ func assertMetricNotExists(t *testing.T, respRec *httptest.ResponseRecorder, met } } -// TestAgent_NewRPCMetrics test for the new RPC metrics. These are the labeled metrics coming from +// TestAgent_OneTwelveRPCMetrics test for the 1.12 style RPC metrics. These are the labeled metrics coming from // agent.rpc.middleware.interceptors package. 
-func TestAgent_NewRPCMetrics(t *testing.T) { +func TestAgent_OneTwelveRPCMetrics(t *testing.T) { skipIfShortTesting(t) // This test cannot use t.Parallel() since we modify global state, ie the global metrics instance - t.Run("Check new rpc metrics are being emitted", func(t *testing.T) { + t.Run("Check that 1.12 rpc metrics are not emitted by default.", func(t *testing.T) { metricsPrefix := "new_rpc_metrics" hcl := fmt.Sprintf(` telemetry = { @@ -92,7 +169,39 @@ func TestAgent_NewRPCMetrics(t *testing.T) { respRec := httptest.NewRecorder() recordPromMetrics(t, a, respRec) - assertMetricExists(t, respRec, metricsPrefix+"_rpc_server_call") + assertMetricNotExists(t, respRec, metricsPrefix+"_rpc_server_call") + }) + + t.Run("Check that 1.12 rpc metrics are emitted when specified by operator.", func(t *testing.T) { + metricsPrefix := "new_rpc_metrics_2" + allowRPCMetricRule := metricsPrefix + "." + strings.Join(middleware.OneTwelveRPCSummary[0].Name, ".") + hcl := fmt.Sprintf(` + telemetry = { + prometheus_retention_time = "5s" + disable_hostname = true + metrics_prefix = "%s" + prefix_filter = ["+%s"] + } + `, metricsPrefix, allowRPCMetricRule) + + a := StartTestAgent(t, TestAgent{HCL: hcl}) + defer a.Shutdown() + + var out struct{} + err := a.RPC("Status.Ping", struct{}{}, &out) + require.NoError(t, err) + err = a.RPC("Status.Ping", struct{}{}, &out) + require.NoError(t, err) + err = a.RPC("Status.Ping", struct{}{}, &out) + require.NoError(t, err) + + respRec := httptest.NewRecorder() + recordPromMetrics(t, a, respRec) + + // make sure the labels exist for this metric + assertMetricExistsWithLabels(t, respRec, metricsPrefix+"_rpc_server_call", []string{"errored", "method", "request_type", "rpc_type"}) + // make sure we see 3 Status.Ping metrics corresponding to the calls we made above + assertLabelWithValueForMetricExistsNTime(t, respRec, metricsPrefix+"_rpc_server_call", "method", "Status.Ping", 3) }) } diff --git a/agent/rpc/middleware/interceptors.go b/agent/rpc/middleware/interceptors.go index a5ee26f4e..c7ac72f35 100644 --- a/agent/rpc/middleware/interceptors.go +++ b/agent/rpc/middleware/interceptors.go @@ -3,6 +3,7 @@ package middleware import ( "reflect" "strconv" + "strings" "time" "github.com/armon/go-metrics" @@ -22,27 +23,26 @@ const RPCTypeInternal = "internal" const RPCTypeNetRPC = "net/rpc" var metricRPCRequest = []string{"rpc", "server", "call"} -var requestLogName = "rpc.server.request" +var requestLogName = strings.Join(metricRPCRequest, "_") -var NewRPCGauges = []prometheus.GaugeDefinition{ +var OneTwelveRPCSummary = []prometheus.SummaryDefinition{ { Name: metricRPCRequest, - Help: "Increments when a server makes an RPC service call. The labels on the metric have more information", + Help: "Measures the time an RPC service call takes to make in milliseconds. 
Labels mark which RPC method was called and metadata about the call.", }, } type RequestRecorder struct { Logger hclog.Logger - recorderFunc func(key []string, start time.Time, labels []metrics.Label) + recorderFunc func(key []string, val float32, labels []metrics.Label) } func NewRequestRecorder(logger hclog.Logger) *RequestRecorder { - return &RequestRecorder{Logger: logger, recorderFunc: metrics.MeasureSinceWithLabels} + return &RequestRecorder{Logger: logger, recorderFunc: metrics.AddSampleWithLabels} } func (r *RequestRecorder) Record(requestName string, rpcType string, start time.Time, request interface{}, respErrored bool) { - elapsed := time.Since(start) - + elapsed := time.Since(start).Milliseconds() reqType := requestType(request) labels := []metrics.Label{ @@ -52,9 +52,8 @@ func (r *RequestRecorder) Record(requestName string, rpcType string, start time. {Name: "rpc_type", Value: rpcType}, } - // TODO(FFMMM): it'd be neat if we could actually pass the elapsed observed above - r.recorderFunc(metricRPCRequest, start, labels) - + // math.MaxInt64 < math.MaxFloat32 is true so we should be good! + r.recorderFunc(metricRPCRequest, float32(elapsed), labels) r.Logger.Debug(requestLogName, "method", requestName, "errored", respErrored, @@ -64,10 +63,18 @@ func (r *RequestRecorder) Record(requestName string, rpcType string, start time. } func requestType(req interface{}) string { - if r, ok := req.(interface{ IsRead() bool }); ok && r.IsRead() { - return "read" + if r, ok := req.(interface{ IsRead() bool }); ok { + if r.IsRead() { + return "read" + } else { + return "write" + } } - return "write" + + // This logical branch should not happen. If it happens + // it means an underlying request is not implementing the interface. + // Rather than swallowing it up in a "read" or "write", let's be aware of it. + return "unreported" } func GetNetRPCInterceptor(recorder *RequestRecorder) rpc.ServerServiceCallInterceptor { diff --git a/agent/rpc/middleware/interceptors_test.go b/agent/rpc/middleware/interceptors_test.go index e6743a4a6..23d764962 100644 --- a/agent/rpc/middleware/interceptors_test.go +++ b/agent/rpc/middleware/interceptors_test.go @@ -13,9 +13,9 @@ import ( // obs holds all the things we want to assert on that we recorded correctly in our tests. type obs struct { - key []string - start time.Time - labels []metrics.Label + key []string + elapsed float32 + labels []metrics.Label } // recorderStore acts as an in-mem mock storage for all the RequestRecorder.Record() recorderFunc calls. 
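+// The tests below swap RequestRecorder's recorderFunc for simpleRecorderFunc, which stores each
+// observation here so assertions can inspect the key, elapsed value, and labels that were recorded.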
@@ -41,9 +41,8 @@ func (rs *recorderStore) get(key []string) obs { } var store = recorderStore{store: make(map[string]obs)} -var simpleRecorderFunc = func(key []string, start time.Time, labels []metrics.Label) { - o := obs{key: key, start: start, labels: labels} - +var simpleRecorderFunc = func(key []string, val float32, labels []metrics.Label) { + o := obs{key: key, elapsed: val, labels: labels} store.put(key, o) } @@ -71,13 +70,13 @@ func TestRequestRecorder_SimpleOK(t *testing.T) { expectedLabels := []metrics.Label{ {Name: "method", Value: "A.B"}, {Name: "errored", Value: "false"}, - {Name: "request_type", Value: "write"}, + {Name: "request_type", Value: "unreported"}, {Name: "rpc_type", Value: RPCTypeInternal}, } o := store.get(append(metricRPCRequest, expectedLabels[0].Value)) require.Equal(t, o.key, metricRPCRequest) - require.Equal(t, o.start, start) + require.LessOrEqual(t, o.elapsed, float32(start.Sub(time.Now()).Milliseconds())) require.Equal(t, o.labels, expectedLabels) } diff --git a/agent/setup.go b/agent/setup.go index 4921a42d8..bf67c0360 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -25,7 +25,6 @@ import ( "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" - "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/submatview" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/agent/xds" @@ -215,7 +214,6 @@ func getPrometheusDefs(cfg lib.TelemetryConfig, isServer bool) ([]prometheus.Gau CertExpirationGauges, Gauges, raftGauges, - middleware.NewRPCGauges, } // TODO(ffmmm): conditionally add only leader specific metrics to gauges, counters, summaries, etc From 9a2474381ac4f26477973257a1a0f4fe81cb1a55 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 31 Mar 2022 09:49:11 -0700 Subject: [PATCH 052/785] Add expanded token read flag and endpoint option --- .changelog/12670.txt | 3 + agent/acl_endpoint.go | 11 + agent/acl_endpoint_test.go | 11 + agent/consul/acl_endpoint.go | 89 +++++++ agent/consul/acl_endpoint_oss.go | 6 + agent/consul/acl_endpoint_test.go | 175 +++++++++++++ agent/structs/acl.go | 27 +- agent/structs/acl_test.go | 2 +- api/acl.go | 40 +++ command/acl/token/formatter.go | 166 ++++++++++++ command/acl/token/formatter_oss_test.go | 12 + command/acl/token/formatter_test.go | 236 ++++++++++++++++++ command/acl/token/read/token_read.go | 18 +- .../FormatTokenExpanded/oss/basic.json.golden | 48 ++++ .../oss/basic.pretty-meta.golden | 34 +++ .../oss/basic.pretty.golden | 31 +++ .../oss/complex.json.golden | 191 ++++++++++++++ .../oss/complex.pretty-meta.golden | 166 ++++++++++++ .../oss/complex.pretty.golden | 163 ++++++++++++ 19 files changed, 1423 insertions(+), 6 deletions(-) create mode 100644 .changelog/12670.txt create mode 100644 command/acl/token/formatter_oss_test.go create mode 100644 command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden create mode 100644 command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty-meta.golden create mode 100644 command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty.golden create mode 100644 command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden create mode 100644 command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty-meta.golden create mode 100644 command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty.golden diff --git a/.changelog/12670.txt b/.changelog/12670.txt new file mode 100644 index 000000000..85d9348c5 --- /dev/null 
+++ b/.changelog/12670.txt @@ -0,0 +1,3 @@ +```release-note:feature +cli: The `token read` command now supports the `-expanded` flag to display detailed role and policy information for the token. +``` diff --git a/agent/acl_endpoint.go b/agent/acl_endpoint.go index 5b9ddec3b..54f6c3948 100644 --- a/agent/acl_endpoint.go +++ b/agent/acl_endpoint.go @@ -378,6 +378,9 @@ func (s *HTTPHandlers) ACLTokenGet(resp http.ResponseWriter, req *http.Request, if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { return nil, err } + if _, ok := req.URL.Query()["expanded"]; ok { + args.Expanded = true + } if args.Datacenter == "" { args.Datacenter = s.agent.config.Datacenter @@ -393,6 +396,14 @@ func (s *HTTPHandlers) ACLTokenGet(resp http.ResponseWriter, req *http.Request, return nil, acl.ErrNotFound } + if args.Expanded { + expanded := &structs.ACLTokenExpanded{ + ACLToken: out.Token, + ExpandedTokenInfo: out.ExpandedTokenInfo, + } + return expanded, nil + } + return out.Token, nil } diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 5087367d8..2c6aad450 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -724,6 +724,17 @@ func TestACL_HTTP(t *testing.T) { require.True(t, ok) require.Equal(t, expected, token) }) + t.Run("Read-expanded", func(t *testing.T) { + expected := tokenMap[idMap["token-test"]] + req, _ := http.NewRequest("GET", "/v1/acl/token/"+expected.AccessorID+"?token=root&expanded=true", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.ACLTokenCRUD(resp, req) + require.NoError(t, err) + tokenResp, ok := obj.(*structs.ACLTokenExpanded) + require.True(t, ok) + require.Equal(t, expected, tokenResp.ACLToken) + require.Len(t, tokenResp.ExpandedPolicies, 3) + }) t.Run("Self", func(t *testing.T) { expected := tokenMap[idMap["token-test"]] req, _ := http.NewRequest("GET", "/v1/acl/token/self?token="+expected.SecretID, nil) diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index 2541c36c1..bac938dfa 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -325,10 +325,99 @@ func (a *ACL) TokenRead(args *structs.ACLTokenGetRequest, reply *structs.ACLToke if token == nil { return errNotFound } + + if args.Expanded { + info, err := a.lookupExpandedTokenInfo(ws, state, token) + if err != nil { + return err + } + reply.ExpandedTokenInfo = info + } + return nil }) } +func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, token *structs.ACLToken) (structs.ExpandedTokenInfo, error) { + policyIDs := make(map[string]struct{}) + roleIDs := make(map[string]struct{}) + identityPolicies := make(map[string]*structs.ACLPolicy) + tokenInfo := structs.ExpandedTokenInfo{} + + // Add the token's policies and node/service identity policies + for _, policy := range token.Policies { + policyIDs[policy.ID] = struct{}{} + } + for _, roleLink := range token.Roles { + roleIDs[roleLink.ID] = struct{}{} + } + + for _, identity := range token.ServiceIdentities { + policy := identity.SyntheticPolicy(&token.EnterpriseMeta) + identityPolicies[policy.ID] = policy + } + for _, identity := range token.NodeIdentities { + policy := identity.SyntheticPolicy(&token.EnterpriseMeta) + identityPolicies[policy.ID] = policy + } + + // Get any namespace default roles/policies to look up + nsPolicies, nsRoles, err := getTokenNamespaceDefaults(ws, state, &token.EnterpriseMeta) + if err != nil { + return tokenInfo, err + } + tokenInfo.NamespaceDefaultPolicyIDs = nsPolicies + tokenInfo.NamespaceDefaultRoleIDs = 
nsRoles + for _, id := range nsPolicies { + policyIDs[id] = struct{}{} + } + for _, id := range nsRoles { + roleIDs[id] = struct{}{} + } + + // Add each role's policies and node/service identity policies + for roleID := range roleIDs { + _, role, err := state.ACLRoleGetByID(ws, roleID, &token.EnterpriseMeta) + if err != nil { + return tokenInfo, err + } + + for _, policy := range role.Policies { + policyIDs[policy.ID] = struct{}{} + } + + for _, identity := range role.ServiceIdentities { + policy := identity.SyntheticPolicy(&role.EnterpriseMeta) + identityPolicies[policy.ID] = policy + } + for _, identity := range role.NodeIdentities { + policy := identity.SyntheticPolicy(&role.EnterpriseMeta) + identityPolicies[policy.ID] = policy + } + + tokenInfo.ExpandedRoles = append(tokenInfo.ExpandedRoles, role) + } + + var policies []*structs.ACLPolicy + for id := range policyIDs { + _, policy, err := state.ACLPolicyGetByID(ws, id, &token.EnterpriseMeta) + if err != nil { + return tokenInfo, err + } + policies = append(policies, policy) + } + for _, policy := range identityPolicies { + policies = append(policies, policy) + } + + tokenInfo.ExpandedPolicies = policies + tokenInfo.AgentACLDefaultPolicy = a.srv.config.ACLResolverSettings.ACLDefaultPolicy + tokenInfo.AgentACLDownPolicy = a.srv.config.ACLResolverSettings.ACLDownPolicy + tokenInfo.ResolvedByAgent = a.srv.config.NodeName + + return tokenInfo, nil +} + func (a *ACL) TokenClone(args *structs.ACLTokenSetRequest, reply *structs.ACLToken) error { if err := a.aclPreCheck(); err != nil { return err diff --git a/agent/consul/acl_endpoint_oss.go b/agent/consul/acl_endpoint_oss.go index 80cb54c80..3cc9e35d4 100644 --- a/agent/consul/acl_endpoint_oss.go +++ b/agent/consul/acl_endpoint_oss.go @@ -5,7 +5,9 @@ package consul import ( "github.com/hashicorp/consul/agent/consul/authmethod" + "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" + memdb "github.com/hashicorp/go-memdb" ) func (a *ACL) tokenUpsertValidateEnterprise(token *structs.ACLToken, existing *structs.ACLToken) error { @@ -37,3 +39,7 @@ func computeTargetEnterpriseMeta( ) (*structs.EnterpriseMeta, error) { return &structs.EnterpriseMeta{}, nil } + +func getTokenNamespaceDefaults(ws memdb.WatchSet, state *state.Store, entMeta *structs.EnterpriseMeta) ([]string, []string, error) { + return nil, nil, nil +} diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index 009f1c52d..637872070 100644 --- a/agent/consul/acl_endpoint_test.go +++ b/agent/consul/acl_endpoint_test.go @@ -204,6 +204,181 @@ func TestACLEndpoint_TokenRead(t *testing.T) { require.Nil(t, resp.Token) require.EqualError(t, err, "failed acl token lookup: index error: UUID must be 36 characters") }) + + t.Run("expanded output with role/policy", func(t *testing.T) { + p1, err := upsertTestPolicy(codec, TestDefaultInitialManagementToken, "dc1") + require.NoError(t, err) + + p2, err := upsertTestPolicy(codec, TestDefaultInitialManagementToken, "dc1") + require.NoError(t, err) + + r1, err := upsertTestCustomizedRole(codec, TestDefaultInitialManagementToken, "dc1", func(role *structs.ACLRole) { + role.Policies = []structs.ACLRolePolicyLink{ + { + ID: p2.ID, + }, + } + }) + require.NoError(t, err) + + setReq := structs.ACLTokenSetRequest{ + Datacenter: "dc1", + ACLToken: structs.ACLToken{ + Description: "foobar", + Policies: []structs.ACLTokenPolicyLink{ + { + ID: p1.ID, + }, + }, + Roles: []structs.ACLTokenRoleLink{ + { + ID: r1.ID, + }, + }, + Local: false, + }, + 
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, + } + + setResp := structs.ACLToken{} + err = msgpackrpc.CallWithCodec(codec, "ACL.TokenSet", &setReq, &setResp) + require.NoError(t, err) + require.NotEmpty(t, setResp.AccessorID) + + req := structs.ACLTokenGetRequest{ + Datacenter: "dc1", + TokenID: setResp.AccessorID, + TokenIDType: structs.ACLTokenAccessor, + Expanded: true, + QueryOptions: structs.QueryOptions{Token: TestDefaultInitialManagementToken}, + } + resp := structs.ACLTokenResponse{} + + err = msgpackrpc.CallWithCodec(codec, "ACL.TokenRead", &req, &resp) + require.NoError(t, err) + require.NotNil(t, resp.Token) + require.ElementsMatch(t, []*structs.ACLPolicy{p1, p2}, resp.ExpandedPolicies) + require.ElementsMatch(t, []*structs.ACLRole{r1}, resp.ExpandedRoles) + }) + + t.Run("expanded output with multiple roles that share a policy", func(t *testing.T) { + p1, err := upsertTestPolicy(codec, TestDefaultInitialManagementToken, "dc1") + require.NoError(t, err) + + r1, err := upsertTestCustomizedRole(codec, TestDefaultInitialManagementToken, "dc1", func(role *structs.ACLRole) { + role.Policies = []structs.ACLRolePolicyLink{ + { + ID: p1.ID, + }, + } + }) + require.NoError(t, err) + + r2, err := upsertTestCustomizedRole(codec, TestDefaultInitialManagementToken, "dc1", func(role *structs.ACLRole) { + role.Policies = []structs.ACLRolePolicyLink{ + { + ID: p1.ID, + }, + } + }) + require.NoError(t, err) + + setReq := structs.ACLTokenSetRequest{ + Datacenter: "dc1", + ACLToken: structs.ACLToken{ + Description: "foobar", + Roles: []structs.ACLTokenRoleLink{ + { + ID: r1.ID, + }, + { + ID: r2.ID, + }, + }, + Local: false, + }, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, + } + + setResp := structs.ACLToken{} + err = msgpackrpc.CallWithCodec(codec, "ACL.TokenSet", &setReq, &setResp) + require.NoError(t, err) + require.NotEmpty(t, setResp.AccessorID) + + req := structs.ACLTokenGetRequest{ + Datacenter: "dc1", + TokenID: setResp.AccessorID, + TokenIDType: structs.ACLTokenAccessor, + Expanded: true, + QueryOptions: structs.QueryOptions{Token: TestDefaultInitialManagementToken}, + } + resp := structs.ACLTokenResponse{} + + err = msgpackrpc.CallWithCodec(codec, "ACL.TokenRead", &req, &resp) + require.NoError(t, err) + require.NotNil(t, resp.Token) + require.ElementsMatch(t, []*structs.ACLPolicy{p1}, resp.ExpandedPolicies) + require.ElementsMatch(t, []*structs.ACLRole{r1, r2}, resp.ExpandedRoles) + }) + + t.Run("expanded output with node/service identities", func(t *testing.T) { + setReq := structs.ACLTokenSetRequest{ + Datacenter: "dc1", + ACLToken: structs.ACLToken{ + Description: "foobar", + ServiceIdentities: []*structs.ACLServiceIdentity{ + { + ServiceName: "web", + Datacenters: []string{"dc1"}, + }, + { + ServiceName: "db", + Datacenters: []string{"dc2"}, + }, + }, + NodeIdentities: []*structs.ACLNodeIdentity{ + { + NodeName: "foo", + Datacenter: "dc1", + }, + { + NodeName: "bar", + Datacenter: "dc1", + }, + }, + Local: false, + }, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, + } + + var expectedPolicies []*structs.ACLPolicy + entMeta := structs.DefaultEnterpriseMetaInDefaultPartition() + for _, serviceIdentity := range setReq.ACLToken.ServiceIdentities { + expectedPolicies = append(expectedPolicies, serviceIdentity.SyntheticPolicy(entMeta)) + } + for _, serviceIdentity := range setReq.ACLToken.NodeIdentities { + expectedPolicies = append(expectedPolicies, 
serviceIdentity.SyntheticPolicy(entMeta)) + } + + setResp := structs.ACLToken{} + err := msgpackrpc.CallWithCodec(codec, "ACL.TokenSet", &setReq, &setResp) + require.NoError(t, err) + require.NotEmpty(t, setResp.AccessorID) + + req := structs.ACLTokenGetRequest{ + Datacenter: "dc1", + TokenID: setResp.AccessorID, + TokenIDType: structs.ACLTokenAccessor, + Expanded: true, + QueryOptions: structs.QueryOptions{Token: TestDefaultInitialManagementToken}, + } + resp := structs.ACLTokenResponse{} + + err = msgpackrpc.CallWithCodec(codec, "ACL.TokenRead", &req, &resp) + require.NoError(t, err) + require.NotNil(t, resp.Token) + require.ElementsMatch(t, expectedPolicies, resp.ExpandedPolicies) + }) } func TestACLEndpoint_TokenClone(t *testing.T) { diff --git a/agent/structs/acl.go b/agent/structs/acl.go index 42fa55821..6631fa918 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -158,7 +158,8 @@ func (s *ACLServiceIdentity) SyntheticPolicy(entMeta *EnterpriseMeta) *ACLPolicy policy := &ACLPolicy{} policy.ID = hashID policy.Name = fmt.Sprintf("synthetic-policy-%s", hashID) - policy.Description = "synthetic policy" + sn := NewServiceName(s.ServiceName, entMeta) + policy.Description = fmt.Sprintf("synthetic policy for service identity %q", sn.String()) policy.Rules = rules policy.Syntax = acl.SyntaxCurrent policy.Datacenters = s.Datacenters @@ -202,7 +203,7 @@ func (s *ACLNodeIdentity) SyntheticPolicy(entMeta *EnterpriseMeta) *ACLPolicy { policy := &ACLPolicy{} policy.ID = hashID policy.Name = fmt.Sprintf("synthetic-policy-%s", hashID) - policy.Description = "synthetic policy" + policy.Description = fmt.Sprintf("synthetic policy for node identity %q", s.NodeName) policy.Rules = rules policy.Syntax = acl.SyntaxCurrent policy.Datacenters = []string{s.Datacenter} @@ -1217,7 +1218,8 @@ func (r *ACLTokenSetRequest) RequestDatacenter() string { type ACLTokenGetRequest struct { TokenID string // id used for the token lookup TokenIDType ACLTokenIDType // The Type of ID used to lookup the token - Datacenter string // The datacenter to perform the request within + Expanded bool + Datacenter string // The datacenter to perform the request within EnterpriseMeta QueryOptions } @@ -1313,9 +1315,28 @@ type ACLTokenResponse struct { Token *ACLToken Redacted bool // whether the token's secret was redacted SourceDatacenter string + + ExpandedTokenInfo QueryMeta } +type ExpandedTokenInfo struct { + ExpandedPolicies []*ACLPolicy + ExpandedRoles []*ACLRole + + NamespaceDefaultPolicyIDs []string + NamespaceDefaultRoleIDs []string + + AgentACLDefaultPolicy string + AgentACLDownPolicy string + ResolvedByAgent string +} + +type ACLTokenExpanded struct { + *ACLToken + ExpandedTokenInfo +} + // ACLTokenBatchResponse returns multiple Tokens associated with the same metadata type ACLTokenBatchResponse struct { Tokens []*ACLToken diff --git a/agent/structs/acl_test.go b/agent/structs/acl_test.go index a435d1c57..65afc6bf1 100644 --- a/agent/structs/acl_test.go +++ b/agent/structs/acl_test.go @@ -69,7 +69,6 @@ func TestStructs_ACLServiceIdentity_SyntheticPolicy(t *testing.T) { expect := &ACLPolicy{ Syntax: acl.SyntaxCurrent, Datacenters: test.datacenters, - Description: "synthetic policy", Rules: test.expectRules, } @@ -79,6 +78,7 @@ func TestStructs_ACLServiceIdentity_SyntheticPolicy(t *testing.T) { // strip irrelevant fields before equality got.ID = "" got.Name = "" + got.Description = "" got.Hash = nil require.Equal(t, expect, got) }) diff --git a/api/acl.go b/api/acl.go index 0f44494dd..9989a50b2 100644 --- 
a/api/acl.go +++ b/api/acl.go @@ -62,6 +62,20 @@ type ACLToken struct { AuthMethodNamespace string `json:",omitempty"` } +type ACLTokenExpanded struct { + ExpandedPolicies []ACLPolicy + ExpandedRoles []ACLRole + + NamespaceDefaultPolicies []string + NamespaceDefaultRoles []string + + AgentACLDefaultPolicy string + AgentACLDownPolicy string + ResolvedByAgent string + + ACLToken +} + type ACLTokenListEntry struct { CreateIndex uint64 ModifyIndex uint64 @@ -788,6 +802,32 @@ func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, return &out, qm, nil } +// TokenReadExpanded retrieves the full token details, as well as the contents of any policies affecting the token. +// The tokenID parameter must be a valid Accessor ID of an existing token. +func (a *ACL) TokenReadExpanded(tokenID string, q *QueryOptions) (*ACLTokenExpanded, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID) + r.setQueryOptions(q) + r.params.Set("expanded", "true") + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLTokenExpanded + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + // TokenReadSelf retrieves the full token details of the token currently // assigned to the API Client. In this manner its possible to read a token // by its Secret ID. diff --git a/command/acl/token/formatter.go b/command/acl/token/formatter.go index e88ee28ba..a1eb050ba 100644 --- a/command/acl/token/formatter.go +++ b/command/acl/token/formatter.go @@ -6,17 +6,22 @@ import ( "fmt" "strings" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" ) const ( PrettyFormat string = "pretty" JSONFormat string = "json" + + WHITESPACE_2 string = "\t" + WHITESPACE_4 string = "\t\t" ) // Formatter defines methods provided by token command output formatter type Formatter interface { FormatToken(token *api.ACLToken) (string, error) + FormatTokenExpanded(token *api.ACLTokenExpanded) (string, error) FormatTokenList(tokens []*api.ACLTokenListEntry) (string, error) } @@ -108,6 +113,159 @@ func (f *prettyFormatter) FormatToken(token *api.ACLToken) (string, error) { return buffer.String(), nil } +func (f *prettyFormatter) FormatTokenExpanded(token *api.ACLTokenExpanded) (string, error) { + var buffer bytes.Buffer + + buffer.WriteString(fmt.Sprintf("AccessorID: %s\n", token.AccessorID)) + buffer.WriteString(fmt.Sprintf("SecretID: %s\n", token.SecretID)) + if token.Partition != "" { + buffer.WriteString(fmt.Sprintf("Partition: %s\n", token.Partition)) + } + if token.Namespace != "" { + buffer.WriteString(fmt.Sprintf("Namespace: %s\n", token.Namespace)) + } + buffer.WriteString(fmt.Sprintf("Description: %s\n", token.Description)) + buffer.WriteString(fmt.Sprintf("Local: %t\n", token.Local)) + if token.AuthMethod != "" { + buffer.WriteString(fmt.Sprintf("Auth Method: %s (Namespace: %s)\n", token.AuthMethod, token.AuthMethodNamespace)) + } + buffer.WriteString(fmt.Sprintf("Create Time: %v\n", token.CreateTime)) + if token.ExpirationTime != nil && !token.ExpirationTime.IsZero() { + buffer.WriteString(fmt.Sprintf("Expiration Time: %v\n", *token.ExpirationTime)) + } + if f.showMeta { + buffer.WriteString(fmt.Sprintf("Hash: %x\n", token.Hash)) + buffer.WriteString(fmt.Sprintf("Create Index: %d\n", token.CreateIndex)) + 
buffer.WriteString(fmt.Sprintf("Modify Index: %d\n", token.ModifyIndex)) + } + + policies := make(map[string]api.ACLPolicy) + roles := make(map[string]api.ACLRole) + for _, policy := range token.ExpandedPolicies { + policies[policy.ID] = policy + } + for _, role := range token.ExpandedRoles { + roles[role.ID] = role + } + + formatPolicy := func(policy api.ACLPolicy, indent string) { + buffer.WriteString(fmt.Sprintf(indent+"Policy Name: %s\n", policy.Name)) + buffer.WriteString(fmt.Sprintf(indent+WHITESPACE_2+"ID: %s\n", policy.ID)) + buffer.WriteString(fmt.Sprintf(indent+WHITESPACE_2+"Description: %s\n", policy.Description)) + buffer.WriteString(indent + WHITESPACE_2 + "Rules:\n") + buffer.WriteString(indent + WHITESPACE_4) + buffer.WriteString(strings.ReplaceAll(policy.Rules, "\n", "\n"+indent+WHITESPACE_4)) + buffer.WriteString("\n\n") + } + + if len(token.ACLToken.Policies) > 0 { + buffer.WriteString("Policies:\n") + for _, policyLink := range token.ACLToken.Policies { + formatPolicy(policies[policyLink.ID], WHITESPACE_2) + } + } + + entMeta := structs.NewEnterpriseMetaWithPartition(token.Partition, token.Namespace) + formatServiceIdentity := func(svcIdentity *api.ACLServiceIdentity, indent string) { + if len(svcIdentity.Datacenters) > 0 { + buffer.WriteString(fmt.Sprintf(indent+"Name: %s (Datacenters: %s)\n", svcIdentity.ServiceName, strings.Join(svcIdentity.Datacenters, ", "))) + } else { + buffer.WriteString(fmt.Sprintf(indent+"Name: %s (Datacenters: all)\n", svcIdentity.ServiceName)) + } + identity := structs.ACLServiceIdentity{ServiceName: svcIdentity.ServiceName, Datacenters: svcIdentity.Datacenters} + policy := identity.SyntheticPolicy(&entMeta) + buffer.WriteString(fmt.Sprintf(indent+WHITESPACE_2+"Description: %s\n", policy.Description)) + buffer.WriteString(indent + WHITESPACE_2 + "Rules:") + buffer.WriteString(strings.ReplaceAll(policy.Rules, "\n", "\n"+indent+WHITESPACE_4)) + buffer.WriteString("\n\n") + } + if len(token.ACLToken.ServiceIdentities) > 0 { + buffer.WriteString("Service Identities:\n") + for _, svcIdentity := range token.ACLToken.ServiceIdentities { + formatServiceIdentity(svcIdentity, WHITESPACE_2) + } + } + + formatNodeIdentity := func(nodeIdentity *api.ACLNodeIdentity, indent string) { + buffer.WriteString(fmt.Sprintf(indent+"Name: %s (Datacenter: %s)\n", nodeIdentity.NodeName, nodeIdentity.Datacenter)) + identity := structs.ACLNodeIdentity{NodeName: nodeIdentity.NodeName, Datacenter: nodeIdentity.Datacenter} + policy := identity.SyntheticPolicy(&entMeta) + buffer.WriteString(fmt.Sprintf(indent+WHITESPACE_2+"Description: %s\n", policy.Description)) + buffer.WriteString(indent + WHITESPACE_2 + "Rules:") + buffer.WriteString(strings.ReplaceAll(policy.Rules, "\n", "\n"+indent+WHITESPACE_4)) + buffer.WriteString("\n\n") + } + if len(token.ACLToken.NodeIdentities) > 0 { + buffer.WriteString("Node Identities:\n") + for _, nodeIdentity := range token.ACLToken.NodeIdentities { + formatNodeIdentity(nodeIdentity, WHITESPACE_2) + } + } + + formatRole := func(role api.ACLRole, indent string) { + buffer.WriteString(fmt.Sprintf(indent+"Role Name: %s\n", role.Name)) + buffer.WriteString(fmt.Sprintf(indent+WHITESPACE_2+"ID: %s\n", role.ID)) + buffer.WriteString(fmt.Sprintf(indent+WHITESPACE_2+"Description: %s\n", role.Description)) + + if len(role.Policies) > 0 { + buffer.WriteString(indent + WHITESPACE_2 + "Policies:\n") + for _, policyLink := range role.Policies { + formatPolicy(policies[policyLink.ID], indent+WHITESPACE_4) + } + } + + if len(role.ServiceIdentities) > 0 { 
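+			// Render each service identity together with the synthetic policy it expands to.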
+ buffer.WriteString(indent + WHITESPACE_2 + "Service Identities:\n") + for _, svcIdentity := range role.ServiceIdentities { + formatServiceIdentity(svcIdentity, indent+WHITESPACE_4) + } + } + + if len(role.NodeIdentities) > 0 { + buffer.WriteString(indent + WHITESPACE_2 + "Node Identities:\n") + for _, nodeIdentity := range role.NodeIdentities { + formatNodeIdentity(nodeIdentity, indent+WHITESPACE_4) + } + } + } + if len(token.ACLToken.Roles) > 0 { + buffer.WriteString("Roles:\n") + for _, roleLink := range token.ACLToken.Roles { + role := roles[roleLink.ID] + formatRole(role, WHITESPACE_2) + } + } + + buffer.WriteString("=== End of Authorizer Layer 0: Token ===\n") + + if len(token.NamespaceDefaultPolicies) > 0 || len(token.NamespaceDefaultRoles) > 0 { + buffer.WriteString("=== Start of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) ===\n") + buffer.WriteString(fmt.Sprintf("Description: ACL Roles inherited by all Tokens in Namespace %q\n\n", token.Namespace)) + + buffer.WriteString("Namespace Policy Defaults:\n") + for _, policyID := range token.NamespaceDefaultPolicies { + formatPolicy(policies[policyID], WHITESPACE_2) + } + + buffer.WriteString("Namespace Role Defaults:\n") + for _, roleID := range token.NamespaceDefaultRoles { + formatRole(roles[roleID], WHITESPACE_2) + } + + buffer.WriteString("=== End of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) ===\n") + } + + buffer.WriteString("=== Start of Authorizer Layer 2: Agent Configuration Defaults (Inherited) ===\n") + buffer.WriteString("Description: Defined at request-time by the agent that resolves the ACL token; other agents may have different configuration defaults\n") + buffer.WriteString(fmt.Sprintf("Resolved By Agent: %q\n\n", token.ResolvedByAgent)) + buffer.WriteString(fmt.Sprintf("Default Policy: %s\n", token.AgentACLDefaultPolicy)) + buffer.WriteString(WHITESPACE_2 + "Description: Backstop rule used if no preceding layer has a matching rule (refer to default_policy option in agent configuration)\n\n") + buffer.WriteString(fmt.Sprintf("Down Policy: %s\n", token.AgentACLDownPolicy)) + buffer.WriteString(WHITESPACE_2 + "Description: Defines what to do if this Token's information cannot be read from the primary_datacenter (refer to down_policy option in agent configuration)\n\n") + + return buffer.String(), nil +} + func (f *prettyFormatter) FormatTokenList(tokens []*api.ACLTokenListEntry) (string, error) { var buffer bytes.Buffer @@ -204,3 +362,11 @@ func (f *jsonFormatter) FormatToken(token *api.ACLToken) (string, error) { } return string(b), nil } + +func (f *jsonFormatter) FormatTokenExpanded(token *api.ACLTokenExpanded) (string, error) { + b, err := json.MarshalIndent(token, "", " ") + if err != nil { + return "", fmt.Errorf("Failed to marshal token: %v", err) + } + return string(b), nil +} diff --git a/command/acl/token/formatter_oss_test.go b/command/acl/token/formatter_oss_test.go new file mode 100644 index 000000000..d825a5ee3 --- /dev/null +++ b/command/acl/token/formatter_oss_test.go @@ -0,0 +1,12 @@ +//go:build !consulent +// +build !consulent + +package token + +import ( + "testing" +) + +func TestFormatTokenExpanded(t *testing.T) { + testFormatTokenExpanded(t, "FormatTokenExpanded/oss") +} diff --git a/command/acl/token/formatter_test.go b/command/acl/token/formatter_test.go index a267c385f..ba93e9dc0 100644 --- a/command/acl/token/formatter_test.go +++ b/command/acl/token/formatter_test.go @@ -254,3 +254,239 @@ func TestFormatTokenList(t *testing.T) { }) } } + +type testCase struct { + 
tokenExpanded api.ACLTokenExpanded + overrideGoldenName string +} + +func timeRef(in time.Time) *time.Time { + return &in +} + +var expandedTokenTestCases = map[string]testCase{ + "basic": { + tokenExpanded: api.ACLTokenExpanded{ + ExpandedPolicies: []api.ACLPolicy{ + { + ID: "beb04680-815b-4d7c-9e33-3d707c24672c", + Name: "foo", + Description: "user policy on token", + Rules: `service_prefix "" { + policy = "read" +}`, + }, + { + ID: "18788457-584c-4812-80d3-23d403148a90", + Name: "bar", + Description: "other user policy on token", + Rules: `operator = "read"`, + }, + }, + AgentACLDefaultPolicy: "allow", + AgentACLDownPolicy: "deny", + ResolvedByAgent: "leader", + ACLToken: api.ACLToken{ + AccessorID: "fbd2447f-7479-4329-ad13-b021d74f86ba", + SecretID: "869c6e91-4de9-4dab-b56e-87548435f9c6", + Description: "test token", + Local: false, + CreateTime: time.Date(2020, 5, 22, 18, 52, 31, 0, time.UTC), + Hash: []byte{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}, + CreateIndex: 42, + ModifyIndex: 100, + Policies: []*api.ACLLink{ + { + ID: "beb04680-815b-4d7c-9e33-3d707c24672c", + Name: "foo", + }, + { + ID: "18788457-584c-4812-80d3-23d403148a90", + Name: "bar", + }, + }, + }, + }, + }, + "complex": { + tokenExpanded: api.ACLTokenExpanded{ + ExpandedPolicies: []api.ACLPolicy{ + { + ID: "beb04680-815b-4d7c-9e33-3d707c24672c", + Name: "hobbiton", + Description: "user policy on token", + Rules: `service_prefix "" { + policy = "read" +}`, + }, + { + ID: "18788457-584c-4812-80d3-23d403148a90", + Name: "bywater", + Description: "other user policy on token", + Rules: `operator = "read"`, + }, + { + ID: "6204f4cd-4709-441c-ac1b-cb029e940263", + Name: "shire-policy", + Description: "policy for shire role", + Rules: `operator = "write"`, + }, + { + ID: "e86f0d1f-71b1-4690-bdfd-ff8c2cd4ae93", + Name: "west-farthing-policy", + Description: "policy for west-farthing role", + Rules: `service "foo" { + policy = "read" +}`, + }, + { + ID: "2b582ff1-4a43-457f-8a2b-30a8265e29a5", + Name: "default-policy-1", + Description: "default policy 1", + Rules: `key "foo" { policy = "write" }`, + }, + { + ID: "b55dce64-f2cc-4eb5-8e5f-50e90e63c6ea", + Name: "default-policy-2", + Description: "default policy 2", + Rules: `key "bar" { policy = "read" }`, + }, + }, + ExpandedRoles: []api.ACLRole{ + { + ID: "3b0a78fe-b9c3-40de-b8ea-7d4d6674b366", + Name: "shire", + Description: "shire role", + Policies: []*api.ACLRolePolicyLink{ + { + ID: "6204f4cd-4709-441c-ac1b-cb029e940263", + }, + }, + ServiceIdentities: []*api.ACLServiceIdentity{ + { + ServiceName: "foo", + Datacenters: []string{"middleearth-southwest"}, + }, + }, + }, + { + ID: "6c9d1e1d-34bc-4d55-80f3-add0890ad791", + Name: "west-farthing", + Description: "west-farthing role", + Policies: []*api.ACLRolePolicyLink{ + { + ID: "e86f0d1f-71b1-4690-bdfd-ff8c2cd4ae93", + }, + }, + NodeIdentities: []*api.ACLNodeIdentity{ + { + NodeName: "bar", + Datacenter: "middleearth-southwest", + }, + }, + }, + { + ID: "56033f2b-e1a6-4905-b71d-e011c862bc65", + Name: "ns-default", + Description: "default role", + Policies: []*api.ACLRolePolicyLink{ + { + ID: "b55dce64-f2cc-4eb5-8e5f-50e90e63c6ea", + }, + }, + ServiceIdentities: []*api.ACLServiceIdentity{ + { + ServiceName: "web", + Datacenters: []string{"middleearth-northeast"}, + }, + }, + NodeIdentities: []*api.ACLNodeIdentity{ + { + NodeName: "db", + Datacenter: "middleearth-northwest", + }, + }, + }, + }, + NamespaceDefaultPolicies: []string{"2b582ff1-4a43-457f-8a2b-30a8265e29a5"}, + NamespaceDefaultRoles: 
[]string{"56033f2b-e1a6-4905-b71d-e011c862bc65"}, + AgentACLDefaultPolicy: "deny", + AgentACLDownPolicy: "extend-cache", + ResolvedByAgent: "server-1", + ACLToken: api.ACLToken{ + AccessorID: "fbd2447f-7479-4329-ad13-b021d74f86ba", + SecretID: "869c6e91-4de9-4dab-b56e-87548435f9c6", + Namespace: "foo", + Description: "test token", + Local: false, + AuthMethod: "bar", + AuthMethodNamespace: "baz", + CreateTime: time.Date(2020, 5, 22, 18, 52, 31, 0, time.UTC), + ExpirationTime: timeRef(time.Date(2020, 5, 22, 19, 52, 31, 0, time.UTC)), + Hash: []byte{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}, + CreateIndex: 5, + ModifyIndex: 10, + Policies: []*api.ACLLink{ + { + ID: "beb04680-815b-4d7c-9e33-3d707c24672c", + Name: "hobbiton", + }, + { + ID: "18788457-584c-4812-80d3-23d403148a90", + Name: "bywater", + }, + }, + Roles: []*api.ACLLink{ + { + ID: "3b0a78fe-b9c3-40de-b8ea-7d4d6674b366", + Name: "shire", + }, + { + ID: "6c9d1e1d-34bc-4d55-80f3-add0890ad791", + Name: "west-farthing", + }, + }, + ServiceIdentities: []*api.ACLServiceIdentity{ + { + ServiceName: "gardener", + Datacenters: []string{"middleearth-northwest"}, + }, + }, + NodeIdentities: []*api.ACLNodeIdentity{ + { + NodeName: "bagend", + Datacenter: "middleearth-northwest", + }, + }, + }, + }, + }, +} + +func testFormatTokenExpanded(t *testing.T, dirPath string) { + formatters := map[string]Formatter{ + "pretty": newPrettyFormatter(false), + "pretty-meta": newPrettyFormatter(true), + // the JSON formatter ignores the showMeta + "json": newJSONFormatter(false), + } + + for name, tcase := range expandedTokenTestCases { + t.Run(name, func(t *testing.T) { + for fmtName, formatter := range formatters { + t.Run(fmtName, func(t *testing.T) { + actual, err := formatter.FormatTokenExpanded(&tcase.tokenExpanded) + require.NoError(t, err) + + gName := fmt.Sprintf("%s.%s", name, fmtName) + if tcase.overrideGoldenName != "" { + gName = tcase.overrideGoldenName + } + + expected := golden(t, path.Join(dirPath, gName), actual) + require.Equal(t, expected, actual) + }) + } + }) + } +} diff --git a/command/acl/token/read/token_read.go b/command/acl/token/read/token_read.go index 8b0616cdc..885d7d916 100644 --- a/command/acl/token/read/token_read.go +++ b/command/acl/token/read/token_read.go @@ -28,6 +28,7 @@ type cmd struct { self bool showMeta bool format string + expanded bool } func (c *cmd) init() { @@ -36,6 +37,8 @@ func (c *cmd) init() { "as the content hash and Raft indices should be shown for each entry") c.flags.BoolVar(&c.self, "self", false, "Indicates that the current HTTP token "+ "should be read by secret ID instead of expecting a -id option") + c.flags.BoolVar(&c.expanded, "expanded", false, "Indicates that the contents of the "+ + " policies and roles affecting the token should also be shown.") c.flags.StringVar(&c.tokenID, "id", "", "The Accessor ID of the token to read. 
"+ "It may be specified as a unique ID prefix but will error if the prefix "+ "matches multiple token Accessor IDs") @@ -69,6 +72,7 @@ func (c *cmd) Run(args []string) int { } var t *api.ACLToken + var expanded *api.ACLTokenExpanded if !c.self { tokenID, err := acl.GetTokenIDFromPartial(client, c.tokenID) if err != nil { @@ -76,7 +80,12 @@ func (c *cmd) Run(args []string) int { return 1 } - t, _, err = client.ACL().TokenRead(tokenID, nil) + if !c.expanded { + t, _, err = client.ACL().TokenRead(tokenID, nil) + } else { + expanded, _, err = client.ACL().TokenReadExpanded(tokenID, nil) + } + if err != nil { c.UI.Error(fmt.Sprintf("Error reading token %q: %v", tokenID, err)) return 1 @@ -94,7 +103,12 @@ func (c *cmd) Run(args []string) int { c.UI.Error(err.Error()) return 1 } - out, err := formatter.FormatToken(t) + var out string + if !c.expanded { + out, err = formatter.FormatToken(t) + } else { + out, err = formatter.FormatTokenExpanded(expanded) + } if err != nil { c.UI.Error(err.Error()) return 1 diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden new file mode 100644 index 000000000..cba80e455 --- /dev/null +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden @@ -0,0 +1,48 @@ +{ + "ExpandedPolicies": [ + { + "ID": "beb04680-815b-4d7c-9e33-3d707c24672c", + "Name": "foo", + "Description": "user policy on token", + "Rules": "service_prefix \"\" {\n policy = \"read\"\n}", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "18788457-584c-4812-80d3-23d403148a90", + "Name": "bar", + "Description": "other user policy on token", + "Rules": "operator = \"read\"", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + } + ], + "ExpandedRoles": null, + "NamespaceDefaultPolicies": null, + "NamespaceDefaultRoles": null, + "AgentACLDefaultPolicy": "allow", + "AgentACLDownPolicy": "deny", + "ResolvedByAgent": "leader", + "CreateIndex": 42, + "ModifyIndex": 100, + "AccessorID": "fbd2447f-7479-4329-ad13-b021d74f86ba", + "SecretID": "869c6e91-4de9-4dab-b56e-87548435f9c6", + "Description": "test token", + "Policies": [ + { + "ID": "beb04680-815b-4d7c-9e33-3d707c24672c", + "Name": "foo" + }, + { + "ID": "18788457-584c-4812-80d3-23d403148a90", + "Name": "bar" + } + ], + "Local": false, + "CreateTime": "2020-05-22T18:52:31Z", + "Hash": "YWJjZGVmZ2g=" +} \ No newline at end of file diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty-meta.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty-meta.golden new file mode 100644 index 000000000..401c8ee8a --- /dev/null +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty-meta.golden @@ -0,0 +1,34 @@ +AccessorID: fbd2447f-7479-4329-ad13-b021d74f86ba +SecretID: 869c6e91-4de9-4dab-b56e-87548435f9c6 +Description: test token +Local: false +Create Time: 2020-05-22 18:52:31 +0000 UTC +Hash: 6162636465666768 +Create Index: 42 +Modify Index: 100 +Policies: + Policy Name: foo + ID: beb04680-815b-4d7c-9e33-3d707c24672c + Description: user policy on token + Rules: + service_prefix "" { + policy = "read" + } + + Policy Name: bar + ID: 18788457-584c-4812-80d3-23d403148a90 + Description: other user policy on token + Rules: + operator = "read" + +=== End of Authorizer Layer 0: Token === +=== Start of Authorizer Layer 2: Agent Configuration Defaults (Inherited) === +Description: Defined at request-time by the agent that resolves the 
ACL token; other agents may have different configuration defaults +Resolved By Agent: "leader" + +Default Policy: allow + Description: Backstop rule used if no preceding layer has a matching rule (refer to default_policy option in agent configuration) + +Down Policy: deny + Description: Defines what to do if this Token's information cannot be read from the primary_datacenter (refer to down_policy option in agent configuration) + diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty.golden new file mode 100644 index 000000000..73e1fb40b --- /dev/null +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.pretty.golden @@ -0,0 +1,31 @@ +AccessorID: fbd2447f-7479-4329-ad13-b021d74f86ba +SecretID: 869c6e91-4de9-4dab-b56e-87548435f9c6 +Description: test token +Local: false +Create Time: 2020-05-22 18:52:31 +0000 UTC +Policies: + Policy Name: foo + ID: beb04680-815b-4d7c-9e33-3d707c24672c + Description: user policy on token + Rules: + service_prefix "" { + policy = "read" + } + + Policy Name: bar + ID: 18788457-584c-4812-80d3-23d403148a90 + Description: other user policy on token + Rules: + operator = "read" + +=== End of Authorizer Layer 0: Token === +=== Start of Authorizer Layer 2: Agent Configuration Defaults (Inherited) === +Description: Defined at request-time by the agent that resolves the ACL token; other agents may have different configuration defaults +Resolved By Agent: "leader" + +Default Policy: allow + Description: Backstop rule used if no preceding layer has a matching rule (refer to default_policy option in agent configuration) + +Down Policy: deny + Description: Defines what to do if this Token's information cannot be read from the primary_datacenter (refer to down_policy option in agent configuration) + diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden new file mode 100644 index 000000000..36931e219 --- /dev/null +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden @@ -0,0 +1,191 @@ +{ + "ExpandedPolicies": [ + { + "ID": "beb04680-815b-4d7c-9e33-3d707c24672c", + "Name": "hobbiton", + "Description": "user policy on token", + "Rules": "service_prefix \"\" {\n policy = \"read\"\n}", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "18788457-584c-4812-80d3-23d403148a90", + "Name": "bywater", + "Description": "other user policy on token", + "Rules": "operator = \"read\"", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "6204f4cd-4709-441c-ac1b-cb029e940263", + "Name": "shire-policy", + "Description": "policy for shire role", + "Rules": "operator = \"write\"", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "e86f0d1f-71b1-4690-bdfd-ff8c2cd4ae93", + "Name": "west-farthing-policy", + "Description": "policy for west-farthing role", + "Rules": "service \"foo\" {\n policy = \"read\"\n}", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "2b582ff1-4a43-457f-8a2b-30a8265e29a5", + "Name": "default-policy-1", + "Description": "default policy 1", + "Rules": "key \"foo\" { policy = \"write\" }", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "b55dce64-f2cc-4eb5-8e5f-50e90e63c6ea", + "Name": "default-policy-2", + "Description": "default 
policy 2", + "Rules": "key \"bar\" { policy = \"read\" }", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + } + ], + "ExpandedRoles": [ + { + "ID": "3b0a78fe-b9c3-40de-b8ea-7d4d6674b366", + "Name": "shire", + "Description": "shire role", + "Policies": [ + { + "ID": "6204f4cd-4709-441c-ac1b-cb029e940263", + "Name": "" + } + ], + "ServiceIdentities": [ + { + "ServiceName": "foo", + "Datacenters": [ + "middleearth-southwest" + ] + } + ], + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "6c9d1e1d-34bc-4d55-80f3-add0890ad791", + "Name": "west-farthing", + "Description": "west-farthing role", + "Policies": [ + { + "ID": "e86f0d1f-71b1-4690-bdfd-ff8c2cd4ae93", + "Name": "" + } + ], + "NodeIdentities": [ + { + "NodeName": "bar", + "Datacenter": "middleearth-southwest" + } + ], + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "56033f2b-e1a6-4905-b71d-e011c862bc65", + "Name": "ns-default", + "Description": "default role", + "Policies": [ + { + "ID": "b55dce64-f2cc-4eb5-8e5f-50e90e63c6ea", + "Name": "" + } + ], + "ServiceIdentities": [ + { + "ServiceName": "web", + "Datacenters": [ + "middleearth-northeast" + ] + } + ], + "NodeIdentities": [ + { + "NodeName": "db", + "Datacenter": "middleearth-northwest" + } + ], + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + } + ], + "NamespaceDefaultPolicies": [ + "2b582ff1-4a43-457f-8a2b-30a8265e29a5" + ], + "NamespaceDefaultRoles": [ + "56033f2b-e1a6-4905-b71d-e011c862bc65" + ], + "AgentACLDefaultPolicy": "deny", + "AgentACLDownPolicy": "extend-cache", + "ResolvedByAgent": "server-1", + "CreateIndex": 5, + "ModifyIndex": 10, + "AccessorID": "fbd2447f-7479-4329-ad13-b021d74f86ba", + "SecretID": "869c6e91-4de9-4dab-b56e-87548435f9c6", + "Description": "test token", + "Policies": [ + { + "ID": "beb04680-815b-4d7c-9e33-3d707c24672c", + "Name": "hobbiton" + }, + { + "ID": "18788457-584c-4812-80d3-23d403148a90", + "Name": "bywater" + } + ], + "Roles": [ + { + "ID": "3b0a78fe-b9c3-40de-b8ea-7d4d6674b366", + "Name": "shire" + }, + { + "ID": "6c9d1e1d-34bc-4d55-80f3-add0890ad791", + "Name": "west-farthing" + } + ], + "ServiceIdentities": [ + { + "ServiceName": "gardener", + "Datacenters": [ + "middleearth-northwest" + ] + } + ], + "NodeIdentities": [ + { + "NodeName": "bagend", + "Datacenter": "middleearth-northwest" + } + ], + "Local": false, + "AuthMethod": "bar", + "ExpirationTime": "2020-05-22T19:52:31Z", + "CreateTime": "2020-05-22T18:52:31Z", + "Hash": "YWJjZGVmZ2g=", + "Namespace": "foo", + "AuthMethodNamespace": "baz" +} \ No newline at end of file diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty-meta.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty-meta.golden new file mode 100644 index 000000000..bc8033edf --- /dev/null +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty-meta.golden @@ -0,0 +1,166 @@ +AccessorID: fbd2447f-7479-4329-ad13-b021d74f86ba +SecretID: 869c6e91-4de9-4dab-b56e-87548435f9c6 +Namespace: foo +Description: test token +Local: false +Auth Method: bar (Namespace: baz) +Create Time: 2020-05-22 18:52:31 +0000 UTC +Expiration Time: 2020-05-22 19:52:31 +0000 UTC +Hash: 6162636465666768 +Create Index: 5 +Modify Index: 10 +Policies: + Policy Name: hobbiton + ID: beb04680-815b-4d7c-9e33-3d707c24672c + Description: user policy on token + Rules: + service_prefix "" { + policy = "read" + } + + Policy Name: bywater + ID: 18788457-584c-4812-80d3-23d403148a90 + Description: other user policy on 
token + Rules: + operator = "read" + +Service Identities: + Name: gardener (Datacenters: middleearth-northwest) + Description: synthetic policy for service identity "gardener" + Rules: + service "gardener" { + policy = "write" + } + service "gardener-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + +Node Identities: + Name: bagend (Datacenter: middleearth-northwest) + Description: synthetic policy for node identity "bagend" + Rules: + node "bagend" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + +Roles: + Role Name: shire + ID: 3b0a78fe-b9c3-40de-b8ea-7d4d6674b366 + Description: shire role + Policies: + Policy Name: shire-policy + ID: 6204f4cd-4709-441c-ac1b-cb029e940263 + Description: policy for shire role + Rules: + operator = "write" + + Service Identities: + Name: foo (Datacenters: middleearth-southwest) + Description: synthetic policy for service identity "foo" + Rules: + service "foo" { + policy = "write" + } + service "foo-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + + Role Name: west-farthing + ID: 6c9d1e1d-34bc-4d55-80f3-add0890ad791 + Description: west-farthing role + Policies: + Policy Name: west-farthing-policy + ID: e86f0d1f-71b1-4690-bdfd-ff8c2cd4ae93 + Description: policy for west-farthing role + Rules: + service "foo" { + policy = "read" + } + + Node Identities: + Name: bar (Datacenter: middleearth-southwest) + Description: synthetic policy for node identity "bar" + Rules: + node "bar" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + +=== End of Authorizer Layer 0: Token === +=== Start of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) === +Description: ACL Roles inherited by all Tokens in Namespace "foo" + +Namespace Policy Defaults: + Policy Name: default-policy-1 + ID: 2b582ff1-4a43-457f-8a2b-30a8265e29a5 + Description: default policy 1 + Rules: + key "foo" { policy = "write" } + +Namespace Role Defaults: + Role Name: ns-default + ID: 56033f2b-e1a6-4905-b71d-e011c862bc65 + Description: default role + Policies: + Policy Name: default-policy-2 + ID: b55dce64-f2cc-4eb5-8e5f-50e90e63c6ea + Description: default policy 2 + Rules: + key "bar" { policy = "read" } + + Service Identities: + Name: web (Datacenters: middleearth-northeast) + Description: synthetic policy for service identity "web" + Rules: + service "web" { + policy = "write" + } + service "web-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + + Node Identities: + Name: db (Datacenter: middleearth-northwest) + Description: synthetic policy for node identity "db" + Rules: + node "db" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + +=== End of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) === +=== Start of Authorizer Layer 2: Agent Configuration Defaults (Inherited) === +Description: Defined at request-time by the agent that resolves the ACL token; other agents may have different configuration defaults +Resolved By Agent: "server-1" + +Default Policy: deny + Description: Backstop rule used if no preceding layer has a matching rule (refer to default_policy option in agent configuration) + +Down Policy: extend-cache + Description: Defines what to do if this Token's information cannot be read from the primary_datacenter (refer to down_policy option in agent configuration) + diff --git 
a/command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty.golden new file mode 100644 index 000000000..215cf8b7a --- /dev/null +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.pretty.golden @@ -0,0 +1,163 @@ +AccessorID: fbd2447f-7479-4329-ad13-b021d74f86ba +SecretID: 869c6e91-4de9-4dab-b56e-87548435f9c6 +Namespace: foo +Description: test token +Local: false +Auth Method: bar (Namespace: baz) +Create Time: 2020-05-22 18:52:31 +0000 UTC +Expiration Time: 2020-05-22 19:52:31 +0000 UTC +Policies: + Policy Name: hobbiton + ID: beb04680-815b-4d7c-9e33-3d707c24672c + Description: user policy on token + Rules: + service_prefix "" { + policy = "read" + } + + Policy Name: bywater + ID: 18788457-584c-4812-80d3-23d403148a90 + Description: other user policy on token + Rules: + operator = "read" + +Service Identities: + Name: gardener (Datacenters: middleearth-northwest) + Description: synthetic policy for service identity "gardener" + Rules: + service "gardener" { + policy = "write" + } + service "gardener-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + +Node Identities: + Name: bagend (Datacenter: middleearth-northwest) + Description: synthetic policy for node identity "bagend" + Rules: + node "bagend" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + +Roles: + Role Name: shire + ID: 3b0a78fe-b9c3-40de-b8ea-7d4d6674b366 + Description: shire role + Policies: + Policy Name: shire-policy + ID: 6204f4cd-4709-441c-ac1b-cb029e940263 + Description: policy for shire role + Rules: + operator = "write" + + Service Identities: + Name: foo (Datacenters: middleearth-southwest) + Description: synthetic policy for service identity "foo" + Rules: + service "foo" { + policy = "write" + } + service "foo-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + + Role Name: west-farthing + ID: 6c9d1e1d-34bc-4d55-80f3-add0890ad791 + Description: west-farthing role + Policies: + Policy Name: west-farthing-policy + ID: e86f0d1f-71b1-4690-bdfd-ff8c2cd4ae93 + Description: policy for west-farthing role + Rules: + service "foo" { + policy = "read" + } + + Node Identities: + Name: bar (Datacenter: middleearth-southwest) + Description: synthetic policy for node identity "bar" + Rules: + node "bar" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + +=== End of Authorizer Layer 0: Token === +=== Start of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) === +Description: ACL Roles inherited by all Tokens in Namespace "foo" + +Namespace Policy Defaults: + Policy Name: default-policy-1 + ID: 2b582ff1-4a43-457f-8a2b-30a8265e29a5 + Description: default policy 1 + Rules: + key "foo" { policy = "write" } + +Namespace Role Defaults: + Role Name: ns-default + ID: 56033f2b-e1a6-4905-b71d-e011c862bc65 + Description: default role + Policies: + Policy Name: default-policy-2 + ID: b55dce64-f2cc-4eb5-8e5f-50e90e63c6ea + Description: default policy 2 + Rules: + key "bar" { policy = "read" } + + Service Identities: + Name: web (Datacenters: middleearth-northeast) + Description: synthetic policy for service identity "web" + Rules: + service "web" { + policy = "write" + } + service "web-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + + Node Identities: + Name: db (Datacenter: 
middleearth-northwest) + Description: synthetic policy for node identity "db" + Rules: + node "db" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + +=== End of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) === +=== Start of Authorizer Layer 2: Agent Configuration Defaults (Inherited) === +Description: Defined at request-time by the agent that resolves the ACL token; other agents may have different configuration defaults +Resolved By Agent: "server-1" + +Default Policy: deny + Description: Backstop rule used if no preceding layer has a matching rule (refer to default_policy option in agent configuration) + +Down Policy: extend-cache + Description: Defines what to do if this Token's information cannot be read from the primary_datacenter (refer to down_policy option in agent configuration) + From 9daed50c3d6d0524b97481210b3d7911c51f62f5 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Thu, 31 Mar 2022 13:49:42 -0500 Subject: [PATCH 053/785] build: run mog separately after the protobufs are generated (#12665) Also ensure that we run mog serially on each package in dependency order. --- GNUmakefile | 23 +++++++++++++++++++---- build-support/scripts/proto-gen.sh | 13 ++----------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 873d1ad60..ecd0e8e38 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -22,7 +22,7 @@ GOPROTOTOOLS = \ github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ github.com/hashicorp/protoc-gen-go-binary@master \ github.com/favadi/protoc-go-inject-tag@v1.3.0 \ - github.com/hashicorp/mog@v0.1.2 + github.com/hashicorp/mog@v0.2.0 GOTAGS ?= GOPATH=$(shell go env GOPATH) @@ -41,6 +41,7 @@ GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) PROTOFILES?=$(shell find . -name '*.proto' | grep -v 'vendor/' | grep -v '.protobuf' ) PROTOGOFILES=$(PROTOFILES:.proto=.pb.go) PROTOGOBINFILES=$(PROTOFILES:.proto=.pb.binary.go) +PROTO_MOG_ORDER=$(shell go list -tags '$(GOTAGS)' -deps ./proto/pb... 
| grep "consul/proto") ifeq ($(FORCE_REBUILD),1) NOCACHE=--no-cache @@ -372,7 +373,21 @@ protoc-install: chmod +x $(PROTOC_ROOT)/bin/protoc ; \ fi -proto: protoc-install $(PROTOGOFILES) $(PROTOGOBINFILES) +.PHONY: proto +proto: -protoc-files -mog-files + +.PHONY: -mog-files +-mog-files: + @for FULL_PKG in $(PROTO_MOG_ORDER); do \ + PKG="$${FULL_PKG/#github.com\/hashicorp\/consul\//.\/}" ; \ + find "$$PKG" -name '*.gen.go' -delete ; \ + echo "mog -tags '$(GOTAGS)' -source \"$${PKG}/*.pb.go\"" ; \ + mog -tags '$(GOTAGS)' -source "$${PKG}/*.pb.go" ; \ + done + @echo "Generated all mog Go files" + +.PHONY: -protoc-files +-protoc-files: protoc-install $(PROTOGOFILES) $(PROTOGOBINFILES) @echo "Generated all protobuf Go files" %.pb.go %.pb.binary.go: %.proto @@ -403,6 +418,6 @@ envoy-regen: @find "command/connect/envoy/testdata" -name '*.golden' -delete @go test -tags '$(GOTAGS)' ./command/connect/envoy -update -.PHONY: all bin dev dist cov test test-internal cover lint ui static-assets tools proto-tools protoc-check +.PHONY: all bin dev dist cov test test-internal cover lint ui static-assets tools proto-tools .PHONY: docker-images go-build-image ui-build-image static-assets-docker consul-docker ui-docker -.PHONY: version proto test-envoy-integ +.PHONY: version test-envoy-integ diff --git a/build-support/scripts/proto-gen.sh b/build-support/scripts/proto-gen.sh index eff35b09a..693a29de1 100755 --- a/build-support/scripts/proto-gen.sh +++ b/build-support/scripts/proto-gen.sh @@ -88,7 +88,6 @@ function main { local proto_go_path=${proto_path%%.proto}.pb.go local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go - local mog_input_path="$(dirname "${proto_path}")" local go_proto_out="paths=source_relative" if is_set "${grpc}" @@ -101,14 +100,14 @@ function main { go_proto_out="${go_proto_out}:" fi - rm -f "${proto_go_path}" ${proto_go_bin_path}" ${proto_go_rpcglue_path}" "${mog_input_path}/*.gen.go" + rm -f "${proto_go_path}" ${proto_go_bin_path}" ${proto_go_rpcglue_path}" # How we run protoc probably needs some documentation. # # This is the path to where # -I="${golang_proto_path}/protobuf" \ local -i ret=0 - status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path} ${mog_input_path}/*.gen.go" + status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path}" echo "debug_run ${protoc_bin} \ -I=\"${golang_proto_path}\" \ -I=\"${golang_proto_mod_path}\" \ @@ -139,14 +138,6 @@ function main { return 1 fi - debug_run mog -source ./${mog_input_path} -tags ${GOTAGS} -ignore-package-load-errors - - if test $? -ne 0 - then - err "Failed to generate mog outputs from ${mog_input_path}" - return 1 - fi - BUILD_TAGS=$(head -n 2 "${proto_path}" | grep '^//go:build\|// +build') if test -n "${BUILD_TAGS}" then From 8552efa955c35842fd3e02d51e0934618e50c664 Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Thu, 31 Mar 2022 15:11:49 -0400 Subject: [PATCH 054/785] auto-reload configuration when config files change (#12329) * add config watcher to the config package * add logging to watcher * add test and refactor to add WatcherEvent. * add all API calls and fix a bug with recreated files * add tests for watcher * remove the unnecessary use of context * Add debug log and a test for file rename * use inode to detect if the file is recreated/replaced and only listen to create events. 
* tidy ups (#1535) * tidy ups * Add tests for inode reconcile * fix linux vs windows syscall * fix linux vs windows syscall * fix windows compile error * increase timeout * use ctime ID * remove remove/creation test as it's a use case that fails in linux * fix linux/windows to use Ino/CreationTime * fix the watcher to only overwrite the current file id * fix linter error * fix remove/create test * set reconcile loop to 200 milliseconds * fix watcher to not trigger an event on remove, add more tests * on a remove event try to add the file back to the watcher and trigger the handler if successful * fix race condition * fix flaky test * fix race conditions * set level to info * fix handling when a file is removed and an event for it arrives afterwards * fix to trigger the handler when we get a remove but the re-add fails * fix error message * add tests for directory watch and fixes * detect if a file is a symlink and return an error on Add * rename Watcher to FileWatcher and remove symlink deref * add fsnotify@v1.5.1 * fix go mod * do not reset timer on errors, rename OS-specific files * rename New func * events trigger on write and rename * add missing test * fix flaky tests * fix flaky test * check reconcile when removed * delete invalid file * fix test to create files with different mod times. * backdate file instead of sleeping * add file watching in the agent command. * fix watcher call to use the new API * add configuration and stop the watcher when the server stops * add certs as watched files * move FileWatcher to the agent start instead of the command code * stop watcher before replacing it * save watched files in the agent * add Add and Remove interfaces to the file watcher * fix Remove to not return an error * use `Add` and `Remove` to update cert files * fix tests * close the events channel on the file watcher even when the context is done * extract `NotAutoReloadableRuntimeConfig` into a separate struct * fix linter errors * add CA configs and outgoing verify to the non-auto-reloadable config * add some logs and fix to use a background context * add tests for auto-config reload * remove stale test * add tests for changes to config files * add check to see if old cert files still trigger updates * rename `NotAutoReloadableRuntimeConfig` to `StaticRuntimeConfig` * fix to re-add both key and cert file. Add a test to cover this case. * review suggestion Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * add check for static runtime config changes * fix test * add changelog file * fix review comments * Apply suggestions from code review Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * update flag description Co-authored-by: FFMMM * fix compilation error * add static runtime config support * fix test * fix review comments * fix log test * Update .changelog/12329.txt Co-authored-by: Dan Upton * transfer tests to runtime_test.go * fix filewatcher Replace to not deadlock. * avoid having lingering locks Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * split ReloadConfig func * fix warning message Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * convert `FileWatcher` into an interface * fix compilation errors * fix tests * extract func for adding and removing files Co-authored-by: Ashwin Venkatesh Co-authored-by: R.B.
Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: FFMMM Co-authored-by: Daniel Upton --- .changelog/12329.txt | 3 + agent/agent.go | 117 +++++- agent/agent_test.go | 394 +++++++++++++++++- agent/config/builder.go | 13 +- agent/config/config.go | 1 + agent/config/file_watcher.go | 122 ++++-- agent/config/file_watcher_test.go | 103 ++++- agent/config/flags.go | 1 + agent/config/runtime.go | 35 +- agent/config/runtime_test.go | 54 ++- .../TestRuntimeConfig_Sanitize.golden | 7 +- agent/local/state.go | 4 +- agent/setup.go | 3 +- agent/structs/acl.go | 15 +- agent/structs/config_entry_intentions.go | 4 +- agent/structs/connect_ca.go | 4 +- agent/testagent.go | 14 +- command/agent/agent.go | 1 - lib/stringslice/stringslice.go | 9 + 19 files changed, 781 insertions(+), 123 deletions(-) create mode 100644 .changelog/12329.txt diff --git a/.changelog/12329.txt b/.changelog/12329.txt new file mode 100644 index 000000000..4960a9bfd --- /dev/null +++ b/.changelog/12329.txt @@ -0,0 +1,3 @@ +```release-note:feature +config: automatically reload config when a file changes using the `auto-reload-config` CLI flag or `auto_reload_config` config option. +``` diff --git a/agent/agent.go b/agent/agent.go index e103629c2..fef53c0f2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "path/filepath" + "reflect" "regexp" "strconv" "strings" @@ -360,6 +361,10 @@ type Agent struct { // run by the Agent routineManager *routine.Manager + // FileWatcher is the watcher responsible to report events when a config file + // changed + FileWatcher config.Watcher + // xdsServer serves the XDS protocol for configuring Envoy proxies. xdsServer *xds.Server @@ -443,6 +448,21 @@ func New(bd BaseDeps) (*Agent, error) { // TODO: pass in a fully populated apiServers into Agent.New a.apiServers = NewAPIServers(a.logger) + for _, f := range []struct { + Cfg tlsutil.ProtocolConfig + }{ + {a.baseDeps.RuntimeConfig.TLS.InternalRPC}, + {a.baseDeps.RuntimeConfig.TLS.GRPC}, + {a.baseDeps.RuntimeConfig.TLS.HTTPS}, + } { + if f.Cfg.KeyFile != "" { + a.baseDeps.WatchedFiles = append(a.baseDeps.WatchedFiles, f.Cfg.KeyFile) + } + if f.Cfg.CertFile != "" { + a.baseDeps.WatchedFiles = append(a.baseDeps.WatchedFiles, f.Cfg.CertFile) + } + } + return &a, nil } @@ -692,6 +712,26 @@ func (a *Agent) Start(ctx context.Context) error { {Name: "pre_release", Value: a.config.VersionPrerelease}, }) + // start a go routine to reload config based on file watcher events + if a.baseDeps.RuntimeConfig.AutoReloadConfig && len(a.baseDeps.WatchedFiles) > 0 { + w, err := config.NewFileWatcher(a.baseDeps.WatchedFiles, a.baseDeps.Logger) + if err != nil { + a.baseDeps.Logger.Error("error loading config", "error", err) + } else { + a.FileWatcher = w + a.baseDeps.Logger.Debug("starting file watcher") + a.FileWatcher.Start(context.Background()) + go func() { + for event := range a.FileWatcher.EventsCh() { + a.baseDeps.Logger.Debug("auto-reload config triggered", "event-file", event.Filename) + err := a.AutoReloadConfig() + if err != nil { + a.baseDeps.Logger.Error("error loading config", "error", err) + } + } + }() + } + } return nil } @@ -1084,8 +1124,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co cfg.SerfWANConfig.MemberlistConfig.CIDRsAllowed = runtimeCfg.SerfAllowedCIDRsWAN cfg.SerfLANConfig.MemberlistConfig.AdvertiseAddr = runtimeCfg.SerfAdvertiseAddrLAN.IP.String() cfg.SerfLANConfig.MemberlistConfig.AdvertisePort = runtimeCfg.SerfAdvertiseAddrLAN.Port - 
cfg.SerfLANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.EncryptVerifyIncoming - cfg.SerfLANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.EncryptVerifyOutgoing + cfg.SerfLANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.StaticRuntimeConfig.EncryptVerifyIncoming + cfg.SerfLANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.StaticRuntimeConfig.EncryptVerifyOutgoing cfg.SerfLANConfig.MemberlistConfig.GossipInterval = runtimeCfg.GossipLANGossipInterval cfg.SerfLANConfig.MemberlistConfig.GossipNodes = runtimeCfg.GossipLANGossipNodes cfg.SerfLANConfig.MemberlistConfig.ProbeInterval = runtimeCfg.GossipLANProbeInterval @@ -1101,8 +1141,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co cfg.SerfWANConfig.MemberlistConfig.BindPort = runtimeCfg.SerfBindAddrWAN.Port cfg.SerfWANConfig.MemberlistConfig.AdvertiseAddr = runtimeCfg.SerfAdvertiseAddrWAN.IP.String() cfg.SerfWANConfig.MemberlistConfig.AdvertisePort = runtimeCfg.SerfAdvertiseAddrWAN.Port - cfg.SerfWANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.EncryptVerifyIncoming - cfg.SerfWANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.EncryptVerifyOutgoing + cfg.SerfWANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.StaticRuntimeConfig.EncryptVerifyIncoming + cfg.SerfWANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.StaticRuntimeConfig.EncryptVerifyOutgoing cfg.SerfWANConfig.MemberlistConfig.GossipInterval = runtimeCfg.GossipWANGossipInterval cfg.SerfWANConfig.MemberlistConfig.GossipNodes = runtimeCfg.GossipWANGossipNodes cfg.SerfWANConfig.MemberlistConfig.ProbeInterval = runtimeCfg.GossipWANProbeInterval @@ -1294,11 +1334,11 @@ func segmentConfig(config *config.RuntimeConfig) ([]consul.NetworkSegment, error if config.ReconnectTimeoutLAN != 0 { serfConf.ReconnectTimeout = config.ReconnectTimeoutLAN } - if config.EncryptVerifyIncoming { - serfConf.MemberlistConfig.GossipVerifyIncoming = config.EncryptVerifyIncoming + if config.StaticRuntimeConfig.EncryptVerifyIncoming { + serfConf.MemberlistConfig.GossipVerifyIncoming = config.StaticRuntimeConfig.EncryptVerifyIncoming } - if config.EncryptVerifyOutgoing { - serfConf.MemberlistConfig.GossipVerifyOutgoing = config.EncryptVerifyOutgoing + if config.StaticRuntimeConfig.EncryptVerifyOutgoing { + serfConf.MemberlistConfig.GossipVerifyOutgoing = config.StaticRuntimeConfig.EncryptVerifyOutgoing } var rpcAddr *net.TCPAddr @@ -1372,6 +1412,11 @@ func (a *Agent) ShutdownAgent() error { // Stop the watches to avoid any notification/state change during shutdown a.stopAllWatches() + // Stop config file watcher + if a.FileWatcher != nil { + a.FileWatcher.Stop() + } + a.stopLicenseManager() // this would be cancelled anyways (by the closing of the shutdown ch) but @@ -3694,10 +3739,18 @@ func (a *Agent) DisableNodeMaintenance() { a.logger.Info("Node left maintenance mode") } +func (a *Agent) AutoReloadConfig() error { + return a.reloadConfig(true) +} + +func (a *Agent) ReloadConfig() error { + return a.reloadConfig(false) +} + // ReloadConfig will atomically reload all configuration, including // all services, checks, tokens, metadata, dnsServer configs, etc. // It will also reload all ongoing watches. -func (a *Agent) ReloadConfig() error { +func (a *Agent) reloadConfig(autoReload bool) error { newCfg, err := a.baseDeps.AutoConfig.ReadConfig() if err != nil { return err @@ -3708,6 +3761,39 @@ func (a *Agent) ReloadConfig() error { // breaking some existing behavior. 
newCfg.NodeID = a.config.NodeID + //if auto reload is enabled, make sure we have the right certs file watched. + if autoReload { + for _, f := range []struct { + oldCfg tlsutil.ProtocolConfig + newCfg tlsutil.ProtocolConfig + }{ + {a.config.TLS.InternalRPC, newCfg.TLS.InternalRPC}, + {a.config.TLS.GRPC, newCfg.TLS.GRPC}, + {a.config.TLS.HTTPS, newCfg.TLS.HTTPS}, + } { + if f.oldCfg.KeyFile != f.newCfg.KeyFile { + a.FileWatcher.Replace(f.oldCfg.KeyFile, f.newCfg.KeyFile) + if err != nil { + return err + } + } + if f.oldCfg.CertFile != f.newCfg.CertFile { + a.FileWatcher.Replace(f.oldCfg.CertFile, f.newCfg.CertFile) + if err != nil { + return err + } + } + if revertStaticConfig(f.oldCfg, f.newCfg) { + a.logger.Warn("Changes to your configuration were detected that for security reasons cannot be automatically applied by 'auto_reload_config'. Manually reload your configuration (e.g. with 'consul reload') to apply these changes.", "StaticRuntimeConfig", f.oldCfg, "StaticRuntimeConfig From file", f.newCfg) + } + } + if !reflect.DeepEqual(newCfg.StaticRuntimeConfig, a.config.StaticRuntimeConfig) { + a.logger.Warn("Changes to your configuration were detected that for security reasons cannot be automatically applied by 'auto_reload_config'. Manually reload your configuration (e.g. with 'consul reload') to apply these changes.", "StaticRuntimeConfig", a.config.StaticRuntimeConfig, "StaticRuntimeConfig From file", newCfg.StaticRuntimeConfig) + // reset not reloadable fields + newCfg.StaticRuntimeConfig = a.config.StaticRuntimeConfig + } + } + // DEPRECATED: Warn users on reload if they're emitting deprecated metrics. Remove this warning and the flagged // metrics in a future release of Consul. if !a.config.Telemetry.DisableCompatOneNine { @@ -3717,6 +3803,19 @@ func (a *Agent) ReloadConfig() error { return a.reloadConfigInternal(newCfg) } +func revertStaticConfig(oldCfg tlsutil.ProtocolConfig, newCfg tlsutil.ProtocolConfig) bool { + newNewCfg := oldCfg + newNewCfg.CertFile = newCfg.CertFile + newNewCfg.KeyFile = newCfg.KeyFile + newOldcfg := newCfg + newOldcfg.CertFile = oldCfg.CertFile + newOldcfg.KeyFile = oldCfg.KeyFile + if !reflect.DeepEqual(newOldcfg, oldCfg) { + return true + } + return false +} + // reloadConfigInternal is mainly needed for some unit tests. Instead of parsing // the configuration using CLI flags and on disk config, this just takes a // runtime configuration and applies it. 
diff --git a/agent/agent_test.go b/agent/agent_test.go index 25708ace6..43d9bd31d 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -5328,9 +5328,395 @@ func uniqueAddrs(srvs []apiServer) map[string]struct{} { return result } -func runStep(t *testing.T, name string, fn func(t *testing.T)) { - t.Helper() - if !t.Run(name, fn) { - t.FailNow() +func TestAgent_AutoReloadDoReload_WhenCertAndKeyUpdated(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") } + + certsDir := testutil.TempDir(t, "auto-config") + + // write some test TLS certificates out to the cfg dir + serverName := "server.dc1.consul" + signer, _, err := tlsutil.GeneratePrivateKey() + require.NoError(t, err) + + ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + require.NoError(t, err) + + cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + + certFile := filepath.Join(certsDir, "cert.pem") + caFile := filepath.Join(certsDir, "cacert.pem") + keyFile := filepath.Join(certsDir, "key.pem") + + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) + + // generate a gossip key + gossipKey := make([]byte, 32) + n, err := rand.Read(gossipKey) + require.NoError(t, err) + require.Equal(t, 32, n) + gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey) + + hclConfig := TestACLConfigWithParams(nil) + ` + encrypt = "` + gossipKeyEncoded + `" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "` + caFile + `" + cert_file = "` + certFile + `" + key_file = "` + keyFile + `" + connect { enabled = true } + auto_reload_config = true + ` + + srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig}) + defer srv.Shutdown() + + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) + + aeCert := srv.tlsConfigurator.Cert() + require.NotNil(t, aeCert) + + cert2, privateKey2, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert2), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey2), 0600)) + + retry.Run(t, func(r *retry.R) { + aeCert2 := srv.tlsConfigurator.Cert() + require.NotEqual(r, aeCert.Certificate, aeCert2.Certificate) + }) + +} + +func TestAgent_AutoReloadDoNotReload_WhenCaUpdated(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + certsDir := testutil.TempDir(t, "auto-config") + + // write some test TLS certificates out to the cfg dir + serverName := "server.dc1.consul" + signer, _, err := tlsutil.GeneratePrivateKey() + require.NoError(t, err) + + ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + require.NoError(t, err) + + cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + 
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + + certFile := filepath.Join(certsDir, "cert.pem") + caFile := filepath.Join(certsDir, "cacert.pem") + keyFile := filepath.Join(certsDir, "key.pem") + + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) + + // generate a gossip key + gossipKey := make([]byte, 32) + n, err := rand.Read(gossipKey) + require.NoError(t, err) + require.Equal(t, 32, n) + gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey) + + hclConfig := TestACLConfigWithParams(nil) + ` + encrypt = "` + gossipKeyEncoded + `" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "` + caFile + `" + cert_file = "` + certFile + `" + key_file = "` + keyFile + `" + connect { enabled = true } + auto_reload_config = true + ` + + srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig}) + defer srv.Shutdown() + + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) + + aeCA := srv.tlsConfigurator.ManualCAPems() + require.NotNil(t, aeCA) + + ca2, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + require.NoError(t, err) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca2), 0600)) + + // wait a bit to see if it get updated. + time.Sleep(time.Second) + + aeCA2 := srv.tlsConfigurator.ManualCAPems() + require.NotNil(t, aeCA2) + require.Equal(t, aeCA, aeCA2) +} + +func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + certsDir := testutil.TempDir(t, "auto-config") + + // write some test TLS certificates out to the cfg dir + serverName := "server.dc1.consul" + signer, _, err := tlsutil.GeneratePrivateKey() + require.NoError(t, err) + + ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + require.NoError(t, err) + + cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + + certFile := filepath.Join(certsDir, "cert.pem") + caFile := filepath.Join(certsDir, "cacert.pem") + keyFile := filepath.Join(certsDir, "key.pem") + + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) + + // generate a gossip key + gossipKey := make([]byte, 32) + n, err := rand.Read(gossipKey) + require.NoError(t, err) + require.Equal(t, 32, n) + gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey) + + hclConfig := TestACLConfigWithParams(nil) + + configFile := testutil.TempDir(t, "config") + "/config.hcl" + require.NoError(t, ioutil.WriteFile(configFile, []byte(` + encrypt = "`+gossipKeyEncoded+`" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "`+caFile+`" + cert_file = "`+certFile+`" + key_file = "`+keyFile+`" + connect { enabled = true } + auto_reload_config = true + `), 0600)) + + srv := 
StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}}) + defer srv.Shutdown() + + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) + + cert1 := srv.tlsConfigurator.Cert() + + certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + certFileNew := filepath.Join(certsDir, "cert_new.pem") + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(` + encrypt = "`+gossipKeyEncoded+`" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "`+caFile+`" + cert_file = "`+certFileNew+`" + key_file = "`+keyFile+`" + connect { enabled = true } + auto_reload_config = true + `), 0600)) + + // cert should not change as we did not update the associated key + time.Sleep(1 * time.Second) + retry.Run(t, func(r *retry.R) { + require.Equal(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) + require.Equal(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + }) + + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) + + // cert should change as we did not update the associated key + time.Sleep(1 * time.Second) + retry.Run(t, func(r *retry.R) { + require.NotEqual(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) + require.NotEqual(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + }) +} + +func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + certsDir := testutil.TempDir(t, "auto-config") + + // write some test TLS certificates out to the cfg dir + serverName := "server.dc1.consul" + signer, _, err := tlsutil.GeneratePrivateKey() + require.NoError(t, err) + + ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + require.NoError(t, err) + + cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + + certFile := filepath.Join(certsDir, "cert.pem") + caFile := filepath.Join(certsDir, "cacert.pem") + keyFile := filepath.Join(certsDir, "key.pem") + + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) + + // generate a gossip key + gossipKey := make([]byte, 32) + n, err := rand.Read(gossipKey) + require.NoError(t, err) + require.Equal(t, 32, n) + gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey) + + hclConfig := TestACLConfigWithParams(nil) + + configFile := testutil.TempDir(t, "config") + "/config.hcl" + require.NoError(t, ioutil.WriteFile(configFile, []byte(` + encrypt = "`+gossipKeyEncoded+`" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "`+caFile+`" + cert_file = "`+certFile+`" + key_file = "`+keyFile+`" + connect { enabled = true } + 
auto_reload_config = true + `), 0600)) + + srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}}) + defer srv.Shutdown() + + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) + + cert1 := srv.tlsConfigurator.Cert() + + certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + certFileNew := filepath.Join(certsDir, "cert_new.pem") + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) + // cert should not change as we did not update the associated key + time.Sleep(1 * time.Second) + retry.Run(t, func(r *retry.R) { + require.Equal(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) + require.Equal(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + }) + + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(` + encrypt = "`+gossipKeyEncoded+`" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "`+caFile+`" + cert_file = "`+certFileNew+`" + key_file = "`+keyFile+`" + connect { enabled = true } + auto_reload_config = true + `), 0600)) + + // cert should change as we did not update the associated key + time.Sleep(1 * time.Second) + retry.Run(t, func(r *retry.R) { + require.NotEqual(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) + require.NotEqual(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + }) + cert2 := srv.tlsConfigurator.Cert() + + certNew2, privateKeyNew2, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew2), 0600)) + // cert should not change as we did not update the associated cert + time.Sleep(1 * time.Second) + retry.Run(t, func(r *retry.R) { + require.Equal(r, cert2.Certificate, srv.tlsConfigurator.Cert().Certificate) + require.Equal(r, cert2.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + }) + + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew2), 0600)) + + // cert should change as we did update the associated key + time.Sleep(1 * time.Second) + retry.Run(t, func(r *retry.R) { + require.NotEqual(r, cert2.Certificate, srv.tlsConfigurator.Cert().Certificate) + require.NotEqual(r, cert2.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + }) } diff --git a/agent/config/builder.go b/agent/config/builder.go index d5d3bbfb4..1762f3f6d 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -37,6 +37,7 @@ import ( "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/lib/stringslice" libtempl "github.com/hashicorp/consul/lib/template" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/tlsutil" @@ -107,7 +108,8 @@ func Load(opts LoadOpts) (LoadResult, error) { if err := b.validate(cfg); err != nil { return r, err } - return LoadResult{RuntimeConfig: &cfg, Warnings: b.Warnings}, nil + 
watcherFiles := stringslice.CloneStringSlice(opts.ConfigFiles) + return LoadResult{RuntimeConfig: &cfg, Warnings: b.Warnings, WatchedFiles: watcherFiles}, nil } // LoadResult is the result returned from Load. The caller is responsible for @@ -115,6 +117,7 @@ func Load(opts LoadOpts) (LoadResult, error) { type LoadResult struct { RuntimeConfig *RuntimeConfig Warnings []string + WatchedFiles []string } // builder constructs and validates a runtime configuration from multiple @@ -938,6 +941,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) { c.Cache.EntryFetchMaxBurst, cache.DefaultEntryFetchMaxBurst, ), }, + AutoReloadConfig: boolVal(c.AutoReloadConfig), CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval), CheckOutputMaxSize: intValWithDefault(c.CheckOutputMaxSize, 4096), Checks: checks, @@ -978,8 +982,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) { EnableRemoteScriptChecks: enableRemoteScriptChecks, EnableLocalScriptChecks: enableLocalScriptChecks, EncryptKey: stringVal(c.EncryptKey), - EncryptVerifyIncoming: boolVal(c.EncryptVerifyIncoming), - EncryptVerifyOutgoing: boolVal(c.EncryptVerifyOutgoing), GRPCPort: grpcPort, GRPCAddrs: grpcAddrs, HTTPMaxConnsPerClient: intVal(c.Limits.HTTPMaxConnsPerClient), @@ -987,6 +989,11 @@ func (b *builder) build() (rt RuntimeConfig, err error) { KVMaxValueSize: uint64Val(c.Limits.KVMaxValueSize), LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime), LeaveOnTerm: leaveOnTerm, + StaticRuntimeConfig: StaticRuntimeConfig{ + EncryptVerifyIncoming: boolVal(c.EncryptVerifyIncoming), + EncryptVerifyOutgoing: boolVal(c.EncryptVerifyOutgoing), + }, + Logging: logging.Config{ LogLevel: stringVal(c.LogLevel), LogJSON: boolVal(c.LogJSON), diff --git a/agent/config/config.go b/agent/config/config.go index e8caa74b7..42d43f1d9 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -210,6 +210,7 @@ type Config struct { ReconnectTimeoutLAN *string `mapstructure:"reconnect_timeout"` ReconnectTimeoutWAN *string `mapstructure:"reconnect_timeout_wan"` RejoinAfterLeave *bool `mapstructure:"rejoin_after_leave"` + AutoReloadConfig *bool `mapstructure:"auto_reload_config"` RetryJoinIntervalLAN *string `mapstructure:"retry_interval"` RetryJoinIntervalWAN *string `mapstructure:"retry_interval_wan"` RetryJoinLAN []string `mapstructure:"retry_join"` diff --git a/agent/config/file_watcher.go b/agent/config/file_watcher.go index 1e35e7080..d85abca4b 100644 --- a/agent/config/file_watcher.go +++ b/agent/config/file_watcher.go @@ -14,19 +14,29 @@ import ( const timeoutDuration = 200 * time.Millisecond -type FileWatcher struct { +type Watcher interface { + Start(ctx context.Context) + Stop() error + Add(filename string) error + Remove(filename string) + Replace(oldFile, newFile string) error + EventsCh() chan *FileWatcherEvent +} + +type fileWatcher struct { watcher *fsnotify.Watcher configFiles map[string]*watchedFile + configFilesLock sync.RWMutex logger hclog.Logger reconcileTimeout time.Duration cancel context.CancelFunc done chan interface{} stopOnce sync.Once - //EventsCh Channel where an event will be emitted when a file change is detected + //eventsCh Channel where an event will be emitted when a file change is detected // a call to Start is needed before any event is emitted // after a Call to Stop succeed, the channel will be closed - EventsCh chan *FileWatcherEvent + eventsCh chan *FileWatcherEvent } type watchedFile struct { @@ -38,24 +48,23 @@ type FileWatcherEvent 
struct { } //NewFileWatcher create a file watcher that will watch all the files/folders from configFiles -// if success a FileWatcher will be returned and a nil error -// otherwise an error and a nil FileWatcher are returned -func NewFileWatcher(configFiles []string, logger hclog.Logger) (*FileWatcher, error) { +// if success a fileWatcher will be returned and a nil error +// otherwise an error and a nil fileWatcher are returned +func NewFileWatcher(configFiles []string, logger hclog.Logger) (Watcher, error) { ws, err := fsnotify.NewWatcher() if err != nil { return nil, err } - w := &FileWatcher{ + w := &fileWatcher{ watcher: ws, logger: logger.Named("file-watcher"), configFiles: make(map[string]*watchedFile), - EventsCh: make(chan *FileWatcherEvent), + eventsCh: make(chan *FileWatcherEvent), reconcileTimeout: timeoutDuration, done: make(chan interface{}), - stopOnce: sync.Once{}, } for _, f := range configFiles { - err = w.add(f) + err = w.Add(f) if err != nil { return nil, fmt.Errorf("error adding file %q: %w", f, err) } @@ -66,7 +75,7 @@ func NewFileWatcher(configFiles []string, logger hclog.Logger) (*FileWatcher, er // Start start a file watcher, with a copy of the passed context. // calling Start multiple times is a noop -func (w *FileWatcher) Start(ctx context.Context) { +func (w *fileWatcher) Start(ctx context.Context) { if w.cancel == nil { cancelCtx, cancel := context.WithCancel(ctx) w.cancel = cancel @@ -76,21 +85,19 @@ func (w *FileWatcher) Start(ctx context.Context) { // Stop the file watcher // calling Stop multiple times is a noop, Stop must be called after a Start -func (w *FileWatcher) Stop() error { +func (w *fileWatcher) Stop() error { var err error w.stopOnce.Do(func() { w.cancel() <-w.done - close(w.EventsCh) err = w.watcher.Close() }) return err } -func (w *FileWatcher) add(filename string) error { - if isSymLink(filename) { - return fmt.Errorf("symbolic links are not supported %s", filename) - } +// Add a file to the file watcher +// Add will lock the file watcher during the add +func (w *fileWatcher) Add(filename string) error { filename = filepath.Clean(filename) w.logger.Trace("adding file", "file", filename) if err := w.watcher.Add(filename); err != nil { @@ -100,25 +107,63 @@ func (w *FileWatcher) add(filename string) error { if err != nil { return err } - w.configFiles[filename] = &watchedFile{modTime: modTime} + w.addFile(filename, modTime) return nil } -func isSymLink(filename string) bool { - fi, err := os.Lstat(filename) - if err != nil { - return false - } - if fi.Mode()&os.ModeSymlink != 0 { - return true - } - return false +// Remove a file from the file watcher +// Remove will lock the file watcher during the remove +func (w *fileWatcher) Remove(filename string) { + w.removeFile(filename) } -func (w *FileWatcher) watch(ctx context.Context) { +// Replace a file in the file watcher +// Replace will lock the file watcher during the replace +func (w *fileWatcher) Replace(oldFile, newFile string) error { + if oldFile == newFile { + return nil + } + newFile = filepath.Clean(newFile) + w.logger.Trace("adding file", "file", newFile) + if err := w.watcher.Add(newFile); err != nil { + return err + } + modTime, err := w.getFileModifiedTime(newFile) + if err != nil { + return err + } + w.replaceFile(oldFile, newFile, modTime) + return nil +} + +func (w *fileWatcher) replaceFile(oldFile, newFile string, modTime time.Time) { + w.configFilesLock.Lock() + defer w.configFilesLock.Unlock() + delete(w.configFiles, oldFile) + w.configFiles[newFile] = &watchedFile{modTime: 
modTime} +} + +func (w *fileWatcher) addFile(filename string, modTime time.Time) { + w.configFilesLock.Lock() + defer w.configFilesLock.Unlock() + w.configFiles[filename] = &watchedFile{modTime: modTime} +} + +func (w *fileWatcher) removeFile(filename string) { + w.configFilesLock.Lock() + defer w.configFilesLock.Unlock() + delete(w.configFiles, filename) +} + +func (w *fileWatcher) EventsCh() chan *FileWatcherEvent { + return w.eventsCh +} + +func (w *fileWatcher) watch(ctx context.Context) { ticker := time.NewTicker(w.reconcileTimeout) defer ticker.Stop() defer close(w.done) + defer close(w.eventsCh) for { select { @@ -144,7 +189,7 @@ func (w *FileWatcher) watch(ctx context.Context) { } } -func (w *FileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) error { +func (w *fileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) error { w.logger.Trace("event received ", "filename", event.Name, "OP", event.Op) // we only want Create and Remove events to avoid triggering a reload on file modification if !isCreateEvent(event) && !isRemoveEvent(event) && !isWriteEvent(event) && !isRenameEvent(event) { @@ -168,7 +213,7 @@ func (w *FileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) err if isCreateEvent(event) || isWriteEvent(event) || isRenameEvent(event) { w.logger.Trace("call the handler", "filename", event.Name, "OP", event.Op) select { - case w.EventsCh <- &FileWatcherEvent{Filename: filename}: + case w.eventsCh <- &FileWatcherEvent{Filename: filename}: case <-ctx.Done(): return ctx.Err() } @@ -177,9 +222,11 @@ func (w *FileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) err return nil } -func (w *FileWatcher) isWatched(filename string) (*watchedFile, string, bool) { +func (w *fileWatcher) isWatched(filename string) (*watchedFile, string, bool) { path := filename + w.configFilesLock.RLock() configFile, ok := w.configFiles[path] + w.configFilesLock.RUnlock() if ok { return configFile, path, true } @@ -192,14 +239,17 @@ func (w *FileWatcher) isWatched(filename string) (*watchedFile, string, bool) { // try to see if the watched path is the parent dir newPath := filepath.Dir(path) w.logger.Trace("get dir", "dir", newPath) + w.configFilesLock.RLock() configFile, ok = w.configFiles[newPath] + w.configFilesLock.RUnlock() } return configFile, path, ok } -func (w *FileWatcher) reconcile(ctx context.Context) { +func (w *fileWatcher) reconcile(ctx context.Context) { + w.configFilesLock.Lock() + defer w.configFilesLock.Unlock() for filename, configFile := range w.configFiles { - w.logger.Trace("reconciling", "filename", filename) newModTime, err := w.getFileModifiedTime(filename) if err != nil { w.logger.Error("failed to get file modTime", "file", filename, "err", err) @@ -213,9 +263,9 @@ func (w *FileWatcher) reconcile(ctx context.Context) { } if !configFile.modTime.Equal(newModTime) { w.logger.Trace("call the handler", "filename", filename, "old modTime", configFile.modTime, "new modTime", newModTime) - w.configFiles[filename].modTime = newModTime + configFile.modTime = newModTime select { - case w.EventsCh <- &FileWatcherEvent{Filename: filename}: + case w.eventsCh <- &FileWatcherEvent{Filename: filename}: case <-ctx.Done(): return } @@ -239,7 +289,7 @@ func isRenameEvent(event fsnotify.Event) bool { return event.Op&fsnotify.Rename == fsnotify.Rename } -func (w *FileWatcher) getFileModifiedTime(filename string) (time.Time, error) { +func (w *fileWatcher) getFileModifiedTime(filename string) (time.Time, error) { fileInfo, err := os.Stat(filename) 
if err != nil { return time.Time{}, err diff --git a/agent/config/file_watcher_test.go b/agent/config/file_watcher_test.go index 68689e708..064729c53 100644 --- a/agent/config/file_watcher_test.go +++ b/agent/config/file_watcher_test.go @@ -27,7 +27,9 @@ func TestWatcherRenameEvent(t *testing.T) { fileTmp := createTempConfigFile(t, "temp_config3") filepaths := []string{createTempConfigFile(t, "temp_config1"), createTempConfigFile(t, "temp_config2")} - w, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) + wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) + w := wi.(*fileWatcher) + require.NoError(t, err) w.Start(context.Background()) defer func() { @@ -36,10 +38,66 @@ func TestWatcherRenameEvent(t *testing.T) { require.NoError(t, err) err = os.Rename(fileTmp, filepaths[0]) + time.Sleep(w.reconcileTimeout + 50*time.Millisecond) require.NoError(t, err) - require.NoError(t, assertEvent(filepaths[0], w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepaths[0], w.eventsCh, defaultTimeout)) // make sure we consume all events - assertEvent(filepaths[0], w.EventsCh, defaultTimeout) + _ = assertEvent(filepaths[0], w.eventsCh, defaultTimeout) +} + +func TestWatcherAddRemove(t *testing.T) { + var filepaths []string + wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) + w := wi.(*fileWatcher) + require.NoError(t, err) + file1 := createTempConfigFile(t, "temp_config1") + err = w.Add(file1) + require.NoError(t, err) + file2 := createTempConfigFile(t, "temp_config2") + err = w.Add(file2) + require.NoError(t, err) + w.Remove(file2) + _, ok := w.configFiles[file1] + require.True(t, ok) + _, ok = w.configFiles[file2] + require.False(t, ok) + +} + +func TestWatcherAddWhileRunning(t *testing.T) { + var filepaths []string + wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) + w := wi.(*fileWatcher) + require.NoError(t, err) + w.Start(context.Background()) + defer func() { + _ = w.Stop() + }() + file1 := createTempConfigFile(t, "temp_config1") + err = w.Add(file1) + require.NoError(t, err) + file2 := createTempConfigFile(t, "temp_config2") + err = w.Add(file2) + require.NoError(t, err) + w.Remove(file2) + require.Len(t, w.configFiles, 1) + _, ok := w.configFiles[file1] + require.True(t, ok) + _, ok = w.configFiles[file2] + require.False(t, ok) +} + +func TestWatcherRemoveNotFound(t *testing.T) { + var filepaths []string + w, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) + require.NoError(t, err) + w.Start(context.Background()) + defer func() { + _ = w.Stop() + }() + + file := createTempConfigFile(t, "temp_config2") + w.Remove(file) } func TestWatcherAddNotExist(t *testing.T) { @@ -69,7 +127,7 @@ func TestEventWatcherWrite(t *testing.T) { require.NoError(t, err) err = file.Sync() require.NoError(t, err) - require.NoError(t, assertEvent(file.Name(), w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(file.Name(), w.EventsCh(), defaultTimeout)) } func TestEventWatcherRead(t *testing.T) { @@ -84,7 +142,7 @@ func TestEventWatcherRead(t *testing.T) { _, err = os.ReadFile(filepath) require.NoError(t, err) - require.Error(t, assertEvent(filepath, w.EventsCh, defaultTimeout), "timedout waiting for event") + require.Error(t, assertEvent(filepath, w.EventsCh(), defaultTimeout), "timedout waiting for event") } func TestEventWatcherChmod(t *testing.T) { @@ -107,7 +165,7 @@ func TestEventWatcherChmod(t *testing.T) { err = file.Chmod(0777) require.NoError(t, err) - require.Error(t, 
assertEvent(file.Name(), w.EventsCh, defaultTimeout), "timedout waiting for event") + require.Error(t, assertEvent(file.Name(), w.EventsCh(), defaultTimeout), "timedout waiting for event") } func TestEventWatcherRemoveCreate(t *testing.T) { @@ -130,7 +188,7 @@ func TestEventWatcherRemoveCreate(t *testing.T) { err = recreated.Sync() require.NoError(t, err) // this an event coming from the reconcile loop - require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) } func TestEventWatcherMove(t *testing.T) { @@ -147,8 +205,9 @@ func TestEventWatcherMove(t *testing.T) { for i := 0; i < 10; i++ { filepath2 := createTempConfigFile(t, "temp_config2") err = os.Rename(filepath2, filepath) + time.Sleep(timeoutDuration + 50*time.Millisecond) require.NoError(t, err) - require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) } } @@ -157,7 +216,8 @@ func TestEventReconcileMove(t *testing.T) { filepath2 := createTempConfigFile(t, "temp_config2") err := os.Chtimes(filepath, time.Now(), time.Now().Add(-1*time.Second)) require.NoError(t, err) - w, err := NewFileWatcher([]string{filepath}, hclog.New(&hclog.LoggerOptions{})) + wi, err := NewFileWatcher([]string{filepath}, hclog.New(&hclog.LoggerOptions{})) + w := wi.(*fileWatcher) require.NoError(t, err) w.Start(context.Background()) defer func() { @@ -169,8 +229,9 @@ func TestEventReconcileMove(t *testing.T) { require.NoError(t, err) err = os.Rename(filepath2, filepath) + time.Sleep(timeoutDuration + 50*time.Millisecond) require.NoError(t, err) - require.NoError(t, assertEvent(filepath, w.EventsCh, 2000*time.Millisecond)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), 2000*time.Millisecond)) } func TestEventWatcherDirCreateRemove(t *testing.T) { @@ -187,11 +248,11 @@ func TestEventWatcherDirCreateRemove(t *testing.T) { require.NoError(t, err) err = file.Close() require.NoError(t, err) - require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) err = os.Remove(name) require.NoError(t, err) - require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) } } @@ -212,9 +273,9 @@ func TestEventWatcherDirMove(t *testing.T) { for i := 0; i < 100; i++ { filepathTmp := createTempConfigFile(t, "temp_config2") - os.Rename(filepathTmp, name) + err = os.Rename(filepathTmp, name) require.NoError(t, err) - require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) } } @@ -235,9 +296,9 @@ func TestEventWatcherDirMoveTrim(t *testing.T) { for i := 0; i < 100; i++ { filepathTmp := createTempConfigFile(t, "temp_config2") - os.Rename(filepathTmp, name) + err = os.Rename(filepathTmp, name) require.NoError(t, err) - require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout)) + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) } } @@ -260,9 +321,9 @@ func TestEventWatcherSubDirMove(t *testing.T) { for i := 0; i < 2; i++ { filepathTmp := createTempConfigFile(t, "temp_config2") - os.Rename(filepathTmp, name) + err = os.Rename(filepathTmp, name) require.NoError(t, err) - require.Error(t, assertEvent(filepath, w.EventsCh, defaultTimeout), "timedout waiting for event") + require.Error(t, assertEvent(filepath, w.EventsCh(), 
defaultTimeout), "timedout waiting for event") } } @@ -283,7 +344,7 @@ func TestEventWatcherDirRead(t *testing.T) { _, err = os.ReadFile(name) require.NoError(t, err) - require.Error(t, assertEvent(filepath, w.EventsCh, defaultTimeout), "timedout waiting for event") + require.Error(t, assertEvent(filepath, w.EventsCh(), defaultTimeout), "timedout waiting for event") } func TestEventWatcherMoveSoftLink(t *testing.T) { @@ -295,8 +356,8 @@ func TestEventWatcherMoveSoftLink(t *testing.T) { require.NoError(t, err) w, err := NewFileWatcher([]string{name}, hclog.New(&hclog.LoggerOptions{})) - require.Error(t, err, "symbolic link are not supported") - require.Nil(t, w) + require.NoError(t, err) + require.NotNil(t, w) } diff --git a/agent/config/flags.go b/agent/config/flags.go index 00deebe1b..b2e3c35ba 100644 --- a/agent/config/flags.go +++ b/agent/config/flags.go @@ -76,6 +76,7 @@ func AddFlags(fs *flag.FlagSet, f *LoadOpts) { add(&f.FlagValues.DNSRecursors, "recursor", "Address of an upstream DNS server. Can be specified multiple times.") add(&f.FlagValues.PrimaryGateways, "primary-gateway", "Address of a mesh gateway in the primary datacenter to use to bootstrap WAN federation at start time with retries enabled. Can be specified multiple times.") add(&f.FlagValues.RejoinAfterLeave, "rejoin", "Ignores a previous leave and attempts to rejoin the cluster.") + add(&f.FlagValues.AutoReloadConfig, "auto-reload-config", "Watches config files for changes and auto reloads the files when modified.") add(&f.FlagValues.RetryJoinIntervalLAN, "retry-interval", "Time to wait between join attempts.") add(&f.FlagValues.RetryJoinIntervalWAN, "retry-interval-wan", "Time to wait between join -wan attempts.") add(&f.FlagValues.RetryJoinLAN, "retry-join", "Address of an agent to join at start time with retries enabled. Can be specified multiple times.") diff --git a/agent/config/runtime.go b/agent/config/runtime.go index af3dd51e1..99c51f335 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -29,6 +29,22 @@ type RuntimeSOAConfig struct { Minttl uint32 // 0, } +// StaticRuntimeConfig specifies the subset of configuration the consul agent actually +// uses and that are not reloadable by configuration auto reload. +type StaticRuntimeConfig struct { + // EncryptVerifyIncoming enforces incoming gossip encryption and can be + // used to upshift to encrypted gossip on a running cluster. + // + // hcl: encrypt_verify_incoming = (true|false) + EncryptVerifyIncoming bool + + // EncryptVerifyOutgoing enforces outgoing gossip encryption and can be + // used to upshift to encrypted gossip on a running cluster. + // + // hcl: encrypt_verify_outgoing = (true|false) + EncryptVerifyOutgoing bool +} + // RuntimeConfig specifies the configuration the consul agent actually // uses. Is is derived from one or more Config structures which can come // from files, flags and/or environment variables. @@ -651,18 +667,6 @@ type RuntimeConfig struct { // flag: -encrypt string EncryptKey string - // EncryptVerifyIncoming enforces incoming gossip encryption and can be - // used to upshift to encrypted gossip on a running cluster. - // - // hcl: encrypt_verify_incoming = (true|false) - EncryptVerifyIncoming bool - - // EncryptVerifyOutgoing enforces outgoing gossip encryption and can be - // used to upshift to encrypted gossip on a running cluster. - // - // hcl: encrypt_verify_outgoing = (true|false) - EncryptVerifyOutgoing bool - // GRPCPort is the port the gRPC server listens on. 
Currently this only // exposes the xDS and ext_authz APIs for Envoy and it is disabled by default. // @@ -1298,6 +1302,11 @@ type RuntimeConfig struct { // hcl: skip_leave_on_interrupt = (true|false) SkipLeaveOnInt bool + // AutoReloadConfig indicate if the config will be + //auto reloaded bases on config file modification + // hcl: auto_reload_config = (true|false) + AutoReloadConfig bool + // StartJoinAddrsLAN is a list of addresses to attempt to join -lan when the // agent starts. If Serf is unable to communicate with any of these // addresses, then the agent will error and exit. @@ -1374,6 +1383,8 @@ type RuntimeConfig struct { // hcl: unix_sockets { user = string } UnixSocketUser string + StaticRuntimeConfig StaticRuntimeConfig + // Watches are used to monitor various endpoints and to invoke a // handler to act appropriately. These are managed entirely in the // agent layer using the standard APIs. diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index ab0798342..408241e40 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -906,6 +906,18 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { }, }) + run(t, testCase{ + desc: "-datacenter empty", + args: []string{ + `-auto-reload-config`, + `-data-dir=` + dataDir, + }, + expected: func(rt *RuntimeConfig) { + rt.AutoReloadConfig = true + rt.DataDir = dataDir + }, + }) + // ------------------------------------------------------------ // ports and addresses // @@ -5906,24 +5918,27 @@ func TestLoad_FullConfig(t *testing.T) { EnableRemoteScriptChecks: true, EnableLocalScriptChecks: true, EncryptKey: "A4wELWqH", - EncryptVerifyIncoming: true, - EncryptVerifyOutgoing: true, - GRPCPort: 4881, - GRPCAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")}, - HTTPAddrs: []net.Addr{tcpAddr("83.39.91.39:7999")}, - HTTPBlockEndpoints: []string{"RBvAFcGD", "fWOWFznh"}, - AllowWriteHTTPFrom: []*net.IPNet{cidr("127.0.0.0/8"), cidr("22.33.44.55/32"), cidr("0.0.0.0/0")}, - HTTPPort: 7999, - HTTPResponseHeaders: map[string]string{"M6TKa9NP": "xjuxjOzQ", "JRCrHZed": "rl0mTx81"}, - HTTPSAddrs: []net.Addr{tcpAddr("95.17.17.19:15127")}, - HTTPMaxConnsPerClient: 100, - HTTPMaxHeaderBytes: 10, - HTTPSHandshakeTimeout: 2391 * time.Millisecond, - HTTPSPort: 15127, - HTTPUseCache: false, - KVMaxValueSize: 1234567800, - LeaveDrainTime: 8265 * time.Second, - LeaveOnTerm: true, + StaticRuntimeConfig: StaticRuntimeConfig{ + EncryptVerifyIncoming: true, + EncryptVerifyOutgoing: true, + }, + + GRPCPort: 4881, + GRPCAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")}, + HTTPAddrs: []net.Addr{tcpAddr("83.39.91.39:7999")}, + HTTPBlockEndpoints: []string{"RBvAFcGD", "fWOWFznh"}, + AllowWriteHTTPFrom: []*net.IPNet{cidr("127.0.0.0/8"), cidr("22.33.44.55/32"), cidr("0.0.0.0/0")}, + HTTPPort: 7999, + HTTPResponseHeaders: map[string]string{"M6TKa9NP": "xjuxjOzQ", "JRCrHZed": "rl0mTx81"}, + HTTPSAddrs: []net.Addr{tcpAddr("95.17.17.19:15127")}, + HTTPMaxConnsPerClient: 100, + HTTPMaxHeaderBytes: 10, + HTTPSHandshakeTimeout: 2391 * time.Millisecond, + HTTPSPort: 15127, + HTTPUseCache: false, + KVMaxValueSize: 1234567800, + LeaveDrainTime: 8265 * time.Second, + LeaveOnTerm: true, Logging: logging.Config{ LogLevel: "k1zo9Spt", LogJSON: true, @@ -6760,7 +6775,8 @@ func TestRuntime_APIConfigHTTP(t *testing.T) { &net.UnixAddr{Name: "/var/run/foo"}, &net.TCPAddr{IP: net.ParseIP("198.18.0.1"), Port: 5678}, }, - Datacenter: "dc-test", + Datacenter: "dc-test", + StaticRuntimeConfig: StaticRuntimeConfig{}, } cfg, err := rt.APIConfig(false) diff --git 
a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index a8e2f46ee..5356761e4 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -63,6 +63,7 @@ "AutoEncryptDNSSAN": [], "AutoEncryptIPSAN": [], "AutoEncryptTLS": false, + "AutoReloadConfig": false, "AutopilotCleanupDeadServers": false, "AutopilotDisableUpgradeMigration": false, "AutopilotLastContactThreshold": "0s", @@ -182,8 +183,6 @@ "EnableLocalScriptChecks": false, "EnableRemoteScriptChecks": false, "EncryptKey": "hidden", - "EncryptVerifyIncoming": false, - "EncryptVerifyOutgoing": false, "EnterpriseRuntimeConfig": {}, "ExposeMaxPort": 0, "ExposeMinPort": 0, @@ -348,6 +347,10 @@ "SkipLeaveOnInt": false, "StartJoinAddrsLAN": [], "StartJoinAddrsWAN": [], + "StaticRuntimeConfig": { + "EncryptVerifyIncoming": false, + "EncryptVerifyOutgoing": false + }, "SyncCoordinateIntervalMin": "0s", "SyncCoordinateRateTarget": 0, "TLS": { diff --git a/agent/local/state.go b/agent/local/state.go index 8427068d7..e0bc8ae11 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/consul/lib/stringslice" + "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" "github.com/hashicorp/go-hclog" @@ -1104,7 +1106,7 @@ func (l *State) updateSyncState() error { // copy so that we don't retain a pointer to any actual state // store info for in-memory RPCs. if nextService.EnableTagOverride { - nextService.Tags = structs.CloneStringSlice(rs.Tags) + nextService.Tags = stringslice.CloneStringSlice(rs.Tags) changed = true } diff --git a/agent/setup.go b/agent/setup.go index bf67c0360..0799c472a 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -45,6 +45,7 @@ type BaseDeps struct { AutoConfig *autoconf.AutoConfig // TODO: use an interface Cache *cache.Cache ViewStore *submatview.Store + WatchedFiles []string } // MetricsHandler provides an http.Handler for displaying metrics. 
@@ -61,7 +62,7 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error) if err != nil { return d, err } - + d.WatchedFiles = result.WatchedFiles cfg := result.RuntimeConfig logConf := cfg.Logging logConf.Name = logging.Agent diff --git a/agent/structs/acl.go b/agent/structs/acl.go index 42fa55821..25165c105 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -11,6 +11,8 @@ import ( "strings" "time" + "github.com/hashicorp/consul/lib/stringslice" + "golang.org/x/crypto/blake2b" "github.com/hashicorp/consul/acl" @@ -128,7 +130,7 @@ type ACLServiceIdentity struct { func (s *ACLServiceIdentity) Clone() *ACLServiceIdentity { s2 := *s - s2.Datacenters = CloneStringSlice(s.Datacenters) + s2.Datacenters = stringslice.CloneStringSlice(s.Datacenters) return &s2 } @@ -606,7 +608,7 @@ func (t *ACLPolicy) UnmarshalJSON(data []byte) error { func (p *ACLPolicy) Clone() *ACLPolicy { p2 := *p - p2.Datacenters = CloneStringSlice(p.Datacenters) + p2.Datacenters = stringslice.CloneStringSlice(p.Datacenters) return &p2 } @@ -1415,15 +1417,6 @@ type ACLPolicyBatchDeleteRequest struct { PolicyIDs []string } -func CloneStringSlice(s []string) []string { - if len(s) == 0 { - return nil - } - out := make([]string, len(s)) - copy(out, s) - return out -} - // ACLRoleSetRequest is used at the RPC layer for creation and update requests type ACLRoleSetRequest struct { Role ACLRole // The role to upsert diff --git a/agent/structs/config_entry_intentions.go b/agent/structs/config_entry_intentions.go index c77683319..8829c2178 100644 --- a/agent/structs/config_entry_intentions.go +++ b/agent/structs/config_entry_intentions.go @@ -6,6 +6,8 @@ import ( "strings" "time" + "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/consul/acl" ) @@ -303,7 +305,7 @@ func (p *IntentionHTTPPermission) Clone() *IntentionHTTPPermission { } } - p2.Methods = CloneStringSlice(p.Methods) + p2.Methods = stringslice.CloneStringSlice(p.Methods) return &p2 } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 9d3f00d1c..ca08506e8 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -5,6 +5,8 @@ import ( "reflect" "time" + "github.com/hashicorp/consul/lib/stringslice" + "github.com/mitchellh/mapstructure" "github.com/hashicorp/consul/lib" @@ -156,7 +158,7 @@ func (c *CARoot) Clone() *CARoot { } newCopy := *c - newCopy.IntermediateCerts = CloneStringSlice(c.IntermediateCerts) + newCopy.IntermediateCerts = stringslice.CloneStringSlice(c.IntermediateCerts) return &newCopy } diff --git a/agent/testagent.go b/agent/testagent.go index fd35eb712..3910a78d9 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -43,7 +43,8 @@ type TestAgent struct { // Name is an optional name of the agent. Name string - HCL string + configFiles []string + HCL string // Config is the agent configuration. If Config is nil then // TestConfig() is used. If Config.DataDir is set then it is @@ -94,6 +95,16 @@ func NewTestAgent(t *testing.T, hcl string) *TestAgent { return a } +// NewTestAgent returns a started agent with the given configuration. It fails +// the test if the Agent could not be started. +// The caller is responsible for calling Shutdown() to stop the agent and remove +// temporary directories. 
+func NewTestAgentWithConfigFile(t *testing.T, hcl string, configFiles []string) *TestAgent { + a := StartTestAgent(t, TestAgent{configFiles: configFiles, HCL: hcl}) + t.Cleanup(func() { a.Shutdown() }) + return a +} + // StartTestAgent and wait for it to become available. If the agent fails to // start the test will be marked failed and execution will stop. // @@ -186,6 +197,7 @@ func (a *TestAgent) Start(t *testing.T) error { config.DefaultConsulSource(), config.DevConsulSource(), }, + ConfigFiles: a.configFiles, } result, err := config.Load(opts) if result.RuntimeConfig != nil { diff --git a/command/agent/agent.go b/command/agent/agent.go index 6a8d042c3..2167ba63b 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -172,7 +172,6 @@ func (c *cmd) run(args []string) int { ui.Error(err.Error()) return 1 } - c.logger = bd.Logger agent, err := agent.New(bd) if err != nil { diff --git a/lib/stringslice/stringslice.go b/lib/stringslice/stringslice.go index eea77def7..aadf8a551 100644 --- a/lib/stringslice/stringslice.go +++ b/lib/stringslice/stringslice.go @@ -68,3 +68,12 @@ func MergeSorted(a, b []string) []string { } return out } + +func CloneStringSlice(s []string) []string { + if len(s) == 0 { + return nil + } + out := make([]string, len(s)) + copy(out, s) + return out +} From 4974d8471bb9be3c2056e71d187a1ecc61f2d820 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 31 Mar 2022 12:18:40 -0700 Subject: [PATCH 055/785] Log a warning when a terminating gateway service has TLS but not SNI configured --- agent/consul/config_endpoint.go | 8 ++++++++ agent/structs/config_entry.go | 8 ++++++++ agent/structs/config_entry_gateways.go | 16 ++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index e87e9eced..2e72f992e 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -89,6 +89,14 @@ func (c *ConfigEntry) Apply(args *structs.ConfigEntryRequest, reply *bool) error return err } + // Log any applicable warnings about the contents of the config entry. + if warnEntry, ok := args.Entry.(structs.WarningConfigEntry); ok { + warnings := warnEntry.Warnings() + for _, warning := range warnings { + c.logger.Warn(warning) + } + } + if err := args.Entry.CanWrite(authz); err != nil { return err } diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 7222a1ec6..09e05fa4c 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -82,6 +82,14 @@ type UpdatableConfigEntry interface { ConfigEntry } +// WarningConfigEntry is an optional interface implemented by a ConfigEntry +// if it wants to be able to emit warnings when it is being upserted. +type WarningConfigEntry interface { + Warnings() []string + + ConfigEntry +} + // ServiceConfiguration is the top-level struct for the configuration of a service // across the entire cluster. 
type ServiceConfigEntry struct { diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index 94014230d..fc9c840a0 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -570,6 +570,22 @@ func (e *TerminatingGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { return &e.EnterpriseMeta } +func (e *TerminatingGatewayConfigEntry) Warnings() []string { + if e == nil { + return nil + } + + warnings := make([]string, 0) + for _, svc := range e.Services { + if (svc.CAFile != "" || svc.CertFile != "" || svc.KeyFile != "") && svc.SNI == "" { + warning := fmt.Sprintf("TLS is configured but SNI is not set for service %q. Enabling SNI is strongly recommended when using TLS.", svc.Name) + warnings = append(warnings, warning) + } + } + + return warnings +} + // GatewayService is used to associate gateways with their linked services. type GatewayService struct { Gateway ServiceName From cc3c39b920ab17b7cb49d0d6cf468443d561873f Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 31 Mar 2022 12:19:16 -0700 Subject: [PATCH 056/785] Recommend SNI with TLS in the terminating gateway docs --- .../docs/connect/config-entries/terminating-gateway.mdx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/content/docs/connect/config-entries/terminating-gateway.mdx b/website/content/docs/connect/config-entries/terminating-gateway.mdx index 8da3b20e6..0c6a4bf56 100644 --- a/website/content/docs/connect/config-entries/terminating-gateway.mdx +++ b/website/content/docs/connect/config-entries/terminating-gateway.mdx @@ -30,6 +30,9 @@ from the terminating gateway will be encrypted using one-way TLS authentication. and [private key](/docs/connect/config-entries/terminating-gateway#keyfile) are also specified connections from the terminating gateway will be encrypted using mutual TLS authentication. +~> Setting the `SNI` field is strongly recommended when enabling TLS to a service. If this field is not set, +Consul will not attempt to verify the Subject Alternative Name fields in the service's certificate. + If none of these are provided, Consul will **only** encrypt connections to the gateway and not from the gateway to the destination service. From 8dd4e609c13bce392d22214005a57b6485a0ae3a Mon Sep 17 00:00:00 2001 From: FFMMM Date: Thu, 31 Mar 2022 13:04:33 -0700 Subject: [PATCH 057/785] docs: new rpc metric (#12608) --- website/content/docs/agent/telemetry.mdx | 120 ++++++++++++++++------- 1 file changed, 83 insertions(+), 37 deletions(-) diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index e17b75396..4f4ef8983 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -289,7 +289,7 @@ performance degradations related to Bolt DB, these metrics will show the issue a **What to look for:** -The primary thing to look for are increases in the `consul.raft.boltdb.storeLogs` times. Its value will directly govern an +The primary thing to look for are increases in the `consul.raft.boltdb.storeLogs` times. Its value will directly govern an upper limit to the throughput of write operations within Consul. In Consul each write operation will turn into a single Raft log to be committed. Raft will process these @@ -313,7 +313,7 @@ to drastically increase disk write throughput, potentially beyond what the under detect this situation you can look at the `consul.raft.boltdb.freelistBytes` metric. 
This metric is a count of the extra bytes that are being written for each log storage operation beyond
 the log data itself. While not a clear indicator of an actual issue, this metric can be used to diagnose
 why the `consul.raft.boltdb.storeLogs` metric
-is high. 
+is high.
 
 If Bolt DB log storage performance becomes an issue and is caused by free list management, then setting
 [`raft_boltdb.NoFreelistSync`](/docs/agent/options#NoFreelistSync) to `true` in the server's configuration
@@ -519,47 +519,93 @@ These metrics are used to monitor the health of the Consul servers.
 | `consul.grpc.server.streams` | Measures the number of active gRPC streams handled by the server. | streams | gauge |
 | `consul.xds.server.streams` | Measures the number of active xDS streams handled by the server split by protocol version. | streams | gauge |
+
+## Server Workload
+
+**Requirements:**
+* Consul 1.12.0+
+
+Label-based RPC metrics were added in Consul 1.12.0 as a beta feature to better understand the workload on a Consul server and where that workload is coming from. The following metric provides that insight:
+
+| Metric                   | Description                                               | Unit | Type    |
+| ------------------------ | --------------------------------------------------------- | ---- | ------- |
+| `consul.rpc.server.call` | Measures the elapsed time taken to complete an RPC call.  | ms   | summary |
+
+Note that `consul.rpc.server.call` may emit values of `0 ms`. That means the elapsed time was less than `1 ms`.
+
+### Labels
+
+The server workload metrics above come with the following labels:
+
+| Label Name     | Description                                | Possible values                         |
+| -------------- | ------------------------------------------ | --------------------------------------- |
+| `method`       | The name of the RPC method.                | The value of any RPC request in Consul. |
+| `errored`      | Indicates whether the RPC call errored.    | `True` or `False`.                      |
+| `request_type` | Whether it is a `read` or `write` request. | `read`, `write` or `unreported`.        |
+| `rpc_type`     | The RPC implementation.                    | `net/rpc` or `internal`.                |
+
+#### Label Explanations
+
+The `internal` value for the `rpc_type` label in the table above refers to leader and cluster management RPC operations that Consul performs.
+Historically, `internal` RPC operation metrics were accounted for under the same metric names.
+
+The `unreported` value for the `request_type` label in the table above refers to RPC requests within Consul where it is difficult to ascertain whether a request is a `read` or a `write` type.
+
+Here is a Prometheus-style example of an RPC metric and its labels:
+
+
+
+```json
+  ...
+  consul_rpc_server_call{errored="false",method="Catalog.ListNodes",request_type="read",rpc_type="net/rpc",quantile="0.5"} 255
+  ...
+```
+
+
+
+Any metric in this section can be turned off with the [`prefix_filter`](/docs/agent/options#telemetry-prefix_filter).
+
 ## Cluster Health
 
 These metrics give insight into the health of the cluster as a whole.
| Metric | Description | Unit | Type | | ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- | ------- | -| `consul.memberlist.degraded.probe` | Counts the number of times the agent has performed failure detection on another agent at a slower probe rate. The agent uses its own health metric as an indicator to perform this action. (If its health score is low, means that the node is healthy, and vice versa.) | probes / interval | counter | -| `consul.memberlist.degraded.timeout` | Counts the number of times an agent was marked as a dead node, whilst not getting enough confirmations from a randomly selected list of agent nodes in an agent's membership. | occurrence / interval | counter | -| `consul.memberlist.msg.dead` | Counts the number of times an agent has marked another agent to be a dead node. | messages / interval | counter | -| `consul.memberlist.health.score` | Describes a node's perception of its own health based on how well it is meeting the soft real-time requirements of the protocol. This metric ranges from 0 to 8, where 0 indicates "totally healthy". This health score is used to scale the time between outgoing probes, and higher scores translate into longer probing intervals. For more details see section IV of the Lifeguard paper: https://arxiv.org/pdf/1707.00788.pdf | score | gauge | -| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/docs/agent/options#ports). | suspect messages received / interval | counter | -| `consul.memberlist.tcp.accept` | Counts the number of times an agent has accepted an incoming TCP stream connection. | connections accepted / interval | counter | -| `consul.memberlist.udp.sent/received` | Measures the total number of bytes sent/received by an agent through the UDP protocol. | bytes sent or bytes received / interval | counter | -| `consul.memberlist.tcp.connect` | Counts the number of times an agent has initiated a push/pull sync with an other agent. | push/pull initiated / interval | counter | -| `consul.memberlist.tcp.sent` | Measures the total number of bytes sent by an agent through the TCP protocol | bytes sent / interval | counter | -| `consul.memberlist.gossip` | Measures the time taken for gossip messages to be broadcasted to a set of randomly selected nodes. | ms | timer | -| `consul.memberlist.msg_alive` | Counts the number of alive messages, that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter | -| `consul.memberlist.msg_dead` | The number of dead messages that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter | -| `consul.memberlist.msg_suspect` | The number of suspect messages that the agent has processed so far, based on the message information given by the network layer. 
| messages / Interval | counter | -| `consul.memberlist.probeNode` | Measures the time taken to perform a single round of failure detection on a select agent. | nodes / Interval | counter | -| `consul.memberlist.pushPullNode` | Measures the number of agents that have exchanged state with this agent. | nodes / Interval | counter | -| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/options#ports). | failures / interval | counter | -| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/options#ports). | flaps / interval | counter | -| `consul.serf.member.join` | Increments when an agent joins the cluster. If an agent flapped or failed this counter also increments when it re-joins. | joins / interval | counter | -| `consul.serf.member.left` | Increments when an agent leaves the cluster. | leaves / interval | counter | -| `consul.serf.events` | Increments when an agent processes an [event](/commands/event). Consul uses events internally so there may be additional events showing in telemetry. There are also a per-event counters emitted as `consul.serf.events.`. | events / interval | counter | -| `consul.serf.msgs.sent` | This metric is sample of the number of bytes of messages broadcast to the cluster. In a given time interval, the sum of this metric is the total number of bytes sent and the count is the number of messages sent. | message bytes / interval | counter | -| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers | gauge | -| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. All non-leader servers will report `NaN`. | boolean | gauge | -| `consul.session_ttl.active` | Tracks the active number of sessions being tracked. | sessions | gauge | -| `consul.catalog.service.query.` | Increments for each catalog query for the given service. | queries | counter | -| `consul.catalog.service.query-tag..` | Increments for each catalog query for the given service with the given tag. | queries | counter | -| `consul.catalog.service.query-tags..` | Increments for each catalog query for the given service with the given tags. | queries | counter | -| `consul.catalog.service.not-found.` | Increments for each catalog query where the given service could not be found. | queries | counter | -| `consul.catalog.connect.query.` | Increments for each connect-based catalog query for the given service. | queries | counter | -| `consul.catalog.connect.query-tag..` | Increments for each connect-based catalog query for the given service with the given tag. | queries | counter | -| `consul.catalog.connect.query-tags..` | Increments for each connect-based catalog query for the given service with the given tags. | queries | counter | -| `consul.catalog.connect.not-found.` | Increments for each connect-based catalog query where the given service could not be found. 
| queries | counter | -| `consul.mesh.active-root-ca.expiry` | The number of seconds until the root CA expires, updated every hour. | seconds | gauge | -| `consul.mesh.active-signing-ca.expiry` | The number of seconds until the signing CA expires, updated every hour. | seconds | gauge | -| `consul.agent.tls.cert.expiry` | The number of seconds until the Agent TLS certificate expires, updated every hour. | seconds | gauge | +| `consul.memberlist.degraded.probe` | Counts the number of times the agent has performed failure detection on another agent at a slower probe rate. The agent uses its own health metric as an indicator to perform this action. (If its health score is low, means that the node is healthy, and vice versa.) | probes / interval | counter | +| `consul.memberlist.degraded.timeout` | Counts the number of times an agent was marked as a dead node, whilst not getting enough confirmations from a randomly selected list of agent nodes in an agent's membership. | occurrence / interval | counter | +| `consul.memberlist.msg.dead` | Counts the number of times an agent has marked another agent to be a dead node. | messages / interval | counter | +| `consul.memberlist.health.score` | Describes a node's perception of its own health based on how well it is meeting the soft real-time requirements of the protocol. This metric ranges from 0 to 8, where 0 indicates "totally healthy". This health score is used to scale the time between outgoing probes, and higher scores translate into longer probing intervals. For more details see section IV of the Lifeguard paper: https://arxiv.org/pdf/1707.00788.pdf | score | gauge | +| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/docs/agent/options#ports). | suspect messages received / interval | counter | +| `consul.memberlist.tcp.accept` | Counts the number of times an agent has accepted an incoming TCP stream connection. | connections accepted / interval | counter | +| `consul.memberlist.udp.sent/received` | Measures the total number of bytes sent/received by an agent through the UDP protocol. | bytes sent or bytes received / interval | counter | +| `consul.memberlist.tcp.connect` | Counts the number of times an agent has initiated a push/pull sync with an other agent. | push/pull initiated / interval | counter | +| `consul.memberlist.tcp.sent` | Measures the total number of bytes sent by an agent through the TCP protocol | bytes sent / interval | counter | +| `consul.memberlist.gossip` | Measures the time taken for gossip messages to be broadcasted to a set of randomly selected nodes. | ms | timer | +| `consul.memberlist.msg_alive` | Counts the number of alive messages, that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter | +| `consul.memberlist.msg_dead` | The number of dead messages that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter | +| `consul.memberlist.msg_suspect` | The number of suspect messages that the agent has processed so far, based on the message information given by the network layer. 
| messages / Interval | counter | +| `consul.memberlist.probeNode` | Measures the time taken to perform a single round of failure detection on a select agent. | nodes / Interval | counter | +| `consul.memberlist.pushPullNode` | Measures the number of agents that have exchanged state with this agent. | nodes / Interval | counter | +| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/options#ports). | failures / interval | counter | +| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/options#ports). | flaps / interval | counter | +| `consul.serf.member.join` | Increments when an agent joins the cluster. If an agent flapped or failed this counter also increments when it re-joins. | joins / interval | counter | +| `consul.serf.member.left` | Increments when an agent leaves the cluster. | leaves / interval | counter | +| `consul.serf.events` | Increments when an agent processes an [event](/commands/event). Consul uses events internally so there may be additional events showing in telemetry. There are also a per-event counters emitted as `consul.serf.events.`. | events / interval | counter | +| `consul.serf.msgs.sent` | This metric is sample of the number of bytes of messages broadcast to the cluster. In a given time interval, the sum of this metric is the total number of bytes sent and the count is the number of messages sent. | message bytes / interval | counter | +| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers | gauge | +| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. All non-leader servers will report `NaN`. | boolean | gauge | +| `consul.session_ttl.active` | Tracks the active number of sessions being tracked. | sessions | gauge | +| `consul.catalog.service.query.` | Increments for each catalog query for the given service. | queries | counter | +| `consul.catalog.service.query-tag..` | Increments for each catalog query for the given service with the given tag. | queries | counter | +| `consul.catalog.service.query-tags..` | Increments for each catalog query for the given service with the given tags. | queries | counter | +| `consul.catalog.service.not-found.` | Increments for each catalog query where the given service could not be found. | queries | counter | +| `consul.catalog.connect.query.` | Increments for each connect-based catalog query for the given service. | queries | counter | +| `consul.catalog.connect.query-tag..` | Increments for each connect-based catalog query for the given service with the given tag. | queries | counter | +| `consul.catalog.connect.query-tags..` | Increments for each connect-based catalog query for the given service with the given tags. | queries | counter | +| `consul.catalog.connect.not-found.` | Increments for each connect-based catalog query where the given service could not be found. 
| queries | counter | +| `consul.mesh.active-root-ca.expiry` | The number of seconds until the root CA expires, updated every hour. | seconds | gauge | +| `consul.mesh.active-signing-ca.expiry`| The number of seconds until the signing CA expires, updated every hour. | seconds | gauge | +| `consul.agent.tls.cert.expiry` | The number of seconds until the Agent TLS certificate expires, updated every hour. | seconds | gauge | ## Connect Built-in Proxy Metrics From c7204528c5a7f07c864a8a0ea6fd59301c408a90 Mon Sep 17 00:00:00 2001 From: Eric Date: Thu, 31 Mar 2022 16:24:46 -0400 Subject: [PATCH 058/785] Implement Lambda Patching in the Serverless Plugin --- agent/proxycfg/testing_terminating_gateway.go | 10 +- agent/xds/serverless_plugin_oss_test.go | 107 +++++++ agent/xds/serverlessplugin/copied.go | 59 ++++ agent/xds/serverlessplugin/lambda_patcher.go | 170 +++++++++++ .../serverlessplugin/lambda_patcher_test.go | 83 ++++++ agent/xds/serverlessplugin/patcher.go | 81 ++++++ agent/xds/serverlessplugin/patcher_test.go | 100 +++++++ .../xds/serverlessplugin/serverlessplugin.go | 115 +++++++- ...da-terminating-gateway.envoy-1-20-x.golden | 169 +++++++++++ ...da-terminating-gateway.envoy-1-20-x.golden | 272 ++++++++++++++++++ agent/xds/xdscommon/xdscommon.go | 3 +- agent/xds/xdscommon/xdscommon_oss_test.go | 9 +- 12 files changed, 1172 insertions(+), 6 deletions(-) create mode 100644 agent/xds/serverless_plugin_oss_test.go create mode 100644 agent/xds/serverlessplugin/copied.go create mode 100644 agent/xds/serverlessplugin/lambda_patcher.go create mode 100644 agent/xds/serverlessplugin/lambda_patcher_test.go create mode 100644 agent/xds/serverlessplugin/patcher.go create mode 100644 agent/xds/serverlessplugin/patcher_test.go create mode 100644 agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden diff --git a/agent/proxycfg/testing_terminating_gateway.go b/agent/proxycfg/testing_terminating_gateway.go index 5b9889c85..e66ef3399 100644 --- a/agent/proxycfg/testing_terminating_gateway.go +++ b/agent/proxycfg/testing_terminating_gateway.go @@ -642,13 +642,19 @@ func TestConfigSnapshotTerminatingGatewayIgnoreExtraResolvers(t testing.T) *Conf }) } -func TestConfigSnapshotTerminatingGatewayWithServiceDefaultsMeta(t testing.T) *ConfigSnapshot { +func TestConfigSnapshotTerminatingGatewayWithLambdaService(t testing.T) *ConfigSnapshot { web := structs.NewServiceName("web", nil) return TestConfigSnapshotTerminatingGateway(t, true, nil, []agentcache.UpdateEvent{ { CorrelationID: serviceConfigIDPrefix + web.String(), Result: &structs.ServiceConfigResponse{ - Meta: map[string]string{"a": "b"}, + ProxyConfig: map[string]interface{}{"protocol": "http"}, + Meta: map[string]string{ + "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/arn": "lambda-arn", + "serverless.consul.hashicorp.com/v1alpha1/lambda/payload-passthrough": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/region": "us-east-1", + }, }, }, }) diff --git a/agent/xds/serverless_plugin_oss_test.go b/agent/xds/serverless_plugin_oss_test.go new file mode 100644 index 000000000..20c9a4d2d --- /dev/null +++ b/agent/xds/serverless_plugin_oss_test.go @@ -0,0 +1,107 @@ +//go:build !consulent +// +build !consulent + +package xds + +import ( + "path/filepath" + "sort" + "testing" + + envoy_cluster_v3 
"github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + "github.com/golang/protobuf/proto" + testinf "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/xds/proxysupport" + "github.com/hashicorp/consul/agent/xds/serverlessplugin" + "github.com/hashicorp/consul/agent/xds/xdscommon" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerlessPluginFromSnapshot(t *testing.T) { + tests := []struct { + name string + create func(t testinf.T) *proxycfg.ConfigSnapshot + }{ + { + name: "lambda-terminating-gateway", + create: proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaService, + }, + } + + latestEnvoyVersion := proxysupport.EnvoyVersions[0] + for _, envoyVersion := range proxysupport.EnvoyVersions { + sf, err := determineSupportedProxyFeaturesFromString(envoyVersion) + require.NoError(t, err) + t.Run("envoy-"+envoyVersion, func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Sanity check default with no overrides first + snap := tt.create(t) + + // We need to replace the TLS certs with deterministic ones to make golden + // files workable. Note we don't update these otherwise they'd change + // golden files for every test case and so not be any use! + setupTLSRootsAndLeaf(t, snap) + + g := newResourceGenerator(testutil.Logger(t), nil, nil, false) + g.ProxyFeatures = sf + + res, err := g.allResourcesFromSnapshot(snap) + require.NoError(t, err) + + indexedResources := indexResources(g.Logger, res) + newResourceMap, err := serverlessplugin.MutateIndexedResources(indexedResources, xdscommon.MakePluginConfiguration(snap)) + require.NoError(t, err) + + entities := []struct { + name string + key string + sorter func([]proto.Message) func(int, int) bool + }{ + { + name: "clusters", + key: xdscommon.ClusterType, + sorter: func(msgs []proto.Message) func(int, int) bool { + return func(i, j int) bool { + return msgs[i].(*envoy_cluster_v3.Cluster).Name < msgs[j].(*envoy_cluster_v3.Cluster).Name + } + }, + }, + { + name: "listeners", + key: xdscommon.ListenerType, + sorter: func(msgs []proto.Message) func(int, int) bool { + return func(i, j int) bool { + return msgs[i].(*envoy_listener_v3.Listener).Name < msgs[j].(*envoy_listener_v3.Listener).Name + } + }, + }, + } + + for _, entity := range entities { + var msgs []proto.Message + for _, e := range newResourceMap.Index[entity.key] { + msgs = append(msgs, e) + } + + sort.Slice(msgs, entity.sorter(msgs)) + r, err := createResponse(entity.key, "00000001", "00000001", msgs) + require.NoError(t, err) + + t.Run(entity.name, func(t *testing.T) { + gotJSON := protoToJSON(t, r) + + require.JSONEq(t, goldenEnvoy(t, + filepath.Join("serverless_plugin", entity.name, tt.name), + envoyVersion, latestEnvoyVersion, gotJSON), gotJSON) + }) + } + }) + } + }) + } +} diff --git a/agent/xds/serverlessplugin/copied.go b/agent/xds/serverlessplugin/copied.go new file mode 100644 index 000000000..3e99038aa --- /dev/null +++ b/agent/xds/serverlessplugin/copied.go @@ -0,0 +1,59 @@ +package serverlessplugin + +import ( + envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + envoy_tls_v3 
"github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + + "github.com/golang/protobuf/ptypes" + + "github.com/golang/protobuf/proto" +) + +// This is copied from xds and not put into the shared package because I'm not +// convinced it should be shared. + +func makeUpstreamTLSTransportSocket(tlsContext *envoy_tls_v3.UpstreamTlsContext) (*envoy_core_v3.TransportSocket, error) { + if tlsContext == nil { + return nil, nil + } + return makeTransportSocket("tls", tlsContext) +} + +func makeTransportSocket(name string, config proto.Message) (*envoy_core_v3.TransportSocket, error) { + any, err := ptypes.MarshalAny(config) + if err != nil { + return nil, err + } + return &envoy_core_v3.TransportSocket{ + Name: name, + ConfigType: &envoy_core_v3.TransportSocket_TypedConfig{ + TypedConfig: any, + }, + }, nil +} + +func makeEnvoyHTTPFilter(name string, cfg proto.Message) (*envoy_http_v3.HttpFilter, error) { + any, err := ptypes.MarshalAny(cfg) + if err != nil { + return nil, err + } + + return &envoy_http_v3.HttpFilter{ + Name: name, + ConfigType: &envoy_http_v3.HttpFilter_TypedConfig{TypedConfig: any}, + }, nil +} + +func makeFilter(name string, cfg proto.Message) (*envoy_listener_v3.Filter, error) { + any, err := ptypes.MarshalAny(cfg) + if err != nil { + return nil, err + } + + return &envoy_listener_v3.Filter{ + Name: name, + ConfigType: &envoy_listener_v3.Filter_TypedConfig{TypedConfig: any}, + }, nil +} diff --git a/agent/xds/serverlessplugin/lambda_patcher.go b/agent/xds/serverlessplugin/lambda_patcher.go new file mode 100644 index 000000000..5121a36df --- /dev/null +++ b/agent/xds/serverlessplugin/lambda_patcher.go @@ -0,0 +1,170 @@ +package serverlessplugin + +import ( + "errors" + "fmt" + + envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_lambda_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/aws_lambda/v3" + envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + envoy_resource_v3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + pstruct "github.com/golang/protobuf/ptypes/struct" + + "github.com/hashicorp/consul/agent/xds/xdscommon" + "github.com/hashicorp/consul/api" +) + +const ( + lambdaPrefix string = "serverless.consul.hashicorp.com/v1alpha1" + lambdaEnabledTag string = lambdaPrefix + "/lambda/enabled" + lambdaArnTag string = lambdaPrefix + "/lambda/arn" + lambdaPayloadPassthroughTag string = lambdaPrefix + "/lambda/payload-passhthrough" + lambdaRegionTag string = lambdaPrefix + "/lambda/region" + lambdaInvocationMode string = lambdaPrefix + "/lambda/invocation-mode" +) + +type lambdaPatcher struct { + arn string + payloadPassthrough bool + region string + kind api.ServiceKind + invocationMode envoy_lambda_v3.Config_InvocationMode +} + +var _ patcher = (*lambdaPatcher)(nil) + +func makeLambdaPatcher(serviceConfig xdscommon.ServiceConfig) (patcher, bool) { + var patcher lambdaPatcher + if !isStringTrue(serviceConfig.Meta[lambdaEnabledTag]) { + return patcher, true + } + + arn := serviceConfig.Meta[lambdaArnTag] + if arn == "" { + return patcher, false + } + + region := 
serviceConfig.Meta[lambdaRegionTag] + if region == "" { + return patcher, false + } + + payloadPassthrough := isStringTrue(serviceConfig.Meta[lambdaPayloadPassthroughTag]) + + invocationModeStr := serviceConfig.Meta[lambdaInvocationMode] + invocationMode := envoy_lambda_v3.Config_SYNCHRONOUS + if invocationModeStr == "asynchronous" { + invocationMode = envoy_lambda_v3.Config_ASYNCHRONOUS + } + + return lambdaPatcher{ + arn: arn, + payloadPassthrough: payloadPassthrough, + region: region, + kind: serviceConfig.Kind, + invocationMode: invocationMode, + }, true +} + +func isStringTrue(v string) bool { + return v == "true" +} + +func (p lambdaPatcher) CanPatch(kind api.ServiceKind) bool { + return kind == p.kind +} + +func (p lambdaPatcher) PatchCluster(c *envoy_cluster_v3.Cluster) (*envoy_cluster_v3.Cluster, bool, error) { + transportSocket, err := makeUpstreamTLSTransportSocket(&envoy_tls_v3.UpstreamTlsContext{ + Sni: "*.amazonaws.com", + }) + + if err != nil { + return c, false, fmt.Errorf("failed to make transport socket: %w", err) + } + + cluster := &envoy_cluster_v3.Cluster{ + Name: c.Name, + ConnectTimeout: c.ConnectTimeout, + ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS}, + DnsLookupFamily: envoy_cluster_v3.Cluster_V4_ONLY, + LbPolicy: envoy_cluster_v3.Cluster_ROUND_ROBIN, + Metadata: &envoy_core_v3.Metadata{ + FilterMetadata: map[string]*pstruct.Struct{ + "com.amazonaws.lambda": { + Fields: map[string]*pstruct.Value{ + "egress_gateway": {Kind: &pstruct.Value_BoolValue{BoolValue: true}}, + }, + }, + }, + }, + LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{ + ClusterName: c.Name, + Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{ + { + LbEndpoints: []*envoy_endpoint_v3.LbEndpoint{ + { + HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ + Endpoint: &envoy_endpoint_v3.Endpoint{ + Address: &envoy_core_v3.Address{ + Address: &envoy_core_v3.Address_SocketAddress{ + SocketAddress: &envoy_core_v3.SocketAddress{ + Address: fmt.Sprintf("lambda.%s.amazonaws.com", p.region), + PortSpecifier: &envoy_core_v3.SocketAddress_PortValue{ + PortValue: 443, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + TransportSocket: transportSocket, + } + return cluster, true, nil +} + +func (p lambdaPatcher) PatchFilter(filter *envoy_listener_v3.Filter) (*envoy_listener_v3.Filter, bool, error) { + if filter.Name != "envoy.filters.network.http_connection_manager" { + return filter, false, nil + } + if typedConfig := filter.GetTypedConfig(); typedConfig == nil { + return filter, false, errors.New("error getting typed config for http filter") + } + + config := envoy_resource_v3.GetHTTPConnectionManager(filter) + if config == nil { + return filter, false, errors.New("error unmarshalling filter") + } + httpFilter, err := makeEnvoyHTTPFilter( + "envoy.filters.http.aws_lambda", + &envoy_lambda_v3.Config{ + Arn: p.arn, + PayloadPassthrough: p.payloadPassthrough, + InvocationMode: p.invocationMode, + }, + ) + if err != nil { + return filter, false, err + } + + config.HttpFilters = []*envoy_http_v3.HttpFilter{ + httpFilter, + {Name: "envoy.filters.http.router"}, + } + config.StripMatchingHostPort = true + newFilter, err := makeFilter("envoy.filters.network.http_connection_manager", config) + if err != nil { + return filter, false, errors.New("error making new filter") + } + + return newFilter, true, nil +} diff --git a/agent/xds/serverlessplugin/lambda_patcher_test.go b/agent/xds/serverlessplugin/lambda_patcher_test.go new file mode 100644 index 
000000000..09ac0cc36 --- /dev/null +++ b/agent/xds/serverlessplugin/lambda_patcher_test.go @@ -0,0 +1,83 @@ +package serverlessplugin + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/xds/xdscommon" + "github.com/hashicorp/consul/api" +) + +func TestMakeLambdaPatcher(t *testing.T) { + kind := api.ServiceKindTerminatingGateway + cases := []struct { + name string + enabled bool + arn string + payloadPassthrough bool + region string + expected lambdaPatcher + ok bool + }{ + { + name: "no meta", + ok: true, + }, + { + name: "lambda disabled", + enabled: false, + ok: true, + }, + { + name: "missing arn", + enabled: true, + region: "blah", + ok: false, + }, + { + name: "missing region", + enabled: true, + region: "arn", + ok: false, + }, + { + name: "including payload passthrough", + enabled: true, + arn: "arn", + region: "blah", + payloadPassthrough: true, + expected: lambdaPatcher{ + arn: "arn", + payloadPassthrough: true, + region: "blah", + kind: kind, + }, + ok: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + config := xdscommon.ServiceConfig{ + Kind: kind, + Meta: map[string]string{ + lambdaEnabledTag: strconv.FormatBool(tc.enabled), + lambdaArnTag: tc.arn, + lambdaRegionTag: tc.region, + }, + } + + if tc.payloadPassthrough { + config.Meta[lambdaPayloadPassthroughTag] = strconv.FormatBool(tc.payloadPassthrough) + } + + patcher, ok := makeLambdaPatcher(config) + + require.Equal(t, tc.ok, ok) + + require.Equal(t, tc.expected, patcher) + }) + } +} diff --git a/agent/xds/serverlessplugin/patcher.go b/agent/xds/serverlessplugin/patcher.go new file mode 100644 index 000000000..58847bcb6 --- /dev/null +++ b/agent/xds/serverlessplugin/patcher.go @@ -0,0 +1,81 @@ +package serverlessplugin + +import ( + envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + + "github.com/hashicorp/consul/agent/xds/xdscommon" + "github.com/hashicorp/consul/api" +) + +// patcher is the interface that each serverless integration must implement. It +// is responsible for modifying the xDS structures based on only the state of +// the patcher. +type patcher interface { + // CanPatch determines if the patcher can mutate resources for the given api.ServiceKind + CanPatch(api.ServiceKind) bool + + // PatchCluster patches a cluster to include the custom Envoy configuration + // required to integrate with the serverless integration. + PatchCluster(*envoy_cluster_v3.Cluster) (*envoy_cluster_v3.Cluster, bool, error) + + // PatchFilter patches an Envoy filter to include the custom Envoy + // configuration required to integrate with the serverless integration. + PatchFilter(*envoy_listener_v3.Filter) (*envoy_listener_v3.Filter, bool, error) +} + +type patchers map[api.CompoundServiceName]patcher + +func getPatcher(patchers patchers, kind api.ServiceKind, name api.CompoundServiceName) patcher { + patcher, ok := patchers[name] + + if !ok { + return nil + } + + if !patcher.CanPatch(kind) { + return nil + } + + return patcher +} + +// getPatcherBySNI gets the patcher for the associated SNI. 
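+// It resolves the SNI to a CompoundServiceName, looks up that service's
+// ServiceConfig, and constructs a patcher from it; nil is returned when no
+// mapping exists or the resulting patcher cannot handle the given kind.
+// Illustrative usage (the SNI and variable names here are hypothetical):
+//
+//	p := getPatcherBySNI(config, api.ServiceKindTerminatingGateway,
+//		"web.default.dc1.internal.<trust-domain>.consul")
+//	if p != nil {
+//		cluster, patched, err := p.PatchCluster(candidateCluster)
+//		// ...
+//	}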
+func getPatcherBySNI(config xdscommon.PluginConfiguration, kind api.ServiceKind, sni string) patcher { + serviceName, ok := config.SNIToServiceName[sni] + + if !ok { + return nil + } + + serviceConfig, ok := config.ServiceConfigs[serviceName] + if !ok { + return nil + } + + p := makePatcher(serviceConfig) + if p == nil || !p.CanPatch(kind) { + return nil + } + + return p +} + +func makePatcher(serviceConfig xdscommon.ServiceConfig) patcher { + for _, constructor := range patchConstructors { + patcher, ok := constructor(serviceConfig) + if ok { + return patcher + } + } + + return nil +} + +// patchConstructor is used to construct patchers based on +// xdscommon.ServiceConfig. This function contains all of the logic around +// turning Meta data into the patcher. +type patchConstructor func(xdscommon.ServiceConfig) (patcher, bool) + +// patchConstructors contains all patchers that getPatchers tries to create. +var patchConstructors = []patchConstructor{makeLambdaPatcher} diff --git a/agent/xds/serverlessplugin/patcher_test.go b/agent/xds/serverlessplugin/patcher_test.go new file mode 100644 index 000000000..456cc0cd4 --- /dev/null +++ b/agent/xds/serverlessplugin/patcher_test.go @@ -0,0 +1,100 @@ +package serverlessplugin + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/xds/xdscommon" + "github.com/hashicorp/consul/api" +) + +func TestGetPatcherBySNI(t *testing.T) { + cases := []struct { + name string + sni string + kind api.ServiceKind + expected patcher + config *xdscommon.PluginConfiguration + }{ + { + name: "no sni match", + sni: "not-matching", + }, + { + name: "no patcher", + config: &xdscommon.PluginConfiguration{}, + sni: "lambda-sni", + }, + { + name: "no kind match", + kind: api.ServiceKindIngressGateway, + sni: "lambda-sni", + }, + { + name: "full match", + sni: "lambda-sni", + kind: api.ServiceKindTerminatingGateway, + expected: lambdaPatcher{ + arn: "arn", + region: "region", + payloadPassthrough: false, + kind: api.ServiceKindTerminatingGateway, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + config := sampleConfig() + if tc.config != nil { + config = *tc.config + } + patcher := getPatcherBySNI(config, tc.kind, tc.sni) + + if tc.expected == nil { + require.Empty(t, patcher) + } else { + require.Equal(t, tc.expected, patcher) + } + }) + } +} + +var ( + lambdaService = api.CompoundServiceName{Name: "lambda"} + disabledLambdaService = api.CompoundServiceName{Name: "disabled-lambda"} + invalidLambdaService = api.CompoundServiceName{Name: "invalid-lambda"} +) + +func sampleConfig() xdscommon.PluginConfiguration { + return xdscommon.PluginConfiguration{ + ServiceConfigs: map[api.CompoundServiceName]xdscommon.ServiceConfig{ + lambdaService: { + Kind: api.ServiceKindTerminatingGateway, + Meta: map[string]string{ + lambdaEnabledTag: "true", + lambdaArnTag: "arn", + lambdaRegionTag: "region", + }, + }, + disabledLambdaService: { + Kind: api.ServiceKindTerminatingGateway, + Meta: map[string]string{ + lambdaEnabledTag: "false", + lambdaArnTag: "arn", + lambdaRegionTag: "region", + }, + }, + invalidLambdaService: { + Kind: api.ServiceKindTerminatingGateway, + Meta: map[string]string{ + lambdaEnabledTag: "true", + }, + }, + }, + SNIToServiceName: map[string]api.CompoundServiceName{ + "lambda-sni": lambdaService, + }, + } +} diff --git a/agent/xds/serverlessplugin/serverlessplugin.go b/agent/xds/serverlessplugin/serverlessplugin.go index 8ad6c6273..8ab38e374 100644 --- 
a/agent/xds/serverlessplugin/serverlessplugin.go +++ b/agent/xds/serverlessplugin/serverlessplugin.go @@ -1,9 +1,122 @@ package serverlessplugin import ( + "fmt" + + envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/agent/xds/xdscommon" + "github.com/hashicorp/consul/api" ) +// MutateIndexedResources updates indexed xDS structures to include patches for +// serverless integrations. It is responsible for constructing all of the +// patchers and forwarding xDS structs onto the appropriate patcher. If any +// portion of this function fails, it will record the error and continue. The +// behavior is appropriate since the unpatched xDS structures this receives are +// typically invalid. func MutateIndexedResources(resources *xdscommon.IndexedResources, config xdscommon.PluginConfiguration) (*xdscommon.IndexedResources, error) { - return resources, nil + var resultErr error + + // The serverless plugin only supports terminating gateays for now, but will + // likely add connect proxies soon. + if config.Kind != api.ServiceKindTerminatingGateway { + return resources, resultErr + } + + for _, indexType := range []string{ + xdscommon.ClusterType, + xdscommon.ListenerType, + } { + for nameOrSNI, msg := range resources.Index[indexType] { + switch resource := msg.(type) { + case *envoy_cluster_v3.Cluster: + patcher := getPatcherBySNI(config, config.Kind, nameOrSNI) + if patcher == nil { + continue + } + + newCluster, patched, err := patcher.PatchCluster(resource) + if err != nil { + resultErr = multierror.Append(resultErr, fmt.Errorf("error patching cluster: %w", err)) + continue + } + if patched { + resources.Index[xdscommon.ClusterType][nameOrSNI] = newCluster + } + + case *envoy_listener_v3.Listener: + newListener, patched, err := patchTerminatingGatewayListener(resource, config) + if err != nil { + resultErr = multierror.Append(resultErr, fmt.Errorf("error patching listener: %w", err)) + continue + } + if patched { + resources.Index[xdscommon.ListenerType][nameOrSNI] = newListener + } + + default: + resultErr = multierror.Append(resultErr, fmt.Errorf("unsupported type was skipped: %T", resource)) + } + } + } + + return resources, resultErr +} + +func patchTerminatingGatewayListener(l *envoy_listener_v3.Listener, config xdscommon.PluginConfiguration) (proto.Message, bool, error) { + var resultErr error + patched := false + for _, filterChain := range l.FilterChains { + sni := getSNI(filterChain) + + if sni == "" { + continue + } + + patcher := getPatcherBySNI(config, config.Kind, sni) + + if patcher == nil { + continue + } + + var filters []*envoy_listener_v3.Filter + + for _, filter := range filterChain.Filters { + newFilter, ok, err := patcher.PatchFilter(filter) + + if err != nil { + resultErr = multierror.Append(resultErr, fmt.Errorf("error patching listener filter: %w", err)) + filters = append(filters, filter) + } + if ok { + filters = append(filters, newFilter) + patched = true + } + } + filterChain.Filters = filters + } + + return l, patched, resultErr +} + +func getSNI(chain *envoy_listener_v3.FilterChain) string { + var sni string + + if chain == nil { + return sni + } + + if chain.FilterChainMatch == nil { + return sni + } + + if len(chain.FilterChainMatch.ServerNames) == 0 { + return sni + } + + return chain.FilterChainMatch.ServerNames[0] } diff --git 
a/agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway.envoy-1-20-x.golden new file mode 100644 index 000000000..aa3deff05 --- /dev/null +++ b/agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway.envoy-1-20-x.golden @@ -0,0 +1,169 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "api.altdomain", + "portValue": 8081 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "filename": "api.cert.pem" + }, + "privateKey": { + "filename": "api.key.pem" + } + } + ], + "validationContext": { + "trustedCa": { + "filename": "ca.cert.pem" + } + } + } + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "cache.mydomain", + "portValue": 8081 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "db.mydomain", + "portValue": 8081 + } + } + }, + "healthStatus": "UNHEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "lambda.us-east-1.amazonaws.com", + "portValue": 443 + } + } + } + } + ] + } + ] + }, + "dnsLookupFamily": "V4_ONLY", + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "sni": "*.amazonaws.com" + } + }, + "metadata": { + 
"filterMetadata": { + "com.amazonaws.lambda": { + "egress_gateway": true + } + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden new file mode 100644 index 000000000..e0b77f6f4 --- /dev/null +++ b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden @@ -0,0 +1,272 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api.default.default.dc1", + "cluster": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.cache.default.default.dc1", + "cluster": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICmjCCAkGgAwIBAgIQe1ZmC0rzRwer6jaH1YIUIjAKBggqhkjOPQQDAjCBuDEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg\nODE5ODAwNjg0MDM0MTM3ODkyNDYxNTA1MDk0NDU3OTU1MTQxNjEwHhcNMjAwNjE5\nMTU1MjAzWhcNMjEwNjE5MTU1MjAzWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH2aWaaa3fpQLBayheHiKlrH\n+z53m0frfGknKjOhOPVYDVHV8x0OE01negswVQbKHAtxPf1M8Zy+WbI9rK7Ua1mj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDf9CPBSUwwZvpeW73oJLTmgQE2\ntW1NKpL5t1uq9WFcqDArBgNVHSMEJDAigCCPPd/NxgZB0tq2M8pdVpPj3Cr79iTv\ni4/T1ysodfMb7zAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIFCjFZAoXq0s2ied2eIBv0i1KoW5\nIhCylnKFt6iHkyDeAiBBCByTcjHRgEQmqyPojQKoO584EFiczTub9aWdnf9tEw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEINsen3S8xzxMrKcRZIvxXzhKDn43Tw9ttqWEFU9TqS5hoAoGCCqGSM49\nAwEHoUQDQgAEfZpZpprd+lAsFrKF4eIqWsf7PnebR+t8aScqM6E49VgNUdXzHQ4T\nTWd6CzBVBsocC3E9/UzxnL5Zsj2srtRrWQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkOgAwIBAgIRAKF+qDJbaOULNL1TIatrsBowCgYIKoZIzj0EAwIwgbkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB\nIDE4Nzg3MDAwNjUzMDcxOTYzNTk1ODkwNTE1ODY1NjEzMDA2MTU0NDAeFw0yMDA2\nMTkxNTMxMzRaFw0yMTA2MTkxNTMxMzRaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu\nY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdQ8Igci5f7ZvvCVsxXt9\ntLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZbz/82EwPoS7Dqo3LTK4IuelOimoNNxuk\nkaOBxzCBxDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\nAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEILzTLkfJcdWQnTMKUcai/YJq\n0RqH1pjCqtY7SOU4gGOTMCsGA1UdIwQkMCKAIMa2vNcTEC5AGfHIYARJ/4sodX0o\nLzCj3lpw7BcEzPTcMC0GA1UdEQQmMCSCEXNlcnZlci5kYzEuY29uc3Vsgglsb2Nh\nbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgBZ/Z4GSLEc98WvT/qjTVCNTG\n1WNaAaesVbkRx+J0yl8CIQDAVoqY9ByA5vKHjnQrxWlc/JUtJz8wudg7e/OCRriP\nSg==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIN1v14FaNxgY4MgjDOOWthen8dgwB0lNMs9/j2TfrnxzoAoGCCqGSM49\nAwEHoUQDQgAEdQ8Igci5f7ZvvCVsxXt9tLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZ\nbz/82EwPoS7Dqo3LTK4IuelOimoNNxukkQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "upstream.web.default.default.dc1", + "rds": { + "configSource": { + "ads": { + + }, + "resourceApiVersion": "V3" + }, + "routeConfigName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.aws_lambda", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config", + "arn": "lambda-arn" + } + }, + { + "name": "envoy.filters.http.router" + } + ], + "tracing": { + "randomSampling": { + + } + }, + "stripMatchingHostPort": true + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster" + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "terminating_gateway.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector" + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/xdscommon/xdscommon.go b/agent/xds/xdscommon/xdscommon.go index de856c355..b1b7da121 100644 --- a/agent/xds/xdscommon/xdscommon.go +++ b/agent/xds/xdscommon/xdscommon.go @@ -78,7 +78,8 @@ type PluginConfiguration struct { // associated service's CompoundServiceName EnvoyIDToServiceName map[string]api.CompoundServiceName - // Kind is mode the local Envoy proxy is running in + // Kind is mode the local Envoy proxy is running in. For now, only + // terminating gateways are supported. 
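+	// The serverless plugin checks this field first and leaves the
+	// resources untouched for any other kind.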
Kind api.ServiceKind } diff --git a/agent/xds/xdscommon/xdscommon_oss_test.go b/agent/xds/xdscommon/xdscommon_oss_test.go index d11c9b015..c92be3ba5 100644 --- a/agent/xds/xdscommon/xdscommon_oss_test.go +++ b/agent/xds/xdscommon/xdscommon_oss_test.go @@ -13,7 +13,7 @@ import ( ) func TestMakePluginConfiguration_TerminatingGateway(t *testing.T) { - snap := proxycfg.TestConfigSnapshotTerminatingGatewayWithServiceDefaultsMeta(t) + snap := proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaService(t) webService := api.CompoundServiceName{ Name: "web", @@ -41,7 +41,12 @@ func TestMakePluginConfiguration_TerminatingGateway(t *testing.T) { ServiceConfigs: map[api.CompoundServiceName]ServiceConfig{ webService: { Kind: api.ServiceKindTerminatingGateway, - Meta: map[string]string{"a": "b"}, + Meta: map[string]string{ + "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/arn": "lambda-arn", + "serverless.consul.hashicorp.com/v1alpha1/lambda/payload-passthrough": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/region": "us-east-1", + }, }, apiService: { Kind: api.ServiceKindTerminatingGateway, From 116b6c57cbdcc73f75635ae77e4760d959ec3ade Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 31 Mar 2022 13:46:14 -0700 Subject: [PATCH 059/785] Use the GatewayService SNI field for upstream SAN validation --- agent/proxycfg/testing_terminating_gateway.go | 24 +++ agent/xds/clusters.go | 16 +- agent/xds/clusters_test.go | 4 + ...erminating-gateway-sni.envoy-1-20-x.golden | 174 ++++++++++++++++++ .../config-entries/terminating-gateway.mdx | 20 +- 5 files changed, 232 insertions(+), 6 deletions(-) create mode 100644 agent/xds/testdata/clusters/terminating-gateway-sni.envoy-1-20-x.golden diff --git a/agent/proxycfg/testing_terminating_gateway.go b/agent/proxycfg/testing_terminating_gateway.go index 5b9889c85..14092f450 100644 --- a/agent/proxycfg/testing_terminating_gateway.go +++ b/agent/proxycfg/testing_terminating_gateway.go @@ -526,6 +526,30 @@ func testConfigSnapshotTerminatingGatewayLBConfig(t testing.T, variant string) * }) } +func TestConfigSnapshotTerminatingGatewaySNI(t testing.T) *ConfigSnapshot { + return TestConfigSnapshotTerminatingGateway(t, true, nil, []cache.UpdateEvent{ + { + CorrelationID: "gateway-services", + Result: &structs.IndexedGatewayServices{ + Services: []*structs.GatewayService{ + { + Service: structs.NewServiceName("web", nil), + CAFile: "ca.cert.pem", + SNI: "foo.com", + }, + { + Service: structs.NewServiceName("api", nil), + CAFile: "ca.cert.pem", + CertFile: "api.cert.pem", + KeyFile: "api.key.pem", + SNI: "bar.com", + }, + }, + }, + }, + }) +} + func TestConfigSnapshotTerminatingGatewayHostnameSubsets(t testing.T) *ConfigSnapshot { var ( api = structs.NewServiceName("api", nil) diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 4c9855036..63442eb51 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -390,6 +390,9 @@ func (s *ResourceGenerator) injectGatewayServiceAddons(cfgSnap *proxycfg.ConfigS } if mapping.SNI != "" { tlsContext.Sni = mapping.SNI + if err := injectRawSANMatcher(tlsContext.CommonTlsContext, []string{mapping.SNI}); err != nil { + return fmt.Errorf("failed to inject SNI matcher into TLS context: %v", err) + } } transportSocket, err := makeUpstreamTLSTransportSocket(tlsContext) @@ -803,6 +806,15 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( // injectSANMatcher updates a TLS context so that it verifies the upstream SAN. 
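// It renders each SPIFFE ID to an exact-match string and delegates to
// injectRawSANMatcher below, which the terminating gateway SNI handling
// above also uses with the configured SNI hostname.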
func injectSANMatcher(tlsContext *envoy_tls_v3.CommonTlsContext, spiffeIDs ...connect.SpiffeIDService) error { + var matchStrings []string + for _, id := range spiffeIDs { + matchStrings = append(matchStrings, id.URI().String()) + } + + return injectRawSANMatcher(tlsContext, matchStrings) +} + +func injectRawSANMatcher(tlsContext *envoy_tls_v3.CommonTlsContext, matchStrings []string) error { validationCtx, ok := tlsContext.ValidationContextType.(*envoy_tls_v3.CommonTlsContext_ValidationContext) if !ok { return fmt.Errorf("invalid type: expected CommonTlsContext_ValidationContext, got %T", @@ -810,10 +822,10 @@ func injectSANMatcher(tlsContext *envoy_tls_v3.CommonTlsContext, spiffeIDs ...co } var matchers []*envoy_matcher_v3.StringMatcher - for _, id := range spiffeIDs { + for _, m := range matchStrings { matchers = append(matchers, &envoy_matcher_v3.StringMatcher{ MatchPattern: &envoy_matcher_v3.StringMatcher_Exact{ - Exact: id.URI().String(), + Exact: m, }, }) } diff --git a/agent/xds/clusters_test.go b/agent/xds/clusters_test.go index c8a1e98e5..8f4156aed 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -581,6 +581,10 @@ func TestClustersFromSnapshot(t *testing.T) { name: "terminating-gateway-hostname-service-subsets", create: proxycfg.TestConfigSnapshotTerminatingGatewayHostnameSubsets, }, + { + name: "terminating-gateway-sni", + create: proxycfg.TestConfigSnapshotTerminatingGatewaySNI, + }, { name: "terminating-gateway-ignore-extra-resolvers", create: proxycfg.TestConfigSnapshotTerminatingGatewayIgnoreExtraResolvers, diff --git a/agent/xds/testdata/clusters/terminating-gateway-sni.envoy-1-20-x.golden b/agent/xds/testdata/clusters/terminating-gateway-sni.envoy-1-20-x.golden new file mode 100644 index 000000000..5b784eabc --- /dev/null +++ b/agent/xds/testdata/clusters/terminating-gateway-sni.envoy-1-20-x.golden @@ -0,0 +1,174 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "api.altdomain", + "portValue": 8081 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "filename": "api.cert.pem" + }, + "privateKey": { + "filename": "api.key.pem" + } + } + ], + "validationContext": { + "trustedCa": { + "filename": "ca.cert.pem" + }, + "matchSubjectAltNames": [ + { + "exact": "bar.com" + } + ] + } + }, + "sni": "bar.com" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "cache.mydomain", + 
"portValue": 8081 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "db.mydomain", + "portValue": 8081 + } + } + }, + "healthStatus": "UNHEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "validationContext": { + "trustedCa": { + "filename": "ca.cert.pem" + }, + "matchSubjectAltNames": [ + { + "exact": "foo.com" + } + ] + } + }, + "sni": "foo.com" + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/website/content/docs/connect/config-entries/terminating-gateway.mdx b/website/content/docs/connect/config-entries/terminating-gateway.mdx index 0c6a4bf56..80966017a 100644 --- a/website/content/docs/connect/config-entries/terminating-gateway.mdx +++ b/website/content/docs/connect/config-entries/terminating-gateway.mdx @@ -166,6 +166,7 @@ Services = [ { Name = "billing" CAFile = "/etc/certs/ca-chain.cert.pem" + SNI = "billing.service.com" } ] ``` @@ -179,6 +180,7 @@ spec: services: - name: billing caFile: /etc/certs/ca-chain.cert.pem + sni: billing.service.com ``` ```json @@ -189,6 +191,7 @@ spec: { "Name": "billing", "CAFile": "/etc/certs/ca-chain.cert.pem" + "SNI": "billing.service.com" } ] } @@ -217,6 +220,7 @@ Services = [ Namespace = "finance" Name = "billing" CAFile = "/etc/certs/ca-chain.cert.pem" + SNI = "billing.service.com" } ] ``` @@ -231,6 +235,7 @@ spec: - name: billing namespace: finance caFile: /etc/certs/ca-chain.cert.pem + sni: billing.service.com ``` ```json @@ -242,7 +247,8 @@ spec: { "Namespace": "finance", "Name": "billing", - "CAFile": "/etc/certs/ca-chain.cert.pem" + "CAFile": "/etc/certs/ca-chain.cert.pem", + "SNI": "billing.service.com" } ] } @@ -276,6 +282,7 @@ Services = [ CAFile = "/etc/certs/ca-chain.cert.pem" KeyFile = "/etc/certs/gateway.key.pem" CertFile = "/etc/certs/gateway.cert.pem" + SNI = "billing.service.com" } ] ``` @@ -291,6 +298,7 @@ spec: caFile: /etc/certs/ca-chain.cert.pem keyFile: /etc/certs/gateway.key.pem certFile: /etc/certs/gateway.cert.pem + sni: billing.service.com ``` ```json @@ -302,7 +310,8 @@ spec: "Name": "billing", "CAFile": "/etc/certs/ca-chain.cert.pem", "KeyFile": "/etc/certs/gateway.key.pem", - "CertFile": "/etc/certs/gateway.cert.pem" + "CertFile": "/etc/certs/gateway.cert.pem", + "SNI": "billing.service.com" } ] } @@ -333,6 +342,7 @@ Services = [ CAFile = 
"/etc/certs/ca-chain.cert.pem" KeyFile = "/etc/certs/gateway.key.pem" CertFile = "/etc/certs/gateway.cert.pem" + SNI = "billing.service.com" } ] ``` @@ -349,6 +359,7 @@ spec: caFile: /etc/certs/ca-chain.cert.pem keyFile: /etc/certs/gateway.key.pem certFile: /etc/certs/gateway.cert.pem + sni: billing.service.com ``` ```json @@ -362,7 +373,8 @@ spec: "Name": "billing", "CAFile": "/etc/certs/ca-chain.cert.pem", "KeyFile": "/etc/certs/gateway.key.pem", - "CertFile": "/etc/certs/gateway.cert.pem" + "CertFile": "/etc/certs/gateway.cert.pem", + "SNI": "billing.service.com" } ] } @@ -399,7 +411,7 @@ Services = [ }, { Name = "billing" - CAFile = "/etc/billing-ca/ca-chain.cert.pem", + CAFile = "/etc/billing-ca/ca-chain.cert.pem" SNI = "billing.service.com" } ] From 2a35d4a518cc03fd576bc6feac4bbe83eadcf924 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 31 Mar 2022 14:05:02 -0700 Subject: [PATCH 060/785] Add changelog note --- .changelog/12672.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/12672.txt diff --git a/.changelog/12672.txt b/.changelog/12672.txt new file mode 100644 index 000000000..449890850 --- /dev/null +++ b/.changelog/12672.txt @@ -0,0 +1,3 @@ +```release-note:security +connect: Properly set SNI when configured for services behind a terminating gateway. +``` From 7a016a4c465597f7ac4e3f8788aad81c56b45fde Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 31 Mar 2022 15:03:41 -0700 Subject: [PATCH 061/785] Add doc examples for expanded token read CLI and API --- website/content/api-docs/acl/tokens.mdx | 89 +++++++++++++++++++++ website/content/commands/acl/token/read.mdx | 39 +++++++++ 2 files changed, 128 insertions(+) diff --git a/website/content/api-docs/acl/tokens.mdx b/website/content/api-docs/acl/tokens.mdx index b73acc50e..7afada637 100644 --- a/website/content/api-docs/acl/tokens.mdx +++ b/website/content/api-docs/acl/tokens.mdx @@ -188,6 +188,9 @@ The corresponding CLI command is [`consul acl token read`](/commands/acl/token/r the namespace will be inherited from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. +- `expanded` `(bool: false)` - If this field is set, the contents of all policies and + roles affecting the token will also be returned. + ### Sample Request ```shell-session @@ -225,6 +228,92 @@ for reading other secrets which given even more permissions. 
} ``` +Sample response when setting the `expanded` parameter: + +```json +{ + "AccessorID": "fbd2447f-7479-4329-ad13-b021d74f86ba", + "SecretID": "869c6e91-4de9-4dab-b56e-87548435f9c6", + "Description": "test token", + "Policies": [ + { + "ID": "beb04680-815b-4d7c-9e33-3d707c24672c", + "Name": "foo" + }, + { + "ID": "18788457-584c-4812-80d3-23d403148a90", + "Name": "bar" + } + ], + "Local": false, + "CreateTime": "2020-05-22T18:52:31Z", + "Hash": "YWJjZGVmZ2g=", + "ExpandedPolicies": [ + { + "ID": "beb04680-815b-4d7c-9e33-3d707c24672c", + "Name": "foo", + "Description": "user policy on token", + "Rules": "service_prefix \"\" {\n policy = \"read\"\n}", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "18788457-584c-4812-80d3-23d403148a90", + "Name": "bar", + "Description": "other user policy on token", + "Rules": "operator = \"read\"", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + { + "ID": "6204f4cd-4709-441c-ac1b-cb029e940263", + "Name": "admin policy", + "Description": "policy for admin role", + "Rules": "operator = \"write\"", + "Datacenters": null, + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + } + ], + "ExpandedRoles": [ + { + "ID": "3b0a78fe-b9c3-40de-b8ea-7d4d6674b366", + "Name": "admin", + "Description": "admin role", + "Policies": [ + { + "ID": "6204f4cd-4709-441c-ac1b-cb029e940263", + "Name": "admin policy" + } + ], + "ServiceIdentities": [ + { + "ServiceName": "web", + "Datacenters": [ + "southwest" + ] + } + ], + "Hash": null, + "CreateIndex": 0, + "ModifyIndex": 0 + } + ], + "NamespaceDefaultPolicies": null, + "NamespaceDefaultRoles": null, + "AgentACLDefaultPolicy": "allow", + "AgentACLDownPolicy": "deny", + "ResolvedByAgent": "server-1", + "CreateIndex": 42, + "ModifyIndex": 100 +} +``` + ## Read Self Token This endpoint returns the ACL token details that matches the secret ID diff --git a/website/content/commands/acl/token/read.mdx b/website/content/commands/acl/token/read.mdx index e249e1b38..85f0f6a4e 100644 --- a/website/content/commands/acl/token/read.mdx +++ b/website/content/commands/acl/token/read.mdx @@ -40,6 +40,9 @@ Usage: `consul acl token read [options] [args]` - `-self` - Indicates that the current HTTP token should be read by secret ID instead of expecting a -id option. +- `-expanded` - Indicates that the contents of the policies and roles affecting + the token should also be shown. + - `-format={pretty|json}` - Command output format. The default value is `pretty`. 
#### Enterprise Options @@ -87,3 +90,39 @@ Local: false Create Time: 0001-01-01 00:00:00 +0000 UTC Policies: ``` + +Get token details (Expanded) + +```shell-session +$ consul acl token read -expanded -id 986 +AccessorID: 986193b5-e2b5-eb26-6264-b524ea60cc6d +SecretID: ec15675e-2999-d789-832e-8c4794daa8d7 +Description: Read Nodes and Services +Local: false +Create Time: 2018-10-22 15:33:39.01789 -0400 EDT +Policies: + Policy Name: foo + ID: beb04680-815b-4d7c-9e33-3d707c24672c + Description: user policy on token + Rules: + service_prefix "" { + policy = "read" + } + + Policy Name: bar + ID: 18788457-584c-4812-80d3-23d403148a90 + Description: other user policy on token + Rules: + operator = "read" + +=== End of Authorizer Layer 0: Token === +=== Start of Authorizer Layer 2: Agent Configuration Defaults (Inherited) === +Description: Defined at request-time by the agent that resolves the ACL token; other agents may have different configuration defaults +Resolved By Agent: "leader" + +Default Policy: allow + Description: Backstop rule used if no preceding layer has a matching rule (refer to default_policy option in agent configuration) + +Down Policy: deny + Description: Defines what to do if this Token's information cannot be read from the primary_datacenter (refer to down_policy option in agent configuration) +``` From aa29324a24c02a41f4487bd1a455163cd11d4c47 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Thu, 31 Mar 2022 23:35:38 -0700 Subject: [PATCH 062/785] Avoid using sys/mounts to enable namespaces (#12655) * Avoid doing list of /sys/mounts From an internal ticket "Support standard "Vault namespace in the path" semantics for Connect Vault CA Provider" Vault allows the namespace to be specified as a prefix in the path of a PKI definition, but this doesn't currently work for ```IntermediatePKIPath``` specifications, because we attempt to list all of the paths to check if ours is already defined. This doesn't really work in a namespaced world. This changes the IntermediatePKIPath code to follow the same pattern as the root key, where we directly get the key rather than listing. This code is difficult to write automated tests for because it relies on features of Vault Enterprise, which isn't currently part of our test framework, so it was tested manually. Signed-off-by: Mark Anderson * add changelog Signed-off-by: Mark Anderson --- .changelog/12655.txt | 4 ++++ agent/connect/ca/provider_vault.go | 28 ++++++++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) create mode 100644 .changelog/12655.txt diff --git a/.changelog/12655.txt b/.changelog/12655.txt new file mode 100644 index 000000000..48237a314 --- /dev/null +++ b/.changelog/12655.txt @@ -0,0 +1,4 @@ +```release-note:improvement +Removed impediments to using a namespace prefixed IntermediatePKIPath +in a CA definition. 
+``` diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index beec649c3..787e5a247 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -356,22 +356,22 @@ func (v *VaultProvider) setupIntermediatePKIPath() error { if v.setupIntermediatePKIPathDone { return nil } - mounts, err := v.client.Sys().ListMounts() + + _, err := v.getCA(v.config.IntermediatePKIPath) if err != nil { - return err - } + if err == ErrBackendNotMounted { + err := v.client.Sys().Mount(v.config.IntermediatePKIPath, &vaultapi.MountInput{ + Type: "pki", + Description: "intermediate CA backend for Consul Connect", + Config: vaultapi.MountConfigInput{ + MaxLeaseTTL: v.config.IntermediateCertTTL.String(), + }, + }) - // Mount the backend if it isn't mounted already. - if _, ok := mounts[v.config.IntermediatePKIPath]; !ok { - err := v.client.Sys().Mount(v.config.IntermediatePKIPath, &vaultapi.MountInput{ - Type: "pki", - Description: "intermediate CA backend for Consul Connect", - Config: vaultapi.MountConfigInput{ - MaxLeaseTTL: v.config.IntermediateCertTTL.String(), - }, - }) - - if err != nil { + if err != nil { + return err + } + } else { return err } } From bbf0d13fc64128239fc62e7e9d41c0324278d268 Mon Sep 17 00:00:00 2001 From: Eric Date: Fri, 1 Apr 2022 10:38:56 -0400 Subject: [PATCH 063/785] Fix the Kubernetes service name for DNS --- website/content/docs/k8s/dns.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/k8s/dns.mdx b/website/content/docs/k8s/dns.mdx index d99158705..47c9fc189 100644 --- a/website/content/docs/k8s/dns.mdx +++ b/website/content/docs/k8s/dns.mdx @@ -26,11 +26,11 @@ to turn on [Consul to Kubernetes Service Sync](/docs/k8s/service-sync#consul-to- To configure KubeDNS or CoreDNS you'll first need the `ClusterIP` of the Consul DNS service created by the [Helm chart](/docs/k8s/helm). -The default name of the Consul DNS service will be `consul-consul-dns`. Use +The default name of the Consul DNS service will be `consul-dns`. Use that name to get the `ClusterIP`: ```shell-session -$ kubectl get svc consul-consul-dns --output jsonpath='{.spec.clusterIP}' +$ kubectl get svc consul-dns --output jsonpath='{.spec.clusterIP}' 10.35.240.78% ``` From 66391186ce532cd11929610d8c7c4601de51ec69 Mon Sep 17 00:00:00 2001 From: Eric Date: Fri, 1 Apr 2022 10:32:38 -0400 Subject: [PATCH 064/785] Tweak the Lambda Envoy configuration generated by the serverless patcher - Move from `strip_matching_host_port` to `strip_any_host_port` - Remove `auto_host_rewrite` since it conflicts with `strip_any_host_port` --- .changelog/12681.txt | 3 ++ agent/xds/serverless_plugin_oss_test.go | 9 ++++++ agent/xds/serverlessplugin/lambda_patcher.go | 27 ++++++++++++++++- agent/xds/serverlessplugin/patcher.go | 6 ++++ .../xds/serverlessplugin/serverlessplugin.go | 17 +++++++++++ ...da-terminating-gateway.envoy-1-20-x.golden | 4 +-- ...da-terminating-gateway.envoy-1-20-x.golden | 30 +++++++++++++++++++ 7 files changed, 93 insertions(+), 3 deletions(-) create mode 100644 .changelog/12681.txt create mode 100644 agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway.envoy-1-20-x.golden diff --git a/.changelog/12681.txt b/.changelog/12681.txt new file mode 100644 index 000000000..614770332 --- /dev/null +++ b/.changelog/12681.txt @@ -0,0 +1,3 @@ +```release-note:feature +xds: Add the ability to invoke AWS Lambdas through terminating gateways. 
+``` diff --git a/agent/xds/serverless_plugin_oss_test.go b/agent/xds/serverless_plugin_oss_test.go index 20c9a4d2d..5fbfdc1c6 100644 --- a/agent/xds/serverless_plugin_oss_test.go +++ b/agent/xds/serverless_plugin_oss_test.go @@ -80,6 +80,15 @@ func TestServerlessPluginFromSnapshot(t *testing.T) { } }, }, + { + name: "routes", + key: xdscommon.RouteType, + sorter: func(msgs []proto.Message) func(int, int) bool { + return func(i, j int) bool { + return msgs[i].(*envoy_listener_v3.Listener).Name < msgs[j].(*envoy_listener_v3.Listener).Name + } + }, + }, } for _, entity := range entities { diff --git a/agent/xds/serverlessplugin/lambda_patcher.go b/agent/xds/serverlessplugin/lambda_patcher.go index 5121a36df..c5d54d9cf 100644 --- a/agent/xds/serverlessplugin/lambda_patcher.go +++ b/agent/xds/serverlessplugin/lambda_patcher.go @@ -8,6 +8,7 @@ import ( envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" envoy_lambda_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/aws_lambda/v3" envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" @@ -78,6 +79,28 @@ func (p lambdaPatcher) CanPatch(kind api.ServiceKind) bool { return kind == p.kind } +func (p lambdaPatcher) PatchRoute(route *envoy_route_v3.RouteConfiguration) (*envoy_route_v3.RouteConfiguration, bool, error) { + if p.kind != api.ServiceKindTerminatingGateway { + return route, false, nil + } + + for _, virtualHost := range route.VirtualHosts { + for _, route := range virtualHost.Routes { + action, ok := route.Action.(*envoy_route_v3.Route_Route) + + if !ok { + continue + } + + // When auto_host_rewrite is set it conflicts with strip_any_host_port + // on the http_connection_manager filter. 
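+			// Illustrative editorial note, not part of the original change: clearing the
+			// route-level HostRewriteSpecifier drops auto_host_rewrite for this route, so
+			// host/port normalization is left to the strip_any_host_port option that
+			// PatchFilter sets on the http_connection_manager further down in this file.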
+ action.Route.HostRewriteSpecifier = nil + } + } + + return route, true, nil +} + func (p lambdaPatcher) PatchCluster(c *envoy_cluster_v3.Cluster) (*envoy_cluster_v3.Cluster, bool, error) { transportSocket, err := makeUpstreamTLSTransportSocket(&envoy_tls_v3.UpstreamTlsContext{ Sni: "*.amazonaws.com", @@ -160,7 +183,9 @@ func (p lambdaPatcher) PatchFilter(filter *envoy_listener_v3.Filter) (*envoy_lis httpFilter, {Name: "envoy.filters.http.router"}, } - config.StripMatchingHostPort = true + config.StripPortMode = &envoy_http_v3.HttpConnectionManager_StripAnyHostPort{ + StripAnyHostPort: true, + } newFilter, err := makeFilter("envoy.filters.network.http_connection_manager", config) if err != nil { return filter, false, errors.New("error making new filter") diff --git a/agent/xds/serverlessplugin/patcher.go b/agent/xds/serverlessplugin/patcher.go index 58847bcb6..8db1c95a1 100644 --- a/agent/xds/serverlessplugin/patcher.go +++ b/agent/xds/serverlessplugin/patcher.go @@ -3,6 +3,7 @@ package serverlessplugin import ( envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "github.com/hashicorp/consul/agent/xds/xdscommon" "github.com/hashicorp/consul/api" @@ -15,6 +16,11 @@ type patcher interface { // CanPatch determines if the patcher can mutate resources for the given api.ServiceKind CanPatch(api.ServiceKind) bool + // patchRoute patches a route to include the custom Envoy configuration + // PatchCluster patches a cluster to include the custom Envoy configuration + // required to integrate with the serverless integration. + PatchRoute(*envoy_route_v3.RouteConfiguration) (*envoy_route_v3.RouteConfiguration, bool, error) + // PatchCluster patches a cluster to include the custom Envoy configuration // required to integrate with the serverless integration. 
PatchCluster(*envoy_cluster_v3.Cluster) (*envoy_cluster_v3.Cluster, bool, error) diff --git a/agent/xds/serverlessplugin/serverlessplugin.go b/agent/xds/serverlessplugin/serverlessplugin.go index 8ab38e374..1d7387000 100644 --- a/agent/xds/serverlessplugin/serverlessplugin.go +++ b/agent/xds/serverlessplugin/serverlessplugin.go @@ -5,6 +5,7 @@ import ( envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "github.com/golang/protobuf/proto" "github.com/hashicorp/go-multierror" @@ -30,6 +31,7 @@ func MutateIndexedResources(resources *xdscommon.IndexedResources, config xdscom for _, indexType := range []string{ xdscommon.ClusterType, xdscommon.ListenerType, + xdscommon.RouteType, } { for nameOrSNI, msg := range resources.Index[indexType] { switch resource := msg.(type) { @@ -58,6 +60,21 @@ func MutateIndexedResources(resources *xdscommon.IndexedResources, config xdscom resources.Index[xdscommon.ListenerType][nameOrSNI] = newListener } + case *envoy_route_v3.RouteConfiguration: + patcher := getPatcherBySNI(config, config.Kind, nameOrSNI) + if patcher == nil { + continue + } + + newRoute, patched, err := patcher.PatchRoute(resource) + if err != nil { + resultErr = multierror.Append(resultErr, fmt.Errorf("error patching route: %w", err)) + continue + } + if patched { + resources.Index[xdscommon.RouteType][nameOrSNI] = newRoute + } + default: resultErr = multierror.Append(resultErr, fmt.Errorf("unsupported type was skipped: %T", resource)) } diff --git a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden index e0b77f6f4..d412ef5ce 100644 --- a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden +++ b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden @@ -211,7 +211,7 @@ } }, - "stripMatchingHostPort": true + "stripAnyHostPort": true } } ], @@ -269,4 +269,4 @@ ], "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", "nonce": "00000001" -} \ No newline at end of file +} diff --git a/agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway.envoy-1-20-x.golden new file mode 100644 index 000000000..4cefff9c6 --- /dev/null +++ b/agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway.envoy-1-20-x.golden @@ -0,0 +1,30 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "virtualHosts": [ + { + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "validateClusters": true + } + ], + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file From 3060b5cb8f9afeb33df080257842698809bdd1e4 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 1 Apr 2022 10:30:26 -0500 Subject: [PATCH 065/785] xds: errors from the xds serverless plugin are fatal (#12682) --- agent/xds/delta.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/agent/xds/delta.go b/agent/xds/delta.go index 7b4aa5242..872ac31aa 100644 --- a/agent/xds/delta.go +++ b/agent/xds/delta.go @@ -215,9 +215,8 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove if s.serverlessPluginEnabled { newResourceMap, err = serverlessplugin.MutateIndexedResources(newResourceMap, xdscommon.MakePluginConfiguration(cfgSnap)) - if err != nil { - generator.Logger.Warn("failed to patch xDS resources in the serverless plugin", "err", err) + return status.Errorf(codes.Unavailable, "failed to patch xDS resources in the serverless plugin: %v", err) } } From b0cba2ec0329a8442ff31023e8e2f418daf592a8 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Fri, 1 Apr 2022 10:35:56 -0700 Subject: [PATCH 066/785] mark disable_compat_1.9 to deprecate in 1.13, change default to true (#12675) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> --- .changelog/12675.txt | 3 + agent/agent.go | 4 +- agent/config/builder.go | 2 +- agent/http.go | 3 +- agent/metrics_test.go | 61 +++++++++++++++++++ website/content/docs/agent/options.mdx | 2 +- .../docs/upgrading/upgrade-specific.mdx | 14 ++++- 7 files changed, 82 insertions(+), 7 deletions(-) create mode 100644 .changelog/12675.txt diff --git a/.changelog/12675.txt b/.changelog/12675.txt new file mode 100644 index 000000000..0f46bb5a9 --- /dev/null +++ b/.changelog/12675.txt @@ -0,0 +1,3 @@ +```release-note:breaking-change +telemetry: the disable_compat_1.9 option now defaults to true. 1.9 style `consul.http...` metrics can still be enabled by setting `disable_compat_1.9 = false`. However, we will remove these metrics in 1.13. +``` \ No newline at end of file diff --git a/agent/agent.go b/agent/agent.go index fef53c0f2..7a313cb4f 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -698,7 +698,7 @@ func (a *Agent) Start(ctx context.Context) error { // DEPRECATED: Warn users if they're emitting deprecated metrics. Remove this warning and the flagged metrics in a // future release of Consul. if !a.config.Telemetry.DisableCompatOneNine { - a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in a future version of Consul. Set `telemetry { disable_compat_1.9 = true }` to disable them.") + a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in Consul 1.13. Consider not using this flag and rework instrumentation for 1.10 style http metrics.") } if a.tlsConfigurator.Cert() != nil { @@ -3797,7 +3797,7 @@ func (a *Agent) reloadConfig(autoReload bool) error { // DEPRECATED: Warn users on reload if they're emitting deprecated metrics. Remove this warning and the flagged // metrics in a future release of Consul. if !a.config.Telemetry.DisableCompatOneNine { - a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in a future version of Consul. Set `telemetry { disable_compat_1.9 = true }` to disable them.") + a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in Consul 1.13. 
Consider not using this flag and rework instrumentation for 1.10 style http metrics.") } return a.reloadConfigInternal(newCfg) diff --git a/agent/config/builder.go b/agent/config/builder.go index 1762f3f6d..b3bdc46f3 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -910,7 +910,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) { CirconusCheckTags: stringVal(c.Telemetry.CirconusCheckTags), CirconusSubmissionInterval: stringVal(c.Telemetry.CirconusSubmissionInterval), CirconusSubmissionURL: stringVal(c.Telemetry.CirconusSubmissionURL), - DisableCompatOneNine: boolVal(c.Telemetry.DisableCompatOneNine), + DisableCompatOneNine: boolValWithDefault(c.Telemetry.DisableCompatOneNine, true), DisableHostname: boolVal(c.Telemetry.DisableHostname), DogstatsdAddr: stringVal(c.Telemetry.DogstatsdAddr), DogstatsdTags: c.Telemetry.DogstatsdTags, diff --git a/agent/http.go b/agent/http.go index ba0067b62..16a3a2150 100644 --- a/agent/http.go +++ b/agent/http.go @@ -228,8 +228,7 @@ func (s *HTTPHandlers) handler(enableDebug bool) http.Handler { labels := []metrics.Label{{Name: "method", Value: req.Method}, {Name: "path", Value: path_label}} metrics.MeasureSinceWithLabels([]string{"api", "http"}, start, labels) - // DEPRECATED Emit pre-1.9 metric as `consul.http...` to maintain backwards compatibility. Enabled by - // default. Users may set `telemetry { disable_compat_1.9 = true }` + // DEPRECATED Emit pre-1.9 metric as `consul.http...`. This will be removed in 1.13. if !s.agent.config.Telemetry.DisableCompatOneNine { key := append([]string{"http", req.Method}, parts...) metrics.MeasureSince(key, start) diff --git a/agent/metrics_test.go b/agent/metrics_test.go index 5bbc7de23..2aedc0180 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -255,6 +255,67 @@ func TestHTTPHandlers_AgentMetrics_ConsulAutopilot_Prometheus(t *testing.T) { }) } +// TestHTTPHandlers_AgentMetrics_Disable1Dot9MetricsChange adds testing around the 1.9 style metrics +// https://www.consul.io/docs/agent/options#telemetry-disable_compat_1.9 +func TestHTTPHandlers_AgentMetrics_Disable1Dot9MetricsChange(t *testing.T) { + skipIfShortTesting(t) + // This test cannot use t.Parallel() since we modify global state, ie the global metrics instance + + // 1.9 style http metrics looked like this: + // agent_http_2_http_GET_v1_agent_members{quantile="0.5"} 0.1329520046710968 + t.Run("check that no consul.http metrics are emitted by default", func(t *testing.T) { + hcl := ` + telemetry = { + prometheus_retention_time = "5s" + disable_hostname = true + metrics_prefix = "agent_http" + } + ` + + a := StartTestAgent(t, TestAgent{HCL: hcl}) + defer a.Shutdown() + + // we have to use the `a.srv.handler()` to actually trigger the wrapped function + uri := fmt.Sprintf("http://%s%s", a.HTTPAddr(), "/v1/agent/members") + req, err := http.NewRequest("GET", uri, nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + handler := a.srv.handler(true) + handler.ServeHTTP(resp, req) + + respRec := httptest.NewRecorder() + recordPromMetrics(t, a, respRec) + + assertMetricNotExists(t, respRec, "agent_http_http_GET_v1_agent_members") + }) + + t.Run("check that we can still turn on consul.http metrics", func(t *testing.T) { + hcl := ` + telemetry = { + prometheus_retention_time = "5s", + disable_compat_1.9 = false + metrics_prefix = "agent_http_2" + } + ` + + a := StartTestAgent(t, TestAgent{HCL: hcl}) + defer a.Shutdown() + + uri := fmt.Sprintf("http://%s%s", a.HTTPAddr(), "/v1/agent/members") + req, err := 
http.NewRequest("GET", uri, nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + + handler := a.srv.handler(true) + handler.ServeHTTP(resp, req) + + respRec := httptest.NewRecorder() + recordPromMetrics(t, a, respRec) + + assertMetricExists(t, respRec, "agent_http_2_http_GET_v1_agent_members") + }) +} + func TestHTTPHandlers_AgentMetrics_TLSCertExpiry_Prometheus(t *testing.T) { skipIfShortTesting(t) // This test cannot use t.Parallel() since we modify global state, ie the global metrics instance diff --git a/website/content/docs/agent/options.mdx b/website/content/docs/agent/options.mdx index 2b1c1b867..696d8bc88 100644 --- a/website/content/docs/agent/options.mdx +++ b/website/content/docs/agent/options.mdx @@ -2175,7 +2175,7 @@ There are also a number of common configuration options supported by all provide geo location or datacenter, dc:sfo). By default, this is left blank and not used. - `disable_compat_1.9` ((#telemetry-disable_compat_1.9)) - This allows users to disable metrics deprecated in 1.9 so they are no longer emitted, saving on performance and storage in large deployments. Defaults to false. + This allows users to disable metrics deprecated in 1.9 so they are no longer emitted, improving performance and reducing storage in large deployments. As of 1.12 this defaults to `true` and will be removed, along with 1.9 style http metrics in 1.13. - `disable_hostname` ((#telemetry-disable_hostname)) This controls whether or not to prepend runtime telemetry with the machine's diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index e7284dd38..b3fb7477f 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -14,12 +14,24 @@ provided for their upgrades as a result of new features or changed behavior. This page is used to document those details separately from the standard upgrade flow. +## Consul 1.12.0 + +### 1.9 Telemetry Compatibility + +#### Changing the default behavior for option + +The [`disable_compat_19`](/docs/agent/options#telemetry-disable_compat_1.9) telemetry configuration option now defaults +to `true`. In prior Consul versions (1.10.x through 1.11.x), the config defaulted to `false`. If you require 1.9 style +`consul.http...` metrics, you may enable them by setting the flag to `false`. However, be advised that these metrics, as +well as the flag will be removed in upcoming Consul 1.13. We recommend changing your instrumentation to use 1.10 and later +style `consul.api.http...` metrics and removing the configuration flag from your setup. + ## Consul 1.11.0 ### 1.10 Compatibility Consul Enterprise versions 1.10.0 through 1.10.4 contain a latent bug that causes those client or server agents to deregister their own services or health -checks when some of the servers have been upgraded to 1.11. Before upgrading Consul Enterprise servers to 1.11, all Consul agents should first +checks when some of the servers have been upgraded to 1.11. Before upgrading Consul Enterprise servers to 1.11, all Consul agents should first be upgraded to 1.10.7 or higher to ensure forward compatibility and prevent flapping of catalog registrations. 
From d099eca725023e2dd6341d28e91845668be87e7f Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 4 Apr 2022 09:45:03 +0100 Subject: [PATCH 067/785] ui: Initial Server Status Overview Page (#12599) --- ui/packages/consul-ui/app/abilities/raft.js | 6 - ui/packages/consul-ui/app/abilities/zone.js | 10 + .../components/consul/server/card/layout.scss | 2 +- .../components/consul/server/list/index.hbs | 8 +- .../consul-ui/app/components/tile/index.scss | 2 +- ui/packages/consul-ui/app/models/dc.js | 3 + .../consul-ui/app/services/repository/dc.js | 63 ++++- .../base/decoration/base-placeholders.scss | 14 +- .../base/decoration/base-variables.scss | 1 + ui/packages/consul-ui/app/styles/routes.scss | 1 + .../routes/dc/overview/serverstatus.scss | 135 ++++++++++ .../consul-ui/app/templates/application.hbs | 3 + .../consul-ui/app/templates/dc/show.hbs | 22 +- .../consul-ui/app/templates/dc/show/index.hbs | 6 + .../app/templates/dc/show/serverstatus.hbs | 240 ++++++++++++++++++ .../mock-api/v1/operator/autopilot/state | 29 ++- .../consul-ui/tests/unit/abilities/-test.js | 3 + .../consul-ui/translations/common/en-us.yaml | 4 + .../consul-ui/translations/routes/en-us.yaml | 12 +- .../consul-ui/vendor/consul-ui/routes.js | 8 +- 20 files changed, 525 insertions(+), 47 deletions(-) delete mode 100644 ui/packages/consul-ui/app/abilities/raft.js create mode 100644 ui/packages/consul-ui/app/abilities/zone.js create mode 100644 ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss create mode 100644 ui/packages/consul-ui/app/templates/dc/show/index.hbs create mode 100644 ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs diff --git a/ui/packages/consul-ui/app/abilities/raft.js b/ui/packages/consul-ui/app/abilities/raft.js deleted file mode 100644 index 169619ad8..000000000 --- a/ui/packages/consul-ui/app/abilities/raft.js +++ /dev/null @@ -1,6 +0,0 @@ -import BaseAbility from './base'; - -export default class RaftAbility extends BaseAbility { - resource = 'operator'; - segmented = false; -} diff --git a/ui/packages/consul-ui/app/abilities/zone.js b/ui/packages/consul-ui/app/abilities/zone.js new file mode 100644 index 000000000..a976bd9e6 --- /dev/null +++ b/ui/packages/consul-ui/app/abilities/zone.js @@ -0,0 +1,10 @@ +import BaseAbility from './base'; +import { inject as service } from '@ember/service'; + +export default class ZoneAbility extends BaseAbility { + @service('env') env; + + get canRead() { + return this.env.var('CONSUL_NSPACES_ENABLED'); + } +} diff --git a/ui/packages/consul-ui/app/components/consul/server/card/layout.scss b/ui/packages/consul-ui/app/components/consul/server/card/layout.scss index 8ca116a8f..a1c679bd4 100644 --- a/ui/packages/consul-ui/app/components/consul/server/card/layout.scss +++ b/ui/packages/consul-ui/app/components/consul/server/card/layout.scss @@ -16,7 +16,7 @@ margin-bottom: calc(var(--padding-y) / 2); } %consul-server-card.voting-status-leader dd { - margin-left: calc(var(--tile-size) + var(--padding-x)); + margin-left: calc(var(--tile-size) + 1rem); /* 16px */ } diff --git a/ui/packages/consul-ui/app/components/consul/server/list/index.hbs b/ui/packages/consul-ui/app/components/consul/server/list/index.hbs index 4f9d1b50a..2144fdd9f 100644 --- a/ui/packages/consul-ui/app/components/consul/server/list/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/server/list/index.hbs @@ -7,9 +7,11 @@
    {{#each @items as |item|}}
    - + + +
    {{/each}}
diff --git a/ui/packages/consul-ui/app/components/tile/index.scss b/ui/packages/consul-ui/app/components/tile/index.scss index 7754657eb..55ceb7b40 100644 --- a/ui/packages/consul-ui/app/components/tile/index.scss +++ b/ui/packages/consul-ui/app/components/tile/index.scss @@ -30,7 +30,7 @@ border-color: rgb(var(--tone-gray-999) / 10%); } %with-leader-tile::after { - --icon-name: icon-star-circle; + --icon-name: icon-star-fill; --icon-size: icon-700; color: rgb(var(--strawberry-500)); } diff --git a/ui/packages/consul-ui/app/models/dc.js b/ui/packages/consul-ui/app/models/dc.js index da0e06551..48250722b 100644 --- a/ui/packages/consul-ui/app/models/dc.js +++ b/ui/packages/consul-ui/app/models/dc.js @@ -14,6 +14,9 @@ export default class Datacenter extends Model { @attr('string') Leader; @attr() Voters; // [] @attr() Servers; // [] the API uses {} but we reshape that on the frontend + @attr() RedundancyZones; + @attr() Default; // added by the frontend, {Servers: []} any server that isn't in a zone + @attr() ReadReplicas; // @attr('boolean') Local; @attr('boolean') Primary; diff --git a/ui/packages/consul-ui/app/services/repository/dc.js b/ui/packages/consul-ui/app/services/repository/dc.js index 517c932ba..f47dfc683 100644 --- a/ui/packages/consul-ui/app/services/repository/dc.js +++ b/ui/packages/consul-ui/app/services/repository/dc.js @@ -108,21 +108,56 @@ export default class DcService extends RepositoryService { GET /v1/operator/autopilot/state?${{ dc }} X-Request-ID: ${uri} `)( - (headers, body, cache) => ({ - meta: { - version: 2, - uri: uri, - interval: 30 * SECONDS - }, - body: cache( - { - ...body, - // turn servers into an array instead of a map/object - Servers: Object.values(body.Servers) + (headers, body, cache) => { + // turn servers into an array instead of a map/object + const servers = Object.values(body.Servers); + const grouped = []; + return { + meta: { + version: 2, + uri: uri, }, - uri => uri`${MODEL_NAME}:///${''}/${''}/${dc}/datacenter` - ) - }) + body: cache( + { + ...body, + // all servers + Servers: servers, + RedundancyZones: Object.entries(body.RedundancyZones || {}).map(([key, value]) => { + const zone = { + ...value, + Name: key, + Healthy: true, + // convert the string[] to Server[] + Servers: value.Servers.reduce((prev, item) => { + const server = body.Servers[item]; + // TODO: It is not currently clear whether we should be + // taking ReadReplicas out of the RedundancyZones when we + // encounter one in a Zone once this is cleared up either + // way we can either remove this comment or make any + // necessary amends here + if(!server.ReadReplica) { + // keep a record of things + grouped.push(server.ID); + prev.push(server); + } + return prev; + }, []), + } + return zone; + }), + ReadReplicas: (body.ReadReplicas || []).map(item => { + // keep a record of things + grouped.push(item); + return body.Servers[item]; + }), + Default: { + Servers: servers.filter(item => !grouped.includes(item.ID)) + } + }, + uri => uri`${MODEL_NAME}:///${''}/${''}/${dc}/datacenter` + ) + } + } ); } diff --git a/ui/packages/consul-ui/app/styles/base/decoration/base-placeholders.scss b/ui/packages/consul-ui/app/styles/base/decoration/base-placeholders.scss index 095755d00..8e64f496b 100644 --- a/ui/packages/consul-ui/app/styles/base/decoration/base-placeholders.scss +++ b/ui/packages/consul-ui/app/styles/base/decoration/base-placeholders.scss @@ -10,13 +10,13 @@ } %visually-unhidden, %unvisually-hidden { - position: static; - clip: unset; - overflow: visible; - width: auto; - 
height: auto; - margin: 0; - padding: 0; + position: static !important; + clip: unset !important; + overflow: visible !important; + width: auto !important; + height: auto !important; + margin: 0 !important; + padding: 0 !important; } %visually-hidden-text { text-indent: -9000px; diff --git a/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss b/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss index e97f8df12..d2ee83711 100644 --- a/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss +++ b/ui/packages/consul-ui/app/styles/base/decoration/base-variables.scss @@ -15,6 +15,7 @@ --decor-border-400: 4px solid; /* box-shadowing*/ + --decor-elevation-000: none; --decor-elevation-100: 0 3px 2px rgb(var(--black) / 6%); --decor-elevation-200: 0 2px 4px rgb(var(--black) / 10%); --decor-elevation-300: 0 5px 1px -2px rgb(var(--black) / 12%); diff --git a/ui/packages/consul-ui/app/styles/routes.scss b/ui/packages/consul-ui/app/styles/routes.scss index 34f12616b..238790812 100644 --- a/ui/packages/consul-ui/app/styles/routes.scss +++ b/ui/packages/consul-ui/app/styles/routes.scss @@ -3,3 +3,4 @@ @import 'routes/dc/kv/index'; @import 'routes/dc/acls/index'; @import 'routes/dc/intentions/index'; +@import 'routes/dc/overview/serverstatus'; diff --git a/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss b/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss new file mode 100644 index 000000000..0e035f8a2 --- /dev/null +++ b/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss @@ -0,0 +1,135 @@ +section[data-route='dc.show.serverstatus'] { + @extend %serverstatus-route; +} +%serverstatus-route .server-failure-tolerance { + @extend %server-failure-tolerance; +} +%serverstatus-route .redundancy-zones { + @extend %redundancy-zones; +} +%redundancy-zones section { + @extend %redundancy-zone; +} + +/**/ + +%serverstatus-route h2, +%serverstatus-route h3 { + @extend %h200; +} + +%server-failure-tolerance { + @extend %panel; + box-shadow: var(--decor-elevation-000); + padding: var(--padding-y) var(--padding-x); + width: 770px; + display: flex; + flex-wrap: wrap; +} +%server-failure-tolerance > header { + width: 100%; + padding-bottom: 0.500rem; /* 8px */ + margin-bottom: 1rem; /* 16px */ + border-bottom: var(--decor-border-100); + border-color: rgb(var(--tone-border)); +} +%server-failure-tolerance header em { + @extend %pill-200; + font-size: 0.812rem; /* 13px */ + background-color: rgb(var(--tone-gray-200)); + + text-transform: uppercase; + font-style: normal; + +} +%server-failure-tolerance > section { + width: 50%; +} +%server-failure-tolerance > section, +%server-failure-tolerance dl { + display: flex; + flex-direction: column; +} +%server-failure-tolerance dl { + flex-grow: 1; + justify-content: space-between; +} +%server-failure-tolerance dd { + display: flex; + align-items: center; +} +%server-failure-tolerance dl.warning dd::before { + --icon-name: icon-alert-circle; + --icon-resolution: .5; + --icon-size: icon-800; + --icon-color: rgb(var(--tone-orange-400)); + content: ''; + margin-right: 0.500rem; /* 8px */ +} +%server-failure-tolerance section:first-of-type dl { + padding-right: 1.500rem; /* 24px */ +} +%server-failure-tolerance dt { + @extend %p2; + color: rgb(var(--tone-gray-700)); +} +%server-failure-tolerance dd { + font-size: var(--typo-size-250); + color: rgb(var(--tone-gray-999)); +} +%server-failure-tolerance header span::before { + --icon-name: icon-info; + --icon-size: icon-300; + 
--icon-color: rgb(var(--tone-gray-500)); + vertical-align: unset; + content: ''; +} + +%serverstatus-route section:not([class*='-tolerance']) h2 { + margin-top: 1.5rem; /* 24px */ + margin-bottom: 1.5rem; /* 24px */ +} +%serverstatus-route section:not([class*='-tolerance']) header { + margin-top: 18px; + margin-bottom: 18px; +} + + +%redundancy-zones h3 { + @extend %h300; +} +%redundancy-zone header { + display: flow-root; +} +%redundancy-zone header h3 { + float: left; + margin-right: 0.5rem; /* 8px */ +} + +%redundancy-zone header dl { + @extend %horizontal-kv-list; + @extend %pill-500; +} +%redundancy-zone header dt { + @extend %visually-unhidden; +} +%redundancy-zone header dl:not(.warning) { + background-color: rgb(var(--tone-gray-100)); +} +%redundancy-zone header dl.warning { + background-color: rgb(var(--tone-orange-100)); + color: rgb(var(--tone-orange-800)); +} +%redundancy-zone header dl.warning::before { + --icon-name: icon-alert-circle; + --icon-size: icon-000; + margin-right: 0.312rem; /* 5px */ + content: ''; +} +%redundancy-zone header dt::after { + content: ':'; + display: inline-block; + vertical-align: revert; + background-color: var(--transparent); +} + diff --git a/ui/packages/consul-ui/app/templates/application.hbs b/ui/packages/consul-ui/app/templates/application.hbs index d84c79057..d1c8a3a23 100644 --- a/ui/packages/consul-ui/app/templates/application.hbs +++ b/ui/packages/consul-ui/app/templates/application.hbs @@ -47,6 +47,9 @@ as |source|> {{! redirect if we aren't on a URL with dc information }} {{#if (eq route.currentName 'index')}} +{{! until we get to the dc route we don't know any permissions }} +{{! as we don't know the dc, any inital permission based }} +{{! redirects are in the dc.show route}} {{did-insert (route-action 'replaceWith' 'dc.show' (hash dc=(env 'CONSUL_DATACENTER_LOCAL') diff --git a/ui/packages/consul-ui/app/templates/dc/show.hbs b/ui/packages/consul-ui/app/templates/dc/show.hbs index 090152e1e..6089703a7 100644 --- a/ui/packages/consul-ui/app/templates/dc/show.hbs +++ b/ui/packages/consul-ui/app/templates/dc/show.hbs @@ -9,7 +9,8 @@ as |route|> - + +{{#if false}} href=(href-to "dc.show.serverstatus") selected=(is-href "dc.show.serverstatus") ) +(if false (hash - label=(compute (fn route.t 'health.title')) - href=(href-to 'dc.show.health') - selected=(is-href 'dc.show.health') + label=(compute (fn route.t 'cataloghealth.title')) + href=(href-to 'dc.show.cataloghealth') + selected=(is-href 'dc.show.cataloghealth') ) -(if (and (can 'read license') (not (is 'hcp'))) +'') +(if (can 'read license') (hash label=(compute (fn route.t 'license.title')) href=(href-to 'dc.show.license') @@ -32,6 +35,15 @@ as |route|> ) '') }}/> +{{/if}} + + + + {{outlet}} + diff --git a/ui/packages/consul-ui/app/templates/dc/show/index.hbs b/ui/packages/consul-ui/app/templates/dc/show/index.hbs new file mode 100644 index 000000000..96d39860a --- /dev/null +++ b/ui/packages/consul-ui/app/templates/dc/show/index.hbs @@ -0,0 +1,6 @@ + + {{did-insert (route-action 'replaceWith' (if (can 'access overview') 'dc.show.serverstatus' 'dc.services.index'))}} + + diff --git a/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs b/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs new file mode 100644 index 000000000..211289c72 --- /dev/null +++ b/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs @@ -0,0 +1,240 @@ + + + +{{#let + loader.data +as |item|}} + + + + + + {{#if (eq loader.error.status "404")}} + + + Warning! + + +

+ This service has been deregistered and no longer exists in the catalog. +

+
+
+ {{else if (eq loader.error.status "403")}} + + + Error! + + +

+ You no longer have access to this service +

+
+
+ {{else}} + + + Warning! + + +

+ An error was returned whilst loading this data, refresh to try again. +

+
+
+ {{/if}} +
+ + +
+ +
+ +
+

+ {{compute (fn route.t 'tolerance.header')}} +

+
+ +
+
+

+ {{compute (fn route.t 'tolerance.immediate.header')}} +

+
+
+
+ {{compute (fn route.t 'tolerance.immediate.body')}} +
+
+ {{item.FailureTolerance}} +
+
+
+ +
+
+

+ {{compute (fn route.t 'tolerance.optimistic.header')}} + {{#if (not (can 'read zones'))}} + + {{t 'common.ui.enterprisefeature'}} + + {{/if}} + 30 seconds between server failures, Consul can restore the Immediate Fault Tolerance by replacing failed active voters with healthy back-up voters when using redundancy zones.'}} + > + +

+
+
+
+ {{compute (fn route.t 'tolerance.optimistic.body')}} +
+
+ {{item.OptimisticFailureTolerance}} +
+
+ +
+ +
+ + {{#if (gt item.RedundancyZones.length 0)}} +
+
+

+ {{pluralize (t 'common.consul.redundancyzone')}} +

+
+ + {{#each item.RedundancyZones as |item|}} + {{#if (gt item.Servers.length 0) }} +
+
+

+ {{item.Name}} +

+
+
{{t 'common.consul.failuretolerance'}}
+
{{item.FailureTolerance}}
+
+
+ +
+ {{/if}} + {{/each}} + + {{#if (gt item.Default.Servers.length 0)}} +
+
+

+ {{compute (fn route.t 'unassigned')}} +

+
+ +
+ {{/if}} + +
+ {{else}} +
+
+

+ {{compute (fn route.t 'servers')}} +

+
+ +
+ {{/if}} + + {{#if (gt item.ReadReplicas.length 0)}} +
+
+

+ {{pluralize (t 'common.consul.readreplica')}} +

+
+ + +
+ {{/if}} + +
+
+{{/let}} +
+
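For orientation only (an editorial sketch, not part of this patch): the page above is driven by `GET /v1/operator/autopilot/state`, which the `repository/dc` service reshapes before the template renders it. A heavily trimmed, hypothetical payload using only the fields referenced in this patch might look like the following; the IDs and numbers are invented, and `RedundancyZones`/`ReadReplicas` only appear on Consul Enterprise.

```json
{
  "Healthy": true,
  "FailureTolerance": 1,
  "OptimisticFailureTolerance": 2,
  "Leader": "6b9dec03-...",
  "Voters": ["6b9dec03-...", "e72eb8cb-..."],
  "Servers": {
    "6b9dec03-...": { "Name": "server-1", "Status": "leader", "Healthy": true, "ReadReplica": false },
    "e72eb8cb-...": { "Name": "server-2", "Status": "voter", "Healthy": true, "ReadReplica": false }
  },
  "RedundancyZones": {
    "zone-a": { "Servers": ["6b9dec03-...", "e72eb8cb-..."], "FailureTolerance": 1 }
  },
  "ReadReplicas": ["f81d3f0e-..."]
}
```

The frontend turns the `Servers` map into an array, groups redundancy-zone members and read replicas, and collects any remaining servers under `Default`, which is what the template iterates over.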
+ diff --git a/ui/packages/consul-ui/mock-api/v1/operator/autopilot/state b/ui/packages/consul-ui/mock-api/v1/operator/autopilot/state index e3dbc500a..1cd442d09 100644 --- a/ui/packages/consul-ui/mock-api/v1/operator/autopilot/state +++ b/ui/packages/consul-ui/mock-api/v1/operator/autopilot/state @@ -1,7 +1,8 @@ ${[0].map(_ => { - const servers = range(env('CONSUL_SERVER_COUNT', 3)).map(_ => fake.random.uuid()); + const zones = range(env('CONSUL_ZONE_COUNT', 3)).map(_ => fake.hacker.noun()); + const servers = range(env('CONSUL_SERVER_COUNT', 15)).map(_ => fake.random.uuid()); const failureTolerance = Math.ceil(servers.length / 2); - const optimisticTolerance = failureTolerance; // <== same for now + const optimisticTolerance = 0; const leader = fake.random.number({min: 0, max: servers.length - 1}); return ` { @@ -18,10 +19,10 @@ ${[0].map(_ => { "LastContact": "0s", "LastTerm": 2, "LastIndex": 91, - "Healthy": true, + "Healthy": ${fake.random.boolean()}, "StableSince": "2022-02-02T11:59:01.0708146Z", "ReadReplica": false, - "Status": "${i === leader ? `leader` : `voter`}", + "Status": "${i === leader ? `leader` : fake.helpers.randomize(['non-voter', 'voter', 'staging'])}", "Meta": { "consul-network-segment": "" }, @@ -30,8 +31,26 @@ ${[0].map(_ => { `)}}, "Leader": "${servers[leader]}", "Voters": [ +${servers.map(item => `"${item}"`)} + ], +${ env('CONSUL_ZONES_ENABLE', false) ? ` + "RedundancyZones": {${zones.map((item, i) => ` + "${item}": { + "Servers": [ +${servers.map(item => `"${item}"`)} + ], + "Voters": [ +${servers.map(item => `"${item}"`)} + ], + "FailureTolerance": ${i} + } + `)} + }, + "ReadReplicas": [ ${servers.map(item => `"${item}"`)} - ] + ], +` : ``} + "Upgrade": {} } `; })} diff --git a/ui/packages/consul-ui/tests/unit/abilities/-test.js b/ui/packages/consul-ui/tests/unit/abilities/-test.js index e3ac70d9f..3ac3cf072 100644 --- a/ui/packages/consul-ui/tests/unit/abilities/-test.js +++ b/ui/packages/consul-ui/tests/unit/abilities/-test.js @@ -52,6 +52,9 @@ module('Unit | Ability | *', function(hooks) { // TODO: We currently hardcode KVs to always be true assert.equal(true, ability[`can${perm}`], `Expected ${item}.can${perm} to be true`); return; + case 'zone': + // Zone permissions depend on NSPACES_ENABLED + return; } assert.equal( bool, diff --git a/ui/packages/consul-ui/translations/common/en-us.yaml b/ui/packages/consul-ui/translations/common/en-us.yaml index e5700effe..5365996ab 100644 --- a/ui/packages/consul-ui/translations/common/en-us.yaml +++ b/ui/packages/consul-ui/translations/common/en-us.yaml @@ -14,6 +14,7 @@ ui: name: Name creation: Creation maxttl: Max TTL + enterprisefeature: Enterprise feature consul: name: Name passing: Passing @@ -41,6 +42,9 @@ consul: destinationname: Destination Name sourcename: Source Name displayname: Display Name + failuretolerance: Fault tolerance + readreplica: Read replica + redundancyzone: Redundancy zone search: search: Search searchproperty: Search Across diff --git a/ui/packages/consul-ui/translations/routes/en-us.yaml b/ui/packages/consul-ui/translations/routes/en-us.yaml index 82c60e4a8..59dd94ff1 100644 --- a/ui/packages/consul-ui/translations/routes/en-us.yaml +++ b/ui/packages/consul-ui/translations/routes/en-us.yaml @@ -3,10 +3,20 @@ dc: title: Cluster Overview serverstatus: title: Server status - health: + unassigned: Unassigned Zones + tolerance: + header: Server fault tolerance + immediate: + header: Immediate + body: the number of healthy active voting servers that can fail at once without causing an outage + 
optimistic: + header: Optimistic + body: the number of healthy active and back-up voting servers that can fail gradually without causing an outage + cataloghealth: title: Health license: title: License + nodes: show: healthchecks: diff --git a/ui/packages/consul-ui/vendor/consul-ui/routes.js b/ui/packages/consul-ui/vendor/consul-ui/routes.js index c79057c3b..99b3cfda5 100644 --- a/ui/packages/consul-ui/vendor/consul-ui/routes.js +++ b/ui/packages/consul-ui/vendor/consul-ui/routes.js @@ -13,18 +13,17 @@ show: { _options: { path: '/overview', - redirect: './serverstatus', abilities: ['access overview'] }, serverstatus: { _options: { path: '/server-status', - abilities: ['access overview', 'read raft'] + abilities: ['access overview', 'read zones'] }, }, - health: { + cataloghealth: { _options: { - path: '/health', + path: '/catalog-health', abilities: ['access overview'] }, }, @@ -417,6 +416,7 @@ }, index: { _options: { path: '/' }, + // root index redirects are currently dealt with in application.hbs }, settings: { _options: { From cdcb249449bb40f6a80d6299f41bd433a37f8fd3 Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Mon, 4 Apr 2022 11:31:39 -0400 Subject: [PATCH 068/785] add a rate limiter to config auto-reload (#12490) * add config watcher to the config package * add logging to watcher * add test and refactor to add WatcherEvent. * add all API calls and fix a bug with recreated files * add tests for watcher * remove the unnecessary use of context * Add debug log and a test for file rename * use inode to detect if the file is recreated/replaced and only listen to create events. * tidy ups (#1535) * tidy ups * Add tests for inode reconcile * fix linux vs windows syscall * fix linux vs windows syscall * fix windows compile error * increase timeout * use ctime ID * remove remove/creation test as it's a use case that fail in linux * fix linux/windows to use Ino/CreationTime * fix the watcher to only overwrite current file id * fix linter error * fix remove/create test * set reconcile loop to 200 Milliseconds * fix watcher to not trigger event on remove, add more tests * on a remove event try to add the file back to the watcher and trigger the handler if success * fix race condition * fix flaky test * fix race conditions * set level to info * fix when file is removed and get an event for it after * fix to trigger handler when we get a remove but re-add fail * fix error message * add tests for directory watch and fixes * detect if a file is a symlink and return an error on Add * rename Watcher to FileWatcher and remove symlink deref * add fsnotify@v1.5.1 * fix go mod * do not reset timer on errors, rename OS specific files * rename New func * events trigger on write and rename * add missing test * fix flaking tests * fix flaky test * check reconcile when removed * delete invalid file * fix test to create files with different mod time. * back date file instead of sleeping * add watching file in agent command. 
* fix watcher call to use new API * add configuration and stop watcher when server stop * add certs as watched files * move FileWatcher to the agent start instead of the command code * stop watcher before replacing it * save watched files in agent * add add and remove interfaces to the file watcher * fix remove to not return an error * use `Add` and `Remove` to update certs files * fix tests * close events channel on the file watcher even when the context is done * extract `NotAutoReloadableRuntimeConfig` is a separate struct * fix linter errors * add Ca configs and outgoing verify to the not auto reloadable config * add some logs and fix to use background context * add tests to auto-config reload * remove stale test * add tests to changes to config files * add check to see if old cert files still trigger updates * rename `NotAutoReloadableRuntimeConfig` to `StaticRuntimeConfig` * fix to re add both key and cert file. Add test to cover this case. * review suggestion Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * add check to static runtime config changes * fix test * add changelog file * fix review comments * Apply suggestions from code review Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * update flag description Co-authored-by: FFMMM * fix compilation error * add static runtime config support * fix test * fix review comments * fix log test * Update .changelog/12329.txt Co-authored-by: Dan Upton * transfer tests to runtime_test.go * fix filewatcher Replace to not deadlock. * avoid having lingering locks Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * split ReloadConfig func * fix warning message Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * convert `FileWatcher` into an interface * fix compilation errors * fix tests * extract func for adding and removing files * add a coalesceTimer with a very small timer * extract coaelsce Timer and add a shim for testing * add tests to coalesceTimer fix to send remaining events * set `coalesceTimer` to 1 Second * support symlink, fix a nil deref. * fix compile error * fix compile error * refactor file watcher rate limiting to be a Watcher implementation * fix linter issue * fix runtime config * fix runtime test * fix flaky tests * fix compile error * Apply suggestions from code review Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * fix agent New to return an error if File watcher New return an error * quit timer loop if ctx is canceled * Apply suggestions from code review Co-authored-by: Chris S. Kim Co-authored-by: Ashwin Venkatesh Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: FFMMM Co-authored-by: Daniel Upton Co-authored-by: Chris S. 
Kim --- agent/agent.go | 48 +++--- agent/agent_test.go | 155 ++++++++++++++++-- agent/config/builder.go | 117 ++++++------- agent/config/file_watcher.go | 6 +- agent/config/file_watcher_test.go | 21 ++- agent/config/ratelimited_file_watcher.go | 90 ++++++++++ agent/config/ratelimited_file_watcher_test.go | 91 ++++++++++ agent/config/runtime.go | 3 + agent/config/runtime_test.go | 3 +- .../TestRuntimeConfig_Sanitize.golden | 3 +- agent/testagent.go | 3 + 11 files changed, 437 insertions(+), 103 deletions(-) create mode 100644 agent/config/ratelimited_file_watcher.go create mode 100644 agent/config/ratelimited_file_watcher_test.go diff --git a/agent/agent.go b/agent/agent.go index 7a313cb4f..91d42cb73 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -361,9 +361,9 @@ type Agent struct { // run by the Agent routineManager *routine.Manager - // FileWatcher is the watcher responsible to report events when a config file + // configFileWatcher is the watcher responsible to report events when a config file // changed - FileWatcher config.Watcher + configFileWatcher config.Watcher // xdsServer serves the XDS protocol for configuring Envoy proxies. xdsServer *xds.Server @@ -462,6 +462,13 @@ func New(bd BaseDeps) (*Agent, error) { a.baseDeps.WatchedFiles = append(a.baseDeps.WatchedFiles, f.Cfg.CertFile) } } + if a.baseDeps.RuntimeConfig.AutoReloadConfig && len(a.baseDeps.WatchedFiles) > 0 { + w, err := config.NewRateLimitedFileWatcher(a.baseDeps.WatchedFiles, a.baseDeps.Logger, a.baseDeps.RuntimeConfig.AutoReloadConfigCoalesceInterval) + if err != nil { + return nil, err + } + a.configFileWatcher = w + } return &a, nil } @@ -713,25 +720,20 @@ func (a *Agent) Start(ctx context.Context) error { }) // start a go routine to reload config based on file watcher events - if a.baseDeps.RuntimeConfig.AutoReloadConfig && len(a.baseDeps.WatchedFiles) > 0 { - w, err := config.NewFileWatcher(a.baseDeps.WatchedFiles, a.baseDeps.Logger) - if err != nil { - a.baseDeps.Logger.Error("error loading config", "error", err) - } else { - a.FileWatcher = w - a.baseDeps.Logger.Debug("starting file watcher") - a.FileWatcher.Start(context.Background()) - go func() { - for event := range a.FileWatcher.EventsCh() { - a.baseDeps.Logger.Debug("auto-reload config triggered", "event-file", event.Filename) - err := a.AutoReloadConfig() - if err != nil { - a.baseDeps.Logger.Error("error loading config", "error", err) - } + if a.configFileWatcher != nil { + a.baseDeps.Logger.Debug("starting file watcher") + a.configFileWatcher.Start(context.Background()) + go func() { + for event := range a.configFileWatcher.EventsCh() { + a.baseDeps.Logger.Debug("auto-reload config triggered", "num-events", len(event.Filenames)) + err := a.AutoReloadConfig() + if err != nil { + a.baseDeps.Logger.Error("error loading config", "error", err) } - }() - } + } + }() } + return nil } @@ -1413,8 +1415,8 @@ func (a *Agent) ShutdownAgent() error { a.stopAllWatches() // Stop config file watcher - if a.FileWatcher != nil { - a.FileWatcher.Stop() + if a.configFileWatcher != nil { + a.configFileWatcher.Stop() } a.stopLicenseManager() @@ -3772,13 +3774,13 @@ func (a *Agent) reloadConfig(autoReload bool) error { {a.config.TLS.HTTPS, newCfg.TLS.HTTPS}, } { if f.oldCfg.KeyFile != f.newCfg.KeyFile { - a.FileWatcher.Replace(f.oldCfg.KeyFile, f.newCfg.KeyFile) + a.configFileWatcher.Replace(f.oldCfg.KeyFile, f.newCfg.KeyFile) if err != nil { return err } } if f.oldCfg.CertFile != f.newCfg.CertFile { - a.FileWatcher.Replace(f.oldCfg.CertFile, f.newCfg.CertFile) + 
a.configFileWatcher.Replace(f.oldCfg.CertFile, f.newCfg.CertFile) if err != nil { return err } diff --git a/agent/agent_test.go b/agent/agent_test.go index 43d9bd31d..ba82f127f 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -5545,7 +5545,8 @@ func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) - cert1 := srv.tlsConfigurator.Cert() + cert1Pub := srv.tlsConfigurator.Cert().Certificate + cert1Key := srv.tlsConfigurator.Cert().PrivateKey certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{ Signer: signer, @@ -5575,8 +5576,10 @@ func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { // cert should not change as we did not update the associated key time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { - require.Equal(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) - require.Equal(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + cert := srv.tlsConfigurator.Cert() + require.NotNil(r, cert) + require.Equal(r, cert1Pub, cert.Certificate) + require.Equal(r, cert1Key, cert.PrivateKey) }) require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) @@ -5584,8 +5587,8 @@ func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { // cert should change as we did not update the associated key time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { - require.NotEqual(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) - require.NotEqual(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + require.NotEqual(r, cert1Pub, srv.tlsConfigurator.Cert().Certificate) + require.NotEqual(r, cert1Key, srv.tlsConfigurator.Cert().PrivateKey) }) } @@ -5647,11 +5650,13 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { `), 0600)) srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}}) + defer srv.Shutdown() testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) - cert1 := srv.tlsConfigurator.Cert() + cert1Pub := srv.tlsConfigurator.Cert().Certificate + cert1Key := srv.tlsConfigurator.Cert().PrivateKey certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{ Signer: signer, @@ -5667,8 +5672,10 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { // cert should not change as we did not update the associated key time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { - require.Equal(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) - require.Equal(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + cert := srv.tlsConfigurator.Cert() + require.NotNil(r, cert) + require.Equal(r, cert1Pub, cert.Certificate) + require.Equal(r, cert1Key, cert.PrivateKey) }) require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) @@ -5689,10 +5696,13 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { // cert should change as we did not update the associated key time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { - require.NotEqual(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate) - require.NotEqual(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + cert := srv.tlsConfigurator.Cert() + require.NotNil(r, cert) + require.NotEqual(r, cert1Key, cert.Certificate) + require.NotEqual(r, cert1Key, cert.PrivateKey) }) - cert2 := srv.tlsConfigurator.Cert() 
+ cert2Pub := srv.tlsConfigurator.Cert().Certificate + cert2Key := srv.tlsConfigurator.Cert().PrivateKey certNew2, privateKeyNew2, err := tlsutil.GenerateCert(tlsutil.CertOpts{ Signer: signer, @@ -5707,8 +5717,10 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { // cert should not change as we did not update the associated cert time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { - require.Equal(r, cert2.Certificate, srv.tlsConfigurator.Cert().Certificate) - require.Equal(r, cert2.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + cert := srv.tlsConfigurator.Cert() + require.NotNil(r, cert) + require.Equal(r, cert2Pub, cert.Certificate) + require.Equal(r, cert2Key, cert.PrivateKey) }) require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew2), 0600)) @@ -5716,7 +5728,120 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { // cert should change as we did update the associated key time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { - require.NotEqual(r, cert2.Certificate, srv.tlsConfigurator.Cert().Certificate) - require.NotEqual(r, cert2.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey) + cert := srv.tlsConfigurator.Cert() + require.NotNil(r, cert) + require.NotEqual(r, cert2Pub, cert.Certificate) + require.NotEqual(r, cert2Key, cert.PrivateKey) }) } + +func Test_coalesceTimerTwoPeriods(t *testing.T) { + + certsDir := testutil.TempDir(t, "auto-config") + + // write some test TLS certificates out to the cfg dir + serverName := "server.dc1.consul" + signer, _, err := tlsutil.GeneratePrivateKey() + require.NoError(t, err) + + ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + require.NoError(t, err) + + cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + + certFile := filepath.Join(certsDir, "cert.pem") + caFile := filepath.Join(certsDir, "cacert.pem") + keyFile := filepath.Join(certsDir, "key.pem") + + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) + + // generate a gossip key + gossipKey := make([]byte, 32) + n, err := rand.Read(gossipKey) + require.NoError(t, err) + require.Equal(t, 32, n) + gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey) + + hclConfig := TestACLConfigWithParams(nil) + + configFile := testutil.TempDir(t, "config") + "/config.hcl" + require.NoError(t, ioutil.WriteFile(configFile, []byte(` + encrypt = "`+gossipKeyEncoded+`" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "`+caFile+`" + cert_file = "`+certFile+`" + key_file = "`+keyFile+`" + connect { enabled = true } + auto_reload_config = true + `), 0600)) + + coalesceInterval := 100 * time.Millisecond + testAgent := TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}, Config: &config.RuntimeConfig{ + AutoReloadConfigCoalesceInterval: coalesceInterval, + }} + srv := StartTestAgent(t, testAgent) + defer srv.Shutdown() + + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) + + cert1Pub := srv.tlsConfigurator.Cert().Certificate + cert1Key := 
srv.tlsConfigurator.Cert().PrivateKey + + certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{ + Signer: signer, + CA: ca, + Name: "Test Cert Name", + Days: 365, + DNSNames: []string{serverName}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }) + require.NoError(t, err) + certFileNew := filepath.Join(certsDir, "cert_new.pem") + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(` + encrypt = "`+gossipKeyEncoded+`" + encrypt_verify_incoming = true + encrypt_verify_outgoing = true + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + ca_file = "`+caFile+`" + cert_file = "`+certFileNew+`" + key_file = "`+keyFile+`" + connect { enabled = true } + auto_reload_config = true + `), 0600)) + + // cert should not change as we did not update the associated key + time.Sleep(coalesceInterval * 2) + retry.Run(t, func(r *retry.R) { + cert := srv.tlsConfigurator.Cert() + require.NotNil(r, cert) + require.Equal(r, cert1Pub, cert.Certificate) + require.Equal(r, cert1Key, cert.PrivateKey) + }) + + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) + + // cert should change as we did not update the associated key + time.Sleep(coalesceInterval * 2) + retry.Run(t, func(r *retry.R) { + require.NotEqual(r, cert1Pub, srv.tlsConfigurator.Cert().Certificate) + require.NotEqual(r, cert1Key, srv.tlsConfigurator.Cert().PrivateKey) + }) + +} diff --git a/agent/config/builder.go b/agent/config/builder.go index b3bdc46f3..d8a4ac042 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1004,64 +1004,65 @@ func (b *builder) build() (rt RuntimeConfig, err error) { LogRotateBytes: intVal(c.LogRotateBytes), LogRotateMaxFiles: intVal(c.LogRotateMaxFiles), }, - MaxQueryTime: b.durationVal("max_query_time", c.MaxQueryTime), - NodeID: types.NodeID(stringVal(c.NodeID)), - NodeMeta: c.NodeMeta, - NodeName: b.nodeName(c.NodeName), - ReadReplica: boolVal(c.ReadReplica), - PidFile: stringVal(c.PidFile), - PrimaryDatacenter: primaryDatacenter, - PrimaryGateways: b.expandAllOptionalAddrs("primary_gateways", c.PrimaryGateways), - PrimaryGatewaysInterval: b.durationVal("primary_gateways_interval", c.PrimaryGatewaysInterval), - RPCAdvertiseAddr: rpcAdvertiseAddr, - RPCBindAddr: rpcBindAddr, - RPCHandshakeTimeout: b.durationVal("limits.rpc_handshake_timeout", c.Limits.RPCHandshakeTimeout), - RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout), - RPCMaxBurst: intVal(c.Limits.RPCMaxBurst), - RPCMaxConnsPerClient: intVal(c.Limits.RPCMaxConnsPerClient), - RPCProtocol: intVal(c.RPCProtocol), - RPCRateLimit: rate.Limit(float64Val(c.Limits.RPCRate)), - RPCConfig: consul.RPCConfig{EnableStreaming: boolValWithDefault(c.RPC.EnableStreaming, serverMode)}, - RaftProtocol: intVal(c.RaftProtocol), - RaftSnapshotThreshold: intVal(c.RaftSnapshotThreshold), - RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval), - RaftTrailingLogs: intVal(c.RaftTrailingLogs), - ReconnectTimeoutLAN: b.durationVal("reconnect_timeout", c.ReconnectTimeoutLAN), - ReconnectTimeoutWAN: b.durationVal("reconnect_timeout_wan", c.ReconnectTimeoutWAN), - RejoinAfterLeave: boolVal(c.RejoinAfterLeave), - RetryJoinIntervalLAN: b.durationVal("retry_interval", c.RetryJoinIntervalLAN), - RetryJoinIntervalWAN: b.durationVal("retry_interval_wan", c.RetryJoinIntervalWAN), - RetryJoinLAN: 
b.expandAllOptionalAddrs("retry_join", c.RetryJoinLAN), - RetryJoinMaxAttemptsLAN: intVal(c.RetryJoinMaxAttemptsLAN), - RetryJoinMaxAttemptsWAN: intVal(c.RetryJoinMaxAttemptsWAN), - RetryJoinWAN: b.expandAllOptionalAddrs("retry_join_wan", c.RetryJoinWAN), - SegmentName: stringVal(c.SegmentName), - Segments: segments, - SegmentLimit: intVal(c.SegmentLimit), - SerfAdvertiseAddrLAN: serfAdvertiseAddrLAN, - SerfAdvertiseAddrWAN: serfAdvertiseAddrWAN, - SerfAllowedCIDRsLAN: serfAllowedCIDRSLAN, - SerfAllowedCIDRsWAN: serfAllowedCIDRSWAN, - SerfBindAddrLAN: serfBindAddrLAN, - SerfBindAddrWAN: serfBindAddrWAN, - SerfPortLAN: serfPortLAN, - SerfPortWAN: serfPortWAN, - ServerMode: serverMode, - ServerName: stringVal(c.ServerName), - ServerPort: serverPort, - Services: services, - SessionTTLMin: b.durationVal("session_ttl_min", c.SessionTTLMin), - SkipLeaveOnInt: skipLeaveOnInt, - StartJoinAddrsLAN: b.expandAllOptionalAddrs("start_join", c.StartJoinAddrsLAN), - StartJoinAddrsWAN: b.expandAllOptionalAddrs("start_join_wan", c.StartJoinAddrsWAN), - TaggedAddresses: c.TaggedAddresses, - TranslateWANAddrs: boolVal(c.TranslateWANAddrs), - TxnMaxReqLen: uint64Val(c.Limits.TxnMaxReqLen), - UIConfig: b.uiConfigVal(c.UIConfig), - UnixSocketGroup: stringVal(c.UnixSocket.Group), - UnixSocketMode: stringVal(c.UnixSocket.Mode), - UnixSocketUser: stringVal(c.UnixSocket.User), - Watches: c.Watches, + MaxQueryTime: b.durationVal("max_query_time", c.MaxQueryTime), + NodeID: types.NodeID(stringVal(c.NodeID)), + NodeMeta: c.NodeMeta, + NodeName: b.nodeName(c.NodeName), + ReadReplica: boolVal(c.ReadReplica), + PidFile: stringVal(c.PidFile), + PrimaryDatacenter: primaryDatacenter, + PrimaryGateways: b.expandAllOptionalAddrs("primary_gateways", c.PrimaryGateways), + PrimaryGatewaysInterval: b.durationVal("primary_gateways_interval", c.PrimaryGatewaysInterval), + RPCAdvertiseAddr: rpcAdvertiseAddr, + RPCBindAddr: rpcBindAddr, + RPCHandshakeTimeout: b.durationVal("limits.rpc_handshake_timeout", c.Limits.RPCHandshakeTimeout), + RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout), + RPCMaxBurst: intVal(c.Limits.RPCMaxBurst), + RPCMaxConnsPerClient: intVal(c.Limits.RPCMaxConnsPerClient), + RPCProtocol: intVal(c.RPCProtocol), + RPCRateLimit: rate.Limit(float64Val(c.Limits.RPCRate)), + RPCConfig: consul.RPCConfig{EnableStreaming: boolValWithDefault(c.RPC.EnableStreaming, serverMode)}, + RaftProtocol: intVal(c.RaftProtocol), + RaftSnapshotThreshold: intVal(c.RaftSnapshotThreshold), + RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval), + RaftTrailingLogs: intVal(c.RaftTrailingLogs), + ReconnectTimeoutLAN: b.durationVal("reconnect_timeout", c.ReconnectTimeoutLAN), + ReconnectTimeoutWAN: b.durationVal("reconnect_timeout_wan", c.ReconnectTimeoutWAN), + RejoinAfterLeave: boolVal(c.RejoinAfterLeave), + RetryJoinIntervalLAN: b.durationVal("retry_interval", c.RetryJoinIntervalLAN), + RetryJoinIntervalWAN: b.durationVal("retry_interval_wan", c.RetryJoinIntervalWAN), + RetryJoinLAN: b.expandAllOptionalAddrs("retry_join", c.RetryJoinLAN), + RetryJoinMaxAttemptsLAN: intVal(c.RetryJoinMaxAttemptsLAN), + RetryJoinMaxAttemptsWAN: intVal(c.RetryJoinMaxAttemptsWAN), + RetryJoinWAN: b.expandAllOptionalAddrs("retry_join_wan", c.RetryJoinWAN), + SegmentName: stringVal(c.SegmentName), + Segments: segments, + SegmentLimit: intVal(c.SegmentLimit), + SerfAdvertiseAddrLAN: serfAdvertiseAddrLAN, + SerfAdvertiseAddrWAN: serfAdvertiseAddrWAN, + SerfAllowedCIDRsLAN: 
serfAllowedCIDRSLAN, + SerfAllowedCIDRsWAN: serfAllowedCIDRSWAN, + SerfBindAddrLAN: serfBindAddrLAN, + SerfBindAddrWAN: serfBindAddrWAN, + SerfPortLAN: serfPortLAN, + SerfPortWAN: serfPortWAN, + ServerMode: serverMode, + ServerName: stringVal(c.ServerName), + ServerPort: serverPort, + Services: services, + SessionTTLMin: b.durationVal("session_ttl_min", c.SessionTTLMin), + SkipLeaveOnInt: skipLeaveOnInt, + StartJoinAddrsLAN: b.expandAllOptionalAddrs("start_join", c.StartJoinAddrsLAN), + StartJoinAddrsWAN: b.expandAllOptionalAddrs("start_join_wan", c.StartJoinAddrsWAN), + TaggedAddresses: c.TaggedAddresses, + TranslateWANAddrs: boolVal(c.TranslateWANAddrs), + TxnMaxReqLen: uint64Val(c.Limits.TxnMaxReqLen), + UIConfig: b.uiConfigVal(c.UIConfig), + UnixSocketGroup: stringVal(c.UnixSocket.Group), + UnixSocketMode: stringVal(c.UnixSocket.Mode), + UnixSocketUser: stringVal(c.UnixSocket.User), + Watches: c.Watches, + AutoReloadConfigCoalesceInterval: 1 * time.Second, } rt.TLS, err = b.buildTLSConfig(rt, c.TLS) diff --git a/agent/config/file_watcher.go b/agent/config/file_watcher.go index d85abca4b..d62d19035 100644 --- a/agent/config/file_watcher.go +++ b/agent/config/file_watcher.go @@ -44,7 +44,7 @@ type watchedFile struct { } type FileWatcherEvent struct { - Filename string + Filenames []string } //NewFileWatcher create a file watcher that will watch all the files/folders from configFiles @@ -213,7 +213,7 @@ func (w *fileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) err if isCreateEvent(event) || isWriteEvent(event) || isRenameEvent(event) { w.logger.Trace("call the handler", "filename", event.Name, "OP", event.Op) select { - case w.eventsCh <- &FileWatcherEvent{Filename: filename}: + case w.eventsCh <- &FileWatcherEvent{Filenames: []string{filename}}: case <-ctx.Done(): return ctx.Err() } @@ -265,7 +265,7 @@ func (w *fileWatcher) reconcile(ctx context.Context) { w.logger.Trace("call the handler", "filename", filename, "old modTime", configFile.modTime, "new modTime", newModTime) configFile.modTime = newModTime select { - case w.eventsCh <- &FileWatcherEvent{Filename: filename}: + case w.eventsCh <- &FileWatcherEvent{Filenames: []string{filename}}: case <-ctx.Done(): return } diff --git a/agent/config/file_watcher_test.go b/agent/config/file_watcher_test.go index 064729c53..52abb1328 100644 --- a/agent/config/file_watcher_test.go +++ b/agent/config/file_watcher_test.go @@ -64,6 +64,23 @@ func TestWatcherAddRemove(t *testing.T) { } +func TestWatcherReplace(t *testing.T) { + var filepaths []string + wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) + w := wi.(*fileWatcher) + require.NoError(t, err) + file1 := createTempConfigFile(t, "temp_config1") + err = w.Add(file1) + require.NoError(t, err) + file2 := createTempConfigFile(t, "temp_config2") + err = w.Replace(file1, file2) + require.NoError(t, err) + _, ok := w.configFiles[file1] + require.False(t, ok) + _, ok = w.configFiles[file2] + require.True(t, ok) +} + func TestWatcherAddWhileRunning(t *testing.T) { var filepaths []string wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{})) @@ -364,8 +381,8 @@ func TestEventWatcherMoveSoftLink(t *testing.T) { func assertEvent(name string, watcherCh chan *FileWatcherEvent, timeout time.Duration) error { select { case ev := <-watcherCh: - if ev.Filename != name && !strings.Contains(ev.Filename, name) { - return fmt.Errorf("filename do not match %s %s", ev.Filename, name) + if ev.Filenames[0] != name && !strings.Contains(ev.Filenames[0], name) { + 
return fmt.Errorf("filename do not match %s %s", ev.Filenames[0], name) } return nil case <-time.After(timeout): diff --git a/agent/config/ratelimited_file_watcher.go b/agent/config/ratelimited_file_watcher.go new file mode 100644 index 000000000..a47f9733d --- /dev/null +++ b/agent/config/ratelimited_file_watcher.go @@ -0,0 +1,90 @@ +package config + +import ( + "context" + "time" + + "github.com/hashicorp/go-hclog" +) + +type rateLimitedFileWatcher struct { + watcher Watcher + eventCh chan *FileWatcherEvent + coalesceInterval time.Duration +} + +func (r *rateLimitedFileWatcher) Start(ctx context.Context) { + r.watcher.Start(ctx) + r.coalesceTimer(ctx, r.watcher.EventsCh(), r.coalesceInterval) +} + +func (r rateLimitedFileWatcher) Stop() error { + return r.watcher.Stop() +} + +func (r rateLimitedFileWatcher) Add(filename string) error { + return r.watcher.Add(filename) +} + +func (r rateLimitedFileWatcher) Remove(filename string) { + r.watcher.Remove(filename) +} + +func (r rateLimitedFileWatcher) Replace(oldFile, newFile string) error { + return r.watcher.Replace(oldFile, newFile) +} + +func (r rateLimitedFileWatcher) EventsCh() chan *FileWatcherEvent { + return r.eventCh +} + +func NewRateLimitedFileWatcher(configFiles []string, logger hclog.Logger, coalesceInterval time.Duration) (Watcher, error) { + + watcher, err := NewFileWatcher(configFiles, logger) + if err != nil { + return nil, err + } + return &rateLimitedFileWatcher{ + watcher: watcher, + coalesceInterval: coalesceInterval, + eventCh: make(chan *FileWatcherEvent), + }, nil +} + +func (r rateLimitedFileWatcher) coalesceTimer(ctx context.Context, inputCh chan *FileWatcherEvent, coalesceDuration time.Duration) { + var ( + coalesceTimer *time.Timer + sendCh = make(chan struct{}) + fileWatcherEvents []string + ) + + go func() { + for { + select { + case event, ok := <-inputCh: + if !ok { + if len(fileWatcherEvents) > 0 { + r.eventCh <- &FileWatcherEvent{Filenames: fileWatcherEvents} + } + close(r.eventCh) + return + } + fileWatcherEvents = append(fileWatcherEvents, event.Filenames...) + if coalesceTimer == nil { + coalesceTimer = time.AfterFunc(coalesceDuration, func() { + // This runs in another goroutine so we can't just do the send + // directly here as access to fileWatcherEvents is racy. Instead, + // signal the main loop above. 
+ sendCh <- struct{}{} + }) + } + case <-sendCh: + coalesceTimer = nil + r.eventCh <- &FileWatcherEvent{Filenames: fileWatcherEvents} + fileWatcherEvents = make([]string, 0) + case <-ctx.Done(): + return + } + } + }() +} diff --git a/agent/config/ratelimited_file_watcher_test.go b/agent/config/ratelimited_file_watcher_test.go new file mode 100644 index 000000000..ee1ecb8bb --- /dev/null +++ b/agent/config/ratelimited_file_watcher_test.go @@ -0,0 +1,91 @@ +package config + +import ( + "context" + "os" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" +) + +func TestNewRateLimitedWatcher(t *testing.T) { + w, err := NewRateLimitedFileWatcher([]string{}, hclog.New(&hclog.LoggerOptions{}), 1*time.Nanosecond) + require.NoError(t, err) + require.NotNil(t, w) +} + +func TestRateLimitedWatcherRenameEvent(t *testing.T) { + + fileTmp := createTempConfigFile(t, "temp_config3") + filepaths := []string{createTempConfigFile(t, "temp_config1"), createTempConfigFile(t, "temp_config2")} + w, err := NewRateLimitedFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{}), 1*time.Nanosecond) + + require.NoError(t, err) + w.Start(context.Background()) + defer func() { + _ = w.Stop() + }() + + require.NoError(t, err) + err = os.Rename(fileTmp, filepaths[0]) + time.Sleep(timeoutDuration + 50*time.Millisecond) + require.NoError(t, err) + require.NoError(t, assertEvent(filepaths[0], w.EventsCh(), defaultTimeout)) + // make sure we consume all events + _ = assertEvent(filepaths[0], w.EventsCh(), defaultTimeout) +} + +func TestRateLimitedWatcherAddNotExist(t *testing.T) { + + file := testutil.TempFile(t, "temp_config") + filename := file.Name() + randomStr(16) + w, err := NewRateLimitedFileWatcher([]string{filename}, hclog.New(&hclog.LoggerOptions{}), 1*time.Nanosecond) + require.Error(t, err, "no such file or directory") + require.Nil(t, w) +} + +func TestEventRateLimitedWatcherWrite(t *testing.T) { + + file := testutil.TempFile(t, "temp_config") + _, err := file.WriteString("test config") + require.NoError(t, err) + err = file.Sync() + require.NoError(t, err) + w, err := NewRateLimitedFileWatcher([]string{file.Name()}, hclog.New(&hclog.LoggerOptions{}), 1*time.Nanosecond) + require.NoError(t, err) + w.Start(context.Background()) + defer func() { + _ = w.Stop() + }() + + _, err = file.WriteString("test config 2") + require.NoError(t, err) + err = file.Sync() + require.NoError(t, err) + require.NoError(t, assertEvent(file.Name(), w.EventsCh(), defaultTimeout)) +} + +func TestEventRateLimitedWatcherMove(t *testing.T) { + + filepath := createTempConfigFile(t, "temp_config1") + + w, err := NewRateLimitedFileWatcher([]string{filepath}, hclog.New(&hclog.LoggerOptions{}), 1*time.Second) + require.NoError(t, err) + w.Start(context.Background()) + defer func() { + _ = w.Stop() + }() + + for i := 0; i < 10; i++ { + filepath2 := createTempConfigFile(t, "temp_config2") + err = os.Rename(filepath2, filepath) + time.Sleep(timeoutDuration + 50*time.Millisecond) + require.NoError(t, err) + } + require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout)) + require.Error(t, assertEvent(filepath, w.EventsCh(), defaultTimeout), "expected timeout error") +} diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 99c51f335..442393ba1 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -1399,6 +1399,9 @@ type RuntimeConfig struct { // Watches []map[string]interface{} + // AutoReloadConfigCoalesceInterval 
Coalesce Interval for auto reload config + AutoReloadConfigCoalesceInterval time.Duration + EnterpriseRuntimeConfig } diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 408241e40..eb9d03d2b 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -6390,7 +6390,8 @@ func TestLoad_FullConfig(t *testing.T) { "args": []interface{}{"dltjDJ2a", "flEa7C2d"}, }, }, - RaftBoltDBConfig: consul.RaftBoltDBConfig{NoFreelistSync: true}, + RaftBoltDBConfig: consul.RaftBoltDBConfig{NoFreelistSync: true}, + AutoReloadConfigCoalesceInterval: 1 * time.Second, } entFullRuntimeConfig(expected) diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 5356761e4..4fafb520b 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -64,6 +64,7 @@ "AutoEncryptIPSAN": [], "AutoEncryptTLS": false, "AutoReloadConfig": false, + "AutoReloadConfigCoalesceInterval": "0s", "AutopilotCleanupDeadServers": false, "AutopilotDisableUpgradeMigration": false, "AutopilotLastContactThreshold": "0s", @@ -456,4 +457,4 @@ "Version": "", "VersionPrerelease": "", "Watches": [] -} +} \ No newline at end of file diff --git a/agent/testagent.go b/agent/testagent.go index 3910a78d9..11ca9a518 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -221,6 +221,9 @@ func (a *TestAgent) Start(t *testing.T) error { bd.MetricsHandler = metrics.NewInmemSink(1*time.Second, time.Minute) } + if a.Config != nil && bd.RuntimeConfig.AutoReloadConfigCoalesceInterval == 0 { + bd.RuntimeConfig.AutoReloadConfigCoalesceInterval = a.Config.AutoReloadConfigCoalesceInterval + } a.Config = bd.RuntimeConfig agent, err := New(bd) From a6e7195bdf48f9b99dd6182c0448fa9f5c635260 Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Mon, 4 Apr 2022 12:01:38 -0400 Subject: [PATCH 069/785] documentation for config auto reload feature (#12548) * add config watcher to the config package * add logging to watcher * add test and refactor to add WatcherEvent. * add all API calls and fix a bug with recreated files * add tests for watcher * remove the unnecessary use of context * Add debug log and a test for file rename * use inode to detect if the file is recreated/replaced and only listen to create events. 
* tidy ups (#1535) * tidy ups * Add tests for inode reconcile * fix linux vs windows syscall * fix linux vs windows syscall * fix windows compile error * increase timeout * use ctime ID * remove remove/creation test as it's a use case that fail in linux * fix linux/windows to use Ino/CreationTime * fix the watcher to only overwrite current file id * fix linter error * fix remove/create test * set reconcile loop to 200 Milliseconds * fix watcher to not trigger event on remove, add more tests * on a remove event try to add the file back to the watcher and trigger the handler if success * fix race condition * fix flaky test * fix race conditions * set level to info * fix when file is removed and get an event for it after * fix to trigger handler when we get a remove but re-add fail * fix error message * add tests for directory watch and fixes * detect if a file is a symlink and return an error on Add * rename Watcher to FileWatcher and remove symlink deref * add fsnotify@v1.5.1 * fix go mod * do not reset timer on errors, rename OS specific files * rename New func * events trigger on write and rename * add missing test * fix flaking tests * fix flaky test * check reconcile when removed * delete invalid file * fix test to create files with different mod time. * back date file instead of sleeping * add watching file in agent command. * fix watcher call to use new API * add configuration and stop watcher when server stop * add certs as watched files * move FileWatcher to the agent start instead of the command code * stop watcher before replacing it * save watched files in agent * add add and remove interfaces to the file watcher * fix remove to not return an error * use `Add` and `Remove` to update certs files * fix tests * close events channel on the file watcher even when the context is done * extract `NotAutoReloadableRuntimeConfig` is a separate struct * fix linter errors * add Ca configs and outgoing verify to the not auto reloadable config * add some logs and fix to use background context * add tests to auto-config reload * remove stale test * add tests to changes to config files * add check to see if old cert files still trigger updates * rename `NotAutoReloadableRuntimeConfig` to `StaticRuntimeConfig` * fix to re add both key and cert file. Add test to cover this case. * review suggestion Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * add check to static runtime config changes * fix test * add changelog file * fix review comments * Apply suggestions from code review Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * update flag description Co-authored-by: FFMMM * fix compilation error * add static runtime config support * fix test * fix review comments * fix log test * Update .changelog/12329.txt Co-authored-by: Dan Upton * transfer tests to runtime_test.go * fix filewatcher Replace to not deadlock. * avoid having lingering locks Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * split ReloadConfig func * fix warning message Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * convert `FileWatcher` into an interface * fix compilation errors * fix tests * extract func for adding and removing files * add a coalesceTimer with a very small timer * extract coaelsce Timer and add a shim for testing * add tests to coalesceTimer fix to send remaining events * set `coalesceTimer` to 1 Second * support symlink, fix a nil deref. 
* fix compile error * fix compile error * refactor file watcher rate limiting to be a Watcher implementation * fix linter issue * fix runtime config * fix runtime test * fix flaky tests * fix compile error * Apply suggestions from code review Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> * fix agent New to return an error if File watcher New return an error * add a coalesceTimer with a very small timer * extract coaelsce Timer and add a shim for testing * set `coalesceTimer` to 1 Second * add flag description to agent command docs * fix link * add Static runtime config docs * fix links and alignment * fix typo * Revert "add a coalesceTimer with a very small timer" This reverts commit d9db2fcb8213a81ac761f04b458091409c5fb1ee. * Revert "extract coaelsce Timer and add a shim for testing" This reverts commit 0ab86012a415ffeb452acf58e52c9f37c9f49254. * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Ashwin Venkatesh Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: FFMMM Co-authored-by: Daniel Upton Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- website/content/docs/agent/options.mdx | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/website/content/docs/agent/options.mdx b/website/content/docs/agent/options.mdx index 696d8bc88..19cc786e9 100644 --- a/website/content/docs/agent/options.mdx +++ b/website/content/docs/agent/options.mdx @@ -460,7 +460,10 @@ The agent configuration options below are all specified on the command-line. "trace", "debug", "info", "warn", and "err". You can always connect to an agent via [`consul monitor`](/commands/monitor) and use any log level. Also, the log level can be changed during a config reload. - +- `-auto-reload-config` ((#\_auto_reload_config)) - This flag set Consul to automatically reload + [Reloadable Configuration](#reloadable-configuration) when configuration files change. + Consul will also watch certificate and key files set in `cert_file` and `key_file` and reload the configuration + if updated. - `-log-json` ((#\_log_json)) - This flag enables the agent to output logs in a JSON format. By default this is false. @@ -1833,6 +1836,8 @@ There are also a number of common configuration options supported by all provide - `log_level` Equivalent to the [`-log-level` command-line flag](#_log_level). +- `auto-reload-config` Equivalent to the [`-auto-reload-config` command-line flag](#_auto_reload_config). + - `log_json` Equivalent to the [`-log-json` command-line flag](#_log_json). - `default_query_time` Equivalent to the [`-default-query-time` command-line flag](#_default_query_time). @@ -2771,6 +2776,19 @@ items which are reloaded include: - Services - TLS Configuration - Please be aware that this is currently limited to reload a configuration that is already TLS enabled. You cannot enable or disable TLS only with reloading. 
+  - To avoid a potential security issue, the following TLS configuration parameters do not automatically reload when [-auto-reload-config](#_auto_reload_config) is enabled:
+    - [encrypt_verify_incoming](#encrypt_verify_incoming)
+    - [verify_incoming](#verify_incoming)
+    - [verify_incoming_rpc](#verify_incoming_rpc)
+    - [verify_incoming_https](#verify_incoming_https)
+    - [verify_outgoing](#verify_outgoing)
+    - [verify_server_hostname](#verify_server_hostname)
+    - [ca_file](#ca_file)
+    - [ca_path](#ca_path)
+
+    If any of those configurations are changed while [-auto-reload-config](#_auto_reload_config) is enabled,
+    Consul will issue the following warning, `Static Runtime config has changed and need a manual config reload to be applied`.
+    You must manually issue the `consul reload` command or send a `SIGHUP` to the Consul process to reload the new values.

 - Watches

From 7bd9dec6d02e46b1d7f2ec07a4102457dda3cbec Mon Sep 17 00:00:00 2001
From: Karl Cardenas
Date: Mon, 4 Apr 2022 09:47:15 -0700
Subject: [PATCH 070/785] docs: fixes broken url in acl overview page

---
 website/content/docs/security/acl/index.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/content/docs/security/acl/index.mdx b/website/content/docs/security/acl/index.mdx
index d0676d14d..5ef4544b7 100644
--- a/website/content/docs/security/acl/index.mdx
+++ b/website/content/docs/security/acl/index.mdx
@@ -54,7 +54,7 @@ In addition to the rules that authenticate access to services, several attribute
 Refer to the following topics for details about policies:

-- [Policies](/docs/security/acl/policies)
+- [Policies](/docs/security/acl/acl-policies)
 - [ACL policy command line](/commands/acl/policy)
 - [ACL policy API](/api-docs/acl/policies)

From 9224181958d84f42d176a0bd915dee94a78de3ac Mon Sep 17 00:00:00 2001
From: John Murret
Date: Mon, 4 Apr 2022 14:36:19 -0600
Subject: [PATCH 071/785] Updating helm docs with additionalVault and ACLs refactor functionality. (#12669)

* Updating helm docs with additionalVault and ACLs refactor functionality.

* PR Feedback corrections.

- Fix indentation.
- Fix description of secretName and secretKey to be consistent
- Change description of manageACLsRole to be more clear.
- Make the added vault role field descriptions consistent

* PR Feedback - correcting description for adminPartitionsRole

* Fixing broken shell sessions

* Fixing broken shell sessions by changing shell-session to be closer to comment marker

---
 website/content/docs/k8s/helm.mdx | 182 +++++++++++++++++++-----------
 1 file changed, 116 insertions(+), 66 deletions(-)

diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx
index 009e43348..81164cac3 100644
--- a/website/content/docs/k8s/helm.mdx
+++ b/website/content/docs/k8s/helm.mdx
@@ -151,35 +151,56 @@ Use these links to navigate to a particular top-level stanza.

   - `enabled` ((#v-global-secretsbackend-vault-enabled)) (`boolean: false`) - Enabling the Vault secrets backend will replace Kubernetes secrets with referenced Vault secrets.

   - `consulServerRole` ((#v-global-secretsbackend-vault-consulserverrole)) (`string: ""`) - The Vault role for the Consul server.
- The role must be connected to the Consul server's service account and - have a policy with read capabilities for the following secrets: - - gossip encryption key defined by `global.gossipEncryption.secretName` - - certificate issue path defined by `server.serverCert.secretName` - - CA certificate defined by `global.tls.caCert.secretName` - - replication token defined by `global.acls.replicationToken.secretName` if `global.federation.enabled` is `true` + The role must be connected to the Consul server's service account. + The role must also have a policy with read capabilities for the following secrets: + - gossip encryption key defined by the `global.gossipEncryption.secretName` value + - certificate issue path defined by the `server.serverCert.secretName` value + - CA certificate defined by the `global.tls.caCert.secretName` value + - replication token defined by the `global.acls.replicationToken.secretName` value if `global.federation.enabled` is `true` To discover the service account name of the Consul server, run - ```shell-session - $ helm template --show-only templates/server-serviceaccount.yaml hashicorp/consul - ``` + ```shell-session + $ helm template --show-only templates/server-serviceaccount.yaml hashicorp/consul + ``` and check the name of `metadata.name`. - `consulClientRole` ((#v-global-secretsbackend-vault-consulclientrole)) (`string: ""`) - The Vault role for the Consul client. - The role must be connected to the Consul client's service account and - have a policy with read capabilities for the following secrets: - - gossip encryption key defined by `global.gossipEncryption.secretName`. + The role must be connected to the Consul client's service account. + The role must also have a policy with read capabilities for the gossip encryption + key defined by the `global.gossipEncryption.secretName` value. To discover the service account name of the Consul client, run - ```shell-session - $ helm template --show-only templates/client-serviceaccount.yaml charts/consul - ``` + ```shell-session + $ helm template --show-only templates/client-serviceaccount.yaml hashicorp/consul + ``` and check the name of `metadata.name`. - - `manageSystemACLsRole` ((#v-global-secretsbackend-vault-managesystemaclsrole)) (`string: ""`) - A Vault role to allow Kubernetes job that manages ACLs for this Helm chart (`server-acl-init`) - to read and update Vault secrets for the Consul's bootstrap and replication tokens. - This role must be bound the `server-acl-init`'s service account. + - `consulSnapshotAgentRole` ((#v-global-secretsbackend-vault-consulsnapshotagentrole)) (`string: ""`) - The Vault role for the Consul client snapshot agent. + The role must be connected to the Consul client snapshot agent's service account. + The role must also have a policy with read capabilities for the snapshot agent config + defined by the `client.snapshotAgent.configSecret.secretName` value. + To discover the service account name of the Consul client, run + ```shell-session + $ helm template --show-only templates/client-snapshot-agent-serviceaccount.yaml --set client.snapshotAgent.enabled=true hashicorp/consul + ``` + and check the name of `metadata.name`. + + - `manageSystemACLsRole` ((#v-global-secretsbackend-vault-managesystemaclsrole)) (`string: ""`) - A Vault role for the Consul `server-acl-init` job, which manages setting ACLs so that clients and components can obtain ACL tokens. + The role must be connected to the `server-acl-init` job's service account. 
+    The role must also have a policy with read and write capabilities for the bootstrap, replication or partition tokens.
     To discover the service account name of the `server-acl-init` job, run
-    ```shell-session
-    $ helm template --show-only templates/server-acl-init-serviceaccount.yaml charts/consul
-    ```
+    ```shell-session
+    $ helm template --show-only templates/server-acl-init-serviceaccount.yaml \
+    --set global.acls.manageSystemACLs=true hashicorp/consul
+    ```
+    and check the name of `metadata.name`.
+
+  - `adminPartitionsRole` ((#v-global-secretsbackend-vault-adminpartitionsrole)) (`string: ""`) - A Vault role that allows the Consul `partition-init` job to read a Vault secret for the partition ACL token.
+    The `partition-init` job bootstraps Admin Partitions on Consul servers.
+    This role must be bound to the `partition-init` job's service account.
+    To discover the service account name of the `partition-init` job, run with Helm values for the client cluster:
+    ```shell-session
+    $ helm template --show-only templates/partition-init-serviceaccount.yaml -f client-cluster-values.yaml hashicorp/consul
+    ```
     and check the name of `metadata.name`.

   - `agentAnnotations` ((#v-global-secretsbackend-vault-agentannotations)) (`string: null`) - This value defines additional annotations for
@@ -200,10 +221,10 @@ Use these links to navigate to a particular top-level stanza.

   - `ca` ((#v-global-secretsbackend-vault-ca)) - Configuration for Vault server CA certificate. This certificate will be mounted
     to any pod where Vault agent needs to run.

-    - `secretName` ((#v-global-secretsbackend-vault-ca-secretname)) (`string: ""`) - secretName is the name of the Kubernetes secret that holds the Vault CA certificate.
+    - `secretName` ((#v-global-secretsbackend-vault-ca-secretname)) (`string: ""`) - The name of the Kubernetes or Vault secret that holds the Vault CA certificate.
      A Kubernetes secret must be in the same namespace that Consul is installed into.

-    - `secretKey` ((#v-global-secretsbackend-vault-ca-secretkey)) (`string: ""`) - secretKey is the key within the Kubernetes secret that holds the Vault CA certificate.
+    - `secretKey` ((#v-global-secretsbackend-vault-ca-secretkey)) (`string: ""`) - The key within the Kubernetes or Vault secret that holds the Vault CA certificate.

   - `connectCA` ((#v-global-secretsbackend-vault-connectca)) - Configuration for the Vault Connect CA provider.
     The provider will be configured to use the Vault Kubernetes auth method
@@ -261,12 +282,12 @@ Use these links to navigate to a particular top-level stanza.
     `gossipEncryption.secretName="consul/data/secrets/gossip"`
     `gossipEncryption.secretKey="key"`

-  - `autoGenerate` ((#v-global-gossipencryption-autogenerate)) (`boolean: false`) - Automatically generate a gossip encryption key and save it to a Kubernetes secret.
+  - `autoGenerate` ((#v-global-gossipencryption-autogenerate)) (`boolean: false`) - Automatically generate a gossip encryption key and save it to a Kubernetes or Vault secret.

-  - `secretName` ((#v-global-gossipencryption-secretname)) (`string: ""`) - secretName is the name of the Kubernetes secret or Vault secret path that holds the gossip
+  - `secretName` ((#v-global-gossipencryption-secretname)) (`string: ""`) - The name of the Kubernetes secret or Vault secret path that holds the gossip
     encryption key. A Kubernetes secret must be in the same namespace that Consul is installed into.
- - `secretKey` ((#v-global-gossipencryption-secretkey)) (`string: ""`) - secretKey is the key within the Kubernetes secret or Vault secret key that holds the gossip + - `secretKey` ((#v-global-gossipencryption-secretkey)) (`string: ""`) - The key within the Kubernetes secret or Vault secret key that holds the gossip encryption key. - `recursors` ((#v-global-recursors)) (`array: []`) - A list of addresses of upstream DNS servers that are used to recursively resolve DNS queries. @@ -294,11 +315,11 @@ Use these links to navigate to a particular top-level stanza. in the server certificate. This is useful when you need to access the Consul server(s) externally, for example, if you're using the UI. - - `verify` ((#v-global-tls-verify)) (`boolean: true`) - If true, `tls.defaults.verify_outgoing`, - `tls.internal_rpc.verify_server_hostname`, and `tls.internal_rpc.verify_incoming` will be set - to `true` for Consul servers and clients. Set this to false to incrementally roll out TLS - on an existing Consul cluster. - Please see https://consul.io/docs/k8s/operations/tls-on-existing-cluster for more details. + - `verify` ((#v-global-tls-verify)) (`boolean: true`) - If true, `verify_outgoing`, `verify_server_hostname`, + and `verify_incoming_rpc` will be set to `true` for Consul servers and clients. + Set this to false to incrementally roll out TLS on an existing Consul cluster. + Please see https://consul.io/docs/k8s/operations/tls-on-existing-cluster + for more details. - `httpsOnly` ((#v-global-tls-httpsonly)) (`boolean: true`) - If true, the Helm chart will configure Consul to disable the HTTP port on both clients and servers and to only accept HTTPS connections. @@ -317,11 +338,11 @@ Use these links to navigate to a particular top-level stanza. This will be consumed by the `global.secretsBackend.vault.consulCARole` role by all Consul components. When using Vault the secretKey is not used. - - `secretName` ((#v-global-tls-cacert-secretname)) (`string: null`) - The name of the Kubernetes secret. + - `secretName` ((#v-global-tls-cacert-secretname)) (`string: null`) - The name of the Kubernetes or Vault secret that holds the CA certificate. - - `secretKey` ((#v-global-tls-cacert-secretkey)) (`string: null`) - The key of the Kubernetes secret. + - `secretKey` ((#v-global-tls-cacert-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the CA certificate. - - `caKey` ((#v-global-tls-cakey)) - A Kubernetes secret containing the private key of the CA to use for + - `caKey` ((#v-global-tls-cakey)) - A Kubernetes or Vault secret containing the private key of the CA to use for TLS communication within the Consul cluster. If you have generated the CA yourself with the consul CLI, you could use the following command to create the secret in Kubernetes: @@ -336,9 +357,9 @@ Use these links to navigate to a particular top-level stanza. as Subject Alternative Names. In the future, we may support bringing your own server certificates. - - `secretName` ((#v-global-tls-cakey-secretname)) (`string: null`) - The name of the Kubernetes secret. + - `secretName` ((#v-global-tls-cakey-secretname)) (`string: null`) - The name of the Kubernetes or Vault secret that holds the CA key. - - `secretKey` ((#v-global-tls-cakey-secretkey)) (`string: null`) - The key of the Kubernetes secret. + - `secretKey` ((#v-global-tls-cakey-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the CA key. 
- `enableConsulNamespaces` ((#v-global-enableconsulnamespaces)) (`boolean: false`) - `enableConsulNamespaces` indicates that you are running
   Consul Enterprise v1.7+ with a valid Consul Enterprise license and would
@@ -353,14 +374,14 @@ Use these links to navigate to a particular top-level stanza.
     for all Consul and consul-k8s-control-plane components.
     This requires Consul >= 1.4.

-  - `bootstrapToken` ((#v-global-acls-bootstraptoken)) - A Kubernetes secret containing the bootstrap token to use for
+  - `bootstrapToken` ((#v-global-acls-bootstraptoken)) - A Kubernetes or Vault secret containing the bootstrap token to use for
     creating policies and tokens for all Consul and consul-k8s-control-plane components.
     If set, we will skip ACL bootstrapping of the servers and will only
     initialize ACLs for the Consul clients and consul-k8s-control-plane system components.

-    - `secretName` ((#v-global-acls-bootstraptoken-secretname)) (`string: null`) - The name of the Kubernetes secret.
+    - `secretName` ((#v-global-acls-bootstraptoken-secretname)) (`string: null`) - The name of the Kubernetes or Vault secret that holds the bootstrap token.

-    - `secretKey` ((#v-global-acls-bootstraptoken-secretkey)) (`string: null`) - The key of the Kubernetes secret.
+    - `secretKey` ((#v-global-acls-bootstraptoken-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the bootstrap token.

   - `createReplicationToken` ((#v-global-acls-createreplicationtoken)) (`boolean: false`) - If true, an ACL token will be created that can be used in secondary
     datacenters for replication. This should only be set to true in the
@@ -374,21 +395,32 @@ Use these links to navigate to a particular top-level stanza.
     and create ACL tokens and policies.
     This value is ignored if `bootstrapToken` is also set.

-    - `secretName` ((#v-global-acls-replicationtoken-secretname)) (`string: null`) - The name of the Kubernetes secret or the path of the secret in Vault.
+    - `secretName` ((#v-global-acls-replicationtoken-secretname)) (`string: null`) - The name of the Kubernetes or Vault secret that holds the replication token.

-    - `secretKey` ((#v-global-acls-replicationtoken-secretkey)) (`string: null`) - The key of the Kubernetes or Vault secret.
+    - `secretKey` ((#v-global-acls-replicationtoken-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the replication token.

-  - `enterpriseLicense` ((#v-global-enterpriselicense)) - This value refers to a Kubernetes secret that you have created
+  - `partitionToken` ((#v-global-acls-partitiontoken)) - partitionToken references a Vault secret containing the ACL token to be used in non-default partitions.
+    This value should only be provided in the default partition and only when setting
+    the `global.secretsBackend.vault.enabled` value to true.
+    Consul will use the value of the secret stored in Vault to create an ACL token in Consul with the value of the
+    secret as the secretID for the token.
+    In non-default partitions, set this secret as the `bootstrapToken`.
+
+    - `secretName` ((#v-global-acls-partitiontoken-secretname)) (`string: null`) - The name of the Vault secret that holds the partition token.
+
+    - `secretKey` ((#v-global-acls-partitiontoken-secretkey)) (`string: null`) - The key within the Vault secret that holds the partition token.
+
+  - `enterpriseLicense` ((#v-global-enterpriselicense)) - This value refers to a Kubernetes or Vault secret that you have created
     that contains your enterprise license. 
It is required if you are using an enterprise binary. Defining it here applies it to your cluster once a leader has been elected. If you are not using an enterprise image or if you plan to introduce the license key via another route, then set these fields to null. Note: the job to apply license runs on both Helm installs and upgrades. - - `secretName` ((#v-global-enterpriselicense-secretname)) (`string: null`) - secretName is the name of the Kubernetes secret or Vault secret path that holds the enterprise license. + - `secretName` ((#v-global-enterpriselicense-secretname)) (`string: null`) - The name of the Kubernetes or Vault secret that holds the enterprise license. A Kubernetes secret must be in the same namespace that Consul is installed into. - - `secretKey` ((#v-global-enterpriselicense-secretkey)) (`string: null`) - secretKey is the key within the Kubernetes secret or Vault secret key that holds the enterprise license. + - `secretKey` ((#v-global-enterpriselicense-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the enterprise license. - `enableLicenseAutoload` ((#v-global-enterpriselicense-enablelicenseautoload)) (`boolean: true`) - Manages license autoload. Required in Consul 1.10.0+, 1.9.7+ and 1.8.12+. @@ -408,11 +440,25 @@ Use these links to navigate to a particular top-level stanza. `-federation` (if setting `global.name`), otherwise `-consul-federation`. - - `primaryDatacenter` ((#v-global-federation-primarydatacenter)) (`string: ""`) - The name of the primary datacenter. + - `primaryDatacenter` ((#v-global-federation-primarydatacenter)) (`string: null`) - The name of the primary datacenter. - `primaryGateways` ((#v-global-federation-primarygateways)) (`array: []`) - A list of addresses of the primary mesh gateways in the form `:`. (e.g. ["1.1.1.1:443", "2.3.4.5:443"] + - `k8sAuthMethodHost` ((#v-global-federation-k8sauthmethodhost)) (`string: null`) - If you are setting `global.federation.enabled` to true and are in a secondary datacenter, + set `k8sAuthMethodHost` to the address of the Kubernetes API server of the secondary datacenter. + This address must be reachable from the Consul servers in the primary datacenter. + This authmethod will be used to provision ACL tokens for Consul components and is different + from the one used by the Consul Service Mesh. + Please see the Kubernetes Auth Method documentation (https://consul.io/docs/acl/auth-methods/kubernetes). + + You could retrieve this value from your `kubeconfig` by running: + + ```shell-session + $ kubectl config view \ + -o jsonpath="{.clusters[?(@.name=='')].cluster.server}" + ``` + - `metrics` ((#v-global-metrics)) - Configures metrics for Consul service mesh - `enabled` ((#v-global-metrics-enabled)) (`boolean: false`) - Configures the Helm chart’s components @@ -511,7 +557,7 @@ Use these links to navigate to a particular top-level stanza. Note: when using TLS, both the `server.serverCert` and `global.tls.caCert` which points to the CA endpoint of this PKI engine must be provided. - - `secretName` ((#v-server-servercert-secretname)) (`string: null`) - The name of the Kubernetes secret or Vault secret path containing the PEM encoded server certificate. + - `secretName` ((#v-server-servercert-secretname)) (`string: null`) - The name of the Vault secret that holds the PEM encoded server certificate. - `exposeGossipAndRPCPorts` ((#v-server-exposegossipandrpcports)) (`boolean: false`) - Exposes the servers' gossip and RPC ports as hostPorts. 
To enable a client agent outside of the k8s cluster to join the datacenter, you would need to @@ -860,7 +906,7 @@ Use these links to navigate to a particular top-level stanza. "sample/annotation2": "bar" ``` - - `resources` ((#v-client-resources)) (`map`) - Resource settings for Client agents. + - `resources` ((#v-client-resources)) (`map`) - The resource settings for Client agents. NOTE: The use of a YAML string is deprecated. Instead, set directly as a YAML map. @@ -1033,15 +1079,15 @@ Use these links to navigate to a particular top-level stanza. - `replicas` ((#v-client-snapshotagent-replicas)) (`integer: 2`) - The number of snapshot agents to run. - - `configSecret` ((#v-client-snapshotagent-configsecret)) - A Kubernetes secret that should be manually created to contain the entire + - `configSecret` ((#v-client-snapshotagent-configsecret)) - A Kubernetes or Vault secret that should be manually created to contain the entire config to be used on the snapshot agent. This is the preferred method of configuration since there are usually storage credentials present. Please see Snapshot agent config (https://consul.io/commands/snapshot/agent#config-file-options) for details. - - `secretName` ((#v-client-snapshotagent-configsecret-secretname)) (`string: null`) - The name of the Kubernetes secret. + - `secretName` ((#v-client-snapshotagent-configsecret-secretname)) (`string: null`) - The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. - - `secretKey` ((#v-client-snapshotagent-configsecret-secretkey)) (`string: null`) - The key of the Kubernetes secret. + - `secretKey` ((#v-client-snapshotagent-configsecret-secretkey)) (`string: null`) - The key within the Kubernetes secret or Vault secret key that holds the snapshot agent config. - `serviceAccount` ((#v-client-snapshotagent-serviceaccount)) @@ -1054,7 +1100,7 @@ Use these links to navigate to a particular top-level stanza. "sample/annotation2": "bar" ``` - - `resources` ((#v-client-snapshotagent-resources)) (`map`) - Resource settings for snapshot agent pods. + - `resources` ((#v-client-snapshotagent-resources)) (`map`) - The resource settings for snapshot agent pods. - `caCert` ((#v-client-snapshotagent-cacert)) (`string: null`) - Optional PEM-encoded CA certificate that will be added to the trusted system CAs. Useful if using an S3-compatible storage exposing a self-signed certificate. @@ -1326,9 +1372,9 @@ Use these links to navigate to a particular top-level stanza. an ACL token for your Consul cluster which allows the sync process the correct permissions. This is only needed if ACLs are enabled on the Consul cluster. - - `secretName` ((#v-synccatalog-aclsynctoken-secretname)) (`string: null`) - The name of the Kubernetes secret. + - `secretName` ((#v-synccatalog-aclsynctoken-secretname)) (`string: null`) - The name of the Vault secret that holds the acl sync token. - - `secretKey` ((#v-synccatalog-aclsynctoken-secretkey)) (`string: null`) - The key of the Kubernetes secret. + - `secretKey` ((#v-synccatalog-aclsynctoken-secretkey)) (`string: null`) - The key within the Vault secret that holds the acl sync. - `nodeSelector` ((#v-synccatalog-nodeselector)) (`string: null`) - This value defines `nodeSelector` (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) labels for catalog sync pod assignment, formatted as a multi-line string. @@ -1358,7 +1404,7 @@ Use these links to navigate to a particular top-level stanza. 
"sample/annotation2": "bar" ``` - - `resources` ((#v-synccatalog-resources)) (`map`) - Resource settings for sync catalog pods. + - `resources` ((#v-synccatalog-resources)) (`map`) - The resource settings for sync catalog pods. - `logLevel` ((#v-synccatalog-loglevel)) (`string: ""`) - Override global log verbosity level. One of "debug", "info", "warn", or "error". @@ -1464,7 +1510,7 @@ Use these links to navigate to a particular top-level stanza. "sample/annotation2": "bar" ``` - - `resources` ((#v-connectinject-resources)) (`map`) - Resource settings for connect inject pods. + - `resources` ((#v-connectinject-resources)) (`map`) - The resource settings for connect inject pods. - `failurePolicy` ((#v-connectinject-failurepolicy)) (`string: Fail`) - Sets the failurePolicy for the mutating webhook. By default this will cause pods not part of the consul installation to fail scheduling while the webhook is offline. This prevents a pod from skipping mutation if the webhook were to be momentarily offline. @@ -1576,9 +1622,9 @@ Use these links to navigate to a particular top-level stanza. This token needs to have `operator = "write"` privileges to be able to create Consul namespaces. - - `secretName` ((#v-connectinject-aclinjecttoken-secretname)) (`string: null`) - The name of the Kubernetes secret. + - `secretName` ((#v-connectinject-aclinjecttoken-secretname)) (`string: null`) - The name of the Vault secret that holds the ACL inject token. - - `secretKey` ((#v-connectinject-aclinjecttoken-secretkey)) (`string: null`) - The key of the Kubernetes secret. + - `secretKey` ((#v-connectinject-aclinjecttoken-secretkey)) (`string: null`) - The key within the Vault secret that holds the ACL inject token. - `sidecarProxy` ((#v-connectinject-sidecarproxy)) @@ -1603,7 +1649,7 @@ Use these links to navigate to a particular top-level stanza. - `cpu` ((#v-connectinject-sidecarproxy-resources-limits-cpu)) (`string: null`) - Recommended default: 100m - - `initContainer` ((#v-connectinject-initcontainer)) (`map`) - Resource settings for the Connect injected init container. + - `initContainer` ((#v-connectinject-initcontainer)) (`map`) - The resource settings for the Connect injected init container. ### controller @@ -1628,7 +1674,7 @@ Use these links to navigate to a particular top-level stanza. "sample/annotation2": "bar" ``` - - `resources` ((#v-controller-resources)) (`map`) - Resource settings for controller pods. + - `resources` ((#v-controller-resources)) (`map`) - The resource settings for controller pods. - `nodeSelector` ((#v-controller-nodeselector)) (`string: null`) - Optional YAML string to specify a nodeSelector config. @@ -1654,9 +1700,9 @@ Use these links to navigate to a particular top-level stanza. ``` If running Consul Enterprise, talk to your account manager for assistance. - - `secretName` ((#v-controller-acltoken-secretname)) (`string: null`) - The name of the Kubernetes secret. + - `secretName` ((#v-controller-acltoken-secretname)) (`string: null`) - The name of the Vault secret that holds the ACL token. - - `secretKey` ((#v-controller-acltoken-secretkey)) (`string: null`) - The key of the Kubernetes secret. + - `secretKey` ((#v-controller-acltoken-secretkey)) (`string: null`) - The key within the Vault secret that holds the ACL token. ### meshGateway @@ -1760,13 +1806,13 @@ Use these links to navigate to a particular top-level stanza. "sample/annotation2": "bar" ``` - - `resources` ((#v-meshgateway-resources)) (`map`) - Resource settings for mesh gateway pods. 
+ - `resources` ((#v-meshgateway-resources)) (`map`) - The resource settings for mesh gateway pods. NOTE: The use of a YAML string is deprecated. Instead, set directly as a YAML map. - - `initCopyConsulContainer` ((#v-meshgateway-initcopyconsulcontainer)) (`map`) - Resource settings for the `copy-consul-bin` init container. + - `initCopyConsulContainer` ((#v-meshgateway-initcopyconsulcontainer)) (`map`) - The resource settings for the `copy-consul-bin` init container. - - `initServiceInitContainer` ((#v-meshgateway-initserviceinitcontainer)) (`map`) - Resource settings for the `service-init` init container. + - `initServiceInitContainer` ((#v-meshgateway-initserviceinitcontainer)) (`map`) - The resource settings for the `service-init` init container. - `affinity` ((#v-meshgateway-affinity)) (`string`) - By default, we set an anti-affinity so that two gateway pods won't be on the same node. NOTE: Gateways require that Consul client agents are @@ -1846,7 +1892,7 @@ Use these links to navigate to a particular top-level stanza. - `resources` ((#v-ingressgateways-defaults-resources)) (`map`) - Resource limits for all ingress gateway pods - - `initCopyConsulContainer` ((#v-ingressgateways-defaults-initcopyconsulcontainer)) (`map`) - Resource settings for the `copy-consul-bin` init container. + - `initCopyConsulContainer` ((#v-ingressgateways-defaults-initcopyconsulcontainer)) (`map`) - The resource settings for the `copy-consul-bin` init container. - `affinity` ((#v-ingressgateways-defaults-affinity)) (`string`) - By default, we set an anti-affinity so that two of the same gateway pods won't be on the same node. NOTE: Gateways require that Consul client agents are @@ -1919,7 +1965,7 @@ Use these links to navigate to a particular top-level stanza. - `resources` ((#v-terminatinggateways-defaults-resources)) (`map`) - Resource limits for all terminating gateway pods - - `initCopyConsulContainer` ((#v-terminatinggateways-defaults-initcopyconsulcontainer)) (`map`) - Resource settings for the `copy-consul-bin` init container. + - `initCopyConsulContainer` ((#v-terminatinggateways-defaults-initcopyconsulcontainer)) (`map`) - The resource settings for the `copy-consul-bin` init container. - `affinity` ((#v-terminatinggateways-defaults-affinity)) (`string`) - By default, we set an anti-affinity so that two of the same gateway pods won't be on the same node. NOTE: Gateways require that Consul client agents are @@ -2069,6 +2115,10 @@ Use these links to navigate to a particular top-level stanza. "annotation-key": "annotation-value" ``` + - `resources` ((#v-apigateway-resources)) (`map`) - The resource settings for api gateway pods. + + - `initCopyConsulContainer` ((#v-apigateway-initcopyconsulcontainer)) (`map`) - The resource settings for the `copy-consul-bin` init container. + ### webhookCertManager - `webhookCertManager` ((#v-webhookcertmanager)) - Configuration settings for the webhook-cert-manager From d60e8cd6460c42a8af64cca2cd2852feac66eb99 Mon Sep 17 00:00:00 2001 From: Blake Covarrubias Date: Mon, 4 Apr 2022 14:35:07 -0700 Subject: [PATCH 072/785] docs: Update links to K8s service mesh annotations (#12652) The list of supported annotations for Consul service mesh were moved from /docs/k8s/connect to /docs/k8s/annotations-and-labels in PR #12323. This commit updates various across the site to point to the new URL for these annotations. 
--- .../content/docs/connect/transparent-proxy.mdx | 15 ++++++++------- website/content/docs/k8s/connect/index.mdx | 2 +- .../docs/k8s/connect/ingress-controllers.mdx | 10 +++++----- .../single-dc-multi-k8s.mdx | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/website/content/docs/connect/transparent-proxy.mdx b/website/content/docs/connect/transparent-proxy.mdx index 5ddc21e2d..750c62e0f 100644 --- a/website/content/docs/connect/transparent-proxy.mdx +++ b/website/content/docs/connect/transparent-proxy.mdx @@ -47,7 +47,7 @@ proxy, and dial the local listener to reach the appropriate upstream. Users woul specific services to talk to one another. Transparent proxying reduces this duplication, by determining upstreams implicitly from Service Intentions. Explicit upstreams are still supported in the [proxy service registration](/docs/connect/registration/service-registration) on VMs and via the -[annotation](/docs/k8s/connect#consul-hashicorp-com-connect-service-upstreams) in Kubernetes. +[annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) in Kubernetes. To support transparent proxying, Consul's CLI now has a command [`consul connect redirect-traffic`](/commands/connect/redirect-traffic) to redirect traffic through an inbound and @@ -153,20 +153,21 @@ or the Pod annotation `consul.hashicorp.com/transparent-proxy-overwrite-probes`. Pods with transparent proxy enabled will have an init container injected that sets up traffic redirection for all inbound and outbound traffic through the sidecar proxies. This will include all traffic by default, with the ability to configure exceptions on a per-Pod basis. The following Pod annotations allow you to exclude certain traffic from redirection to the sidecar proxies: -- [`consul.hashicorp.com/transparent-proxy-exclude-inbound-ports`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-inbound-ports) -- [`consul.hashicorp.com/transparent-proxy-exclude-outbound-ports`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-outbound-ports) -- [`consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-outbound-cidrs) -- [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-uids) + +- [`consul.hashicorp.com/transparent-proxy-exclude-inbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-inbound-ports) +- [`consul.hashicorp.com/transparent-proxy-exclude-outbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-ports) +- [`consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-cidrs) +- [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-uids) ## Known Limitations * Traffic can only be transparently proxied when the address dialed corresponds to the address of a service in the transparent proxy's datacenter. 
Services can also dial explicit upstreams in other datacenters without transparent proxy, for example, by adding an -[annotation](/docs/k8s/connect#consul-hashicorp-com-connect-service-upstreams) such as +[annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) such as `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` to reach an upstream service called `my-service` in the datacenter `dc2`. -* In the deployment configuration where a [single Consul datacenter spans multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/connect#consul-hashicorp-com-connect-service-upstreams) annotation. An example would be +* In the deployment configuration where a [single Consul datacenter spans multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. An example would be `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, where `my-service` is the service that exists in another Kubernetes cluster and is exposed on port `1234`. Although Transparent Proxy is enabled, KubeDNS is not utilized when communicating between services existing on separate Kubernetes clusters. * When dialing headless services the request will be proxied using a plain TCP proxy with a 5s connection timeout. Currently the upstream's protocol and connection timeout are not considered. diff --git a/website/content/docs/k8s/connect/index.mdx b/website/content/docs/k8s/connect/index.mdx index d708f342c..928089142 100644 --- a/website/content/docs/k8s/connect/index.mdx +++ b/website/content/docs/k8s/connect/index.mdx @@ -192,7 +192,7 @@ When ACLs are enabled with default `deny` policy, you must supply an [intention](/docs/connect/intentions) to tell Consul which upstream you need to talk to. When upstreams are specified explicitly with the -[`consul.hashicorp.com/connect-service-upstreams` annotation](/docs/k8s/connect#consul-hashicorp-com-connect-service-upstreams), +[`consul.hashicorp.com/connect-service-upstreams` annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams), the injector will also set environment variables `_CONNECT_SERVICE_HOST` and `_CONNECT_SERVICE_PORT` in every container in the Pod for every defined upstream. This is analogous to the standard Kubernetes service environment variables, but diff --git a/website/content/docs/k8s/connect/ingress-controllers.mdx b/website/content/docs/k8s/connect/ingress-controllers.mdx index b814d1241..eb99cc93f 100644 --- a/website/content/docs/k8s/connect/ingress-controllers.mdx +++ b/website/content/docs/k8s/connect/ingress-controllers.mdx @@ -12,22 +12,22 @@ description: Configuring Ingress Controllers With Consul On Kubernetes [Transparent Proxy](/docs/connect/transparent-proxy) mode enabled. This page describes a general approach for integrating Ingress Controllers with Consul on Kubernetes to secure traffic from the Controller -to the backend services by deploying sidecars along with your Ingress Controller. 
This allows Consul to transparently secure traffic from the ingress point through the entire traffic flow of the service. +to the backend services by deploying sidecars along with your Ingress Controller. This allows Consul to transparently secure traffic from the ingress point through the entire traffic flow of the service. -If you are looking for a fully supported solution for ingress traffic into Consul Service Mesh, please visit [Consul API Gateway](https://www.consul.io/docs/api-gateway) for instruction on how to install Consul API Gateway along with Consul on Kubernetes. +If you are looking for a fully supported solution for ingress traffic into Consul Service Mesh, please visit [Consul API Gateway](/docs/api-gateway) for instruction on how to install Consul API Gateway along with Consul on Kubernetes. A few steps are generally required to enable an Ingress controller to join the mesh and pass traffic through to a service: * Enable connect-injection via an annotation on the Ingress Controller's deployment: `consul.hashicorp.com/connect-inject` is `true`. * Using the following annotations on the Ingress controller's deployment, set up exclusion rules for its ports. - * [`consul.hashicorp.com/transparent-proxy-exclude-inbound-ports`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-inbound-ports) - Provides the ability to exclude a list of ports for + * [`consul.hashicorp.com/transparent-proxy-exclude-inbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-inbound-ports) - Provides the ability to exclude a list of ports for inbound traffic that the service exposes from redirection. Typical configurations would require all inbound service ports for the controller to be included in this list. - * [`consul.hashicorp.com/transparent-proxy-exclude-outbound-ports`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-outbound-ports) - Provides the ability to exclude a list of ports for + * [`consul.hashicorp.com/transparent-proxy-exclude-outbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-ports) - Provides the ability to exclude a list of ports for outbound traffic that the service exposes from redirection. These would be outbound ports used by your ingress controller which expect to skip the mesh and talk to non-mesh services. - * [`consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs`](/docs/k8s/connect#consul-hashicorp-com-transparent-proxy-exclude-outbound-cidrs) - Provides the ability to exclude a list of CIDRs that + * [`consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-cidrs) - Provides the ability to exclude a list of CIDRs that the service communicates with for outbound requests from redirection. It is somewhat common that an Ingress controller will expect to make API calls to the Kubernetes service for service/endpoint management. As such including the ClusterIP of the Kubernetes service is common. 
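To make the exclusion rules above concrete, here is a minimal sketch of what the resulting pod-template annotations on an Ingress Controller's Deployment might look like. This is illustrative only: the port numbers and CIDR are placeholders, and the real values depend on the controller's listeners and the cluster's Kubernetes API service ClusterIP.

```yaml
# Sketch only: annotation values below are placeholders, not recommendations.
spec:
  template:
    metadata:
      annotations:
        consul.hashicorp.com/connect-inject: "true"
        # All inbound listener ports the controller exposes belong in this list.
        consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "80,443"
        # Outbound ports the controller uses to reach non-mesh services.
        consul.hashicorp.com/transparent-proxy-exclude-outbound-ports: "8443"
        # ClusterIP of the Kubernetes API service, for endpoint management calls.
        consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs: "10.96.0.1/32"
```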
diff --git a/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx b/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx index baa36174d..4e47aeeda 100644 --- a/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx +++ b/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx @@ -188,7 +188,7 @@ $ helm install cluster2 --values cluster2-config.yaml hashicorp/consul ## Verifying the Consul Service Mesh works -~> When Transparent proxy is enabled, services in one Kubernetes cluster that need to communicate with a service in another Kubernetes cluster must have a explicit upstream configured through the ["consul.hashicorp.com/connect-service-upstreams"](/docs/k8s/connect#consul-hashicorp-com-connect-service-upstreams) annotation. +~> When Transparent proxy is enabled, services in one Kubernetes cluster that need to communicate with a service in another Kubernetes cluster must have a explicit upstream configured through the ["consul.hashicorp.com/connect-service-upstreams"](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. Now that we have our Consul cluster in multiple k8s clusters up and running, we will deploy two services and verify that they can connect to each other. From b4285b56ee8ce3e6d7b11d25fc48527597f68f50 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Mon, 4 Apr 2022 17:50:59 -0400 Subject: [PATCH 073/785] Update Helm docs to reflect 0.42.0 release (#12689) * Update Helm docs to reflect 0.42.0 release Co-authored-by: mrspanishviking Co-authored-by: David Yu Co-authored-by: mrspanishviking --- website/content/docs/k8s/helm.mdx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index 81164cac3..0072528c1 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -448,11 +448,12 @@ Use these links to navigate to a particular top-level stanza. - `k8sAuthMethodHost` ((#v-global-federation-k8sauthmethodhost)) (`string: null`) - If you are setting `global.federation.enabled` to true and are in a secondary datacenter, set `k8sAuthMethodHost` to the address of the Kubernetes API server of the secondary datacenter. This address must be reachable from the Consul servers in the primary datacenter. - This authmethod will be used to provision ACL tokens for Consul components and is different - from the one used by the Consul Service Mesh. - Please see the Kubernetes Auth Method documentation (https://consul.io/docs/acl/auth-methods/kubernetes). - You could retrieve this value from your `kubeconfig` by running: + This auth method will be used to provision ACL tokens for Consul components and is different + from the one used by the Consul Service Mesh. + Please see the [Kubernetes Auth Method documentation](https://consul.io/docs/acl/auth-methods/kubernetes) for additional information. + + You can retrieve this value from your `kubeconfig` by running: ```shell-session $ kubectl config view \ From e48c1611eeda4de78b1dc7e523642f06e8fb31df Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Tue, 5 Apr 2022 15:26:14 +0100 Subject: [PATCH 074/785] WatchRoots gRPC endpoint (#12678) Adds a new gRPC streaming endpoint (WatchRoots) that dataplane clients will use to fetch the current list of active Connect CA roots and receive new lists whenever the roots are rotated. 
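As a rough sketch of how a dataplane-style client might consume this stream, the snippet below mirrors the gRPC client setup used in the tests added by this patch. The dial address and ACL token are placeholders, and a real client would reconnect and handle stream errors rather than exiting.

```go
// Sketch only: consumes the WatchRoots stream the way the patch's own tests do.
// The dial address and ACL token are placeholders.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"

	"github.com/hashicorp/consul/agent/grpc/public"
	"github.com/hashicorp/consul/proto-public/pbconnectca"
)

func main() {
	conn, err := grpc.DialContext(context.Background(), "127.0.0.1:8502", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The ACL token is carried in gRPC metadata under "x-consul-token".
	ctx := public.ContextWithToken(context.Background(), "placeholder-acl-token")

	client := pbconnectca.NewConnectCAServiceClient(conn)
	stream, err := client.WatchRoots(ctx, &emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}

	// The first message contains the current roots; a new message arrives
	// whenever the roots are rotated.
	for {
		rsp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("trust domain %q, active root %q, %d root(s)",
			rsp.TrustDomain, rsp.ActiveRootId, len(rsp.Roots))
	}
}
```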
--- .changelog/12678.txt | 3 + agent/consul/acl.go | 20 + agent/consul/leader_connect_ca_test.go | 3 +- agent/consul/leader_test.go | 3 +- agent/consul/server.go | 8 + agent/consul/server_test.go | 3 +- agent/consul/state/catalog_events.go | 64 ++- agent/consul/state/catalog_events_test.go | 69 +-- agent/consul/state/connect_ca_events.go | 17 +- agent/consul/state/connect_ca_events_test.go | 23 + agent/consul/state/state_store.go | 5 +- agent/consul/state/store_integration_test.go | 56 ++- agent/consul/stream/event.go | 14 +- agent/consul/stream/event_publisher.go | 9 +- agent/consul/stream/event_publisher_test.go | 40 +- agent/consul/stream/subscription.go | 36 +- agent/consul/stream/subscription_test.go | 30 +- .../private/services/subscribe/subscribe.go | 12 +- .../services/subscribe/subscribe_test.go | 6 +- .../public/services/connectca/acl_test.go | 27 + .../services/connectca/mock_ACLResolver.go | 38 ++ .../grpc/public/services/connectca/server.go | 42 ++ .../public/services/connectca/server_test.go | 52 ++ .../public/services/connectca/watch_roots.go | 202 ++++++++ .../services/connectca/watch_roots_test.go | 280 +++++++++++ agent/grpc/public/token.go | 28 ++ agent/submatview/store_integration_test.go | 10 +- agent/xds/server.go | 16 +- proto-public/pbconnectca/ca.pb.binary.go | 28 ++ proto-public/pbconnectca/ca.pb.go | 473 ++++++++++++++++++ proto-public/pbconnectca/ca.proto | 72 +++ 31 files changed, 1473 insertions(+), 216 deletions(-) create mode 100644 .changelog/12678.txt create mode 100644 agent/grpc/public/services/connectca/acl_test.go create mode 100644 agent/grpc/public/services/connectca/mock_ACLResolver.go create mode 100644 agent/grpc/public/services/connectca/server.go create mode 100644 agent/grpc/public/services/connectca/server_test.go create mode 100644 agent/grpc/public/services/connectca/watch_roots.go create mode 100644 agent/grpc/public/services/connectca/watch_roots_test.go create mode 100644 agent/grpc/public/token.go create mode 100644 proto-public/pbconnectca/ca.pb.binary.go create mode 100644 proto-public/pbconnectca/ca.pb.go create mode 100644 proto-public/pbconnectca/ca.proto diff --git a/.changelog/12678.txt b/.changelog/12678.txt new file mode 100644 index 000000000..3758a06a1 --- /dev/null +++ b/.changelog/12678.txt @@ -0,0 +1,3 @@ +```release-note:feature +ca: Root certificates can now be consumed from a gRPC streaming endpoint: `WatchRoots` +``` diff --git a/agent/consul/acl.go b/agent/consul/acl.go index bd84857b6..8b3d4e55e 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -664,6 +664,26 @@ func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*stru return syntheticPolicies } +// plainACLResolver wraps ACLResolver so that it can be used in other packages +// that cannot import agent/consul wholesale (e.g. because of import cycles). +// +// TODO(agentless): this pattern was copied from subscribeBackend for expediency +// but we should really refactor ACLResolver so it can be passed as a dependency +// to other packages. +type plainACLResolver struct { + resolver *ACLResolver +} + +func (r plainACLResolver) ResolveTokenAndDefaultMeta( + token string, + entMeta *structs.EnterpriseMeta, + authzContext *acl.AuthorizerContext, +) (acl.Authorizer, error) { + // ACLResolver.ResolveTokenAndDefaultMeta returns a ACLResolveResult which + // can't be used in other packages, but it embeds acl.Authorizer which can. 
+ return r.resolver.ResolveTokenAndDefaultMeta(token, entMeta, authzContext) +} + func dedupeServiceIdentities(in []*structs.ACLServiceIdentity) []*structs.ACLServiceIdentity { // From: https://github.com/golang/go/wiki/SliceTricks#in-place-deduplicate-comparable diff --git a/agent/consul/leader_connect_ca_test.go b/agent/consul/leader_connect_ca_test.go index 2ebbda0a6..1f2c964b8 100644 --- a/agent/consul/leader_connect_ca_test.go +++ b/agent/consul/leader_connect_ca_test.go @@ -19,6 +19,7 @@ import ( vaultapi "github.com/hashicorp/vault/api" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/consul-net-rpc/net/rpc" @@ -550,7 +551,7 @@ func TestCAManager_Initialize_Logging(t *testing.T) { deps := newDefaultDeps(t, conf1) deps.Logger = logger - s1, err := NewServer(conf1, deps, nil) + s1, err := NewServer(conf1, deps, grpc.NewServer()) require.NoError(t, err) defer s1.Shutdown() testrpc.WaitForLeader(t, s1.RPC, "dc1") diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 189c058b9..cb767acf0 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" + "google.golang.org/grpc" msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" @@ -1528,7 +1529,7 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) { deps := newDefaultDeps(t, config) deps.Logger = logger - srv, err := NewServer(config, deps, nil) + srv, err := NewServer(config, deps, grpc.NewServer()) require.NoError(t, err) defer srv.Shutdown() diff --git a/agent/consul/server.go b/agent/consul/server.go index da821ebc8..ccfa3044a 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -43,6 +43,7 @@ import ( "github.com/hashicorp/consul/agent/consul/wanfed" agentgrpc "github.com/hashicorp/consul/agent/grpc/private" "github.com/hashicorp/consul/agent/grpc/private/services/subscribe" + "github.com/hashicorp/consul/agent/grpc/public/services/connectca" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" @@ -632,6 +633,13 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve // since it can fire events when leadership is obtained. go s.monitorLeadership() + // Initialize public gRPC server. + connectca.NewServer(connectca.Config{ + GetStore: func() connectca.StateStore { return s.FSM().State() }, + Logger: logger.Named("grpc-api.connect-ca"), + ACLResolver: plainACLResolver{s.ACLResolver}, + }).Register(s.publicGRPCServer) + // Start listening for RPC requests. 
go func() { if err := s.grpcHandler.Run(); err != nil { diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index bf7ff0ab6..6f953dd1c 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -13,6 +13,7 @@ import ( "github.com/google/tcpproxy" "github.com/hashicorp/memberlist" "github.com/hashicorp/raft" + "google.golang.org/grpc" "github.com/hashicorp/consul/ipaddr" @@ -263,7 +264,7 @@ func newServer(t *testing.T, c *Config) (*Server, error) { } } - srv, err := NewServer(c, newDefaultDeps(t, c), nil) + srv, err := NewServer(c, newDefaultDeps(t, c), grpc.NewServer()) if err != nil { return nil, err } diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 3c72db0bd..eaca440a8 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -1,6 +1,7 @@ package state import ( + "fmt" "strings" memdb "github.com/hashicorp/go-memdb" @@ -11,6 +12,38 @@ import ( "github.com/hashicorp/consul/proto/pbsubscribe" ) +// EventSubjectService is a stream.Subject used to route and receive events for +// a specific service. +type EventSubjectService struct { + Key string + EnterpriseMeta structs.EnterpriseMeta + + overrideKey string + overrideNamespace string + overridePartition string +} + +// String satisfies the stream.Subject interface. +func (s EventSubjectService) String() string { + partition := s.EnterpriseMeta.PartitionOrDefault() + if v := s.overridePartition; v != "" { + partition = strings.ToLower(v) + } + + namespace := s.EnterpriseMeta.NamespaceOrDefault() + if v := s.overrideNamespace; v != "" { + namespace = strings.ToLower(v) + } + + key := s.Key + if v := s.overrideKey; v != "" { + key = v + } + key = strings.ToLower(key) + + return partition + "/" + namespace + "/" + key +} + // EventPayloadCheckServiceNode is used as the Payload for a stream.Event to // indicates changes to a CheckServiceNode for service health. 
// @@ -33,25 +66,14 @@ func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bo } func (e EventPayloadCheckServiceNode) Subject() stream.Subject { - partition := e.Value.Service.PartitionOrDefault() - if e.overridePartition != "" { - partition = e.overridePartition - } - partition = strings.ToLower(partition) + return EventSubjectService{ + Key: e.Value.Service.Service, + EnterpriseMeta: e.Value.Service.EnterpriseMeta, - namespace := e.Value.Service.NamespaceOrDefault() - if e.overrideNamespace != "" { - namespace = e.overrideNamespace + overrideKey: e.overrideKey, + overrideNamespace: e.overrideNamespace, + overridePartition: e.overridePartition, } - namespace = strings.ToLower(namespace) - - key := e.Value.Service.Service - if e.overrideKey != "" { - key = e.overrideKey - } - key = strings.ToLower(key) - - return stream.Subject(partition + "/" + namespace + "/" + key) } // serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot @@ -62,7 +84,13 @@ func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc { defer tx.Abort() connect := topic == topicServiceHealthConnect - idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, connect, &req.EnterpriseMeta) + + subject, ok := req.Subject.(EventSubjectService) + if !ok { + return 0, fmt.Errorf("expected SubscribeRequest.Subject to be a: state.EventSubjectService, was a: %T", req.Subject) + } + + idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta) if err != nil { return 0, err } diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index bb17dae10..b85ea5f76 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -16,11 +16,10 @@ import ( "github.com/hashicorp/consul/types" ) -func TestEventPayloadCheckServiceNode_SubjectMatchesRequests(t *testing.T) { - // Matches. +func TestEventPayloadCheckServiceNode_Subject(t *testing.T) { for desc, tc := range map[string]struct { evt EventPayloadCheckServiceNode - req stream.SubscribeRequest + sub string }{ "default partition and namespace": { EventPayloadCheckServiceNode{ @@ -30,10 +29,7 @@ func TestEventPayloadCheckServiceNode_SubjectMatchesRequests(t *testing.T) { }, }, }, - stream.SubscribeRequest{ - Key: "foo", - EnterpriseMeta: structs.EnterpriseMeta{}, - }, + "default/default/foo", }, "mixed casing": { EventPayloadCheckServiceNode{ @@ -43,7 +39,7 @@ func TestEventPayloadCheckServiceNode_SubjectMatchesRequests(t *testing.T) { }, }, }, - stream.SubscribeRequest{Key: "foo"}, + "default/default/foo", }, "override key": { EventPayloadCheckServiceNode{ @@ -54,60 +50,11 @@ func TestEventPayloadCheckServiceNode_SubjectMatchesRequests(t *testing.T) { }, overrideKey: "bar", }, - stream.SubscribeRequest{Key: "bar"}, + "default/default/bar", }, } { t.Run(desc, func(t *testing.T) { - require.Equal(t, tc.req.Subject(), tc.evt.Subject()) - }) - } - - // Non-matches. 
- for desc, tc := range map[string]struct { - evt EventPayloadCheckServiceNode - req stream.SubscribeRequest - }{ - "different key": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - }, - stream.SubscribeRequest{ - Key: "bar", - }, - }, - "different partition": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - overridePartition: "bar", - }, - stream.SubscribeRequest{ - Key: "foo", - }, - }, - "different namespace": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - overrideNamespace: "bar", - }, - stream.SubscribeRequest{ - Key: "foo", - }, - }, - } { - t.Run(desc, func(t *testing.T) { - require.NotEqual(t, tc.req.Subject(), tc.evt.Subject()) + require.Equal(t, tc.sub, tc.evt.Subject().String()) }) } } @@ -125,7 +72,7 @@ func TestServiceHealthSnapshot(t *testing.T) { fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealth) buf := &snapshotAppender{} - req := stream.SubscribeRequest{Key: "web"} + req := stream.SubscribeRequest{Subject: EventSubjectService{Key: "web"}} idx, err := fn(req, buf) require.NoError(t, err) @@ -202,7 +149,7 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) { fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealthConnect) buf := &snapshotAppender{} - req := stream.SubscribeRequest{Key: "web", Topic: topicServiceHealthConnect} + req := stream.SubscribeRequest{Subject: EventSubjectService{Key: "web"}, Topic: topicServiceHealthConnect} idx, err := fn(req, buf) require.NoError(t, err) diff --git a/agent/consul/state/connect_ca_events.go b/agent/consul/state/connect_ca_events.go index e73c206b5..c6bd135be 100644 --- a/agent/consul/state/connect_ca_events.go +++ b/agent/consul/state/connect_ca_events.go @@ -12,11 +12,13 @@ import ( // // Note: topics are ordinarily defined in subscribe.proto, but this one isn't // currently available via the Subscribe endpoint. -const EventTopicCARoots stringTopic = "CARoots" +const EventTopicCARoots stringer = "CARoots" -type stringTopic string +// stringer is a convenience type to turn a regular string into a fmt.Stringer +// so that it can be used as a stream.Topic or stream.Subject. +type stringer string -func (s stringTopic) String() string { return string(s) } +func (s stringer) String() string { return string(s) } type EventPayloadCARoots struct { CARoots structs.CARoots @@ -25,9 +27,12 @@ type EventPayloadCARoots struct { func (e EventPayloadCARoots) Subject() stream.Subject { return stream.SubjectNone } func (e EventPayloadCARoots) HasReadPermission(authz acl.Authorizer) bool { - // TODO(agentless): implement this method once the Authorizer exposes a method - // to check for `service:write` on any service. - panic("EventPayloadCARoots does not implement HasReadPermission") + // Require `service:write` on any service in any partition and namespace. + var authzContext acl.AuthorizerContext + structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier). 
+ FillAuthzContext(&authzContext) + + return authz.ServiceWriteAny(&authzContext) == acl.Allow } // caRootsChangeEvents returns an event on EventTopicCARoots whenever the list diff --git a/agent/consul/state/connect_ca_events_test.go b/agent/consul/state/connect_ca_events_test.go index 9e9134367..9651e2a47 100644 --- a/agent/consul/state/connect_ca_events_test.go +++ b/agent/consul/state/connect_ca_events_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" @@ -93,3 +94,25 @@ func TestCARootsSnapshot(t *testing.T) { }) }) } + +func TestEventPayloadCARoots_HasReadPermission(t *testing.T) { + t.Run("no service:write", func(t *testing.T) { + hasRead := EventPayloadCARoots{}.HasReadPermission(acl.DenyAll()) + require.False(t, hasRead) + }) + + t.Run("has service:write", func(t *testing.T) { + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "write" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(t, err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(t, err) + + hasRead := EventPayloadCARoots{}.HasReadPermission(authz) + require.True(t, hasRead) + }) +} diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index 82dc8d356..2689ac142 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -276,8 +276,11 @@ func (s *Store) AbandonCh() <-chan struct{} { // Abandon is used to signal that the given state store has been abandoned. // Calling this more than one time will panic. func (s *Store) Abandon() { - s.stopEventPublisher() + // Note: the order of these operations matters. Subscribers may receive on + // abandonCh to determine whether their subscription was closed because the + // store was abandoned, therefore it's important abandonCh is closed first. close(s.abandonCh) + s.stopEventPublisher() } // maxIndex is a helper used to retrieve the highest known index diff --git a/agent/consul/state/store_integration_test.go b/agent/consul/state/store_integration_test.go index edd051389..55c3059ce 100644 --- a/agent/consul/state/store_integration_test.go +++ b/agent/consul/state/store_integration_test.go @@ -25,9 +25,9 @@ func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { // Register the subscription. subscription := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -71,9 +71,9 @@ func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { // Register another subscription. subscription2 := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } sub2, err := publisher.Subscribe(subscription2) require.NoError(t, err) @@ -112,9 +112,9 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { // Register the subscription. 
subscription := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -162,9 +162,9 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { // Register another subscription. subscription2 := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } sub, err = publisher.Subscribe(subscription2) require.NoError(t, err) @@ -191,9 +191,9 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { // Register another subscription. subscription3 := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } sub, err = publisher.Subscribe(subscription3) require.NoError(t, err) @@ -233,9 +233,9 @@ func TestStore_IntegrationWithEventPublisher_ACLRoleUpdate(t *testing.T) { // Register the subscription. subscription := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -278,9 +278,9 @@ func TestStore_IntegrationWithEventPublisher_ACLRoleUpdate(t *testing.T) { // Register another subscription. subscription2 := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } sub, err = publisher.Subscribe(subscription2) require.NoError(t, err) @@ -396,7 +396,9 @@ var topicService topic = "test-topic-service" func newTestSnapshotHandlers(s *Store) stream.SnapshotHandlers { return stream.SnapshotHandlers{ topicService: func(req stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { - idx, nodes, err := s.ServiceNodes(nil, req.Key, nil) + key := req.Subject.String() + + idx, nodes, err := s.ServiceNodes(nil, key, nil) if err != nil { return idx, err } @@ -405,7 +407,7 @@ func newTestSnapshotHandlers(s *Store) stream.SnapshotHandlers { event := stream.Event{ Topic: req.Topic, Index: node.ModifyIndex, - Payload: nodePayload{node: node, key: req.Key}, + Payload: nodePayload{node: node, key: key}, } snap.Append([]stream.Event{event}) } @@ -424,7 +426,7 @@ func (p nodePayload) HasReadPermission(acl.Authorizer) bool { } func (p nodePayload) Subject() stream.Subject { - return stream.Subject(p.node.PartitionOrDefault() + "/" + p.node.NamespaceOrDefault() + "/" + p.key) + return stringer(p.key) } func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLToken { @@ -451,9 +453,9 @@ func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLTo // so we know the initial token write event has been sent out before // continuing... 
req := &stream.SubscribeRequest{ - Topic: topicService, - Key: "nope", - Token: token.SecretID, + Topic: topicService, + Subject: stringer("nope"), + Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() diff --git a/agent/consul/stream/event.go b/agent/consul/stream/event.go index 78e41bc37..b3936a49b 100644 --- a/agent/consul/stream/event.go +++ b/agent/consul/stream/event.go @@ -17,12 +17,16 @@ type Topic fmt.Stringer // Subject identifies a portion of a topic for which a subscriber wishes to // receive events (e.g. health events for a particular service) usually the // normalized resource name (including partition and namespace if applicable). -type Subject string +type Subject fmt.Stringer // SubjectNone is used when all events on a given topic are "global" and not // further partitioned by subject. For example: the "CA Roots" topic which is // used to notify subscribers when the global set CA root certificates changes. -const SubjectNone Subject = "none" +const SubjectNone stringer = "none" + +type stringer string + +func (s stringer) String() string { return string(s) } // Event is a structure with identifiers and a payload. Events are Published to // EventPublisher and returned to Subscribers. @@ -123,6 +127,12 @@ func (e Event) IsNewSnapshotToFollow() bool { return e.Payload == newSnapshotToFollow{} } +// IsFramingEvent returns true if this is a framing event (e.g. EndOfSnapshot +// or NewSnapshotToFollow). +func (e Event) IsFramingEvent() bool { + return e.IsEndOfSnapshot() || e.IsNewSnapshotToFollow() +} + type framingEvent struct{} func (framingEvent) HasReadPermission(acl.Authorizer) bool { diff --git a/agent/consul/stream/event_publisher.go b/agent/consul/stream/event_publisher.go index 094101355..06b7b03a2 100644 --- a/agent/consul/stream/event_publisher.go +++ b/agent/consul/stream/event_publisher.go @@ -44,8 +44,8 @@ type EventPublisher struct { // topicSubject is used as a map key when accessing topic buffers and cached // snapshots. 
type topicSubject struct { - Topic Topic - Subject Subject + Topic string + Subject string } type subscriptions struct { @@ -138,7 +138,10 @@ func (e *EventPublisher) publishEvent(events []Event) { continue } - groupKey := topicSubject{event.Topic, event.Payload.Subject()} + groupKey := topicSubject{ + Topic: event.Topic.String(), + Subject: event.Payload.Subject().String(), + } groupedEvents[groupKey] = append(groupedEvents[groupKey], event) } diff --git a/agent/consul/stream/event_publisher_test.go b/agent/consul/stream/event_publisher_test.go index f90af0b1b..c718d5853 100644 --- a/agent/consul/stream/event_publisher_test.go +++ b/agent/consul/stream/event_publisher_test.go @@ -21,8 +21,8 @@ var testTopic Topic = intTopic(999) func TestEventPublisher_SubscribeWithIndex0(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -81,7 +81,7 @@ func (p simplePayload) HasReadPermission(acl.Authorizer) bool { return !p.noReadPerm } -func (p simplePayload) Subject() Subject { return Subject("default/default/" + p.key) } +func (p simplePayload) Subject() Subject { return stringer(p.key) } func newTestSnapshotHandlers() SnapshotHandlers { return SnapshotHandlers{ @@ -153,11 +153,11 @@ func TestEventPublisher_ShutdownClosesSubscriptions(t *testing.T) { publisher := NewEventPublisher(handlers, time.Second) go publisher.Run(ctx) - sub1, err := publisher.Subscribe(&SubscribeRequest{Topic: intTopic(22)}) + sub1, err := publisher.Subscribe(&SubscribeRequest{Topic: intTopic(22), Subject: SubjectNone}) require.NoError(t, err) defer sub1.Unsubscribe() - sub2, err := publisher.Subscribe(&SubscribeRequest{Topic: intTopic(33)}) + sub2, err := publisher.Subscribe(&SubscribeRequest{Topic: intTopic(33), Subject: SubjectNone}) require.NoError(t, err) defer sub2.Unsubscribe() @@ -184,8 +184,8 @@ func consumeSub(ctx context.Context, sub *Subscription) error { func TestEventPublisher_SubscribeWithIndex0_FromCache(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -229,8 +229,8 @@ func TestEventPublisher_SubscribeWithIndex0_FromCache(t *testing.T) { func TestEventPublisher_SubscribeWithIndexNotZero_CanResume(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -282,8 +282,8 @@ func TestEventPublisher_SubscribeWithIndexNotZero_CanResume(t *testing.T) { func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -338,8 +338,8 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot(t *testing.T) { func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshotFromCache(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -406,9 +406,9 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshotFromCache(t *testin 
func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot_WithCache(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", - Index: 1, + Topic: testTopic, + Subject: stringer("sub-key"), + Index: 1, } nextEvent := Event{ @@ -492,8 +492,8 @@ func runStep(t *testing.T, name string, fn func(t *testing.T)) { func TestEventPublisher_Unsubscribe_ClosesSubscription(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -514,8 +514,8 @@ func TestEventPublisher_Unsubscribe_ClosesSubscription(t *testing.T) { func TestEventPublisher_Unsubscribe_FreesResourcesWhenThereAreNoSubscribers(t *testing.T) { req := &SubscribeRequest{ - Topic: testTopic, - Key: "sub-key", + Topic: testTopic, + Subject: stringer("sub-key"), } publisher := NewEventPublisher(newTestSnapshotHandlers(), time.Second) diff --git a/agent/consul/stream/subscription.go b/agent/consul/stream/subscription.go index 0a4294715..28ca50c3a 100644 --- a/agent/consul/stream/subscription.go +++ b/agent/consul/stream/subscription.go @@ -4,10 +4,7 @@ import ( "context" "errors" "fmt" - "strings" "sync/atomic" - - "github.com/hashicorp/consul/agent/structs" ) const ( @@ -54,37 +51,32 @@ type Subscription struct { } // SubscribeRequest identifies the types of events the subscriber would like to -// receiver. Topic and Token are required. +// receive. Topic, Subject, and Token are required. type SubscribeRequest struct { - // Topic to subscribe to + // Topic to subscribe to (e.g. service health). Topic Topic - // Key used to filter events in the topic. Only events matching the key will - // be returned by the subscription. A blank key will return all events. Key - // is generally the name of the resource. - Key string - // EnterpriseMeta is used to filter events in the topic. Only events matching - // the partition and namespace will be returned by the subscription. - EnterpriseMeta structs.EnterpriseMeta + + // Subject identifies the subset of Topic events the subscriber wishes to + // receive (e.g. events for a specific service). SubjectNone may be provided + // if all events on the given topic are "global" and not further partitioned + // by subject. + Subject Subject + // Token that was used to authenticate the request. If any ACL policy // changes impact the token the subscription will be forcefully closed. Token string + // Index is the last index the client received. If non-zero the // subscription will be resumed from this index. If the index is out-of-date // a NewSnapshotToFollow event will be sent. Index uint64 } -func (req SubscribeRequest) Subject() Subject { - var ( - partition = req.EnterpriseMeta.PartitionOrDefault() - namespace = req.EnterpriseMeta.NamespaceOrDefault() - key = strings.ToLower(req.Key) - ) - return Subject(partition + "/" + namespace + "/" + key) -} - func (req SubscribeRequest) topicSubject() topicSubject { - return topicSubject{req.Topic, req.Subject()} + return topicSubject{ + Topic: req.Topic.String(), + Subject: req.Subject.String(), + } } // newSubscription return a new subscription. 
The caller is responsible for diff --git a/agent/consul/stream/subscription_test.go b/agent/consul/stream/subscription_test.go index cf3be6393..b6e0f1a5f 100644 --- a/agent/consul/stream/subscription_test.go +++ b/agent/consul/stream/subscription_test.go @@ -6,32 +6,10 @@ import ( time "time" "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/structs" ) func noopUnSub() {} -func TestSubscription_Subject(t *testing.T) { - for desc, tc := range map[string]struct { - req SubscribeRequest - sub Subject - }{ - "default partition and namespace": { - SubscribeRequest{Key: "foo", EnterpriseMeta: structs.EnterpriseMeta{}}, - "default/default/foo", - }, - "mixed casing": { - SubscribeRequest{Key: "BaZ"}, - "default/default/baz", - }, - } { - t.Run(desc, func(t *testing.T) { - require.Equal(t, tc.sub, tc.req.Subject()) - }) - } -} - func TestSubscription(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -50,8 +28,8 @@ func TestSubscription(t *testing.T) { defer cancel() req := SubscribeRequest{ - Topic: testTopic, - Key: "test", + Topic: testTopic, + Subject: stringer("test"), } sub := newSubscription(req, startHead, noopUnSub) @@ -124,8 +102,8 @@ func TestSubscription_Close(t *testing.T) { defer cancel() req := SubscribeRequest{ - Topic: testTopic, - Key: "test", + Topic: testTopic, + Subject: stringer("test"), } sub := newSubscription(req, startHead, noopUnSub) diff --git a/agent/grpc/private/services/subscribe/subscribe.go b/agent/grpc/private/services/subscribe/subscribe.go index 1a9d0031a..18372b200 100644 --- a/agent/grpc/private/services/subscribe/subscribe.go +++ b/agent/grpc/private/services/subscribe/subscribe.go @@ -93,11 +93,13 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta structs.EnterpriseMeta) *stream.SubscribeRequest { return &stream.SubscribeRequest{ - Topic: req.Topic, - Key: req.Key, - EnterpriseMeta: entMeta, - Token: req.Token, - Index: req.Index, + Topic: req.Topic, + Subject: state.EventSubjectService{ + Key: req.Key, + EnterpriseMeta: entMeta, + }, + Token: req.Token, + Index: req.Index, } } diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index 95df5fb13..a5a47a077 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -3,13 +3,12 @@ package subscribe import ( "context" "errors" - "github.com/golang/protobuf/ptypes/duration" - "github.com/hashicorp/consul/proto/pbcommon" "io" "net" "testing" "time" + "github.com/golang/protobuf/ptypes/duration" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" @@ -25,6 +24,7 @@ import ( grpc "github.com/hashicorp/consul/agent/grpc/private" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/proto/prototest" @@ -1106,7 +1106,7 @@ func newEventFromSubscription(t *testing.T, index uint64) stream.Event { }, } ep := stream.NewEventPublisher(handlers, 0) - req := &stream.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealthConnect, Index: index} + req := &stream.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealthConnect, Subject: stream.SubjectNone, Index: index} sub, err := 
ep.Subscribe(req) require.NoError(t, err) diff --git a/agent/grpc/public/services/connectca/acl_test.go b/agent/grpc/public/services/connectca/acl_test.go new file mode 100644 index 000000000..bac0e342e --- /dev/null +++ b/agent/grpc/public/services/connectca/acl_test.go @@ -0,0 +1,27 @@ +package connectca + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" +) + +// testAuthorizer returns an ACL policy authorizer with `service:write` on an +// arbitrary service. +func testAuthorizer(t *testing.T) acl.Authorizer { + t.Helper() + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "write" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(t, err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(t, err) + + return authz +} diff --git a/agent/grpc/public/services/connectca/mock_ACLResolver.go b/agent/grpc/public/services/connectca/mock_ACLResolver.go new file mode 100644 index 000000000..bbc462c44 --- /dev/null +++ b/agent/grpc/public/services/connectca/mock_ACLResolver.go @@ -0,0 +1,38 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package connectca + +import ( + acl "github.com/hashicorp/consul/acl" + mock "github.com/stretchr/testify/mock" + + structs "github.com/hashicorp/consul/agent/structs" +) + +// MockACLResolver is an autogenerated mock type for the ACLResolver type +type MockACLResolver struct { + mock.Mock +} + +// ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *structs.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 acl.Authorizer + if rf, ok := ret.Get(0).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(acl.Authorizer) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/agent/grpc/public/services/connectca/server.go b/agent/grpc/public/services/connectca/server.go new file mode 100644 index 000000000..64bced2dd --- /dev/null +++ b/agent/grpc/public/services/connectca/server.go @@ -0,0 +1,42 @@ +package connectca + +import ( + "google.golang.org/grpc" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbconnectca" +) + +type Server struct { + Config +} + +type Config struct { + GetStore func() StateStore + Logger hclog.Logger + ACLResolver ACLResolver +} + +type StateStore interface { + EventPublisher() state.EventPublisher + CAConfig(memdb.WatchSet) (uint64, *structs.CAConfiguration, error) + AbandonCh() <-chan struct{} +} + +//go:generate mockery -name ACLResolver -inpkg +type ACLResolver interface { + ResolveTokenAndDefaultMeta(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) +} + +func NewServer(cfg Config) *Server { + return &Server{cfg} +} + +func (s *Server) Register(grpcServer *grpc.Server) { + pbconnectca.RegisterConnectCAServiceServer(grpcServer, s) +} diff --git a/agent/grpc/public/services/connectca/server_test.go 
b/agent/grpc/public/services/connectca/server_test.go new file mode 100644 index 000000000..6a4d42fa0 --- /dev/null +++ b/agent/grpc/public/services/connectca/server_test.go @@ -0,0 +1,52 @@ +package connectca + +import ( + "context" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/proto-public/pbconnectca" +) + +func testStateStore(t *testing.T) *state.Store { + t.Helper() + + gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) + require.NoError(t, err) + + return state.NewStateStoreWithEventPublisher(gc) +} + +func testClient(t *testing.T, server *Server) pbconnectca.ConnectCAServiceClient { + t.Helper() + + addr := runTestServer(t, server) + + conn, err := grpc.DialContext(context.Background(), addr.String(), grpc.WithInsecure()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, conn.Close()) + }) + + return pbconnectca.NewConnectCAServiceClient(conn) +} + +func runTestServer(t *testing.T, server *Server) net.Addr { + t.Helper() + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + server.Register(grpcServer) + + go grpcServer.Serve(lis) + t.Cleanup(grpcServer.Stop) + + return lis.Addr() +} diff --git a/agent/grpc/public/services/connectca/watch_roots.go b/agent/grpc/public/services/connectca/watch_roots.go new file mode 100644 index 000000000..eeaf2d8c8 --- /dev/null +++ b/agent/grpc/public/services/connectca/watch_roots.go @@ -0,0 +1,202 @@ +package connectca + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbconnectca" +) + +// WatchRoots provides a stream on which you can receive the list of active +// Connect CA roots. Current roots are sent immediately at the start of the +// stream, and new lists will be sent whenever the roots are rotated. +func (s *Server) WatchRoots(_ *emptypb.Empty, serverStream pbconnectca.ConnectCAService_WatchRootsServer) error { + logger := s.Logger.Named("watch-roots").With("stream_id", streamID()) + + logger.Trace("starting stream") + defer logger.Trace("stream closed") + + token := public.TokenFromContext(serverStream.Context()) + + // Serve the roots from an EventPublisher subscription. If the subscription is + // closed due to an ACL change, we'll attempt to re-authorize and resume it to + // prevent unnecessarily terminating the stream. 
+ var idx uint64 + for { + var err error + idx, err = s.serveRoots(token, idx, serverStream, logger) + if errors.Is(err, stream.ErrSubForceClosed) { + logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt to re-auth and resume") + } else { + return err + } + } +} + +func (s *Server) serveRoots( + token string, + idx uint64, + serverStream pbconnectca.ConnectCAService_WatchRootsServer, + logger hclog.Logger, +) (uint64, error) { + if err := s.authorize(token); err != nil { + return 0, err + } + + store := s.GetStore() + + // Read the TrustDomain up front - we do not allow users to change the ClusterID + // so reading it once at the beginning of the stream is sufficient. + trustDomain, err := getTrustDomain(store, logger) + if err != nil { + return 0, err + } + + // Start the subscription. + sub, err := store.EventPublisher().Subscribe(&stream.SubscribeRequest{ + Topic: state.EventTopicCARoots, + Subject: stream.SubjectNone, + Token: token, + Index: idx, + }) + if err != nil { + logger.Error("failed to subscribe to CA Roots events", "error", err) + return 0, status.Error(codes.Internal, "failed to subscribe to CA Roots events") + } + defer sub.Unsubscribe() + + for { + event, err := sub.Next(serverStream.Context()) + switch { + case errors.Is(err, stream.ErrSubForceClosed): + // If the subscription was closed because the state store was abandoned (e.g. + // following a snapshot restore) reset idx to ensure we don't skip over the + // new store's events. + select { + case <-store.AbandonCh(): + idx = 0 + default: + } + return idx, err + case errors.Is(err, context.Canceled): + return 0, nil + case err != nil: + logger.Error("failed to read next event", "error", err) + return idx, status.Error(codes.Internal, err.Error()) + } + + // Note: this check isn't strictly necessary because the event publishing + // machinery will ensure the index increases monotonically, but it can be + // tricky to faithfully reproduce this in tests (e.g. the EventPublisher + // garbage collects topic buffers and snapshots aggressively when streams + // disconnect) so this avoids a bunch of confusing setup code. + if event.Index <= idx { + continue + } + + idx = event.Index + + // We do not send framing events (e.g. EndOfSnapshot, NewSnapshotToFollow) + // because we send a full list of roots on every event, rather than expecting + // clients to maintain a state-machine in the way they do for service health. 
+ if event.IsFramingEvent() { + continue + } + + rsp, err := eventToResponse(event, trustDomain) + if err != nil { + logger.Error("failed to convert event to response", "error", err) + return idx, status.Error(codes.Internal, err.Error()) + } + if err := serverStream.Send(rsp); err != nil { + logger.Error("failed to send response", "error", err) + return idx, err + } + } +} + +func eventToResponse(event stream.Event, trustDomain string) (*pbconnectca.WatchRootsResponse, error) { + payload, ok := event.Payload.(state.EventPayloadCARoots) + if !ok { + return nil, fmt.Errorf("unexpected event payload type: %T", payload) + } + + var active string + roots := make([]*pbconnectca.CARoot, 0) + + for _, root := range payload.CARoots { + if root.Active { + active = root.ID + } + + roots = append(roots, &pbconnectca.CARoot{ + Id: root.ID, + Name: root.Name, + SerialNumber: root.SerialNumber, + SigningKeyId: root.SigningKeyID, + RootCert: root.RootCert, + IntermediateCerts: root.IntermediateCerts, + Active: root.Active, + RotatedOutAt: timestamppb.New(root.RotatedOutAt), + }) + } + + return &pbconnectca.WatchRootsResponse{ + TrustDomain: trustDomain, + ActiveRootId: active, + Roots: roots, + }, nil +} + +func (s *Server) authorize(token string) error { + // Require the given ACL token to have `service:write` on any service (in any + // partition and namespace). + var authzContext acl.AuthorizerContext + entMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext) + if err != nil { + return status.Error(codes.Unauthenticated, err.Error()) + } + if err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzContext); err != nil { + return status.Error(codes.PermissionDenied, err.Error()) + } + return nil +} + +// We tag logs with a unique identifier to ease debugging. In the future this +// should probably be an Open Telemetry trace ID. 
+func streamID() string { + id, err := uuid.GenerateUUID() + if err != nil { + return "" + } + return id +} + +func getTrustDomain(store StateStore, logger hclog.Logger) (string, error) { + _, cfg, err := store.CAConfig(nil) + switch { + case err != nil: + logger.Error("failed to read Connect CA Config", "error", err) + return "", status.Error(codes.Internal, "failed to read Connect CA Config") + case cfg == nil: + logger.Warn("cannot begin stream because Connect CA is not yet initialized") + return "", status.Error(codes.FailedPrecondition, "Connect CA is not yet initialized") + } + return connect.SpiffeIDSigningForCluster(cfg.ClusterID).Host(), nil +} diff --git a/agent/grpc/public/services/connectca/watch_roots_test.go b/agent/grpc/public/services/connectca/watch_roots_test.go new file mode 100644 index 000000000..efd022d90 --- /dev/null +++ b/agent/grpc/public/services/connectca/watch_roots_test.go @@ -0,0 +1,280 @@ +package connectca + +import ( + "context" + "errors" + "io" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbconnectca" +) + +const testACLToken = "acl-token" + +func TestWatchRoots_Success(t *testing.T) { + store := testStateStore(t) + + // Set the initial roots and CA configuration. + rootA := connect.TestCA(t, nil) + _, err := store.CARootSetCAS(1, 0, structs.CARoots{rootA}) + require.NoError(t, err) + + err = store.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-id"}) + require.NoError(t, err) + + // Mock the ACL Resolver to return an authorizer with `service:write`. + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(testAuthorizer(t), nil) + + ctx := public.ContextWithToken(context.Background(), testACLToken) + + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + + // Begin the stream. + client := testClient(t, server) + stream, err := client.WatchRoots(ctx, &emptypb.Empty{}) + require.NoError(t, err) + rspCh := handleRootsStream(t, stream) + + // Expect an initial message containing current roots (provided by the snapshot). + roots := mustGetRoots(t, rspCh) + require.Equal(t, "cluster-id.consul", roots.TrustDomain) + require.Equal(t, rootA.ID, roots.ActiveRootId) + require.Len(t, roots.Roots, 1) + require.Equal(t, rootA.ID, roots.Roots[0].Id) + + // Rotate the roots. + rootB := connect.TestCA(t, nil) + _, err = store.CARootSetCAS(2, 1, structs.CARoots{rootB}) + require.NoError(t, err) + + // Expect another event containing the new roots. + roots = mustGetRoots(t, rspCh) + require.Equal(t, "cluster-id.consul", roots.TrustDomain) + require.Equal(t, rootB.ID, roots.ActiveRootId) + require.Len(t, roots.Roots, 1) + require.Equal(t, rootB.ID, roots.Roots[0].Id) +} + +func TestWatchRoots_InvalidACLToken(t *testing.T) { + store := testStateStore(t) + + // Set the initial CA configuration. 
+ err := store.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-id"}) + require.NoError(t, err) + + // Mock the ACL resolver to return ErrNotFound. + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(nil, acl.ErrNotFound) + + ctx := public.ContextWithToken(context.Background(), testACLToken) + + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + + // Start the stream. + client := testClient(t, server) + stream, err := client.WatchRoots(ctx, &emptypb.Empty{}) + require.NoError(t, err) + rspCh := handleRootsStream(t, stream) + + // Expect to get an Unauthenticated error immediately. + err = mustGetError(t, rspCh) + require.Equal(t, codes.Unauthenticated.String(), status.Code(err).String()) +} + +func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { + store := testStateStore(t) + + // Set the initial roots and CA configuration. + rootA := connect.TestCA(t, nil) + _, err := store.CARootSetCAS(1, 0, structs.CARoots{rootA}) + require.NoError(t, err) + + err = store.CASetConfig(2, &structs.CAConfiguration{ClusterID: "cluster-id"}) + require.NoError(t, err) + + // Mock the ACL Resolver to return an authorizer with `service:write` the + // first two times it is called (initial connect and first re-auth). + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(testAuthorizer(t), nil).Twice() + + ctx := public.ContextWithToken(context.Background(), testACLToken) + + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + + // Start the stream. + client := testClient(t, server) + stream, err := client.WatchRoots(ctx, &emptypb.Empty{}) + require.NoError(t, err) + rspCh := handleRootsStream(t, stream) + + // Consume the initial response. + mustGetRoots(t, rspCh) + + // Update the ACL token to cause the subscription to be force-closed. + accessorID, err := uuid.GenerateUUID() + require.NoError(t, err) + err = store.ACLTokenSet(1, &structs.ACLToken{ + AccessorID: accessorID, + SecretID: testACLToken, + }) + require.NoError(t, err) + + // Update the roots. + rootB := connect.TestCA(t, nil) + _, err = store.CARootSetCAS(3, 1, structs.CARoots{rootB}) + require.NoError(t, err) + + // Expect the stream to remain open and to receive the new roots. + mustGetRoots(t, rspCh) + + // Simulate removing the `service:write` permission. + aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(acl.DenyAll(), nil) + + // Update the ACL token to cause the subscription to be force-closed. + err = store.ACLTokenSet(1, &structs.ACLToken{ + AccessorID: accessorID, + SecretID: testACLToken, + }) + require.NoError(t, err) + + // Expect the stream to be terminated. + err = mustGetError(t, rspCh) + require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) +} + +func TestWatchRoots_StateStoreAbandoned(t *testing.T) { + storeA := testStateStore(t) + + // Set the initial roots and CA configuration. + rootA := connect.TestCA(t, nil) + _, err := storeA.CARootSetCAS(1, 0, structs.CARoots{rootA}) + require.NoError(t, err) + + err = storeA.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-a"}) + require.NoError(t, err) + + // Mock the ACL Resolver to return an authorizer with `service:write`. 
+ aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(testAuthorizer(t), nil) + + ctx := public.ContextWithToken(context.Background(), testACLToken) + + server := NewServer(Config{ + GetStore: func() StateStore { return storeA }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + + // Begin the stream. + client := testClient(t, server) + stream, err := client.WatchRoots(ctx, &emptypb.Empty{}) + require.NoError(t, err) + rspCh := handleRootsStream(t, stream) + + // Consume the initial roots. + mustGetRoots(t, rspCh) + + // Simulate a snapshot restore. + storeB := testStateStore(t) + + rootB := connect.TestCA(t, nil) + _, err = storeB.CARootSetCAS(1, 0, structs.CARoots{rootB}) + require.NoError(t, err) + + err = storeB.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-b"}) + require.NoError(t, err) + + server.GetStore = func() StateStore { return storeB } + + storeA.Abandon() + + // Expect to get the new store's roots. + newRoots := mustGetRoots(t, rspCh) + require.Equal(t, "cluster-b.consul", newRoots.TrustDomain) + require.Len(t, newRoots.Roots, 1) + require.Equal(t, rootB.ID, newRoots.ActiveRootId) +} + +func mustGetRoots(t *testing.T, ch <-chan rootsOrError) *pbconnectca.WatchRootsResponse { + t.Helper() + + select { + case rsp := <-ch: + require.NoError(t, rsp.err) + return rsp.rsp + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for WatchRootsResponse") + return nil + } +} + +func mustGetError(t *testing.T, ch <-chan rootsOrError) error { + t.Helper() + + select { + case rsp := <-ch: + require.Error(t, rsp.err) + return rsp.err + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for WatchRootsResponse") + return nil + } +} + +func handleRootsStream(t *testing.T, stream pbconnectca.ConnectCAService_WatchRootsClient) <-chan rootsOrError { + t.Helper() + + rspCh := make(chan rootsOrError) + go func() { + for { + rsp, err := stream.Recv() + if errors.Is(err, io.EOF) || + errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) { + return + } + rspCh <- rootsOrError{ + rsp: rsp, + err: err, + } + } + }() + return rspCh +} + +type rootsOrError struct { + rsp *pbconnectca.WatchRootsResponse + err error +} diff --git a/agent/grpc/public/token.go b/agent/grpc/public/token.go new file mode 100644 index 000000000..237317ee4 --- /dev/null +++ b/agent/grpc/public/token.go @@ -0,0 +1,28 @@ +package public + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +const metadataKeyToken = "x-consul-token" + +// TokenFromContext returns the ACL token in the gRPC metadata attached to the +// given context. +func TokenFromContext(ctx context.Context) string { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "" + } + toks, ok := md[metadataKeyToken] + if ok && len(toks) > 0 { + return toks[0] + } + return "" +} + +// ContextWithToken returns a context with the given ACL token attached. 
+func ContextWithToken(ctx context.Context, token string) context.Context { + return metadata.AppendToOutgoingContext(ctx, metadataKeyToken, token) +} diff --git a/agent/submatview/store_integration_test.go b/agent/submatview/store_integration_test.go index b6e629543..69dab7cfc 100644 --- a/agent/submatview/store_integration_test.go +++ b/agent/submatview/store_integration_test.go @@ -37,9 +37,9 @@ func TestStore_IntegrationWithBackend(t *testing.T) { var maxIndex uint64 = 200 count := &counter{latest: 3} producers := map[string]*eventProducer{ - "srv1": newEventProducer(pbsubscribe.Topic_ServiceHealth, "srv1", count, maxIndex), - "srv2": newEventProducer(pbsubscribe.Topic_ServiceHealth, "srv2", count, maxIndex), - "srv3": newEventProducer(pbsubscribe.Topic_ServiceHealth, "srv3", count, maxIndex), + state.EventSubjectService{Key: "srv1"}.String(): newEventProducer(pbsubscribe.Topic_ServiceHealth, "srv1", count, maxIndex), + state.EventSubjectService{Key: "srv2"}.String(): newEventProducer(pbsubscribe.Topic_ServiceHealth, "srv2", count, maxIndex), + state.EventSubjectService{Key: "srv3"}.String(): newEventProducer(pbsubscribe.Topic_ServiceHealth, "srv3", count, maxIndex), } sh := snapshotHandler{producers: producers} @@ -88,7 +88,7 @@ func TestStore_IntegrationWithBackend(t *testing.T) { t.Run(fmt.Sprintf("consumer %d", i), func(t *testing.T) { require.True(t, len(consumer.states) > 2, "expected more than %d events", len(consumer.states)) - expected := producers[consumer.srvName].nodesByIndex + expected := producers[state.EventSubjectService{Key: consumer.srvName}.String()].nodesByIndex for idx, nodes := range consumer.states { assertDeepEqual(t, idx, expected[idx], nodes) } @@ -348,7 +348,7 @@ type snapshotHandler struct { } func (s *snapshotHandler) Snapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { - producer := s.producers[req.Key] + producer := s.producers[req.Subject.String()] producer.nodesLock.Lock() defer producer.nodesLock.Unlock() diff --git a/agent/xds/server.go b/agent/xds/server.go index d385ac863..88419547a 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -13,10 +13,10 @@ import ( "github.com/hashicorp/go-hclog" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/grpc/public" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/xds/xdscommon" @@ -189,18 +189,6 @@ func (s *Server) StreamAggregatedResources(stream ADSStream) error { return errors.New("not implemented") } -func tokenFromContext(ctx context.Context) string { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "" - } - toks, ok := md["x-consul-token"] - if ok && len(toks) > 0 { - return toks[0] - } - return "" -} - // Register the XDS server handlers to the given gRPC server. 
func (s *Server) Register(srv *grpc.Server) { envoy_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv, s) @@ -221,7 +209,7 @@ func (s *Server) authorize(ctx context.Context, cfgSnap *proxycfg.ConfigSnapshot return status.Errorf(codes.Unauthenticated, "unauthenticated: no config snapshot") } - authz, err := s.ResolveToken(tokenFromContext(ctx)) + authz, err := s.ResolveToken(public.TokenFromContext(ctx)) if acl.IsErrNotFound(err) { return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err) } else if acl.IsErrPermissionDenied(err) { diff --git a/proto-public/pbconnectca/ca.pb.binary.go b/proto-public/pbconnectca/ca.pb.binary.go new file mode 100644 index 000000000..e373db9b5 --- /dev/null +++ b/proto-public/pbconnectca/ca.pb.binary.go @@ -0,0 +1,28 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: proto-public/pbconnectca/ca.proto + +package pbconnectca + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *WatchRootsResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *WatchRootsResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *CARoot) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *CARoot) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto-public/pbconnectca/ca.pb.go b/proto-public/pbconnectca/ca.pb.go new file mode 100644 index 000000000..bb966a4de --- /dev/null +++ b/proto-public/pbconnectca/ca.pb.go @@ -0,0 +1,473 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto-public/pbconnectca/ca.proto + +package pbconnectca + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type WatchRootsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // active_root_id is the ID of a root in Roots that is the active CA root. + // Other roots are still valid if they're in the Roots list but are in the + // process of being rotated out. + ActiveRootId string `protobuf:"bytes,1,opt,name=active_root_id,json=activeRootId,proto3" json:"active_root_id,omitempty"` + // trust_domain is the identification root for this Consul cluster. All + // certificates signed by the cluster's CA must have their identifying URI + // in this domain. 
+ // + // This does not include the protocol (currently spiffe://) since we may + // implement other protocols in future with equivalent semantics. It should + // be compared against the "authority" section of a URI (i.e. host:port). + TrustDomain string `protobuf:"bytes,2,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // roots is a list of root CA certs to trust. + Roots []*CARoot `protobuf:"bytes,3,rep,name=roots,proto3" json:"roots,omitempty"` +} + +func (x *WatchRootsResponse) Reset() { + *x = WatchRootsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchRootsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchRootsResponse) ProtoMessage() {} + +func (x *WatchRootsResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchRootsResponse.ProtoReflect.Descriptor instead. +func (*WatchRootsResponse) Descriptor() ([]byte, []int) { + return file_proto_public_pbconnectca_ca_proto_rawDescGZIP(), []int{0} +} + +func (x *WatchRootsResponse) GetActiveRootId() string { + if x != nil { + return x.ActiveRootId + } + return "" +} + +func (x *WatchRootsResponse) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *WatchRootsResponse) GetRoots() []*CARoot { + if x != nil { + return x.Roots + } + return nil +} + +type CARoot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // id is a globally unique ID (UUID) representing this CA root. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // name is a human-friendly name for this CA root. This value is opaque to + // Consul and is not used for anything internally. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // serial_number is the x509 serial number of the certificate. + SerialNumber uint64 `protobuf:"varint,3,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + // signing_key_id is the connect.HexString encoded id of the public key that + // corresponds to the private key used to sign leaf certificates in the + // local datacenter. + // + // The value comes from x509.Certificate.SubjectKeyId of the local leaf + // signing cert. + // + // See https://www.rfc-editor.org/rfc/rfc3280#section-4.2.1.1 for more detail. + SigningKeyId string `protobuf:"bytes,4,opt,name=signing_key_id,json=signingKeyId,proto3" json:"signing_key_id,omitempty"` + // root_cert is the PEM-encoded public certificate. + RootCert string `protobuf:"bytes,5,opt,name=root_cert,json=rootCert,proto3" json:"root_cert,omitempty"` + // intermediate_certs is a list of PEM-encoded intermediate certs to + // attach to any leaf certs signed by this CA. + IntermediateCerts []string `protobuf:"bytes,6,rep,name=intermediate_certs,json=intermediateCerts,proto3" json:"intermediate_certs,omitempty"` + // active is true if this is the current active CA. This must only + // be true for exactly one CA. 
+ Active bool `protobuf:"varint,7,opt,name=active,proto3" json:"active,omitempty"` + // rotated_out_at is the time at which this CA was removed from the state. + // This will only be set on roots that have been rotated out from being the + // active root. + RotatedOutAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=rotated_out_at,json=rotatedOutAt,proto3" json:"rotated_out_at,omitempty"` +} + +func (x *CARoot) Reset() { + *x = CARoot{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CARoot) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CARoot) ProtoMessage() {} + +func (x *CARoot) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CARoot.ProtoReflect.Descriptor instead. +func (*CARoot) Descriptor() ([]byte, []int) { + return file_proto_public_pbconnectca_ca_proto_rawDescGZIP(), []int{1} +} + +func (x *CARoot) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CARoot) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CARoot) GetSerialNumber() uint64 { + if x != nil { + return x.SerialNumber + } + return 0 +} + +func (x *CARoot) GetSigningKeyId() string { + if x != nil { + return x.SigningKeyId + } + return "" +} + +func (x *CARoot) GetRootCert() string { + if x != nil { + return x.RootCert + } + return "" +} + +func (x *CARoot) GetIntermediateCerts() []string { + if x != nil { + return x.IntermediateCerts + } + return nil +} + +func (x *CARoot) GetActive() bool { + if x != nil { + return x.Active + } + return false +} + +func (x *CARoot) GetRotatedOutAt() *timestamppb.Timestamp { + if x != nil { + return x.RotatedOutAt + } + return nil +} + +var File_proto_public_pbconnectca_ca_proto protoreflect.FileDescriptor + +var file_proto_public_pbconnectca_ca_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, + 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2f, 0x63, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x86, 0x01, 0x0a, + 0x12, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x05, + 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x05, + 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x22, 0x9d, 0x02, 0x0a, 0x06, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x69, 0x67, + 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x12, 0x2d, 0x0a, 0x12, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6f, + 0x75, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, + 0x4f, 0x75, 0x74, 0x41, 0x74, 0x32, 0x5b, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x43, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, + 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_proto_public_pbconnectca_ca_proto_rawDescOnce sync.Once + file_proto_public_pbconnectca_ca_proto_rawDescData = file_proto_public_pbconnectca_ca_proto_rawDesc +) + +func file_proto_public_pbconnectca_ca_proto_rawDescGZIP() []byte { + file_proto_public_pbconnectca_ca_proto_rawDescOnce.Do(func() { + file_proto_public_pbconnectca_ca_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_public_pbconnectca_ca_proto_rawDescData) + }) + return file_proto_public_pbconnectca_ca_proto_rawDescData +} + +var file_proto_public_pbconnectca_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_public_pbconnectca_ca_proto_goTypes = 
[]interface{}{ + (*WatchRootsResponse)(nil), // 0: connectca.WatchRootsResponse + (*CARoot)(nil), // 1: connectca.CARoot + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty +} +var file_proto_public_pbconnectca_ca_proto_depIdxs = []int32{ + 1, // 0: connectca.WatchRootsResponse.roots:type_name -> connectca.CARoot + 2, // 1: connectca.CARoot.rotated_out_at:type_name -> google.protobuf.Timestamp + 3, // 2: connectca.ConnectCAService.WatchRoots:input_type -> google.protobuf.Empty + 0, // 3: connectca.ConnectCAService.WatchRoots:output_type -> connectca.WatchRootsResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_proto_public_pbconnectca_ca_proto_init() } +func file_proto_public_pbconnectca_ca_proto_init() { + if File_proto_public_pbconnectca_ca_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_public_pbconnectca_ca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchRootsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_public_pbconnectca_ca_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CARoot); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_public_pbconnectca_ca_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_public_pbconnectca_ca_proto_goTypes, + DependencyIndexes: file_proto_public_pbconnectca_ca_proto_depIdxs, + MessageInfos: file_proto_public_pbconnectca_ca_proto_msgTypes, + }.Build() + File_proto_public_pbconnectca_ca_proto = out.File + file_proto_public_pbconnectca_ca_proto_rawDesc = nil + file_proto_public_pbconnectca_ca_proto_goTypes = nil + file_proto_public_pbconnectca_ca_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ConnectCAServiceClient is the client API for ConnectCAService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ConnectCAServiceClient interface { + // WatchRoots provides a stream on which you can receive the list of active + // Connect CA roots. Current roots are sent immediately at the start of the + // stream, and new lists will be sent whenever the roots are rotated. 
+ WatchRoots(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (ConnectCAService_WatchRootsClient, error) +} + +type connectCAServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewConnectCAServiceClient(cc grpc.ClientConnInterface) ConnectCAServiceClient { + return &connectCAServiceClient{cc} +} + +func (c *connectCAServiceClient) WatchRoots(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (ConnectCAService_WatchRootsClient, error) { + stream, err := c.cc.NewStream(ctx, &_ConnectCAService_serviceDesc.Streams[0], "/connectca.ConnectCAService/WatchRoots", opts...) + if err != nil { + return nil, err + } + x := &connectCAServiceWatchRootsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ConnectCAService_WatchRootsClient interface { + Recv() (*WatchRootsResponse, error) + grpc.ClientStream +} + +type connectCAServiceWatchRootsClient struct { + grpc.ClientStream +} + +func (x *connectCAServiceWatchRootsClient) Recv() (*WatchRootsResponse, error) { + m := new(WatchRootsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ConnectCAServiceServer is the server API for ConnectCAService service. +type ConnectCAServiceServer interface { + // WatchRoots provides a stream on which you can receive the list of active + // Connect CA roots. Current roots are sent immediately at the start of the + // stream, and new lists will be sent whenever the roots are rotated. + WatchRoots(*emptypb.Empty, ConnectCAService_WatchRootsServer) error +} + +// UnimplementedConnectCAServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedConnectCAServiceServer struct { +} + +func (*UnimplementedConnectCAServiceServer) WatchRoots(*emptypb.Empty, ConnectCAService_WatchRootsServer) error { + return status.Errorf(codes.Unimplemented, "method WatchRoots not implemented") +} + +func RegisterConnectCAServiceServer(s *grpc.Server, srv ConnectCAServiceServer) { + s.RegisterService(&_ConnectCAService_serviceDesc, srv) +} + +func _ConnectCAService_WatchRoots_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ConnectCAServiceServer).WatchRoots(m, &connectCAServiceWatchRootsServer{stream}) +} + +type ConnectCAService_WatchRootsServer interface { + Send(*WatchRootsResponse) error + grpc.ServerStream +} + +type connectCAServiceWatchRootsServer struct { + grpc.ServerStream +} + +func (x *connectCAServiceWatchRootsServer) Send(m *WatchRootsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _ConnectCAService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "connectca.ConnectCAService", + HandlerType: (*ConnectCAServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "WatchRoots", + Handler: _ConnectCAService_WatchRoots_Handler, + ServerStreams: true, + }, + }, + Metadata: "proto-public/pbconnectca/ca.proto", +} diff --git a/proto-public/pbconnectca/ca.proto b/proto-public/pbconnectca/ca.proto new file mode 100644 index 000000000..fef15fbc1 --- /dev/null +++ b/proto-public/pbconnectca/ca.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package connectca; + +option go_package = "github.com/hashicorp/consul/proto-public/pbconnectca"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +service ConnectCAService { + // WatchRoots provides a stream on which you can receive the list of active + // Connect CA roots. Current roots are sent immediately at the start of the + // stream, and new lists will be sent whenever the roots are rotated. + rpc WatchRoots(google.protobuf.Empty) returns (stream WatchRootsResponse) {}; +} + +message WatchRootsResponse { + // active_root_id is the ID of a root in Roots that is the active CA root. + // Other roots are still valid if they're in the Roots list but are in the + // process of being rotated out. + string active_root_id = 1; + + // trust_domain is the identification root for this Consul cluster. All + // certificates signed by the cluster's CA must have their identifying URI + // in this domain. + // + // This does not include the protocol (currently spiffe://) since we may + // implement other protocols in future with equivalent semantics. It should + // be compared against the "authority" section of a URI (i.e. host:port). + string trust_domain = 2; + + // roots is a list of root CA certs to trust. + repeated CARoot roots = 3; +} + +message CARoot { + // id is a globally unique ID (UUID) representing this CA root. + string id = 1; + + // name is a human-friendly name for this CA root. This value is opaque to + // Consul and is not used for anything internally. + string name = 2; + + // serial_number is the x509 serial number of the certificate. + uint64 serial_number = 3; + + // signing_key_id is the connect.HexString encoded id of the public key that + // corresponds to the private key used to sign leaf certificates in the + // local datacenter. + // + // The value comes from x509.Certificate.SubjectKeyId of the local leaf + // signing cert. 
+ // + // See https://www.rfc-editor.org/rfc/rfc3280#section-4.2.1.1 for more detail. + string signing_key_id = 4; + + // root_cert is the PEM-encoded public certificate. + string root_cert = 5; + + // intermediate_certs is a list of PEM-encoded intermediate certs to + // attach to any leaf certs signed by this CA. + repeated string intermediate_certs = 6; + + // active is true if this is the current active CA. This must only + // be true for exactly one CA. + bool active = 7; + + // rotated_out_at is the time at which this CA was removed from the state. + // This will only be set on roots that have been rotated out from being the + // active root. + google.protobuf.Timestamp rotated_out_at = 8; +} From f053279c4e596b40c91f989d4ca45cc31599d70c Mon Sep 17 00:00:00 2001 From: Riddhi Shah Date: Sun, 27 Mar 2022 16:29:30 +0530 Subject: [PATCH 075/785] [OSS] Supported dataplane features gRPC endpoint Adds a new gRPC service and endpoint to return the list of supported consul dataplane features. The Consul Dataplane will use this API to customize its interaction with that particular server. --- agent/consul/server.go | 7 +- .../services/connectca/watch_roots_test.go | 7 +- .../dataplane/get_supported_features.go | 45 ++ .../dataplane/get_supported_features_test.go | 82 ++++ .../services/dataplane/mock_ACLResolver.go | 38 ++ .../grpc/public/services/dataplane/server.go | 33 ++ .../public/services/dataplane/server_test.go | 40 ++ .../acl_test.go => testutils/acl.go} | 9 +- .../pbdataplane/dataplane.pb.binary.go | 38 ++ proto-public/pbdataplane/dataplane.pb.go | 448 ++++++++++++++++++ proto-public/pbdataplane/dataplane.proto | 32 ++ 11 files changed, 769 insertions(+), 10 deletions(-) create mode 100644 agent/grpc/public/services/dataplane/get_supported_features.go create mode 100644 agent/grpc/public/services/dataplane/get_supported_features_test.go create mode 100644 agent/grpc/public/services/dataplane/mock_ACLResolver.go create mode 100644 agent/grpc/public/services/dataplane/server.go create mode 100644 agent/grpc/public/services/dataplane/server_test.go rename agent/grpc/public/{services/connectca/acl_test.go => testutils/acl.go} (69%) create mode 100644 proto-public/pbdataplane/dataplane.pb.binary.go create mode 100644 proto-public/pbdataplane/dataplane.pb.go create mode 100644 proto-public/pbdataplane/dataplane.proto diff --git a/agent/consul/server.go b/agent/consul/server.go index ccfa3044a..2b40a615e 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -44,6 +44,7 @@ import ( agentgrpc "github.com/hashicorp/consul/agent/grpc/private" "github.com/hashicorp/consul/agent/grpc/private/services/subscribe" "github.com/hashicorp/consul/agent/grpc/public/services/connectca" + "github.com/hashicorp/consul/agent/grpc/public/services/dataplane" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" @@ -633,12 +634,16 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve // since it can fire events when leadership is obtained. go s.monitorLeadership() - // Initialize public gRPC server. + // Initialize public gRPC server - register services on public gRPC server. 
connectca.NewServer(connectca.Config{ GetStore: func() connectca.StateStore { return s.FSM().State() }, Logger: logger.Named("grpc-api.connect-ca"), ACLResolver: plainACLResolver{s.ACLResolver}, }).Register(s.publicGRPCServer) + dataplane.NewServer(dataplane.Config{ + Logger: logger.Named("grpc-api.dataplane"), + ACLResolver: plainACLResolver{s.ACLResolver}, + }).Register(s.publicGRPCServer) // Start listening for RPC requests. go func() { diff --git a/agent/grpc/public/services/connectca/watch_roots_test.go b/agent/grpc/public/services/connectca/watch_roots_test.go index efd022d90..d650a4d13 100644 --- a/agent/grpc/public/services/connectca/watch_roots_test.go +++ b/agent/grpc/public/services/connectca/watch_roots_test.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/grpc/public/testutils" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbconnectca" ) @@ -39,7 +40,7 @@ func TestWatchRoots_Success(t *testing.T) { // Mock the ACL Resolver to return an authorizer with `service:write`. aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). - Return(testAuthorizer(t), nil) + Return(testutils.TestAuthorizer(t), nil) ctx := public.ContextWithToken(context.Background(), testACLToken) @@ -121,7 +122,7 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { // first two times it is called (initial connect and first re-auth). aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). - Return(testAuthorizer(t), nil).Twice() + Return(testutils.TestAuthorizer(t), nil).Twice() ctx := public.ContextWithToken(context.Background(), testACLToken) @@ -187,7 +188,7 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { // Mock the ACL Resolver to return an authorizer with `service:write`. aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). 
- Return(testAuthorizer(t), nil) + Return(testutils.TestAuthorizer(t), nil) ctx := public.ContextWithToken(context.Background(), testACLToken) diff --git a/agent/grpc/public/services/dataplane/get_supported_features.go b/agent/grpc/public/services/dataplane/get_supported_features.go new file mode 100644 index 000000000..672e48f66 --- /dev/null +++ b/agent/grpc/public/services/dataplane/get_supported_features.go @@ -0,0 +1,45 @@ +package dataplane + +import ( + "context" + + acl "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/grpc/public" + structs "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbdataplane" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (d *Server) SupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.SupportedDataplaneFeaturesRequest) (*pbdataplane.SupportedDataplaneFeaturesResponse, error) { + d.Logger.Trace("Received request for supported dataplane features") + + // Require the given ACL token to have `service:write` on any service + token := public.TokenFromContext(ctx) + var authzContext acl.AuthorizerContext + entMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + authz, err := d.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext) + if err != nil { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + if err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzContext); err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + supportedFeatures := []*pbdataplane.DataplaneFeatureSupport{ + { + FeatureName: pbdataplane.DataplaneFeatures_WATCH_SERVERS, + Supported: true, + }, + { + FeatureName: pbdataplane.DataplaneFeatures_EDGE_CERTIFICATE_MANAGEMENT, + Supported: true, + }, + { + FeatureName: pbdataplane.DataplaneFeatures_ENVOY_BOOTSTRAP_CONFIGURATION, + Supported: true, + }, + } + + return &pbdataplane.SupportedDataplaneFeaturesResponse{SupportedDataplaneFeatures: supportedFeatures}, nil +} diff --git a/agent/grpc/public/services/dataplane/get_supported_features_test.go b/agent/grpc/public/services/dataplane/get_supported_features_test.go new file mode 100644 index 000000000..2b3c5e76d --- /dev/null +++ b/agent/grpc/public/services/dataplane/get_supported_features_test.go @@ -0,0 +1,82 @@ +package dataplane + +import ( + "context" + "testing" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/grpc/public/testutils" + "github.com/hashicorp/consul/proto-public/pbdataplane" + "github.com/hashicorp/go-hclog" + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const testACLToken = "acl-token" + +func TestSupportedDataplaneFeatures_Success(t *testing.T) { + // Mock the ACL Resolver to return an authorizer with `service:write`. + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). 
+		Return(testutils.TestAuthorizer(t), nil)
+	ctx := public.ContextWithToken(context.Background(), testACLToken)
+	server := NewServer(Config{
+		Logger:      hclog.NewNullLogger(),
+		ACLResolver: aclResolver,
+	})
+
+	client := testClient(t, server)
+	resp, err := client.SupportedDataplaneFeatures(ctx, &pbdataplane.SupportedDataplaneFeaturesRequest{})
+	require.NoError(t, err)
+	require.Equal(t, 3, len(resp.SupportedDataplaneFeatures))
+
+	for _, feature := range resp.SupportedDataplaneFeatures {
+		switch feature.GetFeatureName() {
+		case pbdataplane.DataplaneFeatures_EDGE_CERTIFICATE_MANAGEMENT:
+			require.True(t, feature.GetSupported())
+		case pbdataplane.DataplaneFeatures_WATCH_SERVERS:
+			require.True(t, feature.GetSupported())
+		case pbdataplane.DataplaneFeatures_ENVOY_BOOTSTRAP_CONFIGURATION:
+			require.True(t, feature.GetSupported())
+		default:
+			require.False(t, feature.GetSupported())
+		}
+	}
+}
+
+func TestSupportedDataplaneFeatures_Unauthenticated(t *testing.T) {
+	// Mock the ACL resolver to return ErrNotFound.
+	aclResolver := &MockACLResolver{}
+	aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
+		Return(nil, acl.ErrNotFound)
+	ctx := public.ContextWithToken(context.Background(), testACLToken)
+	server := NewServer(Config{
+		Logger:      hclog.NewNullLogger(),
+		ACLResolver: aclResolver,
+	})
+	client := testClient(t, server)
+	resp, err := client.SupportedDataplaneFeatures(ctx, &pbdataplane.SupportedDataplaneFeaturesRequest{})
+	require.Error(t, err)
+	require.Equal(t, codes.Unauthenticated.String(), status.Code(err).String())
+	require.Nil(t, resp)
+}
+
+func TestSupportedDataplaneFeatures_PermissionDenied(t *testing.T) {
+	// Mock the ACL resolver to return a deny-all authorizer.
+	aclResolver := &MockACLResolver{}
+	aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
+		Return(acl.DenyAll(), nil)
+	ctx := public.ContextWithToken(context.Background(), testACLToken)
+	server := NewServer(Config{
+		Logger:      hclog.NewNullLogger(),
+		ACLResolver: aclResolver,
+	})
+	client := testClient(t, server)
+	resp, err := client.SupportedDataplaneFeatures(ctx, &pbdataplane.SupportedDataplaneFeaturesRequest{})
+	require.Error(t, err)
+	require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String())
+	require.Nil(t, resp)
+}
diff --git a/agent/grpc/public/services/dataplane/mock_ACLResolver.go b/agent/grpc/public/services/dataplane/mock_ACLResolver.go
new file mode 100644
index 000000000..364e17e66
--- /dev/null
+++ b/agent/grpc/public/services/dataplane/mock_ACLResolver.go
@@ -0,0 +1,38 @@
+// Code generated by mockery v1.0.0. DO NOT EDIT.
+ +package dataplane + +import ( + acl "github.com/hashicorp/consul/acl" + mock "github.com/stretchr/testify/mock" + + structs "github.com/hashicorp/consul/agent/structs" +) + +// MockACLResolver is an autogenerated mock type for the ACLResolver type +type MockACLResolver struct { + mock.Mock +} + +// ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *structs.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 acl.Authorizer + if rf, ok := ret.Get(0).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(acl.Authorizer) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/agent/grpc/public/services/dataplane/server.go b/agent/grpc/public/services/dataplane/server.go new file mode 100644 index 000000000..90a050e22 --- /dev/null +++ b/agent/grpc/public/services/dataplane/server.go @@ -0,0 +1,33 @@ +package dataplane + +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbdataplane" + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" +) + +type Server struct { + Config +} + +type Config struct { + Logger hclog.Logger + ACLResolver ACLResolver +} + +//go:generate mockery -name ACLResolver -inpkg +type ACLResolver interface { + ResolveTokenAndDefaultMeta(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) +} + +func NewServer(cfg Config) *Server { + return &Server{cfg} +} + +var _ pbdataplane.DataplaneServiceServer = (*Server)(nil) + +func (s *Server) Register(grpcServer *grpc.Server) { + pbdataplane.RegisterDataplaneServiceServer(grpcServer, s) +} diff --git a/agent/grpc/public/services/dataplane/server_test.go b/agent/grpc/public/services/dataplane/server_test.go new file mode 100644 index 000000000..5a9186c5a --- /dev/null +++ b/agent/grpc/public/services/dataplane/server_test.go @@ -0,0 +1,40 @@ +package dataplane + +import ( + "context" + "net" + "testing" + + "github.com/hashicorp/consul/proto-public/pbdataplane" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func testClient(t *testing.T, server *Server) pbdataplane.DataplaneServiceClient { + t.Helper() + + addr := RunTestServer(t, server) + + conn, err := grpc.DialContext(context.Background(), addr.String(), grpc.WithInsecure()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, conn.Close()) + }) + + return pbdataplane.NewDataplaneServiceClient(conn) +} + +func RunTestServer(t *testing.T, server *Server) net.Addr { + t.Helper() + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + server.Register(grpcServer) + + go grpcServer.Serve(lis) + t.Cleanup(grpcServer.Stop) + + return lis.Addr() +} diff --git a/agent/grpc/public/services/connectca/acl_test.go b/agent/grpc/public/testutils/acl.go similarity index 69% rename from agent/grpc/public/services/connectca/acl_test.go rename to agent/grpc/public/testutils/acl.go index bac0e342e..0c640d266 100644 --- a/agent/grpc/public/services/connectca/acl_test.go +++ b/agent/grpc/public/testutils/acl.go @@ -1,16 +1,13 @@ -package connectca 
+package testutils import ( "testing" - "github.com/stretchr/testify/require" - "github.com/hashicorp/consul/acl" + "github.com/stretchr/testify/require" ) -// testAuthorizer returns an ACL policy authorizer with `service:write` on an -// arbitrary service. -func testAuthorizer(t *testing.T) acl.Authorizer { +func TestAuthorizer(t *testing.T) acl.Authorizer { t.Helper() policy, err := acl.NewPolicyFromSource(` diff --git a/proto-public/pbdataplane/dataplane.pb.binary.go b/proto-public/pbdataplane/dataplane.pb.binary.go new file mode 100644 index 000000000..aae9be911 --- /dev/null +++ b/proto-public/pbdataplane/dataplane.pb.binary.go @@ -0,0 +1,38 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: proto-public/pbdataplane/dataplane.proto + +package pbdataplane + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *SupportedDataplaneFeaturesRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *SupportedDataplaneFeaturesRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *DataplaneFeatureSupport) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *DataplaneFeatureSupport) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *SupportedDataplaneFeaturesResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *SupportedDataplaneFeaturesResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto-public/pbdataplane/dataplane.pb.go b/proto-public/pbdataplane/dataplane.pb.go new file mode 100644 index 000000000..c5e48a241 --- /dev/null +++ b/proto-public/pbdataplane/dataplane.pb.go @@ -0,0 +1,448 @@ +// Package dataplane provides a service on Consul servers for the Consul Dataplane + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto-public/pbdataplane/dataplane.proto + +package pbdataplane + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type DataplaneFeatures int32 + +const ( + DataplaneFeatures_UNKNOWN DataplaneFeatures = 0 + DataplaneFeatures_WATCH_SERVERS DataplaneFeatures = 1 + DataplaneFeatures_EDGE_CERTIFICATE_MANAGEMENT DataplaneFeatures = 2 + DataplaneFeatures_ENVOY_BOOTSTRAP_CONFIGURATION DataplaneFeatures = 3 +) + +// Enum value maps for DataplaneFeatures. 
+var ( + DataplaneFeatures_name = map[int32]string{ + 0: "UNKNOWN", + 1: "WATCH_SERVERS", + 2: "EDGE_CERTIFICATE_MANAGEMENT", + 3: "ENVOY_BOOTSTRAP_CONFIGURATION", + } + DataplaneFeatures_value = map[string]int32{ + "UNKNOWN": 0, + "WATCH_SERVERS": 1, + "EDGE_CERTIFICATE_MANAGEMENT": 2, + "ENVOY_BOOTSTRAP_CONFIGURATION": 3, + } +) + +func (x DataplaneFeatures) Enum() *DataplaneFeatures { + p := new(DataplaneFeatures) + *p = x + return p +} + +func (x DataplaneFeatures) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DataplaneFeatures) Descriptor() protoreflect.EnumDescriptor { + return file_proto_public_pbdataplane_dataplane_proto_enumTypes[0].Descriptor() +} + +func (DataplaneFeatures) Type() protoreflect.EnumType { + return &file_proto_public_pbdataplane_dataplane_proto_enumTypes[0] +} + +func (x DataplaneFeatures) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DataplaneFeatures.Descriptor instead. +func (DataplaneFeatures) EnumDescriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{0} +} + +type SupportedDataplaneFeaturesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SupportedDataplaneFeaturesRequest) Reset() { + *x = SupportedDataplaneFeaturesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SupportedDataplaneFeaturesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SupportedDataplaneFeaturesRequest) ProtoMessage() {} + +func (x *SupportedDataplaneFeaturesRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SupportedDataplaneFeaturesRequest.ProtoReflect.Descriptor instead. 
+func (*SupportedDataplaneFeaturesRequest) Descriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{0} +} + +type DataplaneFeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FeatureName DataplaneFeatures `protobuf:"varint,1,opt,name=feature_name,json=featureName,proto3,enum=dataplane.DataplaneFeatures" json:"feature_name,omitempty"` + Supported bool `protobuf:"varint,2,opt,name=supported,proto3" json:"supported,omitempty"` +} + +func (x *DataplaneFeatureSupport) Reset() { + *x = DataplaneFeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataplaneFeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataplaneFeatureSupport) ProtoMessage() {} + +func (x *DataplaneFeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataplaneFeatureSupport.ProtoReflect.Descriptor instead. +func (*DataplaneFeatureSupport) Descriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{1} +} + +func (x *DataplaneFeatureSupport) GetFeatureName() DataplaneFeatures { + if x != nil { + return x.FeatureName + } + return DataplaneFeatures_UNKNOWN +} + +func (x *DataplaneFeatureSupport) GetSupported() bool { + if x != nil { + return x.Supported + } + return false +} + +type SupportedDataplaneFeaturesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SupportedDataplaneFeatures []*DataplaneFeatureSupport `protobuf:"bytes,1,rep,name=supported_dataplane_features,json=supportedDataplaneFeatures,proto3" json:"supported_dataplane_features,omitempty"` +} + +func (x *SupportedDataplaneFeaturesResponse) Reset() { + *x = SupportedDataplaneFeaturesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SupportedDataplaneFeaturesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SupportedDataplaneFeaturesResponse) ProtoMessage() {} + +func (x *SupportedDataplaneFeaturesResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SupportedDataplaneFeaturesResponse.ProtoReflect.Descriptor instead. 
+func (*SupportedDataplaneFeaturesResponse) Descriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{2} +} + +func (x *SupportedDataplaneFeaturesResponse) GetSupportedDataplaneFeatures() []*DataplaneFeatureSupport { + if x != nil { + return x.SupportedDataplaneFeatures + } + return nil +} + +var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor + +var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, + 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x64, 0x61, 0x74, 0x61, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, 0x17, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x0b, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x22, 0x8a, 0x01, 0x0a, 0x22, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x1c, 0x73, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x2a, 0x77, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x4e, 0x56, 0x4f, 0x59, + 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x32, 0x8f, 0x01, 0x0a, 0x10, 0x44, + 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x7b, 0x0a, 0x1a, 
0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_public_pbdataplane_dataplane_proto_rawDescOnce sync.Once + file_proto_public_pbdataplane_dataplane_proto_rawDescData = file_proto_public_pbdataplane_dataplane_proto_rawDesc +) + +func file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP() []byte { + file_proto_public_pbdataplane_dataplane_proto_rawDescOnce.Do(func() { + file_proto_public_pbdataplane_dataplane_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_public_pbdataplane_dataplane_proto_rawDescData) + }) + return file_proto_public_pbdataplane_dataplane_proto_rawDescData +} + +var file_proto_public_pbdataplane_dataplane_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_proto_public_pbdataplane_dataplane_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_proto_public_pbdataplane_dataplane_proto_goTypes = []interface{}{ + (DataplaneFeatures)(0), // 0: dataplane.DataplaneFeatures + (*SupportedDataplaneFeaturesRequest)(nil), // 1: dataplane.SupportedDataplaneFeaturesRequest + (*DataplaneFeatureSupport)(nil), // 2: dataplane.DataplaneFeatureSupport + (*SupportedDataplaneFeaturesResponse)(nil), // 3: dataplane.SupportedDataplaneFeaturesResponse +} +var file_proto_public_pbdataplane_dataplane_proto_depIdxs = []int32{ + 0, // 0: dataplane.DataplaneFeatureSupport.feature_name:type_name -> dataplane.DataplaneFeatures + 2, // 1: dataplane.SupportedDataplaneFeaturesResponse.supported_dataplane_features:type_name -> dataplane.DataplaneFeatureSupport + 1, // 2: dataplane.DataplaneService.SupportedDataplaneFeatures:input_type -> dataplane.SupportedDataplaneFeaturesRequest + 3, // 3: dataplane.DataplaneService.SupportedDataplaneFeatures:output_type -> dataplane.SupportedDataplaneFeaturesResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_proto_public_pbdataplane_dataplane_proto_init() } +func file_proto_public_pbdataplane_dataplane_proto_init() { + if File_proto_public_pbdataplane_dataplane_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_public_pbdataplane_dataplane_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SupportedDataplaneFeaturesRequest); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_public_pbdataplane_dataplane_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataplaneFeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_public_pbdataplane_dataplane_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SupportedDataplaneFeaturesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_public_pbdataplane_dataplane_proto_rawDesc, + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_public_pbdataplane_dataplane_proto_goTypes, + DependencyIndexes: file_proto_public_pbdataplane_dataplane_proto_depIdxs, + EnumInfos: file_proto_public_pbdataplane_dataplane_proto_enumTypes, + MessageInfos: file_proto_public_pbdataplane_dataplane_proto_msgTypes, + }.Build() + File_proto_public_pbdataplane_dataplane_proto = out.File + file_proto_public_pbdataplane_dataplane_proto_rawDesc = nil + file_proto_public_pbdataplane_dataplane_proto_goTypes = nil + file_proto_public_pbdataplane_dataplane_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// DataplaneServiceClient is the client API for DataplaneService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DataplaneServiceClient interface { + SupportedDataplaneFeatures(ctx context.Context, in *SupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*SupportedDataplaneFeaturesResponse, error) +} + +type dataplaneServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewDataplaneServiceClient(cc grpc.ClientConnInterface) DataplaneServiceClient { + return &dataplaneServiceClient{cc} +} + +func (c *dataplaneServiceClient) SupportedDataplaneFeatures(ctx context.Context, in *SupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*SupportedDataplaneFeaturesResponse, error) { + out := new(SupportedDataplaneFeaturesResponse) + err := c.cc.Invoke(ctx, "/dataplane.DataplaneService/SupportedDataplaneFeatures", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DataplaneServiceServer is the server API for DataplaneService service. +type DataplaneServiceServer interface { + SupportedDataplaneFeatures(context.Context, *SupportedDataplaneFeaturesRequest) (*SupportedDataplaneFeaturesResponse, error) +} + +// UnimplementedDataplaneServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedDataplaneServiceServer struct { +} + +func (*UnimplementedDataplaneServiceServer) SupportedDataplaneFeatures(context.Context, *SupportedDataplaneFeaturesRequest) (*SupportedDataplaneFeaturesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SupportedDataplaneFeatures not implemented") +} + +func RegisterDataplaneServiceServer(s *grpc.Server, srv DataplaneServiceServer) { + s.RegisterService(&_DataplaneService_serviceDesc, srv) +} + +func _DataplaneService_SupportedDataplaneFeatures_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SupportedDataplaneFeaturesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataplaneServiceServer).SupportedDataplaneFeatures(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dataplane.DataplaneService/SupportedDataplaneFeatures", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataplaneServiceServer).SupportedDataplaneFeatures(ctx, req.(*SupportedDataplaneFeaturesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DataplaneService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "dataplane.DataplaneService", + HandlerType: (*DataplaneServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SupportedDataplaneFeatures", + Handler: _DataplaneService_SupportedDataplaneFeatures_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto-public/pbdataplane/dataplane.proto", +} diff --git a/proto-public/pbdataplane/dataplane.proto b/proto-public/pbdataplane/dataplane.proto new file mode 100644 index 000000000..17789fc55 --- /dev/null +++ b/proto-public/pbdataplane/dataplane.proto @@ -0,0 +1,32 @@ +// Package dataplane provides a service on Consul servers for the Consul Dataplane + +syntax = "proto3"; + +package dataplane; + +option go_package = "github.com/hashicorp/consul/proto-public/pbdataplane"; + + +message SupportedDataplaneFeaturesRequest {} + +enum DataplaneFeatures { + UNKNOWN = 0; + WATCH_SERVERS = 1; + EDGE_CERTIFICATE_MANAGEMENT = 2; + ENVOY_BOOTSTRAP_CONFIGURATION = 3; +} + + +message DataplaneFeatureSupport { + DataplaneFeatures feature_name = 1; + bool supported = 2; +} + +message SupportedDataplaneFeaturesResponse { + repeated DataplaneFeatureSupport supported_dataplane_features = 1; +} + + +service DataplaneService { + rpc SupportedDataplaneFeatures(SupportedDataplaneFeaturesRequest) returns (SupportedDataplaneFeaturesResponse) {}; +} \ No newline at end of file From e7dfc7288496646e22a514937760c0e1a62542db Mon Sep 17 00:00:00 2001 From: Riddhi Shah Date: Tue, 5 Apr 2022 07:44:26 -0700 Subject: [PATCH 076/785] Add changelog --- .changelog/12695.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/12695.txt diff --git a/.changelog/12695.txt b/.changelog/12695.txt new file mode 100644 index 000000000..e9eb3aef0 --- /dev/null +++ b/.changelog/12695.txt @@ -0,0 +1,3 @@ +```release-note:feature +New gRPC service and endpoint to return the list of supported consul dataplane features +``` \ No newline at end of file From a49cbf50dd6a1cc0ab1756f68d5b7297c1c6884d Mon Sep 17 00:00:00 2001 From: David Yu Date: Tue, 5 Apr 2022 07:46:14 -0700 Subject: [PATCH 077/785] docs: rename Connect Service Mesh Kubernetes to Consul Service Mesh on Kubernetes (#12690) * docs:rename Connect Service Mesh Kubernetes to Consul Service Mesh on 
Kubernetes
---
 website/content/docs/k8s/connect/index.mdx | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/website/content/docs/k8s/connect/index.mdx b/website/content/docs/k8s/connect/index.mdx
index 928089142..7a3c472ca 100644
--- a/website/content/docs/k8s/connect/index.mdx
+++ b/website/content/docs/k8s/connect/index.mdx
@@ -1,26 +1,27 @@
 ---
 layout: docs
-page_title: Service Mesh - Kubernetes
+page_title: Consul Service Mesh on Kubernetes
 description: >-
-  Connect is a feature built into to Consul that enables automatic
+  Consul Service Mesh is a feature built into Consul that enables automatic
   service-to-service authorization and connection encryption across your Consul
-  services. Connect can be used with Kubernetes to secure pod communication with
+  services. Consul Service Mesh can be used with Kubernetes to secure pod communication with
   other services.
 ---
 
-# Connect Service Mesh on Kubernetes
+# Consul Service Mesh on Kubernetes
 
-[Connect](/docs/connect) is a feature built into to Consul that enables
+[Consul Service Mesh](/docs/connect) is a feature built into Consul that enables
 automatic service-to-service authorization and connection encryption across
-your Consul services. Connect can be used with Kubernetes to secure pod
-communication with other pods and external Kubernetes services.
+your Consul services. Consul Service Mesh can be used with Kubernetes to secure pod
+communication with other pods and external Kubernetes services. The name Consul Connect is used interchangeably with
+Consul Service Mesh and refers to the service mesh functionality within Consul.
 
 The Connect sidecar running Envoy can be automatically injected into pods in
 your cluster, making configuration for Kubernetes automatic.
 This functionality is provided by the
 [consul-k8s project](https://github.com/hashicorp/consul-k8s) and can be
 automatically installed and configured using the
-[Consul Helm chart](/docs/k8s/installation/install).
+[Consul Helm chart](/docs/k8s/installation/install#helm-chart-installation).
## Usage From 76cfe558669fdb42f930454b1da9a819f907b10d Mon Sep 17 00:00:00 2001 From: Riddhi Shah Date: Tue, 5 Apr 2022 09:08:37 -0700 Subject: [PATCH 078/785] Update .changelog/12695.txt Co-authored-by: Matt Keeler --- .changelog/12695.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/12695.txt b/.changelog/12695.txt index e9eb3aef0..9baaa3dcb 100644 --- a/.changelog/12695.txt +++ b/.changelog/12695.txt @@ -1,3 +1,3 @@ ```release-note:feature -New gRPC service and endpoint to return the list of supported consul dataplane features +grpc: New gRPC service and endpoint to return the list of supported consul dataplane features ``` \ No newline at end of file From 23200990db0dda2863cc7ac517791ae75b610ea5 Mon Sep 17 00:00:00 2001 From: Bryce Kalow Date: Tue, 5 Apr 2022 11:18:57 -0500 Subject: [PATCH 079/785] website: fix usages of img tag (#12696) --- website/.gitignore | 2 + website/Makefile | 80 ++++++++----------- website/README.md | 2 +- website/content/docs/ecs/architecture.mdx | 12 ++- .../content/docs/integrate/partnerships.mdx | 27 ++++++- website/next.config.js | 2 +- website/package.json | 4 +- website/{redirects.next.js => redirects.js} | 0 website/scripts/website-build.sh | 31 +++++++ website/scripts/website-start.sh | 25 ++++++ 10 files changed, 127 insertions(+), 58 deletions(-) rename website/{redirects.next.js => redirects.js} (100%) create mode 100755 website/scripts/website-build.sh create mode 100755 website/scripts/website-start.sh diff --git a/website/.gitignore b/website/.gitignore index 507cbf4e5..7d809dab7 100644 --- a/website/.gitignore +++ b/website/.gitignore @@ -6,3 +6,5 @@ out # As per Next.js conventions (https://nextjs.org/docs/basic-features/environment-variables#default-environment-variables) .env*.local + +website-preview diff --git a/website/Makefile b/website/Makefile index 7267737fa..485e1b884 100644 --- a/website/Makefile +++ b/website/Makefile @@ -1,54 +1,38 @@ +.DEFAULT_GOAL := website + +PWD=$$(pwd) +DOCKER_IMAGE="hashicorp/dev-portal" +DOCKER_IMAGE_LOCAL="dev-portal-local" +DOCKER_RUN_FLAGS=-it \ + --publish "3000:3000" \ + --rm \ + --tty \ + --volume "$(PWD)/content:/app/content" \ + --volume "$(PWD)/public:/app/public" \ + --volume "$(PWD)/data:/app/data" \ + --volume "$(PWD)/redirects.js:/app/redirects.js" \ + --volume "next-dir:/app/website-preview/.next" \ + --volume "$(PWD)/.env:/app/.env" \ + -e "REPO=consul" + # Default: run this if working on the website locally to run in watch mode. +.PHONY: website website: @echo "==> Downloading latest Docker image..." - @docker pull hashicorp/consul-website - @echo "==> Starting website in Docker..." - @docker run \ - --interactive \ - --rm \ - --tty \ - --workdir "/website" \ - --volume "$(shell pwd):/website" \ - --volume "/website/node_modules" \ - --publish "3000:3000" \ - hashicorp/consul-website \ - npm start + @docker pull $(DOCKER_IMAGE) + @echo "==> Starting website..." + @docker run $(DOCKER_RUN_FLAGS) $(DOCKER_IMAGE) -# This command will generate a static version of the website to the "out" folder. -build: - @echo "==> Downloading latest Docker image..." - @docker pull hashicorp/consul-website - @echo "==> Starting build in Docker..." - @docker run \ - --interactive \ - --rm \ - --tty \ - --workdir "/website" \ - --volume "$(shell pwd):/website" \ - --volume "/website/node_modules" \ - hashicorp/consul-website \ - npm run static +# Use this if you have run `website/build-local` to use the locally built image. 
+.PHONY: website/local +website/local: + @echo "==> Starting website from local image..." + @docker run $(DOCKER_RUN_FLAGS) $(DOCKER_IMAGE_LOCAL) -# If you are changing node dependencies locally, run this to generate a new -# local Docker image with the dependency changes included. -build-image: - @echo "==> Building Docker image..." - @docker build --tag hashicorp-consul-website-local . +# Run this to generate a new local Docker image. +.PHONY: website/build-local +website/build-local: + @echo "==> Building local Docker image" + @docker build https://github.com/hashicorp/dev-portal.git\#main \ + -t $(DOCKER_IMAGE_LOCAL) -# Use this if you have run `build-image` to use the locally built image -# rather than our CI-generated image to test dependency changes. -website-local: - @echo "==> Starting website in Docker..." - @docker run \ - --interactive \ - --rm \ - --tty \ - --workdir "/website" \ - --volume "$(shell pwd):/website" \ - --volume "/website/node_modules" \ - --publish "3000:3000" \ - hashicorp-consul-website-local \ - npm start - -.DEFAULT_GOAL := website -.PHONY: build build-image website website-local diff --git a/website/README.md b/website/README.md index 545bbd3f8..7428737ca 100644 --- a/website/README.md +++ b/website/README.md @@ -58,7 +58,7 @@ The website can be run locally through node.js or [Docker](https://www.docker.co Running the site locally is simple. Provided you have Docker installed, clone this repo, run `make`, and then visit `http://localhost:3000`. -The docker image is pre-built with all the website dependencies installed, which is what makes it so quick and simple, but also means if you need to change dependencies and test the changes within Docker, you'll need a new image. If this is something you need to do, you can run `make build-image` to generate a local Docker image with updated dependencies, then `make website-local` to use that image and preview. +The docker image is pre-built with all the website dependencies installed, which is what makes it so quick and simple, but also means if you need to change dependencies and test the changes within Docker, you'll need a new image. If this is something you need to do, you can run `make website/build-local` to generate a local Docker image with updated dependencies, then `make website/local` to use that image and preview. ### With Node diff --git a/website/content/docs/ecs/architecture.mdx b/website/content/docs/ecs/architecture.mdx index 766402fcc..03d8bf7bc 100644 --- a/website/content/docs/ecs/architecture.mdx +++ b/website/content/docs/ecs/architecture.mdx @@ -32,7 +32,11 @@ For more information about how Consul works in general, see Consul's [Architectu This diagram shows the timeline of a task starting up and all its containers: -Task Startup Timeline + + +![Task Startup Timeline](/img/ecs-task-startup.svg) + + - **T0:** ECS starts the task. The `consul-client` and `mesh-init` containers start: - `consul-client` uses the `retry-join` option to join the Consul cluster @@ -49,7 +53,11 @@ This diagram shows the timeline of a task starting up and all its containers: This diagram shows an example timeline of a task shutting down: -Task Shutdown Timeline + + +![Task Shutdown Timeline](/img/ecs-task-shutdown.svg) + + - **T0**: ECS sends a TERM signal to all containers. Each container reacts to the TERM signal: - `consul-client` begins to gracefully leave the Consul cluster. 
diff --git a/website/content/docs/integrate/partnerships.mdx b/website/content/docs/integrate/partnerships.mdx index b4bd3c10c..e7c5bb222 100644 --- a/website/content/docs/integrate/partnerships.mdx +++ b/website/content/docs/integrate/partnerships.mdx @@ -20,7 +20,11 @@ By leveraging Consul’s RESTful HTTP API system, prospective partners are able **The Consul ecosystem of integrations:** -Consul Architecture + + +![Consul Architecture](/img/consul_ecosystem_diagram2.png) + + **Data Plane**: These integrations extend Consul’s certificate management, secure ACL configuration, observability metrics and logging, and service discovery that allows for dynamic service mapping APM and logging tools, extend sidecar proxies to support Consul connect, and extend API gateways to allow Consul to route incoming traffic to the proxies for Connect-enabled services. @@ -36,7 +40,18 @@ By leveraging Consul’s RESTful HTTP API system, prospective partners are able **Consul integration verification badges**: Partners will be issued the Consul Enterprise badge for integrations that work with [Consul Enterprise features](https://www.consul.io/docs/enterprise) such as namespaces. Partners will be issued the HCP Consul badge for integrations validated to work with [HCP Consul](https://cloud.hashicorp.com/docs/consul/features). Each badge would be displayed on HashiCorp’s partner page as well as be available for posting on the partner’s own website to provide better visibility and differentiation of the integration for joint customers. - + + + +![Consul Enterprise Badge](/img/consul_enterprise_partner_badge.png) + + + + +![HCP Consul](/img/HCPc_badge.png) + + + Developing a valid integration with either Consul Enterprise or HCP Consul also qualifies the partner for the Premier tier of the HashiCorp Technology Partners program. The process for verification of these integrations is detailed below. @@ -44,7 +59,11 @@ Developing a valid integration with either Consul Enterprise or HCP Consul also The Consul integration development process is described in the steps below. By following these steps, Consul integrations can be developed alongside HashiCorp to ensure new integrations are reviewed, approved and released as quickly as possible. -Integration Program Steps + + +![Integration Program Steps](/img/consul_integration_program_steps.png) + + 1. Engage: Initial contact between vendor and HashiCorp 2. 
Enable: Documentation, code samples and best practices for developing the integration @@ -168,4 +187,4 @@ Below is a checklist of steps that should be followed during the Consul integrat ## Contact Us -For any questions or feedback, please contact us at: [technologypartners@hashicorp.com](mailto:technologypartners@hashicorp.com) \ No newline at end of file +For any questions or feedback, please contact us at: [technologypartners@hashicorp.com](mailto:technologypartners@hashicorp.com) diff --git a/website/next.config.js b/website/next.config.js index f90501d0d..b46f9830b 100644 --- a/website/next.config.js +++ b/website/next.config.js @@ -1,5 +1,5 @@ const withHashicorp = require('@hashicorp/platform-nextjs-plugin') -const redirects = require('./redirects.next') +const redirects = require('./redirects') module.exports = withHashicorp({ dato: { diff --git a/website/package.json b/website/package.json index e4235d9cb..e8067cee0 100644 --- a/website/package.json +++ b/website/package.json @@ -72,14 +72,14 @@ }, "main": "index.js", "scripts": { - "build": "node --max-old-space-size=4096 ./node_modules/.bin/next build", + "build": "./scripts/website-build.sh", "dynamic": "NODE_ENV=production next build && next start", "export": "node --max-old-space-size=4096 ./node_modules/.bin/next export", "format": "next-hashicorp format", "generate:component": "next-hashicorp generate component", "generate:readme": "next-hashicorp markdown-blocks README.md", "lint": "next-hashicorp lint", - "start": "next-remote-watch './content/**/*.mdx'", + "start": "./scripts/website-start.sh", "static": "npm run build && npm run export && cp _redirects out/.", "linkcheck": "linkcheck https://consul.io" }, diff --git a/website/redirects.next.js b/website/redirects.js similarity index 100% rename from website/redirects.next.js rename to website/redirects.js diff --git a/website/scripts/website-build.sh b/website/scripts/website-build.sh new file mode 100755 index 000000000..1c3e5bd77 --- /dev/null +++ b/website/scripts/website-build.sh @@ -0,0 +1,31 @@ +# Repo which we are cloning and executing npm run build:deploy-preview within +REPO_TO_CLONE=dev-portal +# Set the subdirectory name for the base project +PREVIEW_DIR=website-preview +# The directory we want to clone the project into +CLONE_DIR=website-preview +# The product for which we are building the deploy preview +PRODUCT=consul + +from_cache=false + +if [ -d "$PREVIEW_DIR" ]; then + echo "$PREVIEW_DIR found" + CLONE_DIR="$PREVIEW_DIR-tmp" + from_cache=true +fi + +# Clone the base project, if needed +echo "⏳ Cloning the $REPO_TO_CLONE repo, this might take a while..." +git clone --depth=1 "https://github.com/hashicorp/$REPO_TO_CLONE.git" "$CLONE_DIR" + +if [ "$from_cache" = true ]; then + echo "Setting up $PREVIEW_DIR" + cp -R "./$CLONE_DIR/." 
"./$PREVIEW_DIR" +fi + +# cd into the preview directory project +cd "$PREVIEW_DIR" + +# Run the build:deploy-preview start script +REPO=$PRODUCT DEV_IO=$PRODUCT IS_CONTENT_PREVIEW=true HASHI_ENV=project-preview npm run build:deploy-preview diff --git a/website/scripts/website-start.sh b/website/scripts/website-start.sh new file mode 100755 index 000000000..0f2a5212b --- /dev/null +++ b/website/scripts/website-start.sh @@ -0,0 +1,25 @@ +# Repo which we are cloning and executing npm run build:deploy-preview within +REPO_TO_CLONE=dev-portal +# Set the subdirectory name for the dev-portal app +PREVIEW_DIR=website-preview +# The product for which we are building the deploy preview +PRODUCT=consul + +should_pull=true + +# Clone the dev-portal project, if needed +if [ ! -d "$PREVIEW_DIR" ]; then + echo "⏳ Cloning the $REPO_TO_CLONE repo, this might take a while..." + git clone --depth=1 https://github.com/hashicorp/$REPO_TO_CLONE.git "$PREVIEW_DIR" + should_pull=false +fi + +cd "$PREVIEW_DIR" + +# If the directory already existed, pull to ensure the clone is fresh +if [ "$should_pull" = true ]; then + git pull origin main +fi + +# Run the dev-portal content-repo start script +REPO=$PRODUCT PREVIEW_DIR="$PREVIEW_DIR" npm run start:local-preview From 744d79f55c25158df98f6655ca6caa2fc839eb6e Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 5 Apr 2022 11:58:07 -0500 Subject: [PATCH 080/785] build: conditionally install or reinstall protobuf supporting tools as needed (#12674) --- GNUmakefile | 72 ++---- build-support/functions/00-vars.sh | 2 +- build-support/functions/10-util.sh | 6 + build-support/scripts/build-docker.sh | 17 +- build-support/scripts/functions.sh | 8 +- build-support/scripts/proto-gen.sh | 162 ------------ build-support/scripts/protobuf.sh | 340 ++++++++++++++++++++++++++ build-support/scripts/release.sh | 19 +- build-support/scripts/version.sh | 19 +- 9 files changed, 387 insertions(+), 258 deletions(-) delete mode 100755 build-support/scripts/proto-gen.sh create mode 100755 build-support/scripts/protobuf.sh diff --git a/GNUmakefile b/GNUmakefile index ecd0e8e38..36ef83b2c 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -12,23 +12,23 @@ GOTOOLS = \ github.com/hashicorp/lint-consul-retry@master PROTOC_VERSION=3.15.8 -PROTOC_OS := $(shell if test "$$(uname)" == "Darwin"; then echo osx; else echo linux; fi) -PROTOC_ZIP := protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-x86_64.zip -PROTOC_URL := https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/$(PROTOC_ZIP) -PROTOC_ROOT := .protobuf/protoc-$(PROTOC_OS)-$(PROTOC_VERSION) -PROTOC_BIN := $(PROTOC_ROOT)/bin/protoc -GOPROTOVERSION?=$(shell grep github.com/golang/protobuf go.mod | awk '{print $$2}') -GOPROTOTOOLS = \ - github.com/golang/protobuf/protoc-gen-go@$(GOPROTOVERSION) \ - github.com/hashicorp/protoc-gen-go-binary@master \ - github.com/favadi/protoc-go-inject-tag@v1.3.0 \ - github.com/hashicorp/mog@v0.2.0 + +### +# MOG_VERSION can be either a valid string for "go install @" +# or the string @DEV to imply use whatever is currently installed locally. +### +MOG_VERSION='v0.2.0' +### +# PROTOC_GO_INJECT_TAG_VERSION can be either a valid string for "go install @" +# or the string @DEV to imply use whatever is currently installed locally. 
+### +PROTOC_GO_INJECT_TAG_VERSION='v1.3.0' GOTAGS ?= GOPATH=$(shell go env GOPATH) MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1) -export PATH := $(PWD)/bin:$(PATH) +export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH) ASSETFS_PATH?=agent/uiserver/bindata_assetfs.go # Get the git commit @@ -38,11 +38,6 @@ GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true GIT_IMPORT=github.com/hashicorp/consul/version GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -PROTOFILES?=$(shell find . -name '*.proto' | grep -v 'vendor/' | grep -v '.protobuf' ) -PROTOGOFILES=$(PROTOFILES:.proto=.pb.go) -PROTOGOBINFILES=$(PROTOFILES:.proto=.pb.binary.go) -PROTO_MOG_ORDER=$(shell go list -tags '$(GOTAGS)' -deps ./proto/pb... | grep "consul/proto") - ifeq ($(FORCE_REBUILD),1) NOCACHE=--no-cache else @@ -304,11 +299,9 @@ tools: proto-tools done proto-tools: - @if [[ -d .gotools ]]; then rm -rf .gotools ; fi - @for TOOL in $(GOPROTOTOOLS); do \ - echo "=== TOOL: $$TOOL" ; \ - go install -v $$TOOL ; \ - done + @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh \ + --protoc-version "$(PROTOC_VERSION)" \ + --tools-only version: @echo -n "Version: " @@ -359,39 +352,10 @@ else @go test -v ./agent -run Vault endif -.PHONY: protoc-install -protoc-install: - $(info locally installing protocol buffer compiler version if needed (expect: $(PROTOC_VERSION))) - @if [[ ! -x $(PROTOC_ROOT)/bin/protoc ]]; then \ - mkdir -p .protobuf/tmp ; \ - if [[ ! -f .protobuf/tmp/$(PROTOC_ZIP) ]]; then \ - ( cd .protobuf/tmp && curl -sSL "$(PROTOC_URL)" -o "$(PROTOC_ZIP)" ) ; \ - fi ; \ - mkdir -p $(PROTOC_ROOT) ; \ - unzip -d $(PROTOC_ROOT) .protobuf/tmp/$(PROTOC_ZIP) ; \ - chmod -R a+Xr $(PROTOC_ROOT) ; \ - chmod +x $(PROTOC_ROOT)/bin/protoc ; \ - fi - .PHONY: proto -proto: -protoc-files -mog-files - -.PHONY: -mog-files --mog-files: - @for FULL_PKG in $(PROTO_MOG_ORDER); do \ - PKG="$${FULL_PKG/#github.com\/hashicorp\/consul\//.\/}" ; \ - find "$$PKG" -name '*.gen.go' -delete ; \ - echo "mog -tags '$(GOTAGS)' -source \"$${PKG}/*.pb.go\"" ; \ - mog -tags '$(GOTAGS)' -source "$${PKG}/*.pb.go" ; \ - done - @echo "Generated all mog Go files" - -.PHONY: -protoc-files --protoc-files: protoc-install $(PROTOGOFILES) $(PROTOGOBINFILES) - @echo "Generated all protobuf Go files" - -%.pb.go %.pb.binary.go: %.proto - @$(SHELL) $(CURDIR)/build-support/scripts/proto-gen.sh --grpc --protoc-bin "$(PROTOC_BIN)" "$<" +proto: + @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh \ + --protoc-version "$(PROTOC_VERSION)" # utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION') print-% : ; @echo $($*) diff --git a/build-support/functions/00-vars.sh b/build-support/functions/00-vars.sh index f4705f5d1..c9af7c60b 100644 --- a/build-support/functions/00-vars.sh +++ b/build-support/functions/00-vars.sh @@ -8,7 +8,7 @@ GO_BUILD_CONTAINER_DEFAULT="consul-build-go" # Whether to colorize shell output COLORIZE=${COLORIZE-1} -# determine GOPATH and the first GOPATH to use for intalling binaries +# determine GOPATH and the first GOPATH to use for installing binaries if command -v go >/dev/null; then GOPATH=${GOPATH:-$(go env GOPATH)} case $(uname) in diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index b116f2483..6a5f7282d 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -64,6 +64,12 @@ function debug_run { return $? } +function print_run { + echo "$@" + "$@" + return $? 
+} + function sed_i { if test "$(uname)" == "Darwin" then diff --git a/build-support/scripts/build-docker.sh b/build-support/scripts/build-docker.sh index ba9abfb96..83aa3f1cc 100755 --- a/build-support/scripts/build-docker.sh +++ b/build-support/scripts/build-docker.sh @@ -1,14 +1,9 @@ -#!/bin/bash -SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -SCRIPT_DIR=$(pwd) -pushd ../.. > /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null +#!/usr/bin/env bash + +readonly SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +readonly SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +readonly SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")" +readonly FN_DIR="$(dirname "${SCRIPT_DIR}")/functions" source "${SCRIPT_DIR}/functions.sh" diff --git a/build-support/scripts/functions.sh b/build-support/scripts/functions.sh index 2ddae96f2..75beeb141 100755 --- a/build-support/scripts/functions.sh +++ b/build-support/scripts/functions.sh @@ -3,15 +3,11 @@ # # It provides all the scripting around building Consul and the release process -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -pushd ../functions > /dev/null -FUNC_DIR=$(pwd) -popd > /dev/null -popd > /dev/null +readonly FUNC_DIR="$(dirname "$(dirname "${BASH_SOURCE[0]}")")/functions" func_sources=$(find ${FUNC_DIR} -mindepth 1 -maxdepth 1 -name "*.sh" -type f | sort -n) for src in $func_sources do source $src -done \ No newline at end of file +done diff --git a/build-support/scripts/proto-gen.sh b/build-support/scripts/proto-gen.sh deleted file mode 100755 index 693a29de1..000000000 --- a/build-support/scripts/proto-gen.sh +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env bash - -SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -SCRIPT_DIR=$(pwd) -pushd ../.. > /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null - -source "${SCRIPT_DIR}/functions.sh" - -function usage { -cat <<-EOF -Usage: ${SCRIPT_NAME} [] - -Description: - Generate the Go files from protobuf definitions. In addition to - running the protoc generator it will also fixup build tags in the - generated code. - -Options: - --protoc-bin Path to protoc. - --import-replace Replace imports of google types with those from the protobuf repo. - --grpc Enable the gRPC plugin - -h | --help Print this help text. 
-EOF -} - -function err_usage { - err "$1" - err "" - err "$(usage)" -} - -function main { - local -i grpc=0 - local proto_path= - local protoc_bin= - - while test $# -gt 0 - do - case "$1" in - -h | --help ) - usage - return 0 - ;; - --grpc ) - grpc=1 - shift - ;; - --protoc-bin ) - protoc_bin="$2" - shift 2 - ;; - * ) - proto_path="$1" - shift - ;; - esac - done - - if test -z "${proto_path}" - then - err_usage "ERROR: No proto file specified" - return 1 - fi - - if test -z "${protoc_bin}" - then - protoc_bin="$(command -v protoc)" - if test -z "${protoc_bin}" - then - err_usage "ERROR: no proto-bin specified and protoc could not be discovered" - return 1 - fi - fi - - - go mod download - - local golang_proto_path=$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf) - local golang_proto_mod_path=$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}") - - local proto_go_path=${proto_path%%.proto}.pb.go - local proto_go_bin_path=${proto_path%%.proto}.pb.binary.go - local proto_go_rpcglue_path=${proto_path%%.proto}.rpcglue.pb.go - - local go_proto_out="paths=source_relative" - if is_set "${grpc}" - then - go_proto_out="${go_proto_out},plugins=grpc" - fi - - if test -n "${go_proto_out}" - then - go_proto_out="${go_proto_out}:" - fi - - rm -f "${proto_go_path}" ${proto_go_bin_path}" ${proto_go_rpcglue_path}" - - # How we run protoc probably needs some documentation. - # - # This is the path to where - # -I="${golang_proto_path}/protobuf" \ - local -i ret=0 - status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path}" - echo "debug_run ${protoc_bin} \ - -I=\"${golang_proto_path}\" \ - -I=\"${golang_proto_mod_path}\" \ - -I=\"${SOURCE_DIR}\" \ - --go_out=\"${go_proto_out}${SOURCE_DIR}\" \ - --go-binary_out=\"${SOURCE_DIR}\" \ - \"${proto_path}\"" - debug_run ${protoc_bin} \ - -I="${golang_proto_path}" \ - -I="${golang_proto_mod_path}" \ - -I="${SOURCE_DIR}" \ - --go_out="${go_proto_out}${SOURCE_DIR}" \ - --go-binary_out="${SOURCE_DIR}" \ - "${proto_path}" - - if test $? -ne 0 - then - err "Failed to run protoc for ${proto_path}" - return 1 - fi - - debug_run protoc-go-inject-tag \ - -input="${proto_go_path}" - - if test $? -ne 0 - then - err "Failed to run protoc-go-inject-tag for ${proto_path}" - return 1 - fi - - BUILD_TAGS=$(head -n 2 "${proto_path}" | grep '^//go:build\|// +build') - if test -n "${BUILD_TAGS}" - then - echo -e "${BUILD_TAGS}\n" >> "${proto_go_bin_path}.new" - cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new" - mv "${proto_go_bin_path}.new" "${proto_go_bin_path}" - fi - - # note: this has to run after we fix up the build tags above - rm -f "${proto_go_rpcglue_path}" - debug_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" - if test $? -ne 0 - then - err "Failed to generate consul rpc glue outputs from ${proto_path}" - return 1 - fi - - return 0 -} - -main "$@" -exit $? 
diff --git a/build-support/scripts/protobuf.sh b/build-support/scripts/protobuf.sh new file mode 100755 index 000000000..fa6e5b79f --- /dev/null +++ b/build-support/scripts/protobuf.sh @@ -0,0 +1,340 @@ +#!/usr/bin/env bash + +readonly SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +readonly SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +readonly SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")" +readonly FN_DIR="$(dirname "${SCRIPT_DIR}")/functions" + +source "${SCRIPT_DIR}/functions.sh" + +unset CDPATH + +set -euo pipefail + +usage() { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Description: + Installs protoc, various supporting Go tools, and then regenerates all Go + files from protobuf definitions. In addition to running the protoc + generator it will also fixup build tags in the generated code and + regenerate mog outputs and RPC stubs. + +Options: + --protoc-version Version of protoc to install. It defaults to what is specified in the makefile. + --tools-only Install all required tools but do not generate outputs. + -h | --help Print this help text. +EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + local protoc_version= + local tools_only= + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + --protoc-version ) + protoc_version="$2" + shift 2 + ;; + --tools-only ) + tools_only=1 + shift + ;; + esac + done + + if test -z "${protoc_version}" + then + protoc_version="$(make --no-print-directory print-PROTOC_VERSION)" + if test -z "${protoc_version}" + then + err_usage "ERROR: no proto-version specified and version could not be discovered" + return 1 + fi + fi + + # ensure the correct protoc compiler is installed + protoc_install "${protoc_version}" + if test -z "${protoc_bin}" ; then + exit 1 + fi + + # ensure these tools are installed + proto_tools_install + + if [[ -n $tools_only ]]; then + return 0 + fi + + # Compute some data from dependencies in non-local variables. + go mod download + golang_proto_path="$(go list -f '{{ .Dir }}' -m github.com/golang/protobuf)" + # golang_proto_mod_path="$(sed -e 's,\(.*\)github.com.*,\1,' <<< "${golang_proto_path}")" + golang_proto_mod_path="$(go env GOMODCACHE)" + + declare -a proto_files + while IFS= read -r pkg; do + pkg="${pkg#"./"}" + proto_files+=( "$pkg" ) + done < <(find . -name '*.proto' | grep -v 'vendor/' | grep -v '.protobuf' | sort ) + + for proto_file in "${proto_files[@]}"; do + generate_protobuf_code "${proto_file}" + done + + status "Generated all protobuf Go files" + + generate_mog_code + + status "Generated all mog Go files" + + return 0 +} + +# Installs the version of protoc specified by the first argument. +# +# Will set 'protoc_bin' +function protoc_install { + local protoc_version="${1:-}" + local protoc_os + + if test -z "${protoc_version}" + then + protoc_version="$(make --no-print-directory print-PROTOC_VERSION)" + if test -z "${protoc_version}" + then + err "ERROR: no protoc-version specified and version could not be discovered" + return 1 + fi + fi + + case "$(uname)" in + Darwin) + protoc_os="osx" + ;; + Linux) + protoc_os="linux" + ;; + *) + err "unexpected OS: $(uname)" + return 1 + esac + + local protoc_zip="protoc-${protoc_version}-${protoc_os}-x86_64.zip" + local protoc_url="https://github.com/protocolbuffers/protobuf/releases/download/v${protoc_version}/${protoc_zip}" + local protoc_root=".protobuf/protoc-${protoc_os}-${protoc_version}" + # This is updated for use outside of the function. 
+ protoc_bin="${protoc_root}/bin/protoc" + + if [[ -x "${protoc_bin}" ]]; then + status "protocol buffer compiler version already installed: ${protoc_version}" + return 0 + fi + + status_stage "installing protocol buffer compiler version: ${protoc_version}" + + mkdir -p .protobuf/tmp + if [[ ! -f .protobuf/tmp/${protoc_zip} ]]; then \ + ( cd .protobuf/tmp && curl -sSL "${protoc_url}" -o "${protoc_zip}" ) + fi + + mkdir -p "${protoc_root}" + unzip -d "${protoc_root}" ".protobuf/tmp/${protoc_zip}" + chmod -R a+Xr "${protoc_root}" + chmod +x "${protoc_bin}" + + return 0 +} + +function proto_tools_install { + local protoc_gen_go_version + local mog_version + local protoc_go_inject_tag_version + + protoc_gen_go_version="$(grep github.com/golang/protobuf go.mod | awk '{print $2}')" + mog_version="$(make --no-print-directory print-MOG_VERSION)" + protoc_go_inject_tag_version="$(make --no-print-directory print-PROTOC_GO_INJECT_TAG_VERSION)" + + # echo "go: ${protoc_gen_go_version}" + # echo "mog: ${mog_version}" + # echo "tag: ${protoc_go_inject_tag_version}" + + install_versioned_tool \ + 'protoc-gen-go' \ + 'github.com/golang/protobuf' \ + "${protoc_gen_go_version}" \ + 'github.com/golang/protobuf/protoc-gen-go' + + install_unversioned_tool \ + protoc-gen-go-binary \ + 'github.com/hashicorp/protoc-gen-go-binary@master' + + install_versioned_tool \ + 'protoc-go-inject-tag' \ + 'github.com/favadi/protoc-go-inject-tag' \ + "${protoc_go_inject_tag_version}" \ + 'github.com/favadi/protoc-go-inject-tag' + + install_versioned_tool \ + 'mog' \ + 'github.com/hashicorp/mog' \ + "${mog_version}" \ + 'github.com/hashicorp/mog' + + return 0 +} + +function install_unversioned_tool { + local command="$1" + local install="$2" + + if ! command -v "${command}" &>/dev/null ; then + status_stage "installing tool: ${install}" + go install "${install}" + else + debug "skipping tool: ${install} (installed)" + fi + + return 0 +} + +function install_versioned_tool { + local command="$1" + local module="$2" + local version="$3" + local installbase="$4" + + local should_install= + local got + + local expect="${module}@${version}" + local install="${installbase}@${version}" + + if [[ -z "$version" ]]; then + err "cannot install '${command}' no version selected" + return 1 + fi + + if [[ "$version" = "@DEV" ]]; then + if ! 
command -v "${command}" &>/dev/null ; then
+            err "dev version of '${command}' requested but not installed"
+            return 1
+        fi
+        status "skipping tool: ${installbase} (using development version)"
+        return 0
+    fi
+
+    if command -v "${command}" &>/dev/null ; then
+        got="$(go version -m $(which "${command}") | grep '\bmod\b' | grep "${module}" |
+            awk '{print $2 "@" $3}')"
+        if [[ "$expect" != "$got" ]]; then
+            should_install=1
+        fi
+    else
+        should_install=1
+    fi
+
+    if [[ -n $should_install ]]; then
+        status_stage "installing tool: ${install}"
+        go install "${install}"
+    else
+        debug "skipping tool: ${install} (installed)"
+    fi
+    return 0
+}
+
+function generate_protobuf_code {
+    local proto_path="${1:-}"
+    if [[ -z "${proto_path}" ]]; then
+        err "missing protobuf path argument"
+        return 1
+    fi
+
+    if [[ -z "${golang_proto_path}" ]]; then
+        err "golang_proto_path was not set"
+        return 1
+    fi
+    if [[ -z "${golang_proto_mod_path}" ]]; then
+        err "golang_proto_mod_path was not set"
+        return 1
+    fi
+
+    local proto_go_path="${proto_path%%.proto}.pb.go"
+    local proto_go_bin_path="${proto_path%%.proto}.pb.binary.go"
+    local proto_go_rpcglue_path="${proto_path%%.proto}.rpcglue.pb.go"
+
+    local go_proto_out='paths=source_relative,plugins=grpc:'
+
+    status_stage "Generating ${proto_path} into ${proto_go_path} and ${proto_go_bin_path}"
+
+    rm -f "${proto_go_path}" "${proto_go_bin_path}" "${proto_go_rpcglue_path}"
+
+    print_run ${protoc_bin} \
+        -I="${golang_proto_path}" \
+        -I="${golang_proto_mod_path}" \
+        -I="${SOURCE_DIR}" \
+        --go_out="${go_proto_out}${SOURCE_DIR}" \
+        --go-binary_out="${SOURCE_DIR}" \
+        "${proto_path}" || {
+
+        err "Failed to run protoc for ${proto_path}"
+        return 1
+    }
+
+    print_run protoc-go-inject-tag -input="${proto_go_path}" || {
+        err "Failed to run protoc-go-inject-tag for ${proto_path}"
+        return 1
+    }
+
+    local build_tags
+    build_tags="$(head -n 2 "${proto_path}" | grep '^//go:build\|// +build' || true)"
+    if test -n "${build_tags}"; then
+        echo -e "${build_tags}\n" >> "${proto_go_bin_path}.new"
+        cat "${proto_go_bin_path}" >> "${proto_go_bin_path}.new"
+        mv "${proto_go_bin_path}.new" "${proto_go_bin_path}"
+    fi
+
+    # NOTE: this has to run after we fix up the build tags above
+    rm -f "${proto_go_rpcglue_path}"
+    print_run go run ./internal/tools/proto-gen-rpc-glue/main.go -path "${proto_go_path}" || {
+        err "Failed to generate consul rpc glue outputs from ${proto_path}"
+        return 1
+    }
+
+    return 0
+}
+
+function generate_mog_code {
+    local mog_order
+
+    mog_order="$(go list -tags "${GOTAGS}" -deps ./proto/pb... | grep "consul/proto")"
+
+    for FULL_PKG in ${mog_order}; do
+        PKG="${FULL_PKG/#github.com\/hashicorp\/consul\/}"
+        status_stage "Generating ${PKG}/*.pb.go into ${PKG}/*.gen.go with mog"
+        find "$PKG" -name '*.gen.go' -delete
+        if [[ -n "${GOTAGS}" ]]; then
+            print_run mog -tags "${GOTAGS}" -source "./${PKG}/*.pb.go"
+        else
+            print_run mog -source "./${PKG}/*.pb.go"
+        fi
+    done
+
+    return 0
+}
+
+main "$@"
+exit $?
diff --git a/build-support/scripts/release.sh b/build-support/scripts/release.sh
index 879fe4320..ca1ccbd2e 100755
--- a/build-support/scripts/release.sh
+++ b/build-support/scripts/release.sh
@@ -1,14 +1,9 @@
-#!/bin/bash
-SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
-pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
-SCRIPT_DIR=$(pwd)
-pushd ../..
> /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null +#!/usr/bin/env bash + +readonly SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +readonly SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +readonly SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")" +readonly FN_DIR="$(dirname "${SCRIPT_DIR}")/functions" source "${SCRIPT_DIR}/functions.sh" @@ -153,4 +148,4 @@ function main { main "$@" exit $? - \ No newline at end of file + diff --git a/build-support/scripts/version.sh b/build-support/scripts/version.sh index d7c166f0f..3812cd3f1 100755 --- a/build-support/scripts/version.sh +++ b/build-support/scripts/version.sh @@ -1,14 +1,9 @@ -#!/bin/bash -SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -SCRIPT_DIR=$(pwd) -pushd ../.. > /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null +#!/usr/bin/env bash + +readonly SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +readonly SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +readonly SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")" +readonly FN_DIR="$(dirname "${SCRIPT_DIR}")/functions" source "${SCRIPT_DIR}/functions.sh" @@ -89,4 +84,4 @@ function main { main "$@" exit $? - \ No newline at end of file + From e3d2b91e34a247a832f1a28b98823a3b3f29544f Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Tue, 5 Apr 2022 19:16:20 +0100 Subject: [PATCH 081/785] ca: move ConnectCA.Sign authorization logic to CAManager (#12609) OSS sync of enterprise changes at 8d6fd125 --- agent/consul/connect_ca_endpoint.go | 40 +-------------------------- agent/consul/leader_connect_ca.go | 43 +++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 39 deletions(-) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index bf68611fc..c325ff123 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" @@ -145,54 +144,17 @@ func (s *ConnectCA) Sign( return err } - // Parse the CSR csr, err := connect.ParseCSR(args.CSR) if err != nil { return err } - // Parse the SPIFFE ID - spiffeID, err := connect.ParseCertURI(csr.URIs[0]) - if err != nil { - return err - } - - // Verify that the ACL token provided has permission to act as this service authz, err := s.srv.ResolveToken(args.Token) if err != nil { return err } - var authzContext acl.AuthorizerContext - var entMeta structs.EnterpriseMeta - - serviceID, isService := spiffeID.(*connect.SpiffeIDService) - agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent) - if !isService && !isAgent { - return fmt.Errorf("SPIFFE ID in CSR must be a service or agent ID") - } - - if isService { - entMeta.Merge(serviceID.GetEnterpriseMeta()) - entMeta.FillAuthzContext(&authzContext) - if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(serviceID.Service, &authzContext); err != nil { - return err - } - - // Verify that the DC in the service URI matches us. We might relax this - // requirement later but being restrictive for now is safer. 
- if serviceID.Datacenter != s.srv.config.Datacenter { - return fmt.Errorf("SPIFFE ID in CSR from a different datacenter: %s, "+ - "we are %s", serviceID.Datacenter, s.srv.config.Datacenter) - } - } else if isAgent { - agentID.GetEnterpriseMeta().FillAuthzContext(&authzContext) - if err := authz.ToAllowAuthorizer().NodeWriteAllowed(agentID.Agent, &authzContext); err != nil { - return err - } - } - - cert, err := s.srv.caManager.SignCertificate(csr, spiffeID) + cert, err := s.srv.caManager.AuthorizeAndSignCertificate(csr, authz) if err != nil { return err } diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index ed5587ff5..899ff494a 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -15,6 +15,7 @@ import ( uuid "github.com/hashicorp/go-uuid" "golang.org/x/time/rate" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/lib/semaphore" "github.com/hashicorp/consul/agent/connect" @@ -1375,6 +1376,48 @@ func (l *connectSignRateLimiter) getCSRRateLimiterWithLimit(limit rate.Limit) *r return l.csrRateLimiter } +// AuthorizeAndSignCertificate signs a leaf certificate for the service or agent +// identified by the SPIFFE ID in the given CSR's SAN. It performs authorization +// using the given acl.Authorizer. +func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, authz acl.Authorizer) (*structs.IssuedCert, error) { + // Parse the SPIFFE ID from the CSR SAN. + if len(csr.URIs) == 0 { + return nil, errors.New("CSR SAN does not contain a SPIFFE ID") + } + spiffeID, err := connect.ParseCertURI(csr.URIs[0]) + if err != nil { + return nil, err + } + + // Perform authorization. + var authzContext acl.AuthorizerContext + allow := authz.ToAllowAuthorizer() + switch v := spiffeID.(type) { + case *connect.SpiffeIDService: + v.GetEnterpriseMeta().FillAuthzContext(&authzContext) + if err := allow.ServiceWriteAllowed(v.Service, &authzContext); err != nil { + return nil, err + } + + // Verify that the DC in the service URI matches us. We might relax this + // requirement later but being restrictive for now is safer. 
+ dc := c.serverConf.Datacenter + if v.Datacenter != dc { + return nil, fmt.Errorf("SPIFFE ID in CSR from a different datacenter: %s, "+ + "we are %s", v.Datacenter, dc) + } + case *connect.SpiffeIDAgent: + v.GetEnterpriseMeta().FillAuthzContext(&authzContext) + if err := allow.NodeWriteAllowed(v.Agent, &authzContext); err != nil { + return nil, err + } + default: + return nil, errors.New("SPIFFE ID in CSR must be a service or agent ID") + } + + return c.SignCertificate(csr, spiffeID) +} + func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID connect.CertURI) (*structs.IssuedCert, error) { provider, caRoot := c.getCAProvider() if provider == nil { From e1b013d8356772e322173410f76ac8c2eb8e1f8f Mon Sep 17 00:00:00 2001 From: weichuliu Date: Wed, 6 Apr 2022 03:17:53 +0900 Subject: [PATCH 082/785] routine: fix that acl stops replicating after regaining leadership (#12295) (#12565) * routine: fix that acl stops replicating after regaining leadership (#12295) * routine: add TestManager_StopBlocking (#12295) * routine: update TestManager_StopBlocking (#12295) --- lib/routine/routine.go | 20 +++++++++++--- lib/routine/routine_test.go | 55 +++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/lib/routine/routine.go b/lib/routine/routine.go index 48fc2aef4..cf1ab1c0d 100644 --- a/lib/routine/routine.go +++ b/lib/routine/routine.go @@ -10,15 +10,22 @@ import ( type Routine func(ctx context.Context) error +// cancelCh is the ctx.Done() +// When cancel() is called, if the routine is running a blocking call (e.g. some ACL replication RPCs), +// stoppedCh won't be closed till the blocking call returns, while cancelCh will be closed immediately. +// cancelCh is used to properly detect routine running status between cancel() and close(stoppedCh) type routineTracker struct { cancel context.CancelFunc - stoppedCh chan struct{} // closed when no longer running + cancelCh <-chan struct{} // closed when ctx is done + stoppedCh chan struct{} // closed when no longer running } func (r *routineTracker) running() bool { select { case <-r.stoppedCh: return false + case <-r.cancelCh: + return false default: return true } @@ -74,6 +81,7 @@ func (m *Manager) Start(ctx context.Context, name string, routine Routine) error rtCtx, cancel := context.WithCancel(ctx) instance := &routineTracker{ cancel: cancel, + cancelCh: ctx.Done(), stoppedCh: make(chan struct{}), } @@ -97,10 +105,14 @@ func (m *Manager) execute(ctx context.Context, name string, routine Routine, don "error", err, ) } else { - m.logger.Debug("stopped routine", "routine", name) + m.logger.Info("stopped routine", "routine", name) } } +// Caveat: The returned stoppedCh indicates that the routine is completed +// It's possible that ctx is canceled, but stoppedCh not yet closed +// Use mgr.IsRunning(name) than this stoppedCh to tell whether the +// instance is still running (not cancelled or completed). 
func (m *Manager) Stop(name string) <-chan struct{} { instance := m.stopInstance(name) if instance == nil { @@ -127,7 +139,7 @@ func (m *Manager) stopInstance(name string) *routineTracker { return instance } - m.logger.Debug("stopping routine", "routine", name) + m.logger.Info("stopping routine", "routine", name) instance.cancel() delete(m.routines, name) @@ -145,7 +157,7 @@ func (m *Manager) StopAll() { if !routine.running() { continue } - m.logger.Debug("stopping routine", "routine", name) + m.logger.Info("stopping routine", "routine", name) routine.cancel() } } diff --git a/lib/routine/routine_test.go b/lib/routine/routine_test.go index 1bfdfa219..61eb15ccd 100644 --- a/lib/routine/routine_test.go +++ b/lib/routine/routine_test.go @@ -99,3 +99,58 @@ func TestManager_StopAll(t *testing.T) { require.False(r, mgr.IsRunning("run2")) }) } + +// Test IsRunning when routine is a blocking call that does not +// immediately return when cancelled +func TestManager_StopBlocking(t *testing.T) { + t.Parallel() + var runs uint32 + var running uint32 + unblock := make(chan struct{}) // To simulate a blocking call + mgr := NewManager(testutil.Logger(t)) + + // A routine that will be still running for a while after cancelled + run := func(ctx context.Context) error { + atomic.StoreUint32(&running, 1) + defer atomic.StoreUint32(&running, 0) + atomic.AddUint32(&runs, 1) + <-ctx.Done() + <-unblock + return nil + } + + require.NoError(t, mgr.Start(context.Background(), "blocking", run)) + retry.Run(t, func(r *retry.R) { + require.True(r, mgr.IsRunning("blocking")) + require.Equal(r, uint32(1), atomic.LoadUint32(&runs)) + require.Equal(r, uint32(1), atomic.LoadUint32(&running)) + }) + + doneCh := mgr.Stop("blocking") + + // IsRunning should return false, however &running is still 1 + retry.Run(t, func(r *retry.R) { + require.False(r, mgr.IsRunning("blocking")) + require.Equal(r, uint32(1), atomic.LoadUint32(&running)) + }) + + // New routine should be able to replace old "cancelled but running" routine. + require.NoError(t, mgr.Start(context.Background(), "blocking", func(ctx context.Context) error { + <-ctx.Done() + return nil + })) + defer mgr.Stop("blocking") + + retry.Run(t, func(r *retry.R) { + require.True(r, mgr.IsRunning("blocking")) // New routine + require.Equal(r, uint32(1), atomic.LoadUint32(&running)) // Old routine + }) + + // Complete the blocking routine + close(unblock) + <-doneCh + + retry.Run(t, func(r *retry.R) { + require.Equal(r, uint32(0), atomic.LoadUint32(&running)) + }) +} From 9d67f346140afed6f610e06ec9a5ce4ff7d7f5c7 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 5 Apr 2022 15:55:14 -0400 Subject: [PATCH 083/785] Creating a changelog entry for #12565 (#12699) --- .changelog/12565.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/12565.txt diff --git a/.changelog/12565.txt b/.changelog/12565.txt new file mode 100644 index 000000000..d7ad635fc --- /dev/null +++ b/.changelog/12565.txt @@ -0,0 +1,3 @@ +```release-note:bug +replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. +``` From 12523197f934e30457300ed805492f51c887c1cb Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 5 Apr 2022 15:46:56 -0500 Subject: [PATCH 084/785] syncing changes back from enterprise (#12701) --- acl/policy_merger.go | 2 ++ agent/delegate_mock_test.go | 4 ++-- api/acl_test.go | 8 ++++++-- .../acl/authmethod/create/authmethod_create_test.go | 12 +++++++++--- .../acl/authmethod/update/authmethod_update_test.go | 10 +++++++--- command/connect/expose/expose_test.go | 7 +++++-- test/integration/connect/envoy/case-basic/capture.sh | 4 ++++ test/integration/connect/envoy/case-http/capture.sh | 4 ++++ 8 files changed, 39 insertions(+), 12 deletions(-) create mode 100644 test/integration/connect/envoy/case-basic/capture.sh create mode 100644 test/integration/connect/envoy/case-http/capture.sh diff --git a/acl/policy_merger.go b/acl/policy_merger.go index cceb62dd9..d4a454bc1 100644 --- a/acl/policy_merger.go +++ b/acl/policy_merger.go @@ -19,6 +19,8 @@ type policyRulesMergeContext struct { servicePrefixRules map[string]*ServiceRule sessionRules map[string]*SessionRule sessionPrefixRules map[string]*SessionRule + // namespaceRule is an enterprise-only field + namespaceRule string } func (p *policyRulesMergeContext) init() { diff --git a/agent/delegate_mock_test.go b/agent/delegate_mock_test.go index d2c6e267c..36b32f689 100644 --- a/agent/delegate_mock_test.go +++ b/agent/delegate_mock_test.go @@ -47,9 +47,9 @@ func (m *delegateMock) RemoveFailedNode(node string, prune bool, entMeta *struct return m.Called(node, prune, entMeta).Error(0) } -func (m *delegateMock) ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error) { +func (m *delegateMock) ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) { ret := m.Called(token, entMeta, authzContext) - return ret.Get(0).(acl.Authorizer), ret.Error(1) + return ret.Get(0).(consul.ACLResolveResult), ret.Error(1) } func (m *delegateMock) RPC(method string, args interface{}, reply interface{}) error { diff --git a/api/acl_test.go b/api/acl_test.go index 59c9bc315..6177a5c3f 100644 --- a/api/acl_test.go +++ b/api/acl_test.go @@ -237,6 +237,10 @@ func prepTokenPolicies(t *testing.T, acl *ACL) (policies []*ACLPolicy) { } func prepTokenPoliciesInPartition(t *testing.T, acl *ACL, partition string) (policies []*ACLPolicy) { + datacenters := []string{"dc1", "dc2"} + if partition != "" && partition != "default" { + datacenters = []string{"dc1"} + } var wqPart *WriteOptions if partition != "" { wqPart = &WriteOptions{Partition: partition} @@ -245,7 +249,7 @@ func prepTokenPoliciesInPartition(t *testing.T, acl *ACL, partition string) (pol Name: "one", Description: "one description", Rules: `acl = "read"`, - Datacenters: []string{"dc1", "dc2"}, + Datacenters: datacenters, }, wqPart) require.NoError(t, err) @@ -256,7 +260,7 @@ func prepTokenPoliciesInPartition(t *testing.T, acl *ACL, partition string) (pol Name: "two", Description: "two description", Rules: `node_prefix "" { policy = "read" }`, - Datacenters: []string{"dc1", "dc2"}, + Datacenters: datacenters, }, wqPart) require.NoError(t, err) diff --git a/command/acl/authmethod/create/authmethod_create_test.go b/command/acl/authmethod/create/authmethod_create_test.go index 0e45e624d..03bdcd1e8 100644 --- a/command/acl/authmethod/create/authmethod_create_test.go +++ b/command/acl/authmethod/create/authmethod_create_test.go @@ -9,15 +9,16 @@ import ( "testing" "time" + 
"github.com/hashicorp/go-uuid" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/go-uuid" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" // activate testing auth method _ "github.com/hashicorp/consul/agent/consul/authmethod/testauth" @@ -293,6 +294,7 @@ func TestAuthMethodCreateCommand_JSON(t *testing.T) { delete(raw, "CreateIndex") delete(raw, "ModifyIndex") delete(raw, "Namespace") + delete(raw, "Partition") require.Equal(t, map[string]interface{}{ "Name": name, @@ -342,6 +344,7 @@ func TestAuthMethodCreateCommand_JSON(t *testing.T) { delete(raw, "CreateIndex") delete(raw, "ModifyIndex") delete(raw, "Namespace") + delete(raw, "Partition") require.Equal(t, map[string]interface{}{ "Name": name, @@ -613,6 +616,9 @@ func getTestMethod(t *testing.T, client *api.Client, methodName string) *api.ACL if method.Namespace == "default" { method.Namespace = "" } + if method.Partition == "default" { + method.Partition = "" + } return method } diff --git a/command/acl/authmethod/update/authmethod_update_test.go b/command/acl/authmethod/update/authmethod_update_test.go index 8ebde83cd..263f0b774 100644 --- a/command/acl/authmethod/update/authmethod_update_test.go +++ b/command/acl/authmethod/update/authmethod_update_test.go @@ -8,15 +8,16 @@ import ( "strings" "testing" + "github.com/hashicorp/go-uuid" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/go-uuid" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" // activate testing auth method _ "github.com/hashicorp/consul/agent/consul/authmethod/testauth" @@ -941,6 +942,9 @@ func getTestMethod(t *testing.T, client *api.Client, methodName string) *api.ACL if method.Namespace == "default" { method.Namespace = "" } + if method.Partition == "default" { + method.Partition = "" + } return method } diff --git a/command/connect/expose/expose_test.go b/command/connect/expose/expose_test.go index fa9c38de8..61434d6d1 100644 --- a/command/connect/expose/expose_test.go +++ b/command/connect/expose/expose_test.go @@ -3,11 +3,12 @@ package expose import ( "testing" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testrpc" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" ) func TestConnectExpose(t *testing.T) { @@ -47,6 +48,7 @@ func TestConnectExpose(t *testing.T) { Kind: api.IngressGateway, Name: "ingress", Namespace: ns, + Partition: ap, Listeners: []api.IngressListener{ { Port: 8888, @@ -280,6 +282,7 @@ func TestConnectExpose_existingConfig(t *testing.T) { }, }, }) + ingressConf.Partition = entryConf.Partition ingressConf.Namespace = entryConf.Namespace for i, listener := range ingressConf.Listeners { listener.Services[0].Namespace = entryConf.Listeners[i].Services[0].Namespace diff --git a/test/integration/connect/envoy/case-basic/capture.sh b/test/integration/connect/envoy/case-basic/capture.sh new file mode 100644 index 
000000000..1a11f7d5e --- /dev/null +++ b/test/integration/connect/envoy/case-basic/capture.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +snapshot_envoy_admin localhost:19000 s1 primary || true +snapshot_envoy_admin localhost:19001 s2 primary || true diff --git a/test/integration/connect/envoy/case-http/capture.sh b/test/integration/connect/envoy/case-http/capture.sh new file mode 100644 index 000000000..1a11f7d5e --- /dev/null +++ b/test/integration/connect/envoy/case-http/capture.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +snapshot_envoy_admin localhost:19000 s1 primary || true +snapshot_envoy_admin localhost:19001 s2 primary || true From 67bedd02cc51ad31d1a201663fbd5cbfd91248f3 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 5 Apr 2022 16:01:02 -0500 Subject: [PATCH 085/785] build: remove unused tools from being installed (#12671) --- GNUmakefile | 2 -- build-support/docker/Build-Go.dockerfile | 18 ++---------------- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 36ef83b2c..2fd96a9ec 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -5,8 +5,6 @@ SHELL = bash GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ github.com/hashicorp/go-bindata/go-bindata@master \ - golang.org/x/tools/cmd/cover@master \ - golang.org/x/tools/cmd/stringer@master \ github.com/vektra/mockery/cmd/mockery@master \ github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ github.com/hashicorp/lint-consul-retry@master diff --git a/build-support/docker/Build-Go.dockerfile b/build-support/docker/Build-Go.dockerfile index 7d82dbef9..39fc6df5c 100644 --- a/build-support/docker/Build-Go.dockerfile +++ b/build-support/docker/Build-Go.dockerfile @@ -1,21 +1,7 @@ ARG GOLANG_VERSION=1.17.5 FROM golang:${GOLANG_VERSION} -ARG GOTOOLS="github.com/elazarl/go-bindata-assetfs/... \ - github.com/hashicorp/go-bindata/... \ - golang.org/x/tools/cmd/cover \ - golang.org/x/tools/cmd/stringer \ - github.com/axw/gocov/gocov \ - gopkg.in/matm/v1/gocov-html" - -RUN mkdir -p .gotools && \ - cd .gotools && \ - for tool in ${GOTOOLS}; do \ - echo "=== TOOL: ${tool}" ; \ - rm -rf go.mod go.sum ; \ - go mod init consul-tools ; \ - go get -v "${tool}" ; \ - done && \ - rm -rf go.mod go.sum +RUN go install github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master +RUN go install github.com/hashicorp/go-bindata/go-bindata@master WORKDIR /consul From 497b300c766b366b1abe8411ca298c6560f6256d Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Fri, 11 Mar 2022 19:51:24 -0800 Subject: [PATCH 086/785] add new entmeta stuff. 
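The stub introduced below in acl/enterprisemeta_oss.go makes every EnterpriseMeta accessor fall back to a fixed default in the OSS build. A minimal usage sketch, assuming the github.com/hashicorp/consul/acl package from this patch is importable (illustrative only, not part of the patch series):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

func main() {
	// In OSS the zero value is the only value: every accessor degrades to
	// the default namespace/partition, so shared code never needs to branch
	// on the build edition.
	var m acl.EnterpriseMeta
	fmt.Println(m.NamespaceOrDefault()) // "default"
	fmt.Println(m.PartitionOrDefault()) // "default"
	fmt.Println(m.InDefaultPartition()) // true
	fmt.Println(m.NamespaceOrEmpty())   // ""
}

The enterprise build swaps in a fielded implementation behind the consulent build tag, which is why the stub keeps the same method signatures.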
Signed-off-by: Mark Anderson --- acl/acl_oss.go | 5 + acl/enterprisemeta_oss.go | 108 ++++++++++++++++++++++ agent/structs/structs.go | 4 +- agent/structs/structs_oss.go | 148 ++++++------------------------ agent/structs/structs_oss_test.go | 14 +-- 5 files changed, 153 insertions(+), 126 deletions(-) create mode 100644 acl/enterprisemeta_oss.go diff --git a/acl/acl_oss.go b/acl/acl_oss.go index 9c62c5bdb..ca2974e4e 100644 --- a/acl/acl_oss.go +++ b/acl/acl_oss.go @@ -5,6 +5,11 @@ package acl const DefaultPartitionName = "" +// Reviewer Note: This is a little bit strange; one might want it to be "" like partition name +// However in consul/structs/intention.go we define IntentionDefaultNamespace as 'default' and so +// we use the same here +const DefaultNamespaceName = "default" + type EnterpriseConfig struct { // no fields in OSS } diff --git a/acl/enterprisemeta_oss.go b/acl/enterprisemeta_oss.go new file mode 100644 index 000000000..2296fdd43 --- /dev/null +++ b/acl/enterprisemeta_oss.go @@ -0,0 +1,108 @@ +//go:build !consulent +// +build !consulent + +package acl + +import "hash" + +var emptyEnterpriseMeta = EnterpriseMeta{} + +// EnterpriseMeta stub +type EnterpriseMeta struct{} + +func (m *EnterpriseMeta) ToEnterprisePolicyMeta() *EnterprisePolicyMeta { + return nil +} + +func DefaultEnterpriseMeta() *EnterpriseMeta { + return &EnterpriseMeta{} +} + +func WildcardEnterpriseMeta() *EnterpriseMeta { + return &EnterpriseMeta{} +} + +func (m *EnterpriseMeta) EstimateSize() int { + return 0 +} + +func (m *EnterpriseMeta) AddToHash(_ hash.Hash, _ bool) { + // do nothing +} + +func (m *EnterpriseMeta) PartitionOrDefault() string { + return "default" +} + +func EqualPartitions(_, _ string) bool { + return true +} + +func IsDefaultPartition(partition string) bool { + return true +} + +func PartitionOrDefault(_ string) string { + return "default" +} + +func (m *EnterpriseMeta) PartitionOrEmpty() string { + return "" +} + +func (m *EnterpriseMeta) InDefaultPartition() bool { + return true +} + +func (m *EnterpriseMeta) NamespaceOrDefault() string { + return DefaultNamespaceName +} + +func NamespaceOrDefault(_ string) string { + return DefaultNamespaceName +} + +func (m *EnterpriseMeta) NamespaceOrEmpty() string { + return "" +} + +func (m *EnterpriseMeta) InDefaultNamespace() bool { + return true +} + +func (m *EnterpriseMeta) Merge(_ *EnterpriseMeta) { + // do nothing +} + +func (m *EnterpriseMeta) MergeNoWildcard(_ *EnterpriseMeta) { + // do nothing +} + +func (_ *EnterpriseMeta) Normalize() {} + +func (m *EnterpriseMeta) Matches(_ *EnterpriseMeta) bool { + return true +} + +func (m *EnterpriseMeta) IsSame(_ *EnterpriseMeta) bool { + return true +} + +func (m *EnterpriseMeta) LessThan(_ *EnterpriseMeta) bool { + return false +} + +func (m *EnterpriseMeta) WithWildcardNamespace() *EnterpriseMeta { + return &emptyEnterpriseMeta +} + +func (m *EnterpriseMeta) UnsetPartition() { + // do nothing +} + +func NewEnterpriseMetaWithPartition(_, _ string) EnterpriseMeta { + return emptyEnterpriseMeta +} + +// FillAuthzContext stub +func (_ *EnterpriseMeta) FillAuthzContext(_ *AuthorizerContext) {} diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 1a678f5c0..9ea65fc3a 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -1896,13 +1896,13 @@ type CheckID struct { EnterpriseMeta } -// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer +// NamespaceOrDefault exists because acl.EnterpriseMeta uses a pointer // receiver for this method. 
Remove once that is fixed. func (c CheckID) NamespaceOrDefault() string { return c.EnterpriseMeta.NamespaceOrDefault() } -// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer +// PartitionOrDefault exists because acl.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. func (c CheckID) PartitionOrDefault() string { return c.EnterpriseMeta.PartitionOrDefault() diff --git a/agent/structs/structs_oss.go b/agent/structs/structs_oss.go index 669361802..bf911f5a8 100644 --- a/agent/structs/structs_oss.go +++ b/agent/structs/structs_oss.go @@ -4,158 +4,70 @@ package structs import ( - "hash" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/types" ) -var emptyEnterpriseMeta = EnterpriseMeta{} - -// EnterpriseMeta stub -type EnterpriseMeta struct{} - -func (m *EnterpriseMeta) ToEnterprisePolicyMeta() *acl.EnterprisePolicyMeta { - return nil -} - -func (m *EnterpriseMeta) estimateSize() int { - return 0 -} - -func (m *EnterpriseMeta) addToHash(_ hash.Hash, _ bool) { - // do nothing -} - -func (m *EnterpriseMeta) Merge(_ *EnterpriseMeta) { - // do nothing -} - -func (m *EnterpriseMeta) MergeNoWildcard(_ *EnterpriseMeta) { - // do nothing -} - -func (m *EnterpriseMeta) Matches(_ *EnterpriseMeta) bool { - return true -} - -func (m *EnterpriseMeta) IsSame(_ *EnterpriseMeta) bool { - return true -} - -func (m *EnterpriseMeta) LessThan(_ *EnterpriseMeta) bool { - return false -} - -func (m *EnterpriseMeta) WithWildcardNamespace() *EnterpriseMeta { - return &emptyEnterpriseMeta -} - -func (m *EnterpriseMeta) UnsetPartition() { - // do nothing -} +// TODO(acl-move-enterprise-meta) sync this with enterprise +var emptyEnterpriseMeta = acl.EnterpriseMeta{} // TODO(partition): stop using this -func NewEnterpriseMetaInDefaultPartition(_ string) EnterpriseMeta { +func NewEnterpriseMetaInDefaultPartition(_ string) acl.EnterpriseMeta { return emptyEnterpriseMeta } -func NewEnterpriseMetaWithPartition(_, _ string) EnterpriseMeta { - return emptyEnterpriseMeta -} - -func (m *EnterpriseMeta) NamespaceOrDefault() string { - return IntentionDefaultNamespace -} - -func NamespaceOrDefault(_ string) string { - return IntentionDefaultNamespace -} - -func (m *EnterpriseMeta) NamespaceOrEmpty() string { - return "" -} - -func (m *EnterpriseMeta) InDefaultNamespace() bool { - return true -} - -func (m *EnterpriseMeta) PartitionOrDefault() string { - return "default" -} - -func EqualPartitions(_, _ string) bool { - return true -} - -func IsDefaultPartition(partition string) bool { - return true -} - -func PartitionOrDefault(_ string) string { - return "default" -} - -func (m *EnterpriseMeta) PartitionOrEmpty() string { - return "" -} - -func (m *EnterpriseMeta) InDefaultPartition() bool { - return true -} - // ReplicationEnterpriseMeta stub -func ReplicationEnterpriseMeta() *EnterpriseMeta { +func ReplicationEnterpriseMeta() *acl.EnterpriseMeta { return &emptyEnterpriseMeta } // TODO(partition): stop using this -func DefaultEnterpriseMetaInDefaultPartition() *EnterpriseMeta { +func WildcardEnterpriseMetaInDefaultPartition() *acl.EnterpriseMeta { + return &emptyEnterpriseMeta +} + +// TODO(partition): stop using this +func DefaultEnterpriseMetaInDefaultPartition() *acl.EnterpriseMeta { return &emptyEnterpriseMeta } // DefaultEnterpriseMetaInPartition stub -func DefaultEnterpriseMetaInPartition(_ string) *EnterpriseMeta { - return &emptyEnterpriseMeta -} - -func NodeEnterpriseMetaInPartition(_ string) *EnterpriseMeta { - return &emptyEnterpriseMeta -} - -// 
TODO(partition): stop using this -func NodeEnterpriseMetaInDefaultPartition() *EnterpriseMeta { - return &emptyEnterpriseMeta -} - -// TODO(partition): stop using this -func WildcardEnterpriseMetaInDefaultPartition() *EnterpriseMeta { +func DefaultEnterpriseMetaInPartition(_ string) *acl.EnterpriseMeta { return &emptyEnterpriseMeta } // WildcardEnterpriseMetaInPartition stub -func WildcardEnterpriseMetaInPartition(_ string) *EnterpriseMeta { +func WildcardEnterpriseMetaInPartition(_ string) *acl.EnterpriseMeta { + return &emptyEnterpriseMeta +} + +func NewEnterpriseMetaWithPartition(_, _ string) acl.EnterpriseMeta { + return emptyEnterpriseMeta +} + +func NodeEnterpriseMetaInPartition(_ string) *acl.EnterpriseMeta { + return &emptyEnterpriseMeta +} + +// TODO(partition): stop using this +func NodeEnterpriseMetaInDefaultPartition() *acl.EnterpriseMeta { return &emptyEnterpriseMeta } // FillAuthzContext stub -func (_ *EnterpriseMeta) FillAuthzContext(_ *acl.AuthorizerContext) {} - func (_ *Node) FillAuthzContext(_ *acl.AuthorizerContext) {} func (_ *Coordinate) FillAuthzContext(_ *acl.AuthorizerContext) {} func (_ *NodeInfo) FillAuthzContext(_ *acl.AuthorizerContext) {} -func (_ *EnterpriseMeta) Normalize() {} - // FillAuthzContext stub func (_ *DirEntry) FillAuthzContext(_ *acl.AuthorizerContext) {} // FillAuthzContext stub func (_ *RegisterRequest) FillAuthzContext(_ *acl.AuthorizerContext) {} -func (_ *RegisterRequest) GetEnterpriseMeta() *EnterpriseMeta { +func (_ *RegisterRequest) GetEnterpriseMeta() *acl.EnterpriseMeta { return nil } @@ -168,15 +80,15 @@ func (_ *TxnServiceOp) FillAuthzContext(_ *acl.AuthorizerContext) {} // OSS Stub func (_ *TxnCheckOp) FillAuthzContext(_ *acl.AuthorizerContext) {} -func NodeNameString(node string, _ *EnterpriseMeta) string { +func NodeNameString(node string, _ *acl.EnterpriseMeta) string { return node } -func ServiceIDString(id string, _ *EnterpriseMeta) string { +func ServiceIDString(id string, _ *acl.EnterpriseMeta) string { return id } -func ParseServiceIDString(input string) (string, *EnterpriseMeta) { +func ParseServiceIDString(input string) (string, *acl.EnterpriseMeta) { return input, DefaultEnterpriseMetaInDefaultPartition() } @@ -189,7 +101,7 @@ func ServiceIDFromString(input string) ServiceID { return ServiceID{ID: id} } -func ParseServiceNameString(input string) (string, *EnterpriseMeta) { +func ParseServiceNameString(input string) (string, *acl.EnterpriseMeta) { return input, DefaultEnterpriseMetaInDefaultPartition() } diff --git a/agent/structs/structs_oss_test.go b/agent/structs/structs_oss_test.go index 28b6e3797..54bbb0f2f 100644 --- a/agent/structs/structs_oss_test.go +++ b/agent/structs/structs_oss_test.go @@ -8,39 +8,41 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" ) var enterpriseMetaField = "EnterpriseMeta" func TestServiceID_String(t *testing.T) { t.Run("value", func(t *testing.T) { - sid := NewServiceID("the-id", &EnterpriseMeta{}) + sid := NewServiceID("the-id", &acl.EnterpriseMeta{}) require.Equal(t, "the-id", fmt.Sprintf("%v", sid)) }) t.Run("pointer", func(t *testing.T) { - sid := NewServiceID("the-id", &EnterpriseMeta{}) + sid := NewServiceID("the-id", &acl.EnterpriseMeta{}) require.Equal(t, "the-id", fmt.Sprintf("%v", &sid)) }) } func TestCheckID_String(t *testing.T) { t.Run("value", func(t *testing.T) { - cid := NewCheckID("the-id", &EnterpriseMeta{}) + cid := NewCheckID("the-id", &acl.EnterpriseMeta{}) require.Equal(t, "the-id", fmt.Sprintf("%v", cid)) }) 
t.Run("pointer", func(t *testing.T) { - cid := NewCheckID("the-id", &EnterpriseMeta{}) + cid := NewCheckID("the-id", &acl.EnterpriseMeta{}) require.Equal(t, "the-id", fmt.Sprintf("%v", &cid)) }) } func TestServiceName_String(t *testing.T) { t.Run("value", func(t *testing.T) { - sn := NewServiceName("the-id", &EnterpriseMeta{}) + sn := NewServiceName("the-id", &acl.EnterpriseMeta{}) require.Equal(t, "the-id", fmt.Sprintf("%v", sn)) }) t.Run("pointer", func(t *testing.T) { - sn := NewServiceName("the-id", &EnterpriseMeta{}) + sn := NewServiceName("the-id", &acl.EnterpriseMeta{}) require.Equal(t, "the-id", fmt.Sprintf("%v", &sn)) }) } From 0905c1d83d36a01c5dfbf7e35b50e118146144e1 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Sat, 12 Mar 2022 19:55:53 -0800 Subject: [PATCH 087/785] Manual Structs fixup Change things by hand that I couldn't figure out how to automate Signed-off-by: Mark Anderson --- agent/structs/acl.go | 75 +++++++------ agent/structs/acl_oss.go | 10 +- agent/structs/catalog.go | 3 +- agent/structs/check_definition.go | 3 +- agent/structs/config_entry.go | 28 ++--- agent/structs/config_entry_discoverychain.go | 20 ++-- .../config_entry_discoverychain_oss.go | 16 ++- agent/structs/config_entry_exports.go | 8 +- agent/structs/config_entry_gateways.go | 18 +-- agent/structs/config_entry_intentions.go | 10 +- agent/structs/config_entry_intentions_oss.go | 6 +- agent/structs/config_entry_mesh.go | 6 +- agent/structs/config_entry_oss.go | 4 +- agent/structs/config_entry_test.go | 2 +- agent/structs/connect.go | 4 +- agent/structs/connect_ca.go | 3 +- agent/structs/connect_proxy_config_oss.go | 6 +- agent/structs/discovery_chain.go | 3 +- agent/structs/discovery_chain_oss.go | 6 +- agent/structs/identity.go | 4 +- agent/structs/intention.go | 6 +- agent/structs/intention_oss.go | 12 +- agent/structs/prepared_query.go | 8 +- agent/structs/service_definition.go | 3 +- agent/structs/structs.go | 104 +++++++++--------- agent/structs/testing_connect_proxy_config.go | 8 +- 26 files changed, 204 insertions(+), 172 deletions(-) diff --git a/agent/structs/acl.go b/agent/structs/acl.go index 9e79ecb56..f516e0a6d 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -101,9 +101,8 @@ type ACLIdentity interface { NodeIdentityList() []*ACLNodeIdentity IsExpired(asOf time.Time) bool IsLocal() bool - EnterpriseMetadata() *EnterpriseMeta + EnterpriseMetadata() *acl.EnterpriseMeta } - type ACLTokenPolicyLink struct { ID string Name string `hash:"ignore"` @@ -149,7 +148,7 @@ func (s *ACLServiceIdentity) EstimateSize() int { return size } -func (s *ACLServiceIdentity) SyntheticPolicy(entMeta *EnterpriseMeta) *ACLPolicy { +func (s *ACLServiceIdentity) SyntheticPolicy(entMeta *acl.EnterpriseMeta) *ACLPolicy { // Given that we validate this string name before persisting, we do not // have to escape it before doing the following interpolation. rules := aclServiceIdentityRules(s.ServiceName, entMeta) @@ -194,7 +193,7 @@ func (s *ACLNodeIdentity) EstimateSize() int { return len(s.NodeName) + len(s.Datacenter) } -func (s *ACLNodeIdentity) SyntheticPolicy(entMeta *EnterpriseMeta) *ACLPolicy { +func (s *ACLNodeIdentity) SyntheticPolicy(entMeta *acl.EnterpriseMeta) *ACLPolicy { // Given that we validate this string name before persisting, we do not // have to escape it before doing the following interpolation. 
rules := aclNodeIdentityRules(s.NodeName, entMeta) @@ -289,7 +288,7 @@ type ACLToken struct { Hash []byte // Embedded Enterprise Metadata - EnterpriseMeta `mapstructure:",squash"` + acl.EnterpriseMeta `mapstructure:",squash"` // Embedded Raft Metadata RaftIndex @@ -415,7 +414,7 @@ func (t *ACLToken) HasExpirationTime() bool { return t.ExpirationTime != nil && !t.ExpirationTime.IsZero() } -func (t *ACLToken) EnterpriseMetadata() *EnterpriseMeta { +func (t *ACLToken) EnterpriseMetadata() *acl.EnterpriseMeta { return &t.EnterpriseMeta } @@ -462,7 +461,7 @@ func (t *ACLToken) SetHash(force bool) []byte { nodeID.AddToHash(hash) } - t.EnterpriseMeta.addToHash(hash, false) + t.EnterpriseMeta.AddToHash(hash, false) // Finalize the hash hashVal := hash.Sum(nil) @@ -488,7 +487,7 @@ func (t *ACLToken) EstimateSize() int { for _, nodeID := range t.NodeIdentities { size += nodeID.EstimateSize() } - return size + t.EnterpriseMeta.estimateSize() + return size + t.EnterpriseMeta.EstimateSize() } // ACLTokens is a slice of ACLTokens. @@ -510,7 +509,7 @@ type ACLTokenListStub struct { CreateIndex uint64 ModifyIndex uint64 Legacy bool `json:",omitempty"` - EnterpriseMeta + acl.EnterpriseMeta ACLAuthMethodEnterpriseMeta } @@ -583,7 +582,7 @@ type ACLPolicy struct { Hash []byte // Embedded Enterprise ACL Metadata - EnterpriseMeta `mapstructure:",squash"` + acl.EnterpriseMeta `mapstructure:",squash"` // Embedded Raft Metadata RaftIndex `hash:"ignore"` @@ -621,7 +620,7 @@ type ACLPolicyListStub struct { Hash []byte CreateIndex uint64 ModifyIndex uint64 - EnterpriseMeta + acl.EnterpriseMeta } func (p *ACLPolicy) Stub() *ACLPolicyListStub { @@ -664,7 +663,7 @@ func (p *ACLPolicy) SetHash(force bool) []byte { hash.Write([]byte(dc)) } - p.EnterpriseMeta.addToHash(hash, false) + p.EnterpriseMeta.AddToHash(hash, false) // Finalize the hash hashVal := hash.Sum(nil) @@ -685,7 +684,7 @@ func (p *ACLPolicy) EstimateSize() int { size += len(dc) } - return size + p.EnterpriseMeta.estimateSize() + return size + p.EnterpriseMeta.EstimateSize() } // HashKey returns a consistent hash for a set of policies. 
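The SetHash hunks above switch ACLToken and ACLPolicy from the old unexported addToHash/estimateSize helpers to the exported AddToHash/EstimateSize now that the metadata lives in the acl package. A simplified sketch of that hashing pattern, using stand-in types and SHA-256 rather than the hash the real code constructs (names here are illustrative):

package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
)

// metaStub mirrors the OSS shape of acl.EnterpriseMeta: AddToHash contributes
// nothing, so digests stay stable across editions.
type metaStub struct{}

func (metaStub) AddToHash(_ hash.Hash, _ bool) {}

type policy struct {
	Name  string
	Rules string
	Meta  metaStub
}

// setHash mirrors the SetHash pattern: write each identifying field into the
// hash, then let the enterprise metadata append its own fields (a no-op here).
func (p *policy) setHash() []byte {
	h := sha256.New()
	h.Write([]byte(p.Name))
	h.Write([]byte(p.Rules))
	p.Meta.AddToHash(h, false)
	return h.Sum(nil)
}

func main() {
	p := policy{Name: "read-only", Rules: `acl = "read"`}
	fmt.Printf("%x\n", p.setHash())
}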
@@ -824,7 +823,7 @@ type ACLRole struct { Hash []byte // Embedded Enterprise ACL metadata - EnterpriseMeta `mapstructure:",squash"` + acl.EnterpriseMeta `mapstructure:",squash"` // Embedded Raft Metadata RaftIndex `hash:"ignore"` @@ -902,7 +901,7 @@ func (r *ACLRole) SetHash(force bool) []byte { nodeID.AddToHash(hash) } - r.EnterpriseMeta.addToHash(hash, false) + r.EnterpriseMeta.AddToHash(hash, false) // Finalize the hash hashVal := hash.Sum(nil) @@ -929,7 +928,7 @@ func (r *ACLRole) EstimateSize() int { size += nodeID.EstimateSize() } - return size + r.EnterpriseMeta.estimateSize() + return size + r.EnterpriseMeta.EstimateSize() } const ( @@ -1005,7 +1004,7 @@ type ACLBindingRule struct { BindName string // Embedded Enterprise ACL metadata - EnterpriseMeta `mapstructure:",squash"` + acl.EnterpriseMeta `mapstructure:",squash"` // Embedded Raft Metadata RaftIndex `hash:"ignore"` @@ -1034,7 +1033,7 @@ type ACLAuthMethodListStub struct { TokenLocality string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 - EnterpriseMeta + acl.EnterpriseMeta } func (p *ACLAuthMethod) Stub() *ACLAuthMethodListStub { @@ -1118,7 +1117,7 @@ type ACLAuthMethod struct { Config map[string]interface{} // Embedded Enterprise ACL Meta - EnterpriseMeta `mapstructure:",squash"` + acl.EnterpriseMeta `mapstructure:",squash"` ACLAuthMethodEnterpriseFields `mapstructure:",squash"` @@ -1222,7 +1221,7 @@ type ACLTokenGetRequest struct { TokenIDType ACLTokenIDType // The Type of ID used to lookup the token Expanded bool Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1234,7 +1233,7 @@ func (r *ACLTokenGetRequest) RequestDatacenter() string { type ACLTokenDeleteRequest struct { TokenID string // ID of the token to delete Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta WriteRequest } @@ -1251,7 +1250,7 @@ type ACLTokenListRequest struct { AuthMethod string // Auth Method filter Datacenter string // The datacenter to perform the request within ACLAuthMethodEnterpriseMeta - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1362,7 +1361,7 @@ func (r *ACLPolicySetRequest) RequestDatacenter() string { type ACLPolicyDeleteRequest struct { PolicyID string // The id of the policy to delete Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta WriteRequest } @@ -1375,7 +1374,7 @@ type ACLPolicyGetRequest struct { PolicyID string // id used for the policy lookup (one of PolicyID or PolicyName is allowed) PolicyName string // name used for the policy lookup (one of PolicyID or PolicyName is allowed) Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1386,7 +1385,7 @@ func (r *ACLPolicyGetRequest) RequestDatacenter() string { // ACLPolicyListRequest is used at the RPC layer to request a listing of policies type ACLPolicyListRequest struct { Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1453,7 +1452,7 @@ func (r *ACLRoleSetRequest) RequestDatacenter() string { type ACLRoleDeleteRequest struct { RoleID string // id of the role to delete Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta WriteRequest } @@ -1466,7 +1465,7 @@ type ACLRoleGetRequest struct { RoleID string // id used for the role lookup (one of RoleID or RoleName is allowed) RoleName 
string // name used for the role lookup (one of RoleID or RoleName is allowed) Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1478,7 +1477,7 @@ func (r *ACLRoleGetRequest) RequestDatacenter() string { type ACLRoleListRequest struct { Policy string // Policy filter Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1546,7 +1545,7 @@ func (r *ACLBindingRuleSetRequest) RequestDatacenter() string { type ACLBindingRuleDeleteRequest struct { BindingRuleID string // id of the rule to delete Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta WriteRequest } @@ -1558,7 +1557,7 @@ func (r *ACLBindingRuleDeleteRequest) RequestDatacenter() string { type ACLBindingRuleGetRequest struct { BindingRuleID string // id used for the rule lookup Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1570,7 +1569,7 @@ func (r *ACLBindingRuleGetRequest) RequestDatacenter() string { type ACLBindingRuleListRequest struct { AuthMethod string // optional filter Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1616,7 +1615,7 @@ func (r *ACLAuthMethodSetRequest) RequestDatacenter() string { type ACLAuthMethodDeleteRequest struct { AuthMethodName string // name of the auth method to delete Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta WriteRequest } @@ -1628,7 +1627,7 @@ func (r *ACLAuthMethodDeleteRequest) RequestDatacenter() string { type ACLAuthMethodGetRequest struct { AuthMethodName string // name used for the auth method lookup Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1639,7 +1638,7 @@ func (r *ACLAuthMethodGetRequest) RequestDatacenter() string { // ACLAuthMethodListRequest is used at the RPC layer to request a listing of auth methods type ACLAuthMethodListRequest struct { Datacenter string // The datacenter to perform the request within - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -1673,14 +1672,14 @@ type ACLAuthMethodBatchDeleteRequest struct { // delete a single entry. This is because AuthMethods unlike tokens, policies // and roles are not replicated between datacenters and therefore never // batch applied. 
- EnterpriseMeta + acl.EnterpriseMeta } type ACLLoginParams struct { AuthMethod string BearerToken string Meta map[string]string `json:",omitempty"` - EnterpriseMeta + acl.EnterpriseMeta } type ACLLoginRequest struct { @@ -1712,7 +1711,7 @@ type ACLAuthorizationRequest struct { Resource acl.Resource Segment string `json:",omitempty"` Access string - EnterpriseMeta + acl.EnterpriseMeta } type ACLAuthorizationResponse struct { @@ -1786,6 +1785,6 @@ func (id *AgentRecoveryTokenIdentity) IsLocal() bool { return true } -func (id *AgentRecoveryTokenIdentity) EnterpriseMetadata() *EnterpriseMeta { +func (id *AgentRecoveryTokenIdentity) EnterpriseMetadata() *acl.EnterpriseMeta { return nil } diff --git a/agent/structs/acl_oss.go b/agent/structs/acl_oss.go index 3a1457aad..b41986547 100644 --- a/agent/structs/acl_oss.go +++ b/agent/structs/acl_oss.go @@ -51,19 +51,19 @@ type ACLAuthMethodEnterpriseFields struct{} type ACLAuthMethodEnterpriseMeta struct{} -func (_ *ACLAuthMethodEnterpriseMeta) FillWithEnterpriseMeta(_ *EnterpriseMeta) { +func (_ *ACLAuthMethodEnterpriseMeta) FillWithEnterpriseMeta(_ *acl.EnterpriseMeta) { // do nothing } -func (_ *ACLAuthMethodEnterpriseMeta) ToEnterpriseMeta() *EnterpriseMeta { +func (_ *ACLAuthMethodEnterpriseMeta) ToEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } -func aclServiceIdentityRules(svc string, _ *EnterpriseMeta) string { +func aclServiceIdentityRules(svc string, _ *acl.EnterpriseMeta) string { return fmt.Sprintf(aclPolicyTemplateServiceIdentity, svc) } -func aclNodeIdentityRules(node string, _ *EnterpriseMeta) string { +func aclNodeIdentityRules(node string, _ *acl.EnterpriseMeta) string { return fmt.Sprintf(aclPolicyTemplateNodeIdentity, node) } @@ -95,6 +95,6 @@ func (r *ACLRole) NodeIdentityList() []*ACLNodeIdentity { return out } -func IsValidPartitionAndDatacenter(meta EnterpriseMeta, datacenters []string, primaryDatacenter string) bool { +func IsValidPartitionAndDatacenter(meta acl.EnterpriseMeta, datacenters []string, primaryDatacenter string) bool { return true } diff --git a/agent/structs/catalog.go b/agent/structs/catalog.go index 73cd0264f..94581a45c 100644 --- a/agent/structs/catalog.go +++ b/agent/structs/catalog.go @@ -1,6 +1,7 @@ package structs import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" ) @@ -41,7 +42,7 @@ type HealthSummary struct { Warning int Critical int - EnterpriseMeta + acl.EnterpriseMeta } func (h *HealthSummary) Add(status string) { diff --git a/agent/structs/check_definition.go b/agent/structs/check_definition.go index 434f35e65..c6967d2fc 100644 --- a/agent/structs/check_definition.go +++ b/agent/structs/check_definition.go @@ -1,6 +1,7 @@ package structs import ( + "github.com/hashicorp/consul/acl" "time" "github.com/hashicorp/consul/api" @@ -47,7 +48,7 @@ type CheckDefinition struct { DeregisterCriticalServiceAfter time.Duration OutputMaxSize int - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) { diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 09e05fa4c..1fe7cad73 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -65,7 +65,7 @@ type ConfigEntry interface { CanWrite(acl.Authorizer) error GetMeta() map[string]string - GetEnterpriseMeta() *EnterpriseMeta + GetEnterpriseMeta() *acl.EnterpriseMeta GetRaftIndex() 
*RaftIndex } @@ -103,8 +103,8 @@ type ServiceConfigEntry struct { ExternalSNI string `json:",omitempty" alias:"external_sni"` UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -212,7 +212,7 @@ func (e *ServiceConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *ServiceConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ServiceConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } @@ -262,8 +262,8 @@ type ProxyConfigEntry struct { MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -333,7 +333,7 @@ func (e *ProxyConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *ProxyConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ProxyConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } @@ -555,7 +555,7 @@ type ConfigEntryQuery struct { Name string Datacenter string - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -601,7 +601,7 @@ type ConfigEntryListAllRequest struct { Kinds []string Datacenter string - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -629,7 +629,7 @@ type ServiceConfigRequest struct { // uniquely identify a service. Upstreams []string - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -654,7 +654,7 @@ func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo { // and change it. v, err := hashstructure.Hash(struct { Name string - EnterpriseMeta EnterpriseMeta + EnterpriseMeta acl.EnterpriseMeta Upstreams []string `hash:"set"` UpstreamIDs []ServiceID `hash:"set"` MeshGatewayConfig MeshGatewayConfig @@ -683,7 +683,7 @@ type UpstreamConfig struct { // Name is only accepted within a service-defaults config entry. Name string `json:",omitempty"` // EnterpriseMeta is only accepted within a service-defaults config entry. - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's // listener. @@ -773,10 +773,10 @@ func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}) { func (cfg *UpstreamConfig) NormalizeWithoutName() error { return cfg.normalize(false, nil) } -func (cfg *UpstreamConfig) NormalizeWithName(entMeta *EnterpriseMeta) error { +func (cfg *UpstreamConfig) NormalizeWithName(entMeta *acl.EnterpriseMeta) error { return cfg.normalize(true, entMeta) } -func (cfg *UpstreamConfig) normalize(named bool, entMeta *EnterpriseMeta) error { +func (cfg *UpstreamConfig) normalize(named bool, entMeta *acl.EnterpriseMeta) error { if named { // If the upstream namespace is omitted it inherits that of the enclosing // config entry. 
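These config entry structs embed acl.EnterpriseMeta with hcl/mapstructure ",squash" tags so that Namespace and Partition decode as top-level keys rather than a nested object. A stand-alone sketch of what the squash embedding buys, using a stand-in metadata type with visible fields (the OSS struct is empty, so this approximates the enterprise shape):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// EnterpriseMeta is a stand-in carrying the fields the enterprise build has.
type EnterpriseMeta struct {
	Namespace string
	Partition string
}

// ConfigEntryStub embeds the metadata with ",squash", mirroring the tag used
// throughout agent/structs, so its fields are promoted during decoding.
type ConfigEntryStub struct {
	Kind           string
	Name           string
	EnterpriseMeta `mapstructure:",squash"`
}

func main() {
	in := map[string]interface{}{
		"Kind":      "service-defaults",
		"Name":      "web",
		"Namespace": "team-a",
		"Partition": "default",
	}
	var e ConfigEntryStub
	if err := mapstructure.Decode(in, &e); err != nil {
		panic(err)
	}
	// Namespace and Partition land in the embedded struct without nesting.
	fmt.Printf("%+v\n", e)
}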
diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 826b2528d..aaac8652a 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -73,8 +73,8 @@ type ServiceRouterConfigEntry struct { // the default service. Routes []ServiceRoute - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -298,7 +298,7 @@ func (e *ServiceRouterConfigEntry) ListRelatedServices() []ServiceID { return out } -func (e *ServiceRouterConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ServiceRouterConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } @@ -485,8 +485,8 @@ type ServiceSplitterConfigEntry struct { // to the FIRST split. Splits []ServiceSplit - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -610,7 +610,7 @@ func (e *ServiceSplitterConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *ServiceSplitterConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ServiceSplitterConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } @@ -815,8 +815,8 @@ type ServiceResolverConfigEntry struct { // issuing requests to this upstream service. LoadBalancer *LoadBalancer `json:",omitempty" alias:"load_balancer"` - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -948,7 +948,7 @@ func (e *ServiceResolverConfigEntry) Validate() error { if !e.InDefaultPartition() && e.Redirect.Datacenter != "" { return fmt.Errorf("Cross-datacenter redirect is only supported in the default partition") } - if PartitionOrDefault(e.Redirect.Partition) != e.PartitionOrDefault() && e.Redirect.Datacenter != "" { + if acl.PartitionOrDefault(e.Redirect.Partition) != e.PartitionOrDefault() && e.Redirect.Datacenter != "" { return fmt.Errorf("Cross-datacenter and cross-partition redirect is not supported") } @@ -1085,7 +1085,7 @@ func (e *ServiceResolverConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *ServiceResolverConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ServiceResolverConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } diff --git a/agent/structs/config_entry_discoverychain_oss.go b/agent/structs/config_entry_discoverychain_oss.go index cd22c9686..0dcf1cc8f 100644 --- a/agent/structs/config_entry_discoverychain_oss.go +++ b/agent/structs/config_entry_discoverychain_oss.go @@ -3,38 +3,42 @@ package structs +import ( + "github.com/hashicorp/consul/acl" +) + // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from // fields in the ServiceRouteDestination -func (dest *ServiceRouteDestination) GetEnterpriseMeta(_ *EnterpriseMeta) *EnterpriseMeta { +func (dest *ServiceRouteDestination) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from // fields in the ServiceSplit -func (split *ServiceSplit) GetEnterpriseMeta(_ *EnterpriseMeta) 
*EnterpriseMeta { +func (split *ServiceSplit) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from // fields in the ServiceResolverRedirect -func (redir *ServiceResolverRedirect) GetEnterpriseMeta(_ *EnterpriseMeta) *EnterpriseMeta { +func (redir *ServiceResolverRedirect) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from // fields in the ServiceResolverFailover -func (failover *ServiceResolverFailover) GetEnterpriseMeta(_ *EnterpriseMeta) *EnterpriseMeta { +func (failover *ServiceResolverFailover) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from // fields in the DiscoveryChainRequest -func (req *DiscoveryChainRequest) GetEnterpriseMeta() *EnterpriseMeta { +func (req *DiscoveryChainRequest) GetEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } // WithEnterpriseMeta will populate the corresponding fields in the // DiscoveryChainRequest from the EnterpriseMeta struct -func (req *DiscoveryChainRequest) WithEnterpriseMeta(_ *EnterpriseMeta) { +func (req *DiscoveryChainRequest) WithEnterpriseMeta(_ *acl.EnterpriseMeta) { // do nothing } diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index 910f7451d..8a184cc39 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -16,8 +16,8 @@ type ExportedServicesConfigEntry struct { // to expose them to. Services []ExportedService - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -99,7 +99,7 @@ func (e *ExportedServicesConfigEntry) Normalize() error { e.EnterpriseMeta.Normalize() for i := range e.Services { - e.Services[i].Namespace = NamespaceOrDefault(e.Services[i].Namespace) + e.Services[i].Namespace = acl.NamespaceOrDefault(e.Services[i].Namespace) } return nil @@ -156,7 +156,7 @@ func (e *ExportedServicesConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *ExportedServicesConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ExportedServicesConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index fc9c840a0..4aadd8ef9 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -31,8 +31,8 @@ type IngressGatewayConfigEntry struct { // what services to associated to those ports. 
Listeners []IngressListener - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -90,8 +90,8 @@ type IngressService struct { RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } type GatewayTLSConfig struct { @@ -420,7 +420,7 @@ func (e *IngressGatewayConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *IngressGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *IngressGatewayConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } @@ -439,8 +439,8 @@ type TerminatingGatewayConfigEntry struct { Name string Services []LinkedService - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -464,7 +464,7 @@ type LinkedService struct { // SNI is the optional name to specify during the TLS handshake with a linked service SNI string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } func (e *TerminatingGatewayConfigEntry) GetKind() string { @@ -562,7 +562,7 @@ func (e *TerminatingGatewayConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *TerminatingGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *TerminatingGatewayConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } diff --git a/agent/structs/config_entry_intentions.go b/agent/structs/config_entry_intentions.go index 8829c2178..8f7cd8123 100644 --- a/agent/structs/config_entry_intentions.go +++ b/agent/structs/config_entry_intentions.go @@ -19,7 +19,7 @@ type ServiceIntentionsConfigEntry struct { Meta map[string]string `json:",omitempty"` // formerly Intention.Meta - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` // formerly DestinationNS + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` // formerly DestinationNS RaftIndex } @@ -258,7 +258,7 @@ type SourceIntention struct { // Things like L7 rules or Sentinel rules could go here later. // formerly Intention.SourceNS - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } type IntentionPermission struct { @@ -498,7 +498,7 @@ func computeIntentionPrecedence(entry *ServiceIntentionsConfigEntry, src *Source // intentionCountExact counts the number of exact values (not wildcards) in // the given namespace and name. 
-func intentionCountExact(name string, entMeta *EnterpriseMeta) int { +func intentionCountExact(name string, entMeta *acl.EnterpriseMeta) int { ns := entMeta.NamespaceOrDefault() // If NS is wildcard, pair must be */* since an exact service cannot follow a wildcard NS @@ -753,7 +753,7 @@ func (e *ServiceIntentionsConfigEntry) validate(legacyWrite bool) error { } // Wildcard usage verification -func validateIntentionWildcards(name string, entMeta *EnterpriseMeta) error { +func validateIntentionWildcards(name string, entMeta *acl.EnterpriseMeta) error { ns := entMeta.NamespaceOrDefault() if ns != WildcardSpecifier { if strings.Contains(ns, WildcardSpecifier) { @@ -783,7 +783,7 @@ func (e *ServiceIntentionsConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *ServiceIntentionsConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ServiceIntentionsConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } diff --git a/agent/structs/config_entry_intentions_oss.go b/agent/structs/config_entry_intentions_oss.go index d2edadd27..2628951f8 100644 --- a/agent/structs/config_entry_intentions_oss.go +++ b/agent/structs/config_entry_intentions_oss.go @@ -3,6 +3,10 @@ package structs -func validateSourceIntentionEnterpriseMeta(_, _ *EnterpriseMeta) error { +import ( + "github.com/hashicorp/consul/acl" +) + +func validateSourceIntentionEnterpriseMeta(_, _ *acl.EnterpriseMeta) error { return nil } diff --git a/agent/structs/config_entry_mesh.go b/agent/structs/config_entry_mesh.go index eb9f34f43..2d983eb82 100644 --- a/agent/structs/config_entry_mesh.go +++ b/agent/structs/config_entry_mesh.go @@ -15,8 +15,8 @@ type MeshConfigEntry struct { TLS *MeshTLSConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Meta map[string]string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -113,7 +113,7 @@ func (e *MeshConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *MeshConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *MeshConfigEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { if e == nil { return nil } diff --git a/agent/structs/config_entry_oss.go b/agent/structs/config_entry_oss.go index f7ccac38c..2cd1db7ac 100644 --- a/agent/structs/config_entry_oss.go +++ b/agent/structs/config_entry_oss.go @@ -8,6 +8,8 @@ import ( "strings" "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/consul/acl" ) func (e *ProxyConfigEntry) validateEnterpriseMeta() error { @@ -32,7 +34,7 @@ func validateUnusedKeys(unused []string) error { return err } -func validateInnerEnterpriseMeta(_, _ *EnterpriseMeta) error { +func validateInnerEnterpriseMeta(_, _ *acl.EnterpriseMeta) error { return nil } diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index f125998b9..5203bcc00 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -2750,7 +2750,7 @@ func testConfigEntryNormalizeAndValidate(t *testing.T, cases map[string]configEn // nothing else changes though during Normalize. So we ignore // EnterpriseMeta Defaults. 
opts := cmp.Options{ - cmp.Comparer(func(a, b EnterpriseMeta) bool { + cmp.Comparer(func(a, b acl.EnterpriseMeta) bool { return a.IsSame(&b) }), } diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 533b44270..9edf744a1 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -1,5 +1,7 @@ package structs +import "github.com/hashicorp/consul/acl" + // ConnectAuthorizeRequest is the structure of a request to authorize // a connection. type ConnectAuthorizeRequest struct { @@ -7,7 +9,7 @@ type ConnectAuthorizeRequest struct { Target string // EnterpriseMeta is the embedded Consul Enterprise specific metadata - EnterpriseMeta + acl.EnterpriseMeta // ClientCertURI is a unique identifier for the requesting client. This // is currently the URI SAN from the TLS client certificate. diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index ca08506e8..18210e571 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -9,6 +9,7 @@ import ( "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/lib" ) @@ -229,7 +230,7 @@ type IssuedCert struct { ValidBefore time.Time // EnterpriseMeta is the Consul Enterprise specific metadata - EnterpriseMeta + acl.EnterpriseMeta RaftIndex } diff --git a/agent/structs/connect_proxy_config_oss.go b/agent/structs/connect_proxy_config_oss.go index dff9cc25c..9e53a8fa2 100644 --- a/agent/structs/connect_proxy_config_oss.go +++ b/agent/structs/connect_proxy_config_oss.go @@ -3,7 +3,11 @@ package structs -func (us *Upstream) GetEnterpriseMeta() *EnterpriseMeta { +import ( + "github.com/hashicorp/consul/acl" +) + +func (us *Upstream) GetEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index 0dd3010e7..17b9ee77a 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -3,6 +3,7 @@ package structs import ( "encoding/json" "fmt" + "github.com/hashicorp/consul/acl" "time" "github.com/hashicorp/consul/lib" @@ -77,7 +78,7 @@ func (c *CompiledDiscoveryChain) ID() string { } func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName { - entMeta := NewEnterpriseMetaWithPartition(c.Partition, c.Namespace) + entMeta := acl.NewEnterpriseMetaWithPartition(c.Partition, c.Namespace) return NewServiceName(c.ServiceName, &entMeta) } diff --git a/agent/structs/discovery_chain_oss.go b/agent/structs/discovery_chain_oss.go index 3b7f091c5..cdabdf6b3 100644 --- a/agent/structs/discovery_chain_oss.go +++ b/agent/structs/discovery_chain_oss.go @@ -3,6 +3,10 @@ package structs -func (t *DiscoveryTarget) GetEnterpriseMetadata() *EnterpriseMeta { +import ( + "github.com/hashicorp/consul/acl" +) + +func (t *DiscoveryTarget) GetEnterpriseMetadata() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } diff --git a/agent/structs/identity.go b/agent/structs/identity.go index afe560abf..d0bfd62ec 100644 --- a/agent/structs/identity.go +++ b/agent/structs/identity.go @@ -1,10 +1,12 @@ package structs +import "github.com/hashicorp/consul/acl" + // Identity of some entity (ex: service, node, check). // // TODO: this type should replace ServiceID, ServiceName, and CheckID which all // have roughly identical implementations. 
type Identity struct { ID string - EnterpriseMeta + acl.EnterpriseMeta } diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 49217b130..feefc5672 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -728,9 +728,9 @@ func (q *IntentionQueryExact) Validate() error { } type IntentionListRequest struct { - Datacenter string - Legacy bool `json:"-"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Datacenter string + Legacy bool `json:"-"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } diff --git a/agent/structs/intention_oss.go b/agent/structs/intention_oss.go index 11a57e07d..e35b9b5b3 100644 --- a/agent/structs/intention_oss.go +++ b/agent/structs/intention_oss.go @@ -7,23 +7,23 @@ import ( "github.com/hashicorp/consul/acl" ) -func (ixn *Intention) SourceEnterpriseMeta() *EnterpriseMeta { +func (ixn *Intention) SourceEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } -func (ixn *Intention) DestinationEnterpriseMeta() *EnterpriseMeta { +func (ixn *Intention) DestinationEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } -func (e *IntentionMatchEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *IntentionMatchEntry) GetEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } -func (e *IntentionQueryExact) SourceEnterpriseMeta() *EnterpriseMeta { +func (e *IntentionQueryExact) SourceEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } -func (e *IntentionQueryExact) DestinationEnterpriseMeta() *EnterpriseMeta { +func (e *IntentionQueryExact) DestinationEnterpriseMeta() *acl.EnterpriseMeta { return DefaultEnterpriseMetaInDefaultPartition() } @@ -55,7 +55,7 @@ func (_ *IntentionQueryCheck) FillAuthzContext(_ *acl.AuthorizerContext) { // fillDefault MUST be true on servers to ensure that all fields are populated on writes. // fillDefault MUST be false on clients so that servers can correctly fill in the // namespace/partition of the ACL token. -func (ixn *Intention) FillPartitionAndNamespace(entMeta *EnterpriseMeta, fillDefault bool) { +func (ixn *Intention) FillPartitionAndNamespace(entMeta *acl.EnterpriseMeta, fillDefault bool) { if ixn == nil { return } diff --git a/agent/structs/prepared_query.go b/agent/structs/prepared_query.go index 0f795891c..b6028cead 100644 --- a/agent/structs/prepared_query.go +++ b/agent/structs/prepared_query.go @@ -3,9 +3,11 @@ package structs import ( "strconv" + "github.com/mitchellh/hashstructure" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/types" - "github.com/mitchellh/hashstructure" ) // QueryDatacenterOptions sets options about how we fail over if there are no @@ -78,7 +80,7 @@ type ServiceQuery struct { Connect bool // EnterpriseMeta is the embedded enterprise metadata - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } const ( @@ -306,7 +308,7 @@ type PreparedQueryExecuteResponse struct { Service string // EnterpriseMeta of the service that was queried. - EnterpriseMeta + acl.EnterpriseMeta // Nodes has the nodes that were output by the query. 
Nodes CheckServiceNodes diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 5141cbd96..05357c8f2 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/lib" ) @@ -33,7 +34,7 @@ type ServiceDefinition struct { // also called just "Config" Proxy *ConnectProxyConfig - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` Connect *ServiceConnect } diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 9ea65fc3a..46dbbe7c5 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -429,7 +429,7 @@ type RegisterRequest struct { SkipNodeUpdate bool // EnterpriseMeta is the embedded enterprise metadata - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` WriteRequest RaftIndex `bexpr:"-"` @@ -474,11 +474,11 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool { // If a ServiceID is provided, any associated Checks with that service // are also deregistered. type DeregisterRequest struct { - Datacenter string - Node string - ServiceID string - CheckID types.CheckID - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Datacenter string + Node string + ServiceID string + CheckID types.CheckID + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` WriteRequest } @@ -512,12 +512,12 @@ type QuerySource struct { Ip string } -func (s QuerySource) NodeEnterpriseMeta() *EnterpriseMeta { +func (s QuerySource) NodeEnterpriseMeta() *acl.EnterpriseMeta { return NodeEnterpriseMetaInPartition(s.NodePartition) } func (s QuerySource) NodePartitionOrDefault() string { - return PartitionOrDefault(s.NodePartition) + return acl.PartitionOrDefault(s.NodePartition) } type DatacentersRequest struct { @@ -538,10 +538,10 @@ func (r *DatacentersRequest) CacheInfo() cache.RequestInfo { // DCSpecificRequest is used to query about a specific DC type DCSpecificRequest struct { - Datacenter string - NodeMetaFilters map[string]string - Source QuerySource - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Datacenter string + NodeMetaFilters map[string]string + Source QuerySource + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -582,11 +582,11 @@ func (r *DCSpecificRequest) CacheMinIndex() uint64 { } type ServiceDumpRequest struct { - Datacenter string - ServiceKind ServiceKind - UseServiceKind bool - Source QuerySource - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Datacenter string + ServiceKind ServiceKind + UseServiceKind bool + Source QuerySource + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -652,7 +652,7 @@ type ServiceSpecificRequest struct { // Ingress if true will only search for Ingress gateways for the given service. 
Ingress bool - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -714,9 +714,9 @@ func (r *ServiceSpecificRequest) CacheMinIndex() uint64 { // NodeSpecificRequest is used to request the information about a single node type NodeSpecificRequest struct { - Datacenter string - Node string - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Datacenter string + Node string + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -756,7 +756,7 @@ type ChecksInStateRequest struct { State string Source QuerySource - EnterpriseMeta `mapstructure:",squash"` + acl.EnterpriseMeta `mapstructure:",squash"` QueryOptions } @@ -777,12 +777,12 @@ type Node struct { RaftIndex `bexpr:"-"` } -func (n *Node) GetEnterpriseMeta() *EnterpriseMeta { +func (n *Node) GetEnterpriseMeta() *acl.EnterpriseMeta { return NodeEnterpriseMetaInPartition(n.Partition) } func (n *Node) PartitionOrDefault() string { - return PartitionOrDefault(n.Partition) + return acl.PartitionOrDefault(n.Partition) } func (n *Node) BestAddress(wan bool) string { @@ -920,7 +920,7 @@ type ServiceNode struct { ServiceProxy ConnectProxyConfig ServiceConnect ServiceConnect - EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` RaftIndex `bexpr:"-"` } @@ -1128,7 +1128,7 @@ type NodeService struct { // somewhere this is used in API output. LocallyRegisteredAsSidecar bool `json:"-" bexpr:"-"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` RaftIndex `bexpr:"-"` } @@ -1528,7 +1528,7 @@ type HealthCheck struct { Definition HealthCheckDefinition `bexpr:"-"` - EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` RaftIndex `bexpr:"-"` } @@ -1869,12 +1869,12 @@ type NodeInfo struct { Checks HealthChecks } -func (n *NodeInfo) GetEnterpriseMeta() *EnterpriseMeta { +func (n *NodeInfo) GetEnterpriseMeta() *acl.EnterpriseMeta { return NodeEnterpriseMetaInPartition(n.Partition) } func (n *NodeInfo) PartitionOrDefault() string { - return PartitionOrDefault(n.Partition) + return acl.PartitionOrDefault(n.Partition) } // NodeDump is used to dump all the nodes with all their @@ -1893,7 +1893,7 @@ type ServiceDump []*ServiceInfo type CheckID struct { ID types.CheckID - EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because acl.EnterpriseMeta uses a pointer @@ -1908,7 +1908,7 @@ func (c CheckID) PartitionOrDefault() string { return c.EnterpriseMeta.PartitionOrDefault() } -func NewCheckID(id types.CheckID, entMeta *EnterpriseMeta) CheckID { +func NewCheckID(id types.CheckID, entMeta *acl.EnterpriseMeta) CheckID { var cid CheckID cid.ID = id if entMeta == nil { @@ -1926,7 +1926,7 @@ func NewCheckID(id types.CheckID, entMeta *EnterpriseMeta) CheckID { func (cid CheckID) StringHashMD5() string { hasher := md5.New() hasher.Write([]byte(cid.ID)) - cid.EnterpriseMeta.addToHash(hasher, true) + cid.EnterpriseMeta.AddToHash(hasher, true) return fmt.Sprintf("%x", hasher.Sum(nil)) } @@ -1935,16 +1935,16 @@ func (cid CheckID) StringHashMD5() string { func (cid CheckID) StringHashSHA256() string { hasher := sha256.New() hasher.Write([]byte(cid.ID)) - cid.EnterpriseMeta.addToHash(hasher, true) + cid.EnterpriseMeta.AddToHash(hasher, true) return fmt.Sprintf("%x", hasher.Sum(nil)) } type ServiceID struct { ID string 
- EnterpriseMeta + acl.EnterpriseMeta } -func NewServiceID(id string, entMeta *EnterpriseMeta) ServiceID { +func NewServiceID(id string, entMeta *acl.EnterpriseMeta) ServiceID { var sid ServiceID sid.ID = id if entMeta == nil { @@ -1965,7 +1965,7 @@ func (sid ServiceID) Matches(other ServiceID) bool { func (sid ServiceID) StringHashSHA256() string { hasher := sha256.New() hasher.Write([]byte(sid.ID)) - sid.EnterpriseMeta.addToHash(hasher, true) + sid.EnterpriseMeta.AddToHash(hasher, true) return fmt.Sprintf("%x", hasher.Sum(nil)) } @@ -1978,16 +1978,16 @@ type IndexedServices struct { Services Services // In various situations we need to know the meta that the services are for - in particular // this is needed to be able to properly filter the list based on ACLs - EnterpriseMeta + acl.EnterpriseMeta QueryMeta } type ServiceName struct { Name string - EnterpriseMeta + acl.EnterpriseMeta } -func NewServiceName(name string, entMeta *EnterpriseMeta) ServiceName { +func NewServiceName(name string, entMeta *acl.EnterpriseMeta) ServiceName { var ret ServiceName ret.Name = name if entMeta == nil { @@ -2252,7 +2252,7 @@ type DirEntry struct { Value []byte Session string `json:",omitempty"` - EnterpriseMeta `bexpr:"-"` + acl.EnterpriseMeta `bexpr:"-"` RaftIndex } @@ -2303,7 +2303,7 @@ func (r *KVSRequest) RequestDatacenter() string { type KeyRequest struct { Datacenter string Key string - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -2317,7 +2317,7 @@ type KeyListRequest struct { Prefix string Seperator string QueryOptions - EnterpriseMeta + acl.EnterpriseMeta } func (r *KeyListRequest) RequestDatacenter() string { @@ -2363,7 +2363,7 @@ type Session struct { // Deprecated v1.7.0. Checks []types.CheckID `json:",omitempty"` - EnterpriseMeta + acl.EnterpriseMeta RaftIndex } @@ -2432,7 +2432,7 @@ type SessionSpecificRequest struct { SessionID string // DEPRECATED in 1.7.0 Session string - EnterpriseMeta + acl.EnterpriseMeta QueryOptions } @@ -2453,12 +2453,12 @@ type Coordinate struct { Coord *coordinate.Coordinate } -func (c *Coordinate) GetEnterpriseMeta() *EnterpriseMeta { +func (c *Coordinate) GetEnterpriseMeta() *acl.EnterpriseMeta { return NodeEnterpriseMetaInPartition(c.Partition) } func (c *Coordinate) PartitionOrDefault() string { - return PartitionOrDefault(c.Partition) + return acl.PartitionOrDefault(c.Partition) } type Coordinates []*Coordinate @@ -2489,11 +2489,11 @@ type DatacenterMap struct { // CoordinateUpdateRequest is used to update the network coordinate of a given // node. type CoordinateUpdateRequest struct { - Datacenter string - Node string - Segment string - Coord *coordinate.Coordinate - EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Datacenter string + Node string + Segment string + Coord *coordinate.Coordinate + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` WriteRequest } @@ -2643,7 +2643,7 @@ type KeyringResponse struct { } func (r *KeyringResponse) PartitionOrDefault() string { - return PartitionOrDefault(r.Partition) + return acl.PartitionOrDefault(r.Partition) } // KeyringResponses holds multiple responses to keyring queries. 
Each diff --git a/agent/structs/testing_connect_proxy_config.go b/agent/structs/testing_connect_proxy_config.go index 12124ba9a..ad918927a 100644 --- a/agent/structs/testing_connect_proxy_config.go +++ b/agent/structs/testing_connect_proxy_config.go @@ -1,6 +1,10 @@ package structs -import "github.com/mitchellh/go-testing-interface" +import ( + "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/consul/acl" +) // TestConnectProxyConfig returns a ConnectProxyConfig representing a valid // Connect proxy. @@ -44,7 +48,7 @@ func TestUpstreams(t testing.T) Upstreams { // TestUpstreams) and adds default values that are populated during // registration. Use this for generating the expected Upstreams value after // registration. -func TestAddDefaultsToUpstreams(t testing.T, upstreams []Upstream, entMeta EnterpriseMeta) Upstreams { +func TestAddDefaultsToUpstreams(t testing.T, upstreams []Upstream, entMeta acl.EnterpriseMeta) Upstreams { ups := make([]Upstream, len(upstreams)) for i := range upstreams { ups[i] = upstreams[i] From 5b1e494fafa4441434e762502a1d90773d94f2ea Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Fri, 25 Mar 2022 11:27:28 -0700 Subject: [PATCH 088/785] Add script (REMOVE LATER) ' Signed-off-by: Mark Anderson --- fixup_acl_move.sh | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 fixup_acl_move.sh diff --git a/fixup_acl_move.sh b/fixup_acl_move.sh new file mode 100644 index 000000000..23da0a7cb --- /dev/null +++ b/fixup_acl_move.sh @@ -0,0 +1,24 @@ + +GOIMPORTS=~/go/bin/goimports + +CHANGED=(EnterpriseMeta PartitionOrDefault IsDefaultPartition NamespaceOrDefault NewEnterpriseMetaWithPartition EqualPartitions) + +DIRS=(agent command proto) + +for dir in "${DIRS[@]}" + do + echo "CD to $dir" + pushd $dir + for s in "${CHANGED[@]}" + do + REWRITE='structs.'$s' -> acl.'$s + echo "REPL $REWRITE" + gofmt -w -r="$REWRITE" . + done + popd +done + +git diff --name-only | xargs $GOIMPORTS -local "github.com/hashicorp/consul" -w + +make --always-make proto + From d652063d0a94174ff72e734a2a886d4f03afda49 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Mon, 28 Mar 2022 18:45:12 -0700 Subject: [PATCH 089/785] SQ FIX Signed-off-by: Mark Anderson --- fixup_acl_move.sh | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fixup_acl_move.sh b/fixup_acl_move.sh index 23da0a7cb..4fc428c0c 100644 --- a/fixup_acl_move.sh +++ b/fixup_acl_move.sh @@ -18,7 +18,15 @@ for dir in "${DIRS[@]}" popd done -git diff --name-only | xargs $GOIMPORTS -local "github.com/hashicorp/consul" -w +find . -name \*.go | xargs fgrep 'acl.' -l | xargs $GOIMPORTS -local "github.com/hashicorp/consul" -w make --always-make proto - + + +go get google.golang.org/protobuf/reflect/protoreflect +go get google.golang.org/protobuf/types/known/structpb +go get google.golang.org/protobuf/runtime/protoimpl +go get github.com/hashicorp/consul/agent/xds +go get github.com/hashicorp/consul/agent/structs +go get google.golang.org/protobuf + From 58ca8c8ed45d4e82d7b2d13ad90d040edbd7cff4 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Mon, 28 Mar 2022 19:31:46 -0700 Subject: [PATCH 090/785] Fixup script 2 Signed-off-by: Mark Anderson --- fixup_acl_move.sh | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/fixup_acl_move.sh b/fixup_acl_move.sh index 4fc428c0c..e8fa2698a 100644 --- a/fixup_acl_move.sh +++ b/fixup_acl_move.sh @@ -21,12 +21,4 @@ done find . -name \*.go | xargs fgrep 'acl.' 
-l | xargs $GOIMPORTS -local "github.com/hashicorp/consul" -w make --always-make proto - - -go get google.golang.org/protobuf/reflect/protoreflect -go get google.golang.org/protobuf/types/known/structpb -go get google.golang.org/protobuf/runtime/protoimpl -go get github.com/hashicorp/consul/agent/xds -go get github.com/hashicorp/consul/agent/structs -go get google.golang.org/protobuf - +make go-mod-tidy From ed3e42296d911656342bf1d2db1e6c42f4d27a7a Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Tue, 5 Apr 2022 14:10:06 -0700 Subject: [PATCH 091/785] Fixup acl.EnterpriseMeta Signed-off-by: Mark Anderson --- agent/acl_test.go | 6 +- agent/agent.go | 20 +-- agent/agent_endpoint.go | 12 +- agent/agent_endpoint_oss.go | 4 +- agent/agent_endpoint_test.go | 2 +- agent/agent_oss.go | 7 +- agent/cache-types/connect_ca_leaf.go | 3 +- agent/cache-types/service_checks.go | 8 +- agent/check.go | 3 +- agent/checks/alias.go | 5 +- agent/checks/alias_test.go | 3 +- agent/config/config_oss.go | 7 +- agent/config_endpoint.go | 5 +- agent/config_endpoint_test.go | 3 +- agent/configentry/config_entry.go | 5 +- agent/connect/authz.go | 5 +- agent/connect/uri_agent.go | 4 +- agent/connect/uri_agent_oss.go | 6 +- agent/connect/uri_service.go | 8 +- agent/connect/uri_service_oss.go | 6 +- agent/connect_auth.go | 2 +- agent/consul/acl.go | 18 +-- agent/consul/acl_authmethod.go | 8 +- agent/consul/acl_endpoint.go | 6 +- agent/consul/acl_endpoint_oss.go | 10 +- agent/consul/acl_endpoint_test.go | 11 +- agent/consul/acl_oss.go | 4 +- agent/consul/acl_test.go | 3 +- agent/consul/authmethod/authmethods.go | 6 +- agent/consul/authmethod/kubeauth/k8s_oss.go | 7 +- agent/consul/authmethod/ssoauth/sso_oss.go | 4 +- agent/consul/authmethod/testauth/testing.go | 5 +- .../consul/authmethod/testauth/testing_oss.go | 6 +- agent/consul/auto_config_backend.go | 3 +- agent/consul/auto_config_endpoint.go | 6 +- agent/consul/catalog_endpoint_test.go | 5 +- agent/consul/client.go | 7 +- agent/consul/config_endpoint_test.go | 3 +- agent/consul/config_oss.go | 7 +- agent/consul/connect_ca_endpoint_test.go | 3 +- agent/consul/coordinate_endpoint_test.go | 5 +- agent/consul/discovery_chain_endpoint_test.go | 3 +- agent/consul/discoverychain/compile_oss.go | 7 +- agent/consul/enterprise_server_oss.go | 5 +- .../consul/federation_state_endpoint_test.go | 5 +- agent/consul/fsm/snapshot_oss_test.go | 3 +- agent/consul/health_endpoint_test.go | 3 +- agent/consul/helper_test.go | 10 +- agent/consul/intention_endpoint.go | 18 +-- agent/consul/intention_endpoint_test.go | 3 +- agent/consul/internal_endpoint.go | 2 +- agent/consul/internal_endpoint_test.go | 3 +- agent/consul/kvs_endpoint_test.go | 3 +- agent/consul/leader.go | 12 +- agent/consul/leader_connect_ca.go | 2 +- .../operator_autopilot_endpoint_test.go | 3 +- agent/consul/operator_raft_endpoint_test.go | 3 +- agent/consul/prepared_query_endpoint_test.go | 5 +- agent/consul/rpc_test.go | 4 +- agent/consul/serf_filter.go | 6 +- agent/consul/server.go | 6 +- agent/consul/server_oss.go | 7 +- agent/consul/server_overview.go | 6 +- agent/consul/session_endpoint_test.go | 3 +- agent/consul/session_ttl.go | 5 +- agent/consul/snapshot_endpoint_test.go | 3 +- agent/consul/state/acl.go | 95 ++++++------ agent/consul/state/acl_oss.go | 37 ++--- agent/consul/state/acl_oss_test.go | 7 +- agent/consul/state/acl_schema.go | 5 +- agent/consul/state/catalog.go | 142 +++++++++--------- agent/consul/state/catalog_events.go | 8 +- agent/consul/state/catalog_oss.go | 41 ++--- 
agent/consul/state/catalog_oss_test.go | 15 +- agent/consul/state/catalog_schema.go | 5 +- agent/consul/state/config_entry.go | 51 ++++--- agent/consul/state/config_entry_intention.go | 9 +- .../state/config_entry_intention_oss.go | 3 +- agent/consul/state/config_entry_oss.go | 11 +- agent/consul/state/config_entry_oss_test.go | 3 +- agent/consul/state/coordinate.go | 7 +- agent/consul/state/coordinate_oss.go | 5 +- agent/consul/state/coordinate_oss_test.go | 9 +- agent/consul/state/delay_oss.go | 7 +- agent/consul/state/graveyard.go | 6 +- agent/consul/state/graveyard_oss.go | 3 +- agent/consul/state/indexer.go | 3 +- agent/consul/state/intention.go | 6 +- agent/consul/state/intention_oss.go | 4 +- agent/consul/state/kvs.go | 25 +-- agent/consul/state/kvs_oss.go | 9 +- agent/consul/state/kvs_oss_test.go | 9 +- agent/consul/state/operations_oss.go | 4 +- agent/consul/state/query.go | 13 +- agent/consul/state/query_oss.go | 6 +- agent/consul/state/schema_oss.go | 4 +- agent/consul/state/session.go | 9 +- agent/consul/state/session_oss.go | 9 +- agent/consul/state/state_store.go | 3 +- agent/consul/state/state_store_test.go | 3 +- agent/consul/subscribe_backend.go | 2 +- agent/consul/txn_endpoint_test.go | 3 +- .../usagemetrics/usagemetrics_oss_test.go | 5 +- agent/delegate_mock_test.go | 6 +- agent/discovery_chain_endpoint.go | 3 +- agent/dns.go | 7 +- agent/dns_oss.go | 6 +- .../private/services/subscribe/subscribe.go | 6 +- .../services/subscribe/subscribe_test.go | 10 +- .../services/connectca/mock_ACLResolver.go | 9 +- .../grpc/public/services/connectca/server.go | 2 +- .../dataplane/get_supported_features.go | 5 +- .../dataplane/get_supported_features_test.go | 9 +- .../services/dataplane/mock_ACLResolver.go | 9 +- .../grpc/public/services/dataplane/server.go | 8 +- agent/grpc/public/testutils/acl.go | 3 +- agent/http_oss.go | 7 +- agent/intentions_endpoint.go | 23 +-- agent/intentions_endpoint_test.go | 3 +- agent/local/state.go | 24 +-- agent/local/state_test.go | 6 +- agent/operator_endpoint.go | 3 +- agent/proxycfg/manager_test.go | 5 +- agent/proxycfg/naming.go | 9 +- agent/proxycfg/naming_oss.go | 5 +- agent/proxycfg/snapshot.go | 4 +- agent/proxycfg/state_test.go | 7 +- agent/proxycfg/testing_ingress_gateway.go | 7 +- agent/proxycfg/upstreams.go | 7 +- agent/structs/acl_cache_test.go | 3 +- agent/structs/check_definition.go | 3 +- agent/structs/config_entry.go | 3 +- agent/structs/config_entry_test.go | 3 +- agent/structs/discovery_chain.go | 3 +- agent/structs/structs.go | 9 +- agent/submatview/store_integration_test.go | 2 +- agent/txn_endpoint.go | 7 +- agent/ui_endpoint.go | 6 +- agent/xds/endpoints.go | 3 +- agent/xds/listeners.go | 3 +- agent/xds/server_oss.go | 3 +- .../bindingrule/delete/bindingrule_delete.go | 3 +- .../bindingrule/update/bindingrule_update.go | 3 +- command/acl/policy/create/policy_create.go | 3 +- command/acl/policy/delete/policy_delete.go | 3 +- command/acl/policy/read/policy_read.go | 3 +- command/acl/policy/update/policy_update.go | 3 +- command/acl/role/create/role_create.go | 3 +- command/acl/role/delete/role_delete.go | 3 +- command/acl/role/read/role_read.go | 3 +- command/acl/role/update/role_update.go | 3 +- command/acl/rules/translate.go | 3 +- command/acl/token/clone/token_clone.go | 3 +- command/acl/token/create/token_create.go | 3 +- command/acl/token/delete/token_delete.go | 3 +- command/acl/token/formatter.go | 3 +- command/acl/token/read/token_read.go | 3 +- command/acl/token/update/token_update.go | 3 +- command/keyring/keyring.go | 4 
+- command/logout/logout_test.go | 7 +- command/members/members.go | 4 +- proto/pbcommon/common_oss.go | 10 +- proto/pbconnect/connect.go | 7 +- proto/pbservice/convert_oss.go | 8 +- proto/pbservice/convert_oss_test.go | 4 +- 165 files changed, 717 insertions(+), 586 deletions(-) diff --git a/agent/acl_test.go b/agent/acl_test.go index 539cf5c07..995a3b6e6 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -92,7 +92,7 @@ func (a *TestACLAgent) ResolveToken(secretID string) (acl.Authorizer, error) { return authz, err } -func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) { +func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) { authz, err := a.ResolveToken(secretID) if err != nil { return consul.ACLResolveResult{}, err @@ -133,10 +133,10 @@ func (a *TestACLAgent) LANMembers(f consul.LANMemberFilter) ([]serf.Member, erro func (a *TestACLAgent) AgentLocalMember() serf.Member { return serf.Member{} } -func (a *TestACLAgent) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (n int, err error) { +func (a *TestACLAgent) JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (n int, err error) { return 0, fmt.Errorf("Unimplemented") } -func (a *TestACLAgent) RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (a *TestACLAgent) RemoveFailedNode(node string, prune bool, entMeta *acl.EnterpriseMeta) error { return fmt.Errorf("Unimplemented") } func (a *TestACLAgent) RPC(method string, args interface{}, reply interface{}) error { diff --git a/agent/agent.go b/agent/agent.go index 91d42cb73..c08316fd8 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -164,16 +164,16 @@ type delegate interface { // JoinLAN is used to have Consul join the inner-DC pool The target address // should be another node inside the DC listening on the Serf LAN address - JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (n int, err error) + JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (n int, err error) // RemoveFailedNode is used to remove a failed node from the cluster. - RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error + RemoveFailedNode(node string, prune bool, entMeta *acl.EnterpriseMeta) error // ResolveTokenAndDefaultMeta returns an acl.Authorizer which authorizes // actions based on the permissions granted to the token. // If either entMeta or authzContext are non-nil they will be populated with the // default partition and namespace from the token. 
- ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) + ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) RPC(method string, args interface{}, reply interface{}) error SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error @@ -1536,7 +1536,7 @@ func (a *Agent) ShutdownCh() <-chan struct{} { } // JoinLAN is used to have the agent join a LAN cluster -func (a *Agent) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (n int, err error) { +func (a *Agent) JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (n int, err error) { a.logger.Info("(LAN) joining", "lan_addresses", addrs) n, err = a.delegate.JoinLAN(addrs, entMeta) if err == nil { @@ -1603,7 +1603,7 @@ func (a *Agent) RefreshPrimaryGatewayFallbackAddresses(addrs []string) error { } // ForceLeave is used to remove a failed node from the cluster -func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (a *Agent) ForceLeave(node string, prune bool, entMeta *acl.EnterpriseMeta) error { a.logger.Info("Force leaving node", "node", node) err := a.delegate.RemoveFailedNode(node, prune, entMeta) @@ -1617,7 +1617,7 @@ func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseM } // ForceLeaveWAN is used to remove a failed node from the WAN cluster -func (a *Agent) ForceLeaveWAN(node string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (a *Agent) ForceLeaveWAN(node string, prune bool, entMeta *acl.EnterpriseMeta) error { a.logger.Info("(WAN) Force leaving node", "node", node) srv, ok := a.delegate.(*consul.Server) @@ -1923,7 +1923,7 @@ func (a *Agent) purgeCheck(checkID structs.CheckID) error { type persistedServiceConfig struct { ServiceID string Defaults *structs.ServiceConfigResponse - structs.EnterpriseMeta + acl.EnterpriseMeta } func (a *Agent) makeServiceConfigFilePath(serviceID structs.ServiceID) string { @@ -2017,7 +2017,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se } } - if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.PartitionOrDefault()) { + if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.PartitionOrDefault()) { a.logger.Info("Purging service config file in wrong partition", "file", file, "partition", p.PartitionOrDefault(), @@ -3390,7 +3390,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI } } - if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Service.PartitionOrDefault()) { + if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Service.PartitionOrDefault()) { a.logger.Info("Purging service file in wrong partition", "file", file, "partition", p.Service.EnterpriseMeta.PartitionOrDefault(), @@ -3546,7 +3546,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] } } - if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Check.PartitionOrDefault()) { + if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Check.PartitionOrDefault()) { a.logger.Info("Purging check file in wrong partition", "file", file, "partition", p.Check.PartitionOrDefault(), diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 67158f87a..d9a516f96 100644 --- a/agent/agent_endpoint.go +++ 
b/agent/agent_endpoint.go @@ -308,7 +308,7 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request var token string s.parseToken(req, &token) - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -397,7 +397,7 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request) var token string s.parseToken(req, &token) - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -471,7 +471,7 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request) var token string s.parseToken(req, &token) - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -576,7 +576,7 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) // Older 'consul members' calls will default to adding segment=_all // so we only choose to use that request argument in the case where // the partition is also the default and ignore it the rest of the time. - if structs.IsDefaultPartition(filter.Partition) { + if acl.IsDefaultPartition(filter.Partition) { filter.AllSegments = true } } else { @@ -984,7 +984,7 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt return nil, &BadRequestError{Reason: "Missing serviceID"} } - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -1046,7 +1046,7 @@ func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *h return nil, &BadRequestError{Reason: "Missing service Name"} } - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } diff --git a/agent/agent_endpoint_oss.go b/agent/agent_endpoint_oss.go index 2c6585a4b..b775b5e79 100644 --- a/agent/agent_endpoint_oss.go +++ b/agent/agent_endpoint_oss.go @@ -6,9 +6,9 @@ package agent import ( "net/http" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) -func (s *HTTPHandlers) validateRequestPartition(_ http.ResponseWriter, _ *structs.EnterpriseMeta) bool { +func (s *HTTPHandlers) validateRequestPartition(_ http.ResponseWriter, _ *acl.EnterpriseMeta) bool { return true } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 70840f950..103243497 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1640,7 +1640,7 @@ type fakeResolveTokenDelegate struct { authorizer acl.Authorizer } -func (f fakeResolveTokenDelegate) ResolveTokenAndDefaultMeta(_ string, _ *structs.EnterpriseMeta, _ *acl.AuthorizerContext) (consul.ACLResolveResult, error) { +func (f fakeResolveTokenDelegate) ResolveTokenAndDefaultMeta(_ string, _ *acl.EnterpriseMeta, _ *acl.AuthorizerContext) (consul.ACLResolveResult, error) { return consul.ACLResolveResult{Authorizer: f.authorizer}, nil } diff --git a/agent/agent_oss.go b/agent/agent_oss.go index b9b1f91dc..43de920a5 100644 --- a/agent/agent_oss.go +++ b/agent/agent_oss.go @@ -6,6 +6,7 @@ package agent import ( "context" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" @@ -16,10 +17,10 @@ import ( type 
enterpriseAgent struct{} // fillAgentServiceEnterpriseMeta is a noop stub for the func defined agent_ent.go -func fillAgentServiceEnterpriseMeta(_ *api.AgentService, _ *structs.EnterpriseMeta) {} +func fillAgentServiceEnterpriseMeta(_ *api.AgentService, _ *acl.EnterpriseMeta) {} // fillHealthCheckEnterpriseMeta is a noop stub for the func defined agent_ent.go -func fillHealthCheckEnterpriseMeta(_ *api.HealthCheck, _ *structs.EnterpriseMeta) {} +func fillHealthCheckEnterpriseMeta(_ *api.HealthCheck, _ *acl.EnterpriseMeta) {} // initEnterprise is a noop stub for the func defined agent_ent.go func (a *Agent) initEnterprise(consulCfg *consul.Config) error { @@ -52,7 +53,7 @@ func (a *Agent) enterpriseStats() map[string]map[string]string { return nil } -func (a *Agent) AgentEnterpriseMeta() *structs.EnterpriseMeta { +func (a *Agent) AgentEnterpriseMeta() *acl.EnterpriseMeta { return structs.NodeEnterpriseMetaInDefaultPartition() } diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index 1950ef756..3b382573a 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -11,6 +11,7 @@ import ( "github.com/mitchellh/hashstructure" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/agent/cache" @@ -672,7 +673,7 @@ type ConnectCALeafRequest struct { MaxQueryTime time.Duration MustRevalidate bool - structs.EnterpriseMeta + acl.EnterpriseMeta } func (r *ConnectCALeafRequest) Key() string { diff --git a/agent/cache-types/service_checks.go b/agent/cache-types/service_checks.go index 9c0b7f253..a42cb3a8e 100644 --- a/agent/cache-types/service_checks.go +++ b/agent/cache-types/service_checks.go @@ -5,11 +5,13 @@ import ( "strconv" "time" + "github.com/hashicorp/go-memdb" + "github.com/mitchellh/hashstructure" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/go-memdb" - "github.com/mitchellh/hashstructure" ) // Recommended name for registration. @@ -103,7 +105,7 @@ type ServiceHTTPChecksRequest struct { ServiceID string MinQueryIndex uint64 MaxQueryTime time.Duration - structs.EnterpriseMeta + acl.EnterpriseMeta } func (s *ServiceHTTPChecksRequest) CacheInfo() cache.RequestInfo { diff --git a/agent/check.go b/agent/check.go index 8f8bd5d08..da70a8005 100644 --- a/agent/check.go +++ b/agent/check.go @@ -1,6 +1,7 @@ package agent import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/types" ) @@ -23,5 +24,5 @@ type persistedCheckState struct { Output string Status string Expires int64 - structs.EnterpriseMeta + acl.EnterpriseMeta } diff --git a/agent/checks/alias.go b/agent/checks/alias.go index 3cbb8ed82..9553745af 100644 --- a/agent/checks/alias.go +++ b/agent/checks/alias.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" ) @@ -35,7 +36,7 @@ type CheckAlias struct { stopLock sync.Mutex stopWg sync.WaitGroup - structs.EnterpriseMeta + acl.EnterpriseMeta } // AliasNotifier is a CheckNotifier specifically for the Alias check. 
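The hunks around this point all apply the same mechanical rewrite: the EnterpriseMeta type that used to be defined in (and embedded from) the agent/structs package now comes from the acl package, while struct tags and field shapes stay the same. A minimal sketch of the before/after embedding pattern, using a made-up request type (DemoRequest is illustrative only and does not appear in the diff):

    package example

    import "github.com/hashicorp/consul/acl"

    // DemoRequest illustrates the embedding pattern rewritten throughout this
    // patch: the embedded enterprise metadata keeps its hcl/mapstructure tags,
    // only its defining package changes from structs to acl.
    type DemoRequest struct {
        Datacenter string

        // Before this series the next line read:
        //   structs.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
        acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
    }
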
@@ -46,7 +47,7 @@ type AliasNotifier interface { AddAliasCheck(structs.CheckID, structs.ServiceID, chan<- struct{}) error RemoveAliasCheck(structs.CheckID, structs.ServiceID) - Checks(*structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck + Checks(*acl.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck } // Start is used to start the check, runs until Stop() func (c *CheckAlias) Start() { diff --git a/agent/checks/alias_test.go b/agent/checks/alias_test.go index 941ffbc7c..673e83304 100644 --- a/agent/checks/alias_test.go +++ b/agent/checks/alias_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/mock" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -523,7 +524,7 @@ func (m *mockAliasNotify) AddAliasCheck(chkID structs.CheckID, serviceID structs func (m *mockAliasNotify) RemoveAliasCheck(chkID structs.CheckID, serviceID structs.ServiceID) { } -func (m *mockAliasNotify) Checks(*structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { +func (m *mockAliasNotify) Checks(*acl.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { return nil } diff --git a/agent/config/config_oss.go b/agent/config/config_oss.go index 7e061b8e5..5a297cacc 100644 --- a/agent/config/config_oss.go +++ b/agent/config/config_oss.go @@ -3,11 +3,14 @@ package config -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) // EnterpriseMeta stub type EnterpriseMeta struct{} -func (_ *EnterpriseMeta) ToStructs() structs.EnterpriseMeta { +func (_ *EnterpriseMeta) ToStructs() acl.EnterpriseMeta { return *structs.DefaultEnterpriseMetaInDefaultPartition() } diff --git a/agent/config_endpoint.go b/agent/config_endpoint.go index 4bd96d436..637b8ab91 100644 --- a/agent/config_endpoint.go +++ b/agent/config_endpoint.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -148,7 +149,7 @@ func (s *HTTPHandlers) ConfigApply(resp http.ResponseWriter, req *http.Request) } // Parse enterprise meta. 
- var meta structs.EnterpriseMeta + var meta acl.EnterpriseMeta if err := s.parseEntMetaForConfigEntryKind(args.Entry.GetKind(), req, &meta); err != nil { return nil, err } @@ -172,7 +173,7 @@ func (s *HTTPHandlers) ConfigApply(resp http.ResponseWriter, req *http.Request) return reply, nil } -func (s *HTTPHandlers) parseEntMetaForConfigEntryKind(kind string, req *http.Request, entMeta *structs.EnterpriseMeta) error { +func (s *HTTPHandlers) parseEntMetaForConfigEntryKind(kind string, req *http.Request, entMeta *acl.EnterpriseMeta) error { if kind == structs.ServiceIntentions { return s.parseEntMeta(req, entMeta) } diff --git a/agent/config_endpoint_test.go b/agent/config_endpoint_test.go index 3518d045e..949b6be11 100644 --- a/agent/config_endpoint_test.go +++ b/agent/config_endpoint_test.go @@ -10,6 +10,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" ) @@ -119,7 +120,7 @@ func TestConfig_Get(t *testing.T) { // Set indexes and EnterpriseMeta to expected values for assertions ce.CreateIndex = 12 ce.ModifyIndex = 13 - ce.EnterpriseMeta = structs.EnterpriseMeta{} + ce.EnterpriseMeta = acl.EnterpriseMeta{} out, err := a.srv.marshalJSON(req, obj) require.NoError(t, err) diff --git a/agent/configentry/config_entry.go b/agent/configentry/config_entry.go index 7ede09358..a34a197c4 100644 --- a/agent/configentry/config_entry.go +++ b/agent/configentry/config_entry.go @@ -1,6 +1,7 @@ package configentry import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -11,7 +12,7 @@ import ( type KindName struct { Kind string Name string - structs.EnterpriseMeta + acl.EnterpriseMeta } // NewKindName returns a new KindName. The EnterpriseMeta values will be @@ -19,7 +20,7 @@ type KindName struct { // // Any caller which modifies the EnterpriseMeta field must call Normalize // before persisting or using the value as a map key. -func NewKindName(kind, name string, entMeta *structs.EnterpriseMeta) KindName { +func NewKindName(kind, name string, entMeta *acl.EnterpriseMeta) KindName { ret := KindName{ Kind: kind, Name: name, diff --git a/agent/connect/authz.go b/agent/connect/authz.go index ead804174..f3beb1be6 100644 --- a/agent/connect/authz.go +++ b/agent/connect/authz.go @@ -1,6 +1,7 @@ package connect import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -18,7 +19,7 @@ func AuthorizeIntentionTarget( switch matchType { case structs.IntentionMatchDestination: - if structs.PartitionOrDefault(ixn.DestinationPartition) != structs.PartitionOrDefault(targetAP) { + if acl.PartitionOrDefault(ixn.DestinationPartition) != acl.PartitionOrDefault(targetAP) { return false, false } @@ -33,7 +34,7 @@ func AuthorizeIntentionTarget( } case structs.IntentionMatchSource: - if structs.PartitionOrDefault(ixn.SourcePartition) != structs.PartitionOrDefault(targetAP) { + if acl.PartitionOrDefault(ixn.SourcePartition) != acl.PartitionOrDefault(targetAP) { return false, false } diff --git a/agent/connect/uri_agent.go b/agent/connect/uri_agent.go index 3d144b016..fb86614cd 100644 --- a/agent/connect/uri_agent.go +++ b/agent/connect/uri_agent.go @@ -3,7 +3,7 @@ package connect import ( "net/url" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) // SpiffeIDService is the structure to represent the SPIFFE ID for an agent. 
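The same qualifier swap applies to the free helper functions listed in fixup_acl_move.sh (PartitionOrDefault, NamespaceOrDefault, IsDefaultPartition, EqualPartitions): they are now called through the acl package, as the surrounding hunks in agent/connect show. A short sketch of the call-site change (the wrapper function below is hypothetical; only the acl helpers themselves come from the diff):

    package example

    import "github.com/hashicorp/consul/acl"

    // samePartition shows the call-site rewrite: the helper keeps its name and
    // argument shape, it is simply referenced from acl instead of structs.
    func samePartition(a, b string) bool {
        // Previously: structs.EqualPartitions(a, b)
        return acl.EqualPartitions(a, b)
    }
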
@@ -15,7 +15,7 @@ type SpiffeIDAgent struct { } func (id SpiffeIDAgent) PartitionOrDefault() string { - return structs.PartitionOrDefault(id.Partition) + return acl.PartitionOrDefault(id.Partition) } // URI returns the *url.URL for this SPIFFE ID. diff --git a/agent/connect/uri_agent_oss.go b/agent/connect/uri_agent_oss.go index 1ae6f18c3..e24f9b560 100644 --- a/agent/connect/uri_agent_oss.go +++ b/agent/connect/uri_agent_oss.go @@ -6,13 +6,13 @@ package connect import ( "fmt" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) // GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDAgent. // in OSS this just returns an empty (but never nil) struct pointer -func (id SpiffeIDAgent) GetEnterpriseMeta() *structs.EnterpriseMeta { - return &structs.EnterpriseMeta{} +func (id SpiffeIDAgent) GetEnterpriseMeta() *acl.EnterpriseMeta { + return &acl.EnterpriseMeta{} } func (id SpiffeIDAgent) uriPath() string { diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go index 82ce662f6..34c55a92c 100644 --- a/agent/connect/uri_service.go +++ b/agent/connect/uri_service.go @@ -3,7 +3,7 @@ package connect import ( "net/url" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) // SpiffeIDService is the structure to represent the SPIFFE ID for a service. @@ -16,15 +16,15 @@ type SpiffeIDService struct { } func (id SpiffeIDService) NamespaceOrDefault() string { - return structs.NamespaceOrDefault(id.Namespace) + return acl.NamespaceOrDefault(id.Namespace) } func (id SpiffeIDService) MatchesPartition(partition string) bool { - return id.PartitionOrDefault() == structs.PartitionOrDefault(partition) + return id.PartitionOrDefault() == acl.PartitionOrDefault(partition) } func (id SpiffeIDService) PartitionOrDefault() string { - return structs.PartitionOrDefault(id.Partition) + return acl.PartitionOrDefault(id.Partition) } // URI returns the *url.URL for this SPIFFE ID. diff --git a/agent/connect/uri_service_oss.go b/agent/connect/uri_service_oss.go index 8270f96c2..a20074e84 100644 --- a/agent/connect/uri_service_oss.go +++ b/agent/connect/uri_service_oss.go @@ -6,13 +6,13 @@ package connect import ( "fmt" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) // GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDService. 
// in OSS this just returns an empty (but never nil) struct pointer -func (id SpiffeIDService) GetEnterpriseMeta() *structs.EnterpriseMeta { - return &structs.EnterpriseMeta{} +func (id SpiffeIDService) GetEnterpriseMeta() *acl.EnterpriseMeta { + return &acl.EnterpriseMeta{} } func (id SpiffeIDService) uriPath() string { diff --git a/agent/connect_auth.go b/agent/connect_auth.go index bc89d50af..9bd8a46eb 100644 --- a/agent/connect_auth.go +++ b/agent/connect_auth.go @@ -72,7 +72,7 @@ func (a *Agent) ConnectAuthorize(token string, if !uriService.MatchesPartition(req.TargetPartition()) { reason = fmt.Sprintf("Mismatched partitions: %q != %q", uriService.PartitionOrDefault(), - structs.PartitionOrDefault(req.TargetPartition())) + acl.PartitionOrDefault(req.TargetPartition())) return false, reason, nil, nil } diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 8b3d4e55e..2c4264223 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -108,7 +108,7 @@ func (id *missingIdentity) IsLocal() bool { return false } -func (id *missingIdentity) EnterpriseMetadata() *structs.EnterpriseMeta { +func (id *missingIdentity) EnterpriseMetadata() *acl.EnterpriseMeta { return structs.DefaultEnterpriseMetaInDefaultPartition() } @@ -182,7 +182,7 @@ type ACLResolverSettings struct { ACLsEnabled bool Datacenter string NodeName string - EnterpriseMeta structs.EnterpriseMeta + EnterpriseMeta acl.EnterpriseMeta // ACLPolicyTTL is used to control the time-to-live of cached ACL policies. This has // a major impact on performance. By default, it is set to 30 seconds. @@ -264,7 +264,7 @@ type ACLResolver struct { agentRecoveryAuthz acl.Authorizer } -func agentRecoveryAuthorizer(nodeName string, entMeta *structs.EnterpriseMeta, aclConf *acl.Config) (acl.Authorizer, error) { +func agentRecoveryAuthorizer(nodeName string, entMeta *acl.EnterpriseMeta, aclConf *acl.Config) (acl.Authorizer, error) { var conf acl.Config if aclConf != nil { conf = *aclConf @@ -638,7 +638,7 @@ func (r *ACLResolver) resolvePoliciesForIdentity(identity structs.ACLIdentity) ( return filtered, nil } -func (r *ACLResolver) synthesizePoliciesForServiceIdentities(serviceIdentities []*structs.ACLServiceIdentity, entMeta *structs.EnterpriseMeta) []*structs.ACLPolicy { +func (r *ACLResolver) synthesizePoliciesForServiceIdentities(serviceIdentities []*structs.ACLServiceIdentity, entMeta *acl.EnterpriseMeta) []*structs.ACLPolicy { if len(serviceIdentities) == 0 { return nil } @@ -651,7 +651,7 @@ func (r *ACLResolver) synthesizePoliciesForServiceIdentities(serviceIdentities [ return syntheticPolicies } -func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*structs.ACLNodeIdentity, entMeta *structs.EnterpriseMeta) []*structs.ACLPolicy { +func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*structs.ACLNodeIdentity, entMeta *acl.EnterpriseMeta) []*structs.ACLPolicy { if len(nodeIdentities) == 0 { return nil } @@ -676,7 +676,7 @@ type plainACLResolver struct { func (r plainACLResolver) ResolveTokenAndDefaultMeta( token string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext, ) (acl.Authorizer, error) { // ACLResolver.ResolveTokenAndDefaultMeta returns a ACLResolveResult which @@ -1174,14 +1174,14 @@ func (r *ACLResolver) ACLsEnabled() bool { return true } -func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (ACLResolveResult, error) { +func (r *ACLResolver) 
ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (ACLResolveResult, error) { result, err := r.ResolveToken(token) if err != nil { return ACLResolveResult{}, err } if entMeta == nil { - entMeta = &structs.EnterpriseMeta{} + entMeta = &acl.EnterpriseMeta{} } // Default the EnterpriseMeta based on the Tokens meta or actual defaults @@ -1279,7 +1279,7 @@ func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) bool { // filterServices is used to filter a set of services based on ACLs. Returns // true if any elements were removed. -func (f *aclFilter) filterServices(services structs.Services, entMeta *structs.EnterpriseMeta) bool { +func (f *aclFilter) filterServices(services structs.Services, entMeta *acl.EnterpriseMeta) bool { var authzContext acl.AuthorizerContext entMeta.FillAuthzContext(&authzContext) diff --git a/agent/consul/acl_authmethod.go b/agent/consul/acl_authmethod.go index b901ce131..34035e159 100644 --- a/agent/consul/acl_authmethod.go +++ b/agent/consul/acl_authmethod.go @@ -3,9 +3,11 @@ package consul import ( "fmt" + "github.com/hashicorp/go-bexpr" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/go-bexpr" // register these as a builtin auth method _ "github.com/hashicorp/consul/agent/consul/authmethod/awsauth" @@ -51,8 +53,8 @@ type aclBindings struct { func (s *Server) evaluateRoleBindings( validator authmethod.Validator, verifiedIdentity *authmethod.Identity, - methodMeta *structs.EnterpriseMeta, - targetMeta *structs.EnterpriseMeta, + methodMeta *acl.EnterpriseMeta, + targetMeta *acl.EnterpriseMeta, ) (*aclBindings, error) { // Only fetch rules that are relevant for this method. 
_, rules, err := s.fsm.State().ACLBindingRuleList(nil, validator.Name(), methodMeta) diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index bac938dfa..77ca6edf3 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -1000,7 +1000,7 @@ func (a *ACL) TokenList(args *structs.ACLTokenListRequest, reply *structs.ACLTok } var authzContext acl.AuthorizerContext - var requestMeta structs.EnterpriseMeta + var requestMeta acl.EnterpriseMeta authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &requestMeta, &authzContext) if err != nil { return err @@ -1012,7 +1012,7 @@ func (a *ACL) TokenList(args *structs.ACLTokenListRequest, reply *structs.ACLTok return err } - var methodMeta *structs.EnterpriseMeta + var methodMeta *acl.EnterpriseMeta if args.AuthMethod != "" { methodMeta = args.ACLAuthMethodEnterpriseMeta.ToEnterpriseMeta() // attempt to merge in the overall meta, wildcards will not be merged @@ -2449,7 +2449,7 @@ func (a *ACL) Login(args *structs.ACLLoginRequest, reply *structs.ACLToken) erro func (a *ACL) tokenSetFromAuthMethod( method *structs.ACLAuthMethod, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, tokenDescriptionPrefix string, tokenMetadata map[string]string, validator authmethod.Validator, diff --git a/agent/consul/acl_endpoint_oss.go b/agent/consul/acl_endpoint_oss.go index 3cc9e35d4..e218826a6 100644 --- a/agent/consul/acl_endpoint_oss.go +++ b/agent/consul/acl_endpoint_oss.go @@ -4,10 +4,12 @@ package consul import ( + memdb "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" - memdb "github.com/hashicorp/go-memdb" ) func (a *ACL) tokenUpsertValidateEnterprise(token *structs.ACLToken, existing *structs.ACLToken) error { @@ -36,10 +38,10 @@ func enterpriseAuthMethodValidation(method *structs.ACLAuthMethod, validator aut func computeTargetEnterpriseMeta( method *structs.ACLAuthMethod, verifiedIdentity *authmethod.Identity, -) (*structs.EnterpriseMeta, error) { - return &structs.EnterpriseMeta{}, nil +) (*acl.EnterpriseMeta, error) { + return &acl.EnterpriseMeta{}, nil } -func getTokenNamespaceDefaults(ws memdb.WatchSet, state *state.Store, entMeta *structs.EnterpriseMeta) ([]string, []string, error) { +func getTokenNamespaceDefaults(ws memdb.WatchSet, state *state.Store, entMeta *acl.EnterpriseMeta) ([]string, []string, error) { return nil, nil, nil } diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index 637872070..1ceb3a0a2 100644 --- a/agent/consul/acl_endpoint_test.go +++ b/agent/consul/acl_endpoint_test.go @@ -9,12 +9,13 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" uuid "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" "gopkg.in/square/go-jose.v2/jwt" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul-net-rpc/net/rpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod/kubeauth" "github.com/hashicorp/consul/agent/consul/authmethod/testauth" @@ -5415,7 +5416,7 @@ func TestValidateBindingRuleBindName(t *testing.T) { // upsertTestToken creates a token for testing purposes func upsertTestTokenInEntMeta(codec rpc.ClientCodec, initialManagementToken string, datacenter string, - tokenModificationFn func(token 
*structs.ACLToken), entMeta *structs.EnterpriseMeta) (*structs.ACLToken, error) { + tokenModificationFn func(token *structs.ACLToken), entMeta *acl.EnterpriseMeta) (*structs.ACLToken, error) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } @@ -5455,7 +5456,7 @@ func upsertTestToken(codec rpc.ClientCodec, initialManagementToken string, datac tokenModificationFn, structs.DefaultEnterpriseMetaInDefaultPartition()) } -func upsertTestTokenWithPolicyRulesInEntMeta(codec rpc.ClientCodec, initialManagementToken string, datacenter string, rules string, entMeta *structs.EnterpriseMeta) (*structs.ACLToken, error) { +func upsertTestTokenWithPolicyRulesInEntMeta(codec rpc.ClientCodec, initialManagementToken string, datacenter string, rules string, entMeta *acl.EnterpriseMeta) (*structs.ACLToken, error) { policy, err := upsertTestPolicyWithRulesInEntMeta(codec, initialManagementToken, datacenter, rules, entMeta) if err != nil { return nil, err @@ -5585,7 +5586,7 @@ func upsertTestPolicyWithRules(codec rpc.ClientCodec, initialManagementToken str return upsertTestPolicyWithRulesInEntMeta(codec, initialManagementToken, datacenter, rules, structs.DefaultEnterpriseMetaInDefaultPartition()) } -func upsertTestPolicyWithRulesInEntMeta(codec rpc.ClientCodec, initialManagementToken string, datacenter string, rules string, entMeta *structs.EnterpriseMeta) (*structs.ACLPolicy, error) { +func upsertTestPolicyWithRulesInEntMeta(codec rpc.ClientCodec, initialManagementToken string, datacenter string, rules string, entMeta *acl.EnterpriseMeta) (*structs.ACLPolicy, error) { return upsertTestCustomizedPolicy(codec, initialManagementToken, datacenter, func(policy *structs.ACLPolicy) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() diff --git a/agent/consul/acl_oss.go b/agent/consul/acl_oss.go index 33b11a416..1fe4fbbf8 100644 --- a/agent/consul/acl_oss.go +++ b/agent/consul/acl_oss.go @@ -13,7 +13,7 @@ import ( // EnterpriseACLResolverDelegate stub type EnterpriseACLResolverDelegate interface{} -func (s *Server) replicationEnterpriseMeta() *structs.EnterpriseMeta { +func (s *Server) replicationEnterpriseMeta() *acl.EnterpriseMeta { return structs.ReplicationEnterpriseMeta() } @@ -48,4 +48,4 @@ func (_ *ACLResolver) resolveLocallyManagedEnterpriseToken(_ string) (structs.AC return nil, nil, false } -func setEnterpriseConf(entMeta *structs.EnterpriseMeta, conf *acl.Config) {} +func setEnterpriseConf(entMeta *acl.EnterpriseMeta, conf *acl.Config) {} diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 54dc3a6b5..49036ae4f 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -9,12 +9,13 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" diff --git a/agent/consul/authmethod/authmethods.go b/agent/consul/authmethod/authmethods.go index 3f701aa8b..fbcd27e01 100644 --- a/agent/consul/authmethod/authmethods.go +++ b/agent/consul/authmethod/authmethods.go @@ -6,9 +6,11 @@ import ( "sort" "sync" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-hclog" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/consul/acl" + 
"github.com/hashicorp/consul/agent/structs" ) type Cache interface { @@ -62,7 +64,7 @@ type Identity struct { // in a bind name within a binding rule. ProjectedVars map[string]string - *structs.EnterpriseMeta + *acl.EnterpriseMeta } // ProjectedVarNames returns just the keyspace of the ProjectedVars map. diff --git a/agent/consul/authmethod/kubeauth/k8s_oss.go b/agent/consul/authmethod/kubeauth/k8s_oss.go index b3d74361e..a023c24e7 100644 --- a/agent/consul/authmethod/kubeauth/k8s_oss.go +++ b/agent/consul/authmethod/kubeauth/k8s_oss.go @@ -3,7 +3,10 @@ package kubeauth -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) type enterpriseConfig struct{} @@ -11,6 +14,6 @@ func enterpriseValidation(method *structs.ACLAuthMethod, config *Config) error { return nil } -func (v *Validator) k8sEntMetaFromFields(fields map[string]string) *structs.EnterpriseMeta { +func (v *Validator) k8sEntMetaFromFields(fields map[string]string) *acl.EnterpriseMeta { return nil } diff --git a/agent/consul/authmethod/ssoauth/sso_oss.go b/agent/consul/authmethod/ssoauth/sso_oss.go index 2f6bbe12a..495ce482b 100644 --- a/agent/consul/authmethod/ssoauth/sso_oss.go +++ b/agent/consul/authmethod/ssoauth/sso_oss.go @@ -6,7 +6,7 @@ package ssoauth import ( "fmt" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/go-sso/oidcauth" ) @@ -17,7 +17,7 @@ func validateType(typ string) error { return nil } -func (v *Validator) ssoEntMetaFromClaims(_ *oidcauth.Claims) *structs.EnterpriseMeta { +func (v *Validator) ssoEntMetaFromClaims(_ *oidcauth.Claims) *acl.EnterpriseMeta { return nil } diff --git a/agent/consul/authmethod/testauth/testing.go b/agent/consul/authmethod/testauth/testing.go index 11e6fd7fc..5ad0f1e49 100644 --- a/agent/consul/authmethod/testauth/testing.go +++ b/agent/consul/authmethod/testauth/testing.go @@ -5,11 +5,12 @@ import ( "fmt" "sync" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" ) func init() { diff --git a/agent/consul/authmethod/testauth/testing_oss.go b/agent/consul/authmethod/testauth/testing_oss.go index a3c9b4382..d03a6ad87 100644 --- a/agent/consul/authmethod/testauth/testing_oss.go +++ b/agent/consul/authmethod/testauth/testing_oss.go @@ -3,12 +3,10 @@ package testauth -import ( - "github.com/hashicorp/consul/agent/structs" -) +import "github.com/hashicorp/consul/acl" type enterpriseConfig struct{} -func (v *Validator) testAuthEntMetaFromFields(fields map[string]string) *structs.EnterpriseMeta { +func (v *Validator) testAuthEntMetaFromFields(fields map[string]string) *acl.EnterpriseMeta { return nil } diff --git a/agent/consul/auto_config_backend.go b/agent/consul/auto_config_backend.go index aef6ad7ba..e77093d5e 100644 --- a/agent/consul/auto_config_backend.go +++ b/agent/consul/auto_config_backend.go @@ -6,6 +6,7 @@ import ( "net" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" @@ -40,7 +41,7 @@ func (b autoConfigBackend) DatacenterJoinAddresses(partition, segment string) ([ if segment != "" { return nil, fmt.Errorf("Failed to retrieve members for segment %s: %w", segment, err) } - return 
nil, fmt.Errorf("Failed to retrieve members for partition %s: %w", structs.PartitionOrDefault(partition), err) + return nil, fmt.Errorf("Failed to retrieve members for partition %s: %w", acl.PartitionOrDefault(partition), err) } var joinAddrs []string diff --git a/agent/consul/auto_config_endpoint.go b/agent/consul/auto_config_endpoint.go index 5ca15f33b..088c9a3e0 100644 --- a/agent/consul/auto_config_endpoint.go +++ b/agent/consul/auto_config_endpoint.go @@ -30,7 +30,7 @@ type AutoConfigOptions struct { } func (opts AutoConfigOptions) PartitionOrDefault() string { - return structs.PartitionOrDefault(opts.Partition) + return acl.PartitionOrDefault(opts.Partition) } type AutoConfigAuthorizer interface { @@ -99,7 +99,7 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig return AutoConfigOptions{}, err } - if id.Agent != req.Node || !structs.EqualPartitions(id.Partition, req.Partition) { + if id.Agent != req.Node || !acl.EqualPartitions(id.Partition, req.Partition) { return AutoConfigOptions{}, fmt.Errorf("Spiffe ID agent name (%s) of the certificate signing request is not for the correct node (%s)", printNodeName(id.Agent, id.Partition), @@ -392,7 +392,7 @@ func parseAutoConfigCSR(csr string) (*x509.CertificateRequest, *connect.SpiffeID } func printNodeName(nodeName, partition string) string { - if structs.IsDefaultPartition(partition) { + if acl.IsDefaultPartition(partition) { return nodeName } return partition + "/" + nodeName diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index cf4b024f0..d6d303c2b 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -9,11 +9,12 @@ import ( "github.com/hashicorp/go-uuid" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul-net-rpc/net/rpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" diff --git a/agent/consul/client.go b/agent/consul/client.go index 4709c3108..6a15acb94 100644 --- a/agent/consul/client.go +++ b/agent/consul/client.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/serf/serf" "golang.org/x/time/rate" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" "github.com/hashicorp/consul/agent/structs" @@ -192,7 +193,7 @@ func (c *Client) Leave() error { // JoinLAN is used to have Consul join the inner-DC pool The target address // should be another node inside the DC listening on the Serf LAN address -func (c *Client) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) { +func (c *Client) JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (int, error) { // Partitions definitely have to match. if c.config.AgentEnterpriseMeta().PartitionOrDefault() != entMeta.PartitionOrDefault() { return 0, fmt.Errorf("target partition %q must match client agent partition %q", @@ -240,7 +241,7 @@ func (c *Client) LANMembers(filter LANMemberFilter) ([]serf.Member, error) { } // RemoveFailedNode is used to remove a failed node from the cluster. 
-func (c *Client) RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (c *Client) RemoveFailedNode(node string, prune bool, entMeta *acl.EnterpriseMeta) error { // Partitions definitely have to match. if c.config.AgentEnterpriseMeta().PartitionOrDefault() != entMeta.PartitionOrDefault() { return fmt.Errorf("client agent in partition %q cannot remove node in different partition %q", @@ -418,7 +419,7 @@ func (c *Client) ReloadConfig(config ReloadableConfig) error { return nil } -func (c *Client) AgentEnterpriseMeta() *structs.EnterpriseMeta { +func (c *Client) AgentEnterpriseMeta() *acl.EnterpriseMeta { return c.config.AgentEnterpriseMeta() } diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index e5c0b7f6d..3c60818e4 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" hashstructure_v2 "github.com/mitchellh/hashstructure/v2" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" diff --git a/agent/consul/config_oss.go b/agent/consul/config_oss.go index 63d3cb2a3..bae469eaf 100644 --- a/agent/consul/config_oss.go +++ b/agent/consul/config_oss.go @@ -3,8 +3,11 @@ package consul -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) -func (c *Config) AgentEnterpriseMeta() *structs.EnterpriseMeta { +func (c *Config) AgentEnterpriseMeta() *acl.EnterpriseMeta { return structs.NodeEnterpriseMetaInDefaultPartition() } diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 394721411..f69960f5f 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -9,10 +9,11 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" ca "github.com/hashicorp/consul/agent/connect/ca" diff --git a/agent/consul/coordinate_endpoint_test.go b/agent/consul/coordinate_endpoint_test.go index 25cb41a5f..471a92623 100644 --- a/agent/consul/coordinate_endpoint_test.go +++ b/agent/consul/coordinate_endpoint_test.go @@ -9,11 +9,12 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/serf/coordinate" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul-net-rpc/net/rpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index e875ec25d..1f9a82f14 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/require" + msgpackrpc 
"github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" diff --git a/agent/consul/discoverychain/compile_oss.go b/agent/consul/discoverychain/compile_oss.go index 4aa43eb27..c0aa1118e 100644 --- a/agent/consul/discoverychain/compile_oss.go +++ b/agent/consul/discoverychain/compile_oss.go @@ -3,8 +3,11 @@ package discoverychain -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) -func (c *compiler) GetEnterpriseMeta() *structs.EnterpriseMeta { +func (c *compiler) GetEnterpriseMeta() *acl.EnterpriseMeta { return structs.DefaultEnterpriseMetaInDefaultPartition() } diff --git a/agent/consul/enterprise_server_oss.go b/agent/consul/enterprise_server_oss.go index cad141c11..187d59e97 100644 --- a/agent/consul/enterprise_server_oss.go +++ b/agent/consul/enterprise_server_oss.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-version" "github.com/hashicorp/serf/serf" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/structs" ) @@ -56,7 +57,7 @@ func (s *Server) revokeEnterpriseLeadership() error { return nil } -func (s *Server) validateEnterpriseRequest(entMeta *structs.EnterpriseMeta, write bool) error { +func (s *Server) validateEnterpriseRequest(entMeta *acl.EnterpriseMeta, write bool) error { return nil } @@ -109,6 +110,6 @@ func (s *Server) shutdownSerfLAN() { } } -func addEnterpriseSerfTags(_ map[string]string, _ *structs.EnterpriseMeta) { +func addEnterpriseSerfTags(_ map[string]string, _ *acl.EnterpriseMeta) { // do nothing } diff --git a/agent/consul/federation_state_endpoint_test.go b/agent/consul/federation_state_endpoint_test.go index 8cd880826..299622447 100644 --- a/agent/consul/federation_state_endpoint_test.go +++ b/agent/consul/federation_state_endpoint_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" uuid "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul-net-rpc/net/rpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 6d14d6f2d..c75bbc197 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -6,10 +6,11 @@ import ( "testing" "time" - "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-raftchunking" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index d84fbb21a..4193f7fee 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/agent/structs" 
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" diff --git a/agent/consul/helper_test.go b/agent/consul/helper_test.go index f5c37f14d..807bb8be2 100644 --- a/agent/consul/helper_test.go +++ b/agent/consul/helper_test.go @@ -6,12 +6,14 @@ import ( "net" "testing" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul-net-rpc/net/rpc" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -116,9 +118,9 @@ func joinAddrWAN(s *Server) string { } type clientOrServer interface { - JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) + JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (int, error) LANMembersInAgentPartition() []serf.Member - AgentEnterpriseMeta() *structs.EnterpriseMeta + AgentEnterpriseMeta() *acl.EnterpriseMeta agentSegmentName() string } diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 89a5f219a..fc6db87db 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -101,7 +101,7 @@ func (s *Intention) Apply(args *structs.IntentionRequest, reply *string) error { } // Get the ACL token for the request for the checks below. - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta authz, err := s.srv.ACLResolver.ResolveTokenAndDefaultMeta(args.Token, &entMeta, nil) if err != nil { return err @@ -162,7 +162,7 @@ func (s *Intention) Apply(args *structs.IntentionRequest, reply *string) error { func (s *Intention) computeApplyChangesLegacyCreate( accessorID string, authz acl.Authorizer, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, args *structs.IntentionRequest, ) (*structs.IntentionMutation, error) { // This variant is just for legacy UUID-based intentions. @@ -232,7 +232,7 @@ func (s *Intention) computeApplyChangesLegacyCreate( func (s *Intention) computeApplyChangesLegacyUpdate( accessorID string, authz acl.Authorizer, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, args *structs.IntentionRequest, ) (*structs.IntentionMutation, error) { // This variant is just for legacy UUID-based intentions. @@ -292,7 +292,7 @@ func (s *Intention) computeApplyChangesLegacyUpdate( func (s *Intention) computeApplyChangesUpsert( accessorID string, authz acl.Authorizer, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, args *structs.IntentionRequest, ) (*structs.IntentionMutation, error) { // This variant is just for config-entry based intentions. 
@@ -355,7 +355,7 @@ func (s *Intention) computeApplyChangesUpsert( func (s *Intention) computeApplyChangesLegacyDelete( accessorID string, authz acl.Authorizer, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, args *structs.IntentionRequest, ) (*structs.IntentionMutation, error) { _, _, ixn, err := s.srv.fsm.State().IntentionGet(nil, args.Intention.ID) @@ -380,7 +380,7 @@ func (s *Intention) computeApplyChangesLegacyDelete( func (s *Intention) computeApplyChangesDelete( accessorID string, authz acl.Authorizer, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, args *structs.IntentionRequest, ) (*structs.IntentionMutation, error) { args.Intention.FillPartitionAndNamespace(entMeta, true) @@ -425,7 +425,7 @@ func (s *Intention) Get(args *structs.IntentionQueryRequest, reply *structs.Inde } // Get the ACL token for the request for the checks below. - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta authz, err := s.srv.ResolveTokenAndDefaultMeta(args.Token, &entMeta, nil) if err != nil { return err @@ -574,7 +574,7 @@ func (s *Intention) Match(args *structs.IntentionQueryRequest, reply *structs.In } // Get the ACL token for the request for the checks below. - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta authz, err := s.srv.ResolveTokenAndDefaultMeta(args.Token, &entMeta, nil) if err != nil { return err @@ -695,7 +695,7 @@ func (s *Intention) Check(args *structs.IntentionQueryRequest, reply *structs.In } // Get the ACL token for the request for the checks below. - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta authz, err := s.srv.ResolveTokenAndDefaultMeta(args.Token, &entMeta, nil) if err != nil { return err diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index e1a35bf62..1fc0db35e 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil" diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index 9c2f2c75d..d78f20046 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -362,7 +362,7 @@ func (m *Internal) GatewayIntentions(args *structs.IntentionQueryRequest, reply } // Get the ACL token for the request for the checks below. 
- var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta var authzContext acl.AuthorizerContext authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &entMeta, &authzContext) diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index e639c003f..7f5e59a0a 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -8,10 +8,11 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" diff --git a/agent/consul/kvs_endpoint_test.go b/agent/consul/kvs_endpoint_test.go index 10c94e702..1289ac655 100644 --- a/agent/consul/kvs_endpoint_test.go +++ b/agent/consul/kvs_endpoint_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" diff --git a/agent/consul/leader.go b/agent/consul/leader.go index b6291ee56..f40faed42 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -883,7 +883,7 @@ func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error { // reconcileReaped is used to reconcile nodes that have failed and been reaped // from Serf but remain in the catalog. This is done by looking for unknown nodes with serfHealth checks registered. // We generate a "reap" event to cause the node to be cleaned up. -func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *structs.EnterpriseMeta) error { +func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.EnterpriseMeta) error { if nodeEntMeta == nil { nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() } @@ -1016,7 +1016,7 @@ func (s *Server) shouldHandleMember(member serf.Member) bool { // handleAliveMember is used to ensure the node // is registered, with a passing health check. -func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error { +func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { if nodeEntMeta == nil { nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() } @@ -1122,7 +1122,7 @@ AFTER_CHECK: // handleFailedMember is used to mark the node's status // as being critical, along with all checks as unknown. -func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error { +func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { if nodeEntMeta == nil { nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() } @@ -1184,18 +1184,18 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *structs.Ent // handleLeftMember is used to handle members that gracefully // left. They are deregistered if necessary. 
-func (s *Server) handleLeftMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error { +func (s *Server) handleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { return s.handleDeregisterMember("left", member, nodeEntMeta) } // handleReapMember is used to handle members that have been // reaped after a prolonged failure. They are deregistered. -func (s *Server) handleReapMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error { +func (s *Server) handleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { return s.handleDeregisterMember("reaped", member, nodeEntMeta) } // handleDeregisterMember is used to deregister a member of a given reason -func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error { +func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { if nodeEntMeta == nil { nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() } diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index 899ff494a..88d3c5d42 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -1439,7 +1439,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne return nil, fmt.Errorf("SPIFFE ID in CSR must be a service or agent ID") } - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if isService { if !signingID.CanSign(spiffeID) { return nil, fmt.Errorf("SPIFFE ID in CSR from a different trust domain: %s, "+ diff --git a/agent/consul/operator_autopilot_endpoint_test.go b/agent/consul/operator_autopilot_endpoint_test.go index e28f30dab..a0a300c6d 100644 --- a/agent/consul/operator_autopilot_endpoint_test.go +++ b/agent/consul/operator_autopilot_endpoint_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil/retry" diff --git a/agent/consul/operator_raft_endpoint_test.go b/agent/consul/operator_raft_endpoint_test.go index e9e055e68..be60ec66a 100644 --- a/agent/consul/operator_raft_endpoint_test.go +++ b/agent/consul/operator_raft_endpoint_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/raft" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/freeport" diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index 8c67eb5d7..5a0aef305 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -10,13 +10,14 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/serf/coordinate" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + 
"github.com/hashicorp/consul-net-rpc/net/rpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" tokenStore "github.com/hashicorp/consul/agent/token" diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index d8bb45241..0e236eed5 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -995,7 +995,7 @@ func TestRPC_LocalTokenStrippedOnForward(t *testing.T) { // Wait for it to replicate retry.Run(t, func(r *retry.R) { - _, p, err := s2.fsm.State().ACLPolicyGetByID(nil, kvPolicy.ID, &structs.EnterpriseMeta{}) + _, p, err := s2.fsm.State().ACLPolicyGetByID(nil, kvPolicy.ID, &acl.EnterpriseMeta{}) require.Nil(r, err) require.NotNil(r, p) }) @@ -1128,7 +1128,7 @@ func TestRPC_LocalTokenStrippedOnForward_GRPC(t *testing.T) { // Wait for it to replicate retry.Run(t, func(r *retry.R) { - _, p, err := s2.fsm.State().ACLPolicyGetByID(nil, policy.ID, &structs.EnterpriseMeta{}) + _, p, err := s2.fsm.State().ACLPolicyGetByID(nil, policy.ID, &acl.EnterpriseMeta{}) require.Nil(r, err) require.NotNil(r, p) }) diff --git a/agent/consul/serf_filter.go b/agent/consul/serf_filter.go index 4ea2811d2..ebd6db759 100644 --- a/agent/consul/serf_filter.go +++ b/agent/consul/serf_filter.go @@ -3,7 +3,7 @@ package consul import ( "fmt" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) type LANMemberFilter struct { @@ -16,12 +16,12 @@ func (f LANMemberFilter) Validate() error { if f.AllSegments && f.Segment != "" { return fmt.Errorf("cannot specify both allSegments and segment filters") } - if (f.AllSegments || f.Segment != "") && !structs.IsDefaultPartition(f.Partition) { + if (f.AllSegments || f.Segment != "") && !acl.IsDefaultPartition(f.Partition) { return fmt.Errorf("segments do not exist outside of the default partition") } return nil } func (f LANMemberFilter) PartitionOrDefault() string { - return structs.PartitionOrDefault(f.Partition) + return acl.PartitionOrDefault(f.Partition) } diff --git a/agent/consul/server.go b/agent/consul/server.go index 2b40a615e..c48204bb5 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -1230,7 +1230,7 @@ func (s *Server) WANMembers() []serf.Member { } // RemoveFailedNode is used to remove a failed node from the cluster. -func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *acl.EnterpriseMeta) error { var removeFn func(*serf.Serf, string) error if prune { removeFn = (*serf.Serf).RemoveFailedNodePrune @@ -1250,7 +1250,7 @@ func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *structs.Ente } // RemoveFailedNodeWAN is used to remove a failed node from the WAN cluster. 
-func (s *Server) RemoveFailedNodeWAN(wanNode string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (s *Server) RemoveFailedNodeWAN(wanNode string, prune bool, entMeta *acl.EnterpriseMeta) error { var removeFn func(*serf.Serf, string) error if prune { removeFn = (*serf.Serf).RemoveFailedNodePrune @@ -1283,7 +1283,7 @@ func (s *Server) KeyManagerWAN() *serf.KeyManager { return s.serfWAN.KeyManager() } -func (s *Server) AgentEnterpriseMeta() *structs.EnterpriseMeta { +func (s *Server) AgentEnterpriseMeta() *acl.EnterpriseMeta { return s.config.AgentEnterpriseMeta() } diff --git a/agent/consul/server_oss.go b/agent/consul/server_oss.go index 0281b4d7a..5ae2fc3ea 100644 --- a/agent/consul/server_oss.go +++ b/agent/consul/server_oss.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/serf/serf" "google.golang.org/grpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" ) @@ -26,7 +27,7 @@ func (s *Server) enterpriseValidateJoinWAN() error { // JoinLAN is used to have Consul join the inner-DC pool The target address // should be another node inside the DC listening on the Serf LAN address -func (s *Server) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) { +func (s *Server) JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (int, error) { return s.serfLAN.Join(addrs, true) } @@ -36,7 +37,7 @@ func (s *Server) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, func (s *Server) removeFailedNode( removeFn func(*serf.Serf, string) error, node, wanNode string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) error { maybeRemove := func(s *serf.Serf, node string) (bool, error) { if !isSerfMember(s, node) { @@ -155,6 +156,6 @@ func (s *Server) addEnterpriseStats(stats map[string]map[string]string) { // no-op } -func getSerfMemberEnterpriseMeta(member serf.Member) *structs.EnterpriseMeta { +func getSerfMemberEnterpriseMeta(member serf.Member) *acl.EnterpriseMeta { return structs.NodeEnterpriseMetaInDefaultPartition() } diff --git a/agent/consul/server_overview.go b/agent/consul/server_overview.go index b75ffed5d..1c42f3483 100644 --- a/agent/consul/server_overview.go +++ b/agent/consul/server_overview.go @@ -7,10 +7,12 @@ import ( "sync" "time" + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/usagemetrics" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-hclog" ) type OverviewManager struct { @@ -69,7 +71,7 @@ func getCatalogOverview(catalog *structs.CatalogContents) *structs.CatalogSummar serviceInstanceChecks := make(map[string][]*structs.HealthCheck) checkSummaries := make(map[string]structs.HealthSummary) - entMetaIDString := func(id string, entMeta structs.EnterpriseMeta) string { + entMetaIDString := func(id string, entMeta acl.EnterpriseMeta) string { return fmt.Sprintf("%s/%s/%s", id, entMeta.PartitionOrEmpty(), entMeta.NamespaceOrEmpty()) } diff --git a/agent/consul/session_endpoint_test.go b/agent/consul/session_endpoint_test.go index cbfcdc43f..277d326f3 100644 --- a/agent/consul/session_endpoint_test.go +++ b/agent/consul/session_endpoint_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" 
"github.com/hashicorp/consul/lib/stringslice" diff --git a/agent/consul/session_ttl.go b/agent/consul/session_ttl.go index 7052d5db1..1b2d1ff0c 100644 --- a/agent/consul/session_ttl.go +++ b/agent/consul/session_ttl.go @@ -7,6 +7,7 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -82,7 +83,7 @@ func (s *Server) resetSessionTimer(session *structs.Session) error { return nil } -func (s *Server) createSessionTimer(id string, ttl time.Duration, entMeta *structs.EnterpriseMeta) { +func (s *Server) createSessionTimer(id string, ttl time.Duration, entMeta *acl.EnterpriseMeta) { // Reset the session timer // Adjust the given TTL by the TTL multiplier. This is done // to give a client a grace period and to compensate for network @@ -95,7 +96,7 @@ func (s *Server) createSessionTimer(id string, ttl time.Duration, entMeta *struc // invalidateSession is invoked when a session TTL is reached and we // need to invalidate the session. -func (s *Server) invalidateSession(id string, entMeta *structs.EnterpriseMeta) { +func (s *Server) invalidateSession(id string, entMeta *acl.EnterpriseMeta) { defer metrics.MeasureSince([]string{"session_ttl", "invalidate"}, time.Now()) // Clear the session timer diff --git a/agent/consul/snapshot_endpoint_test.go b/agent/consul/snapshot_endpoint_test.go index 03e25f847..29f60618b 100644 --- a/agent/consul/snapshot_endpoint_test.go +++ b/agent/consul/snapshot_endpoint_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" autopilot "github.com/hashicorp/raft-autopilot" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" diff --git a/agent/consul/state/acl.go b/agent/consul/state/acl.go index 877037fe2..61fa3337f 100644 --- a/agent/consul/state/acl.go +++ b/agent/consul/state/acl.go @@ -6,6 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" pbacl "github.com/hashicorp/consul/proto/pbacl" ) @@ -571,17 +572,17 @@ func aclTokenSetTxn(tx WriteTxn, idx uint64, token *structs.ACLToken, opts ACLTo } // ACLTokenGetBySecret is used to look up an existing ACL token by its SecretID. -func (s *Store) ACLTokenGetBySecret(ws memdb.WatchSet, secret string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLToken, error) { +func (s *Store) ACLTokenGetBySecret(ws memdb.WatchSet, secret string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLToken, error) { return s.aclTokenGet(ws, secret, "id", entMeta) } // ACLTokenGetByAccessor is used to look up an existing ACL token by its AccessorID. 
-func (s *Store) ACLTokenGetByAccessor(ws memdb.WatchSet, accessor string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLToken, error) { +func (s *Store) ACLTokenGetByAccessor(ws memdb.WatchSet, accessor string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLToken, error) { return s.aclTokenGet(ws, accessor, indexAccessor, entMeta) } // aclTokenGet looks up a token using one of the indexes provided -func (s *Store) aclTokenGet(ws memdb.WatchSet, value, index string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLToken, error) { +func (s *Store) aclTokenGet(ws memdb.WatchSet, value, index string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLToken, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -616,7 +617,7 @@ func (s *Store) ACLTokenBatchGet(ws memdb.WatchSet, accessors []string) (uint64, return idx, tokens, nil } -func aclTokenGetTxn(tx ReadTxn, ws memdb.WatchSet, value, index string, entMeta *structs.EnterpriseMeta) (*structs.ACLToken, error) { +func aclTokenGetTxn(tx ReadTxn, ws memdb.WatchSet, value, index string, entMeta *acl.EnterpriseMeta) (*structs.ACLToken, error) { watchCh, rawToken, err := aclTokenGetFromIndex(tx, value, index, entMeta) if err != nil { return nil, fmt.Errorf("failed acl token lookup: %v", err) @@ -640,7 +641,7 @@ func aclTokenGetTxn(tx ReadTxn, ws memdb.WatchSet, value, index string, entMeta } // ACLTokenList return a list of ACL Tokens that match the policy, role, and method. -func (s *Store) ACLTokenList(ws memdb.WatchSet, local, global bool, policy, role, methodName string, methodMeta, entMeta *structs.EnterpriseMeta) (uint64, structs.ACLTokens, error) { +func (s *Store) ACLTokenList(ws memdb.WatchSet, local, global bool, policy, role, methodName string, methodMeta, entMeta *acl.EnterpriseMeta) (uint64, structs.ACLTokens, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -799,7 +800,7 @@ func (s *Store) expiresIndexName(local bool) string { // ACLTokenDeleteByAccessor is used to remove an existing ACL from the state store. If // the ACL does not exist this is a no-op and no error is returned. 
-func (s *Store) ACLTokenDeleteByAccessor(idx uint64, accessor string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLTokenDeleteByAccessor(idx uint64, accessor string, entMeta *acl.EnterpriseMeta) error { return s.aclTokenDelete(idx, accessor, indexAccessor, entMeta) } @@ -816,7 +817,7 @@ func (s *Store) ACLTokenBatchDelete(idx uint64, tokenIDs []string) error { return tx.Commit() } -func (s *Store) aclTokenDelete(idx uint64, value, index string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) aclTokenDelete(idx uint64, value, index string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -827,7 +828,7 @@ func (s *Store) aclTokenDelete(idx uint64, value, index string, entMeta *structs return tx.Commit() } -func aclTokenDeleteTxn(tx WriteTxn, idx uint64, value, index string, entMeta *structs.EnterpriseMeta) error { +func aclTokenDeleteTxn(tx WriteTxn, idx uint64, value, index string, entMeta *acl.EnterpriseMeta) error { // Look up the existing token _, token, err := aclTokenGetFromIndex(tx, value, index, entMeta) if err != nil { @@ -845,7 +846,7 @@ func aclTokenDeleteTxn(tx WriteTxn, idx uint64, value, index string, entMeta *st return aclTokenDeleteWithToken(tx, token.(*structs.ACLToken), idx) } -func aclTokenDeleteAllForAuthMethodTxn(tx WriteTxn, idx uint64, methodName string, methodGlobalLocality bool, methodMeta *structs.EnterpriseMeta) error { +func aclTokenDeleteAllForAuthMethodTxn(tx WriteTxn, idx uint64, methodName string, methodGlobalLocality bool, methodMeta *acl.EnterpriseMeta) error { // collect all the tokens linked with the given auth method. iter, err := aclTokenListByAuthMethod(tx, methodName, methodMeta, methodMeta.WithWildcardNamespace()) if err != nil { @@ -966,15 +967,15 @@ func aclPolicySetTxn(tx WriteTxn, idx uint64, policy *structs.ACLPolicy) error { return aclPolicyInsert(tx, policy) } -func (s *Store) ACLPolicyGetByID(ws memdb.WatchSet, id string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) { +func (s *Store) ACLPolicyGetByID(ws memdb.WatchSet, id string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) { return s.aclPolicyGet(ws, id, aclPolicyGetByID, entMeta) } -func (s *Store) ACLPolicyGetByName(ws memdb.WatchSet, name string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) { +func (s *Store) ACLPolicyGetByName(ws memdb.WatchSet, name string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) { return s.aclPolicyGet(ws, name, aclPolicyGetByName, entMeta) } -func aclPolicyGetByName(tx ReadTxn, name string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func aclPolicyGetByName(tx ReadTxn, name string, entMeta *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { // todo: accept non-pointer value if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -1006,9 +1007,9 @@ func (s *Store) ACLPolicyBatchGet(ws memdb.WatchSet, ids []string) (uint64, stru return idx, policies, nil } -type aclPolicyGetFn func(ReadTxn, string, *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) +type aclPolicyGetFn func(ReadTxn, string, *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) -func getPolicyWithTxn(tx ReadTxn, ws memdb.WatchSet, value string, fn aclPolicyGetFn, entMeta *structs.EnterpriseMeta) (*structs.ACLPolicy, error) { +func getPolicyWithTxn(tx ReadTxn, ws memdb.WatchSet, value string, fn aclPolicyGetFn, entMeta *acl.EnterpriseMeta) (*structs.ACLPolicy, error) { 
watchCh, policy, err := fn(tx, value, entMeta) if err != nil { return nil, fmt.Errorf("failed acl policy lookup: %v", err) @@ -1022,7 +1023,7 @@ func getPolicyWithTxn(tx ReadTxn, ws memdb.WatchSet, value string, fn aclPolicyG return policy.(*structs.ACLPolicy), nil } -func (s *Store) aclPolicyGet(ws memdb.WatchSet, value string, fn aclPolicyGetFn, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) { +func (s *Store) aclPolicyGet(ws memdb.WatchSet, value string, fn aclPolicyGetFn, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1036,7 +1037,7 @@ func (s *Store) aclPolicyGet(ws memdb.WatchSet, value string, fn aclPolicyGetFn, return idx, policy, nil } -func (s *Store) ACLPolicyList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ACLPolicies, error) { +func (s *Store) ACLPolicyList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ACLPolicies, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1057,11 +1058,11 @@ func (s *Store) ACLPolicyList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta return idx, result, nil } -func (s *Store) ACLPolicyDeleteByID(idx uint64, id string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLPolicyDeleteByID(idx uint64, id string, entMeta *acl.EnterpriseMeta) error { return s.aclPolicyDelete(idx, id, aclPolicyGetByID, entMeta) } -func (s *Store) ACLPolicyDeleteByName(idx uint64, name string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLPolicyDeleteByName(idx uint64, name string, entMeta *acl.EnterpriseMeta) error { return s.aclPolicyDelete(idx, name, aclPolicyGetByName, entMeta) } @@ -1077,7 +1078,7 @@ func (s *Store) ACLPolicyBatchDelete(idx uint64, policyIDs []string) error { return tx.Commit() } -func (s *Store) aclPolicyDelete(idx uint64, value string, fn aclPolicyGetFn, entMeta *structs.EnterpriseMeta) error { +func (s *Store) aclPolicyDelete(idx uint64, value string, fn aclPolicyGetFn, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -1088,7 +1089,7 @@ func (s *Store) aclPolicyDelete(idx uint64, value string, fn aclPolicyGetFn, ent return tx.Commit() } -func aclPolicyDeleteTxn(tx WriteTxn, idx uint64, value string, fn aclPolicyGetFn, entMeta *structs.EnterpriseMeta) error { +func aclPolicyDeleteTxn(tx WriteTxn, idx uint64, value string, fn aclPolicyGetFn, entMeta *acl.EnterpriseMeta) error { // Look up the existing token _, rawPolicy, err := fn(tx, value, entMeta) if err != nil { @@ -1197,17 +1198,17 @@ func aclRoleSetTxn(tx WriteTxn, idx uint64, role *structs.ACLRole, allowMissing return aclRoleInsert(tx, role) } -type aclRoleGetFn func(ReadTxn, string, *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) +type aclRoleGetFn func(ReadTxn, string, *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) -func (s *Store) ACLRoleGetByID(ws memdb.WatchSet, id string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLRole, error) { +func (s *Store) ACLRoleGetByID(ws memdb.WatchSet, id string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLRole, error) { return s.aclRoleGet(ws, id, aclRoleGetByID, entMeta) } -func (s *Store) ACLRoleGetByName(ws memdb.WatchSet, name string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLRole, error) { +func (s *Store) ACLRoleGetByName(ws memdb.WatchSet, name string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLRole, error) { return s.aclRoleGet(ws, name, aclRoleGetByName, entMeta) } -func aclRoleGetByName(tx ReadTxn, name 
string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func aclRoleGetByName(tx ReadTxn, name string, entMeta *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -1237,7 +1238,7 @@ func (s *Store) ACLRoleBatchGet(ws memdb.WatchSet, ids []string) (uint64, struct return idx, roles, nil } -func getRoleWithTxn(tx ReadTxn, ws memdb.WatchSet, value string, fn aclRoleGetFn, entMeta *structs.EnterpriseMeta) (*structs.ACLRole, error) { +func getRoleWithTxn(tx ReadTxn, ws memdb.WatchSet, value string, fn aclRoleGetFn, entMeta *acl.EnterpriseMeta) (*structs.ACLRole, error) { watchCh, rawRole, err := fn(tx, value, entMeta) if err != nil { return nil, fmt.Errorf("failed acl role lookup: %v", err) @@ -1256,7 +1257,7 @@ func getRoleWithTxn(tx ReadTxn, ws memdb.WatchSet, value string, fn aclRoleGetFn return nil, nil } -func (s *Store) aclRoleGet(ws memdb.WatchSet, value string, fn aclRoleGetFn, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLRole, error) { +func (s *Store) aclRoleGet(ws memdb.WatchSet, value string, fn aclRoleGetFn, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLRole, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1270,7 +1271,7 @@ func (s *Store) aclRoleGet(ws memdb.WatchSet, value string, fn aclRoleGetFn, ent return idx, role, nil } -func (s *Store) ACLRoleList(ws memdb.WatchSet, policy string, entMeta *structs.EnterpriseMeta) (uint64, structs.ACLRoles, error) { +func (s *Store) ACLRoleList(ws memdb.WatchSet, policy string, entMeta *acl.EnterpriseMeta) (uint64, structs.ACLRoles, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1310,11 +1311,11 @@ func (s *Store) ACLRoleList(ws memdb.WatchSet, policy string, entMeta *structs.E return idx, result, nil } -func (s *Store) ACLRoleDeleteByID(idx uint64, id string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLRoleDeleteByID(idx uint64, id string, entMeta *acl.EnterpriseMeta) error { return s.aclRoleDelete(idx, id, aclRoleGetByID, entMeta) } -func (s *Store) ACLRoleDeleteByName(idx uint64, name string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLRoleDeleteByName(idx uint64, name string, entMeta *acl.EnterpriseMeta) error { return s.aclRoleDelete(idx, name, aclRoleGetByName, entMeta) } @@ -1330,7 +1331,7 @@ func (s *Store) ACLRoleBatchDelete(idx uint64, roleIDs []string) error { return tx.Commit() } -func (s *Store) aclRoleDelete(idx uint64, value string, fn aclRoleGetFn, entMeta *structs.EnterpriseMeta) error { +func (s *Store) aclRoleDelete(idx uint64, value string, fn aclRoleGetFn, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -1341,7 +1342,7 @@ func (s *Store) aclRoleDelete(idx uint64, value string, fn aclRoleGetFn, entMeta return tx.Commit() } -func aclRoleDeleteTxn(tx WriteTxn, idx uint64, value string, fn aclRoleGetFn, entMeta *structs.EnterpriseMeta) error { +func aclRoleDeleteTxn(tx WriteTxn, idx uint64, value string, fn aclRoleGetFn, entMeta *acl.EnterpriseMeta) error { // Look up the existing role _, rawRole, err := fn(tx, value, entMeta) if err != nil { @@ -1417,11 +1418,11 @@ func aclBindingRuleSetTxn(tx WriteTxn, idx uint64, rule *structs.ACLBindingRule) return aclBindingRuleInsert(tx, rule) } -func (s *Store) ACLBindingRuleGetByID(ws memdb.WatchSet, id string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLBindingRule, error) { +func (s *Store) ACLBindingRuleGetByID(ws memdb.WatchSet, id 
string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLBindingRule, error) { return s.aclBindingRuleGet(ws, id, entMeta) } -func (s *Store) aclBindingRuleGet(ws memdb.WatchSet, value string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLBindingRule, error) { +func (s *Store) aclBindingRuleGet(ws memdb.WatchSet, value string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLBindingRule, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1441,7 +1442,7 @@ func (s *Store) aclBindingRuleGet(ws memdb.WatchSet, value string, entMeta *stru return idx, rule, nil } -func (s *Store) ACLBindingRuleList(ws memdb.WatchSet, methodName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ACLBindingRules, error) { +func (s *Store) ACLBindingRuleList(ws memdb.WatchSet, methodName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ACLBindingRules, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1471,7 +1472,7 @@ func (s *Store) ACLBindingRuleList(ws memdb.WatchSet, methodName string, entMeta return idx, result, nil } -func (s *Store) ACLBindingRuleDeleteByID(idx uint64, id string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLBindingRuleDeleteByID(idx uint64, id string, entMeta *acl.EnterpriseMeta) error { return s.aclBindingRuleDelete(idx, id, entMeta) } @@ -1485,7 +1486,7 @@ func (s *Store) ACLBindingRuleBatchDelete(idx uint64, bindingRuleIDs []string) e return tx.Commit() } -func (s *Store) aclBindingRuleDelete(idx uint64, id string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) aclBindingRuleDelete(idx uint64, id string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -1496,7 +1497,7 @@ func (s *Store) aclBindingRuleDelete(idx uint64, id string, entMeta *structs.Ent return tx.Commit() } -func aclBindingRuleDeleteTxn(tx WriteTxn, idx uint64, id string, entMeta *structs.EnterpriseMeta) error { +func aclBindingRuleDeleteTxn(tx WriteTxn, idx uint64, id string, entMeta *acl.EnterpriseMeta) error { // Look up the existing binding rule _, rawRule, err := aclBindingRuleGetByID(tx, id, entMeta) if err != nil { @@ -1515,7 +1516,7 @@ func aclBindingRuleDeleteTxn(tx WriteTxn, idx uint64, id string, entMeta *struct return nil } -func aclBindingRuleDeleteAllForAuthMethodTxn(tx WriteTxn, idx uint64, methodName string, entMeta *structs.EnterpriseMeta) error { +func aclBindingRuleDeleteAllForAuthMethodTxn(tx WriteTxn, idx uint64, methodName string, entMeta *acl.EnterpriseMeta) error { // collect them all iter, err := aclBindingRuleListByAuthMethod(tx, methodName, entMeta) if err != nil { @@ -1596,11 +1597,11 @@ func aclAuthMethodSetTxn(tx WriteTxn, idx uint64, method *structs.ACLAuthMethod) return aclAuthMethodInsert(tx, method) } -func (s *Store) ACLAuthMethodGetByName(ws memdb.WatchSet, name string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLAuthMethod, error) { +func (s *Store) ACLAuthMethodGetByName(ws memdb.WatchSet, name string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLAuthMethod, error) { return s.aclAuthMethodGet(ws, name, entMeta) } -func (s *Store) aclAuthMethodGet(ws memdb.WatchSet, name string, entMeta *structs.EnterpriseMeta) (uint64, *structs.ACLAuthMethod, error) { +func (s *Store) aclAuthMethodGet(ws memdb.WatchSet, name string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLAuthMethod, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1614,7 +1615,7 @@ func (s *Store) aclAuthMethodGet(ws memdb.WatchSet, name string, entMeta *struct return idx, method, nil } -func getAuthMethodWithTxn(tx 
ReadTxn, ws memdb.WatchSet, name string, entMeta *structs.EnterpriseMeta) (*structs.ACLAuthMethod, error) { +func getAuthMethodWithTxn(tx ReadTxn, ws memdb.WatchSet, name string, entMeta *acl.EnterpriseMeta) (*structs.ACLAuthMethod, error) { watchCh, rawMethod, err := aclAuthMethodGetByName(tx, name, entMeta) if err != nil { return nil, fmt.Errorf("failed acl auth method lookup: %v", err) @@ -1628,7 +1629,7 @@ func getAuthMethodWithTxn(tx ReadTxn, ws memdb.WatchSet, name string, entMeta *s return nil, nil } -func (s *Store) ACLAuthMethodList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ACLAuthMethods, error) { +func (s *Store) ACLAuthMethodList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ACLAuthMethods, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1650,11 +1651,11 @@ func (s *Store) ACLAuthMethodList(ws memdb.WatchSet, entMeta *structs.Enterprise return idx, result, nil } -func (s *Store) ACLAuthMethodDeleteByName(idx uint64, name string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLAuthMethodDeleteByName(idx uint64, name string, entMeta *acl.EnterpriseMeta) error { return s.aclAuthMethodDelete(idx, name, entMeta) } -func (s *Store) ACLAuthMethodBatchDelete(idx uint64, names []string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) ACLAuthMethodBatchDelete(idx uint64, names []string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -1669,7 +1670,7 @@ func (s *Store) ACLAuthMethodBatchDelete(idx uint64, names []string, entMeta *st return tx.Commit() } -func (s *Store) aclAuthMethodDelete(idx uint64, name string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) aclAuthMethodDelete(idx uint64, name string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -1680,7 +1681,7 @@ func (s *Store) aclAuthMethodDelete(idx uint64, name string, entMeta *structs.En return tx.Commit() } -func aclAuthMethodDeleteTxn(tx WriteTxn, idx uint64, name string, entMeta *structs.EnterpriseMeta) error { +func aclAuthMethodDeleteTxn(tx WriteTxn, idx uint64, name string, entMeta *acl.EnterpriseMeta) error { // Look up the existing method _, rawMethod, err := aclAuthMethodGetByName(tx, name, entMeta) if err != nil { @@ -1704,7 +1705,7 @@ func aclAuthMethodDeleteTxn(tx WriteTxn, idx uint64, name string, entMeta *struc return aclAuthMethodDeleteWithMethod(tx, method, idx) } -func aclTokenList(tx ReadTxn, entMeta *structs.EnterpriseMeta, locality bool) (memdb.ResultIterator, error) { +func aclTokenList(tx ReadTxn, entMeta *acl.EnterpriseMeta, locality bool) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() diff --git a/agent/consul/state/acl_oss.go b/agent/consul/state/acl_oss.go index d7bed1d80..67a272c24 100644 --- a/agent/consul/state/acl_oss.go +++ b/agent/consul/state/acl_oss.go @@ -9,17 +9,18 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) -func updateTableIndexEntries(tx WriteTxn, tableName string, modifyIndex uint64, _ *structs.EnterpriseMeta) error { +func updateTableIndexEntries(tx WriteTxn, tableName string, modifyIndex uint64, _ *acl.EnterpriseMeta) error { if err := indexUpdateMaxTxn(tx, modifyIndex, tableName); err != nil { return fmt.Errorf("failed updating %s index: %v", tableName, err) } return nil } -func aclPolicyGetByID(tx ReadTxn, id string, _ *structs.EnterpriseMeta) (<-chan 
struct{}, interface{}, error) { +func aclPolicyGetByID(tx ReadTxn, id string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(tableACLPolicies, indexID, id) } @@ -36,7 +37,7 @@ func aclPolicyDeleteWithPolicy(tx WriteTxn, policy *structs.ACLPolicy, idx uint6 return nil } -func aclPolicyMaxIndex(tx ReadTxn, _ *structs.ACLPolicy, _ *structs.EnterpriseMeta) uint64 { +func aclPolicyMaxIndex(tx ReadTxn, _ *structs.ACLPolicy, _ *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableACLPolicies) } @@ -52,23 +53,23 @@ func (s *Store) ACLPolicyUpsertValidateEnterprise(*structs.ACLPolicy, *structs.A ///// ACL Token Functions ///// /////////////////////////////////////////////////////////////////////////////// -func aclTokenGetFromIndex(tx ReadTxn, id string, index string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func aclTokenGetFromIndex(tx ReadTxn, id string, index string, entMeta *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(tableACLTokens, index, id) } -func aclTokenListAll(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclTokenListAll(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLTokens, indexID) } -func aclTokenListByPolicy(tx ReadTxn, policy string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclTokenListByPolicy(tx ReadTxn, policy string, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLTokens, indexPolicies, Query{Value: policy}) } -func aclTokenListByRole(tx ReadTxn, role string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclTokenListByRole(tx ReadTxn, role string, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLTokens, indexRoles, Query{Value: role}) } -func aclTokenListByAuthMethod(tx ReadTxn, authMethod string, _, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclTokenListByAuthMethod(tx ReadTxn, authMethod string, _, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLTokens, indexAuthMethod, AuthMethodQuery{Value: authMethod}) } @@ -85,7 +86,7 @@ func aclTokenDeleteWithToken(tx WriteTxn, token *structs.ACLToken, idx uint64) e return nil } -func aclTokenMaxIndex(tx ReadTxn, _ *structs.ACLToken, entMeta *structs.EnterpriseMeta) uint64 { +func aclTokenMaxIndex(tx ReadTxn, _ *structs.ACLToken, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableACLTokens) } @@ -101,7 +102,7 @@ func (s *Store) ACLTokenUpsertValidateEnterprise(token *structs.ACLToken, existi ///// ACL Role Functions ///// /////////////////////////////////////////////////////////////////////////////// -func aclRoleGetByID(tx ReadTxn, id string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func aclRoleGetByID(tx ReadTxn, id string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(tableACLRoles, indexID, id) } @@ -118,7 +119,7 @@ func aclRoleDeleteWithRole(tx WriteTxn, role *structs.ACLRole, idx uint64) error return nil } -func aclRoleMaxIndex(tx ReadTxn, _ *structs.ACLRole, _ *structs.EnterpriseMeta) uint64 { +func aclRoleMaxIndex(tx ReadTxn, _ *structs.ACLRole, _ *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableACLRoles) } @@ -134,15 +135,15 @@ func (s *Store) ACLRoleUpsertValidateEnterprise(role *structs.ACLRole, existing ///// ACL Binding Rule Functions ///// 
/////////////////////////////////////////////////////////////////////////////// -func aclBindingRuleGetByID(tx ReadTxn, id string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func aclBindingRuleGetByID(tx ReadTxn, id string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(tableACLBindingRules, indexID, id) } -func aclBindingRuleList(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclBindingRuleList(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLBindingRules, indexID) } -func aclBindingRuleListByAuthMethod(tx ReadTxn, method string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclBindingRuleListByAuthMethod(tx ReadTxn, method string, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLBindingRules, indexAuthMethod, Query{Value: method}) } @@ -159,7 +160,7 @@ func aclBindingRuleDeleteWithRule(tx WriteTxn, rule *structs.ACLBindingRule, idx return nil } -func aclBindingRuleMaxIndex(tx ReadTxn, _ *structs.ACLBindingRule, entMeta *structs.EnterpriseMeta) uint64 { +func aclBindingRuleMaxIndex(tx ReadTxn, _ *structs.ACLBindingRule, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableACLBindingRules) } @@ -175,11 +176,11 @@ func (s *Store) ACLBindingRuleUpsertValidateEnterprise(rule *structs.ACLBindingR ///// ACL Auth Method Functions ///// /////////////////////////////////////////////////////////////////////////////// -func aclAuthMethodGetByName(tx ReadTxn, method string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func aclAuthMethodGetByName(tx ReadTxn, method string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(tableACLAuthMethods, indexID, Query{Value: method}) } -func aclAuthMethodList(tx ReadTxn, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func aclAuthMethodList(tx ReadTxn, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableACLAuthMethods, indexID) } @@ -196,7 +197,7 @@ func aclAuthMethodDeleteWithMethod(tx WriteTxn, method *structs.ACLAuthMethod, i return nil } -func aclAuthMethodMaxIndex(tx ReadTxn, _ *structs.ACLAuthMethod, entMeta *structs.EnterpriseMeta) uint64 { +func aclAuthMethodMaxIndex(tx ReadTxn, _ *structs.ACLAuthMethod, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableACLAuthMethods) } diff --git a/agent/consul/state/acl_oss_test.go b/agent/consul/state/acl_oss_test.go index 22b3c3b94..f86afc1a3 100644 --- a/agent/consul/state/acl_oss_test.go +++ b/agent/consul/state/acl_oss_test.go @@ -3,7 +3,10 @@ package state -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) func testIndexerTableACLPolicies() map[string]indexerTestCase { obj := &structs.ACLPolicy{ @@ -177,7 +180,7 @@ func testIndexerTableACLBindingRules() map[string]indexerTestCase { func testIndexerTableACLAuthMethods() map[string]indexerTestCase { obj := &structs.ACLAuthMethod{ Name: "ThEAuthMethod", - EnterpriseMeta: structs.EnterpriseMeta{}, + EnterpriseMeta: acl.EnterpriseMeta{}, } encodedName := []byte{0x74, 0x68, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x0} return map[string]indexerTestCase{ diff --git a/agent/consul/state/acl_schema.go b/agent/consul/state/acl_schema.go index 1e8f415f4..f2b77dcbf 100644 --- a/agent/consul/state/acl_schema.go +++ b/agent/consul/state/acl_schema.go 
@@ -6,6 +6,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -227,9 +228,9 @@ func indexFromUUIDQuery(raw interface{}) ([]byte, error) { func prefixIndexFromUUIDQuery(arg interface{}) ([]byte, error) { switch v := arg.(type) { - case *structs.EnterpriseMeta: + case *acl.EnterpriseMeta: return nil, nil - case structs.EnterpriseMeta: + case acl.EnterpriseMeta: return nil, nil case Query: return variableLengthUUIDStringToBytes(v.Value) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index b882931d6..db256cfe1 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -57,7 +57,7 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) { // Services is used to pull the full list of services for a given node for use // during snapshots. -func (s *Snapshot) Services(node string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.NodeEnterpriseMetaInDefaultPartition() @@ -70,7 +70,7 @@ func (s *Snapshot) Services(node string, entMeta *structs.EnterpriseMeta) (memdb // Checks is used to pull the full list of checks for a given node for use // during snapshots. -func (s *Snapshot) Checks(node string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.NodeEnterpriseMetaInDefaultPartition() @@ -136,7 +136,7 @@ func (s *Store) ensureCheckIfNodeMatches( nodePartition string, check *structs.HealthCheck, ) error { - if !strings.EqualFold(check.Node, node) || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) { + if !strings.EqualFold(check.Node, node) || !acl.EqualPartitions(nodePartition, check.PartitionOrDefault()) { return fmt.Errorf("check node %q does not match node %q", printNodeName(check.Node, check.PartitionOrDefault()), printNodeName(node, nodePartition), @@ -149,7 +149,7 @@ func (s *Store) ensureCheckIfNodeMatches( } func printNodeName(nodeName, partition string) string { - if structs.IsDefaultPartition(partition) { + if acl.IsDefaultPartition(partition) { return nodeName } return partition + "/" + nodeName @@ -396,7 +396,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod } // GetNode is used to retrieve a node registration by node name ID. 
-func (s *Store) GetNode(nodeNameOrID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.Node, error) { +func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.Node, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -416,7 +416,7 @@ func (s *Store) GetNode(nodeNameOrID string, entMeta *structs.EnterpriseMeta) (u return idx, node, nil } -func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *structs.EnterpriseMeta) (*structs.Node, error) { +func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (*structs.Node, error) { node, err := tx.First(tableNodes, indexID, Query{ Value: nodeNameOrID, EnterpriseMeta: *entMeta, @@ -430,7 +430,7 @@ func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *structs.EnterpriseMeta return nil, nil } -func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *structs.EnterpriseMeta) (*structs.Node, error) { +func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta) (*structs.Node, error) { node, err := tx.First(tableNodes, indexUUID+"_prefix", Query{ Value: string(id), EnterpriseMeta: *entMeta, @@ -445,7 +445,7 @@ func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *structs.EnterpriseMeta) } // GetNodeID is used to retrieve a node registration by node ID. -func (s *Store) GetNodeID(id types.NodeID, entMeta *structs.EnterpriseMeta) (uint64, *structs.Node, error) { +func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta) (uint64, *structs.Node, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -463,7 +463,7 @@ func (s *Store) GetNodeID(id types.NodeID, entMeta *structs.EnterpriseMeta) (uin } // Nodes is used to return all of the known nodes. -func (s *Store) Nodes(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Nodes, error) { +func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Nodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -491,7 +491,7 @@ func (s *Store) Nodes(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint6 } // NodesByMeta is used to return all nodes with the given metadata key/value pairs. -func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.Nodes, error) { +func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.Nodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -537,7 +537,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet } // DeleteNode is used to delete a given node by its ID. -func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -557,7 +557,7 @@ func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *structs.Enterpr // deleteNodeCASTxn is used to try doing a node delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. -func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, entMeta *structs.EnterpriseMeta) (bool, error) { +func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, entMeta *acl.EnterpriseMeta) (bool, error) { // Look up the node. 
node, err := getNodeTxn(tx, nodeName, entMeta) if err != nil { @@ -584,7 +584,7 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, // deleteNodeTxn is the inner method used for removing a node from // the store within a given transaction. -func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta *acl.EnterpriseMeta) error { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -978,7 +978,7 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, } // Services returns all services along with a list of associated tags. -func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Services, error) { +func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1018,14 +1018,14 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui return idx, results, nil } -func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceList, error) { tx := s.db.Txn(false) defer tx.Abort() return serviceListTxn(tx, ws, entMeta) } -func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceList, error) { idx := catalogServicesMaxIndex(tx, entMeta) services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) @@ -1049,7 +1049,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMe } // ServicesByNodeMeta returns all services, filtered by the given node metadata. -func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.Services, error) { +func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1144,7 +1144,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, // * return when the last instance of a service is removed // * block until an instance for this service is available, or another // service is unregistered. -func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *structs.EnterpriseMeta) uint64 { +func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta) uint64 { idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta) return idx } @@ -1163,7 +1163,7 @@ func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bo // returned for the chan. This allows for blocking watchers to _only_ watch this // one chan in the common case, falling back to watching all touched MemDB // indexes in more complicated cases. 
-func maxIndexAndWatchChForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *structs.EnterpriseMeta) (uint64, <-chan struct{}) { +func maxIndexAndWatchChForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta) (uint64, <-chan struct{}) { if !serviceExists { res, err := catalogServiceLastExtinctionIndex(tx, entMeta) if missingIdx, ok := res.(*IndexEntry); ok && err == nil { @@ -1207,7 +1207,7 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn, // ConnectServiceNodes returns the nodes associated with a Connect // compatible destination for the given service name. This will include // both proxies and native integrations. -func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -1220,7 +1220,7 @@ func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMe } // ServiceNodes returns the nodes associated with a given service name. -func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -1294,7 +1294,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // ServiceTagNodes returns the nodes associated with a given service, filtering // out services that don't contain the given tags. -func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1364,7 +1364,7 @@ func serviceTagsFilter(sn *structs.ServiceNode, tags []string) bool { // ServiceAddressNodes returns the nodes associated with a given service, filtering // out services that don't match the given serviceAddress -func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1401,7 +1401,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta * // parseServiceNodes iterates over a services query and fills in the node details, // returning a ServiceNodes slice. -func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes, entMeta *structs.EnterpriseMeta) (structs.ServiceNodes, error) { +func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta) (structs.ServiceNodes, error) { // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) @@ -1446,7 +1446,7 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo // NodeService is used to retrieve a specific service associated with the given // node. 
-func (s *Store) NodeService(nodeName string, serviceID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeService, error) { +func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeService, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1462,7 +1462,7 @@ func (s *Store) NodeService(nodeName string, serviceID string, entMeta *structs. return idx, service, nil } -func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) { +func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (*structs.NodeService, error) { // TODO: pass non-pointer type for ent meta if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -1485,7 +1485,7 @@ func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs. return nil, nil } -func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { +func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1555,7 +1555,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *st } // NodeServices is used to query service registrations by node name or UUID. -func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeServices, error) { +func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeServices, error) { done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, false) if done || err != nil { return idx, nil, err @@ -1579,7 +1579,7 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *st } // NodeServices is used to query service registrations by node name or UUID. -func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeServiceList, error) { +func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeServiceList, error) { done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, true) if done || err != nil { return idx, nil, err @@ -1606,7 +1606,7 @@ func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta } // DeleteService is used to delete a given service associated with a node. -func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -1621,7 +1621,7 @@ func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *s // deleteServiceCASTxn is used to try doing a service delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given service, then the call is a noop, otherwise a normal delete is invoked. 
-func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (bool, error) { +func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (bool, error) { // Look up the service. service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta) if err != nil { @@ -1648,7 +1648,7 @@ func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, ser // deleteServiceTxn is the inner method called to remove a service // registration within an existing transaction. -func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) error { // TODO: pass non-pointer type for ent meta if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -1751,7 +1751,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st // freeServiceVirtualIP is used to free a virtual IP for a service after the last instance // is removed. -func freeServiceVirtualIP(tx WriteTxn, svc string, excludeGateway *structs.ServiceName, entMeta *structs.EnterpriseMeta) error { +func freeServiceVirtualIP(tx WriteTxn, svc string, excludeGateway *structs.ServiceName, entMeta *acl.EnterpriseMeta) error { supported, err := virtualIPsSupported(tx, nil) if err != nil { return err @@ -1818,7 +1818,7 @@ func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error { } // updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node -func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMeta *structs.EnterpriseMeta) error { +func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMeta *acl.EnterpriseMeta) error { services, err := tx.Get(tableServices, indexNode, Query{ Value: nodeID, EnterpriseMeta: *entMeta.WithWildcardNamespace(), @@ -1977,7 +1977,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc // NodeCheck is used to retrieve a specific check associated with the given // node. -func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { +func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1986,7 +1986,7 @@ func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *struc // nodeCheckTxn is used as the inner method to handle reading a health check // from the state store. -func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { +func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { // Get the table index. idx := catalogChecksMaxIndex(tx, entMeta) @@ -2009,7 +2009,7 @@ func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta // NodeChecks is used to retrieve checks associated with the // given node from the state store. 
-func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2037,7 +2037,7 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *structs. // ServiceChecks is used to get all checks associated with a // given service ID. The query is performed against a service // _name_ instead of a service ID. -func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2065,7 +2065,7 @@ func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *st // given service ID, filtered by the given node metadata values. The query // is performed against a service _name_ instead of a service ID. func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, - filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { + filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2088,7 +2088,7 @@ func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, // ChecksInState is used to query the state store for all checks // which are in the provided state. -func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2106,7 +2106,7 @@ func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *structs. // ChecksInStateByNodeMeta is used to query the state store for all checks // which are in the provided state, filtered by the given node metadata values. -func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2118,7 +2118,7 @@ func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta) } -func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *structs.EnterpriseMeta) (uint64, memdb.ResultIterator, error) { +func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta) (uint64, memdb.ResultIterator, error) { // Get the table index. idx := catalogChecksMaxIndex(tx, entMeta) @@ -2147,7 +2147,7 @@ func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *stru // repetitive code for returning health checks filtered by node metadata fields. 
func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, idx uint64, iter memdb.ResultIterator, filters map[string]string, - entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { + entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. @@ -2183,7 +2183,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, } // DeleteCheck is used to delete a health check registration. -func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error { +func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -2198,7 +2198,7 @@ func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entM // deleteCheckCASTxn is used to try doing a check delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. -func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) (bool, error) { +func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (bool, error) { // Try to retrieve the existing health check. _, hc, err := getNodeCheckTxn(tx, node, checkID, entMeta) if err != nil { @@ -2227,7 +2227,7 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch type NodeServiceQuery struct { Node string Service string - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer @@ -2244,7 +2244,7 @@ func (q NodeServiceQuery) PartitionOrDefault() string { // deleteCheckTxn is the inner method used to call a health // check deletion within an existing transaction. -func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error { +func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) error { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } @@ -2337,19 +2337,19 @@ func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.Ser } // CheckServiceNodes is used to query all nodes and checks for a given service. -func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { return s.checkServiceNodes(ws, serviceName, false, entMeta) } // CheckConnectServiceNodes is used to query all nodes and checks for Connect // compatible endpoints for a given service. -func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { return s.checkServiceNodes(ws, serviceName, true, entMeta) } // CheckIngressServiceNodes is used to query all nodes and checks for ingress // endpoints for a given service. 
-func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2385,14 +2385,14 @@ func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, return maxIdx, results, nil } -func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() return checkServiceNodesTxn(tx, ws, serviceName, connect, entMeta) } -func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { index := indexService if connect { index = indexConnect @@ -2526,7 +2526,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // CheckServiceTagNodes is used to query all nodes and checks for a given // service, filtering out services that don't contain the given tag. -func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2559,7 +2559,7 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags } // GatewayServices is used to query all services associated with a gateway -func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *structs.EnterpriseMeta) (uint64, structs.GatewayServices, error) { +func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2631,7 +2631,7 @@ func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKi func parseCheckServiceNodes( tx ReadTxn, ws memdb.WatchSet, idx uint64, services structs.ServiceNodes, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, err error) (uint64, structs.CheckServiceNodes, error) { if err != nil { return 0, nil, err @@ -2722,7 +2722,7 @@ func parseCheckServiceNodes( // NodeInfo is used to generate a dump of a single node. The dump includes // all services and checks which are registered against the node. -func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { +func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2748,7 +2748,7 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *structs.Enterp // NodeDump is used to generate a dump of all nodes. This call is expensive // as it has to query every node, service, and check. 
The response can also // be quite large since there is currently no filtering applied. -func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { +func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2764,7 +2764,7 @@ func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui return parseNodes(tx, ws, idx, nodes, entMeta) } -func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2775,7 +2775,7 @@ func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind } } -func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { // Get the table index idx := catalogMaxIndexWatch(tx, ws, entMeta, true) @@ -2793,7 +2793,7 @@ func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.Enterpris return parseCheckServiceNodes(tx, nil, idx, results, entMeta, err) } -func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { // unlike when we are dumping all services here we only need to watch the kind specific index entry for changing (or nodes, checks) // updating any services, nodes or checks will bump the appropriate service kind index so there is no need to watch any of the individual // entries @@ -2821,7 +2821,7 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, // containing the nodes along with all of their associated services // and/or health checks. 
func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, - iter memdb.ResultIterator, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { + iter memdb.ResultIterator, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -2900,7 +2900,7 @@ func checkSessionsTxn(tx ReadTxn, hc *structs.HealthCheck) ([]*sessionCheck, err } // updateGatewayServices associates services with gateways as specified in a gateway config entry -func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, entMeta *structs.EnterpriseMeta) error { +func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, entMeta *acl.EnterpriseMeta) error { var ( noChange bool gatewayServices structs.GatewayServices @@ -2970,7 +2970,7 @@ func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, en return nil } -func getTermGatewayVirtualIPs(tx WriteTxn, services []structs.LinkedService, entMeta *structs.EnterpriseMeta) (map[string]structs.ServiceAddress, error) { +func getTermGatewayVirtualIPs(tx WriteTxn, services []structs.LinkedService, entMeta *acl.EnterpriseMeta) (map[string]structs.ServiceAddress, error) { addrs := make(map[string]structs.ServiceAddress, len(services)) for _, s := range services { sn := structs.ServiceName{Name: s.Name, EnterpriseMeta: *entMeta} @@ -2985,7 +2985,7 @@ func getTermGatewayVirtualIPs(tx WriteTxn, services []structs.LinkedService, ent return addrs, nil } -func updateTerminatingGatewayVirtualIPs(tx WriteTxn, idx uint64, conf *structs.TerminatingGatewayConfigEntry, entMeta *structs.EnterpriseMeta) error { +func updateTerminatingGatewayVirtualIPs(tx WriteTxn, idx uint64, conf *structs.TerminatingGatewayConfigEntry, entMeta *acl.EnterpriseMeta) error { // Build the current map of services with virtual IPs for this gateway services := conf.Services addrs, err := getTermGatewayVirtualIPs(tx, services, entMeta) @@ -3067,7 +3067,7 @@ func ingressConfigGatewayServices( tx ReadTxn, gateway structs.ServiceName, conf structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (bool, structs.GatewayServices, error) { entry, ok := conf.(*structs.IngressGatewayConfigEntry) if !ok { @@ -3112,7 +3112,7 @@ func terminatingConfigGatewayServices( tx ReadTxn, gateway structs.ServiceName, conf structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (bool, structs.GatewayServices, error) { entry, ok := conf.(*structs.TerminatingGatewayConfigEntry) if !ok { @@ -3149,7 +3149,7 @@ func terminatingConfigGatewayServices( } // updateGatewayNamespace is used to target all services within a namespace -func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewayService, entMeta *structs.EnterpriseMeta) error { +func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewayService, entMeta *acl.EnterpriseMeta) error { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } @@ -3343,7 +3343,7 @@ func (s *Store) collectGatewayServices(tx ReadTxn, ws memdb.WatchSet, iter memdb // TODO(ingress): How to handle index rolling back when a config entry is // deleted that references a service? // We might need something like the service_last_extinction index? 
-func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { // Look up gateway name associated with the service gws, err := tx.Get(tableGatewayServices, indexService, structs.NewServiceName(service, entMeta)) if err != nil { @@ -3448,7 +3448,7 @@ func (s *Store) ServiceTopology( dc, service string, kind structs.ServiceKind, defaultAllow acl.EnforcementDecision, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceTopology, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -3883,7 +3883,7 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS oldUpstreams := make(map[structs.ServiceName]bool) if e, ok := existing.(*structs.ServiceNode); ok { for _, u := range e.ServiceProxy.Upstreams { - upstreamMeta := structs.NewEnterpriseMetaWithPartition(e.PartitionOrDefault(), u.DestinationNamespace) + upstreamMeta := acl.NewEnterpriseMetaWithPartition(e.PartitionOrDefault(), u.DestinationNamespace) sn := structs.NewServiceName(u.DestinationName, &upstreamMeta) oldUpstreams[sn] = true @@ -3899,7 +3899,7 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS } // TODO (freddy): Account for upstream datacenter - upstreamMeta := structs.NewEnterpriseMetaWithPartition(svc.PartitionOrDefault(), u.DestinationNamespace) + upstreamMeta := acl.NewEnterpriseMetaWithPartition(svc.PartitionOrDefault(), u.DestinationNamespace) upstream := structs.NewServiceName(u.DestinationName, &upstreamMeta) obj, err := tx.First(tableMeshTopology, indexID, upstream, downstream) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index eaca440a8..91e1bf361 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -16,7 +16,7 @@ import ( // a specific service. type EventSubjectService struct { Key string - EnterpriseMeta structs.EnterpriseMeta + EnterpriseMeta acl.EnterpriseMeta overrideKey string overrideNamespace string @@ -128,7 +128,7 @@ func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc { type nodeServiceTuple struct { Node string ServiceID string - EntMeta structs.EnterpriseMeta + EntMeta acl.EnterpriseMeta } func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple { @@ -553,7 +553,7 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod // given node. This mirrors some of the the logic in the oddly-named // parseCheckServiceNodes but is more efficient since we know they are all on // the same node. -func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *structs.EnterpriseMeta) ([]stream.Event, error) { +func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta) ([]stream.Event, error) { services, err := tx.Get(tableServices, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, @@ -580,7 +580,7 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta // getNodeAndNodeChecks returns a the node structure and a function that returns // the full list of checks for a specific service on that node. 
-func getNodeAndChecks(tx ReadTxn, node string, entMeta *structs.EnterpriseMeta) (*structs.Node, serviceChecksFunc, error) { +func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*structs.Node, serviceChecksFunc, error) { // Fetch the node nodeRaw, err := tx.First(tableNodes, indexID, Query{ Value: node, diff --git a/agent/consul/state/catalog_oss.go b/agent/consul/state/catalog_oss.go index f2902ca71..8a30d4589 100644 --- a/agent/consul/state/catalog_oss.go +++ b/agent/consul/state/catalog_oss.go @@ -9,20 +9,21 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) func withEnterpriseSchema(_ *memdb.DBSchema) {} -func serviceIndexName(name string, _ *structs.EnterpriseMeta) string { +func serviceIndexName(name string, _ *acl.EnterpriseMeta) string { return fmt.Sprintf("service.%s", name) } -func serviceKindIndexName(kind structs.ServiceKind, _ *structs.EnterpriseMeta) string { +func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta) string { return "service_kind." + kind.Normalized() } -func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *structs.EnterpriseMeta) error { +func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *acl.EnterpriseMeta) error { // overall nodes index if err := indexUpdateMaxTxn(tx, idx, tableNodes); err != nil { return fmt.Errorf("failed updating index: %s", err) @@ -31,7 +32,7 @@ func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *structs.Enterpr return nil } -func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *structs.EnterpriseMeta) error { +func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { // overall services index if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil { return fmt.Errorf("failed updating index: %s", err) @@ -40,7 +41,7 @@ func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *structs.Enterprise return nil } -func catalogUpdateServiceKindIndexes(tx WriteTxn, kind structs.ServiceKind, idx uint64, _ *structs.EnterpriseMeta) error { +func catalogUpdateServiceKindIndexes(tx WriteTxn, kind structs.ServiceKind, idx uint64, _ *acl.EnterpriseMeta) error { // service-kind index if err := indexUpdateMaxTxn(tx, idx, serviceKindIndexName(kind, nil)); err != nil { return fmt.Errorf("failed updating index: %s", err) @@ -49,7 +50,7 @@ func catalogUpdateServiceKindIndexes(tx WriteTxn, kind structs.ServiceKind, idx return nil } -func catalogUpdateServiceIndexes(tx WriteTxn, serviceName string, idx uint64, _ *structs.EnterpriseMeta) error { +func catalogUpdateServiceIndexes(tx WriteTxn, serviceName string, idx uint64, _ *acl.EnterpriseMeta) error { // per-service index if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil)); err != nil { return fmt.Errorf("failed updating index: %s", err) @@ -58,7 +59,7 @@ func catalogUpdateServiceIndexes(tx WriteTxn, serviceName string, idx uint64, _ return nil } -func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *structs.EnterpriseMeta) error { +func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { if err := tx.Insert(tableIndex, &IndexEntry{indexServiceExtinction, idx}); err != nil { return fmt.Errorf("failed updating missing service extinction index: %s", err) } @@ -109,49 +110,49 @@ func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error { return nil } -func catalogNodesMaxIndex(tx ReadTxn, entMeta 
*structs.EnterpriseMeta) uint64 { +func catalogNodesMaxIndex(tx ReadTxn, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableNodes) } -func catalogServicesMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta) uint64 { +func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableServices) } -func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { +func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(tableIndex, "id", serviceIndexName(serviceName, nil)) } -func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) uint64 { +func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil)) } -func catalogServiceListNoWildcard(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableServices, indexID) } -func catalogServiceListByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { +func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { return tx.Get(tableServices, indexNode, Query{Value: node}) } -func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *structs.EnterpriseMeta) (interface{}, error) { +func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta) (interface{}, error) { return tx.First(tableIndex, "id", indexServiceExtinction) } -func catalogMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta, checks bool) uint64 { +func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, checks bool) uint64 { if checks { return maxIndexTxn(tx, tableNodes, tableServices, tableChecks) } return maxIndexTxn(tx, tableNodes, tableServices) } -func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *structs.EnterpriseMeta, checks bool) uint64 { +func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, checks bool) uint64 { if checks { return maxIndexWatchTxn(tx, ws, tableNodes, tableServices, tableChecks) } return maxIndexWatchTxn(tx, ws, tableNodes, tableServices) } -func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *structs.EnterpriseMeta) error { +func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { // update the universal index entry if err := tx.Insert(tableIndex, &IndexEntry{tableChecks, idx}); err != nil { return fmt.Errorf("failed updating index: %s", err) @@ -159,7 +160,7 @@ func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *structs.EnterpriseMet return nil } -func catalogChecksMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta) uint64 { +func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableChecks) } @@ -180,11 +181,11 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error return nil } -func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) (*structs.EnterpriseMeta, error) { +func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) (*acl.EnterpriseMeta, error) { return nil, nil } -func (s *Store) ValidateRegisterRequest(_ *structs.RegisterRequest) 
(*structs.EnterpriseMeta, error) { +func (s *Store) ValidateRegisterRequest(_ *structs.RegisterRequest) (*acl.EnterpriseMeta, error) { return nil, nil } diff --git a/agent/consul/state/catalog_oss_test.go b/agent/consul/state/catalog_oss_test.go index 5811416b1..9edaff833 100644 --- a/agent/consul/state/catalog_oss_test.go +++ b/agent/consul/state/catalog_oss_test.go @@ -6,6 +6,7 @@ package state import ( "net" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/types" ) @@ -33,7 +34,7 @@ func testIndexerTableChecks() map[string]indexerTestCase { }, prefix: []indexValue{ { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { @@ -193,11 +194,11 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, prefix: []indexValue{ { - source: (*structs.EnterpriseMeta)(nil), + source: (*acl.EnterpriseMeta)(nil), expected: nil, }, { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { @@ -220,11 +221,11 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, prefix: []indexValue{ { - source: (*structs.EnterpriseMeta)(nil), + source: (*acl.EnterpriseMeta)(nil), expected: nil, }, { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { // partial length @@ -286,11 +287,11 @@ func testIndexerTableServices() map[string]indexerTestCase { }, prefix: []indexValue{ { - source: (*structs.EnterpriseMeta)(nil), + source: (*acl.EnterpriseMeta)(nil), expected: nil, }, { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { diff --git a/agent/consul/state/catalog_schema.go b/agent/consul/state/catalog_schema.go index 9d0b447dc..b2d0907dc 100644 --- a/agent/consul/state/catalog_schema.go +++ b/agent/consul/state/catalog_schema.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -589,7 +590,7 @@ type upstreamDownstream struct { type NodeCheckQuery struct { Node string CheckID string - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer @@ -709,7 +710,7 @@ func kindServiceNameTableSchema() *memdb.TableSchema { type KindServiceNameQuery struct { Kind structs.ServiceKind Name string - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index 507388283..1ae05bc11 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -6,6 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/discoverychain" @@ -95,13 +96,13 @@ func (s *Restore) ConfigEntry(c structs.ConfigEntry) error { } // ConfigEntry is called to get a given config entry. 
-func (s *Store) ConfigEntry(ws memdb.WatchSet, kind, name string, entMeta *structs.EnterpriseMeta) (uint64, structs.ConfigEntry, error) { +func (s *Store) ConfigEntry(ws memdb.WatchSet, kind, name string, entMeta *acl.EnterpriseMeta) (uint64, structs.ConfigEntry, error) { tx := s.db.Txn(false) defer tx.Abort() return configEntryTxn(tx, ws, kind, name, entMeta) } -func configEntryTxn(tx ReadTxn, ws memdb.WatchSet, kind, name string, entMeta *structs.EnterpriseMeta) (uint64, structs.ConfigEntry, error) { +func configEntryTxn(tx ReadTxn, ws memdb.WatchSet, kind, name string, entMeta *acl.EnterpriseMeta) (uint64, structs.ConfigEntry, error) { // Get the index idx := maxIndexTxn(tx, tableConfigEntries) @@ -124,19 +125,19 @@ func configEntryTxn(tx ReadTxn, ws memdb.WatchSet, kind, name string, entMeta *s } // ConfigEntries is called to get all config entry objects. -func (s *Store) ConfigEntries(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, []structs.ConfigEntry, error) { +func (s *Store) ConfigEntries(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, []structs.ConfigEntry, error) { return s.ConfigEntriesByKind(ws, "", entMeta) } // ConfigEntriesByKind is called to get all config entry objects with the given kind. // If kind is empty, all config entries will be returned. -func (s *Store) ConfigEntriesByKind(ws memdb.WatchSet, kind string, entMeta *structs.EnterpriseMeta) (uint64, []structs.ConfigEntry, error) { +func (s *Store) ConfigEntriesByKind(ws memdb.WatchSet, kind string, entMeta *acl.EnterpriseMeta) (uint64, []structs.ConfigEntry, error) { tx := s.db.Txn(false) defer tx.Abort() return configEntriesByKindTxn(tx, ws, kind, entMeta) } -func configEntriesByKindTxn(tx ReadTxn, ws memdb.WatchSet, kind string, entMeta *structs.EnterpriseMeta) (uint64, []structs.ConfigEntry, error) { +func configEntriesByKindTxn(tx ReadTxn, ws memdb.WatchSet, kind string, entMeta *acl.EnterpriseMeta) (uint64, []structs.ConfigEntry, error) { // Get the index and watch for updates idx := maxIndexWatchTxn(tx, ws, tableConfigEntries) @@ -278,7 +279,7 @@ func (s *Store) DeleteConfigEntryCAS(idx, cidx uint64, conf structs.ConfigEntry) return err == nil, err } -func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -290,7 +291,7 @@ func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *struct } // TODO: accept structs.ConfigEntry instead of individual fields -func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *structs.EnterpriseMeta) error { +func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *acl.EnterpriseMeta) error { q := configentry.NewKindName(kind, name, entMeta) existing, err := tx.First(tableConfigEntries, indexID, q) if err != nil { @@ -423,7 +424,7 @@ var serviceGraphKinds = []string{ } // discoveryChainTargets will return a list of services listed as a target for the input's discovery chain -func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, service string, entMeta *structs.EnterpriseMeta) (uint64, []structs.ServiceName, error) { +func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, service string, entMeta *acl.EnterpriseMeta) (uint64, []structs.ServiceName, error) { source := structs.NewServiceName(service, entMeta) req := discoverychain.CompileRequest{ ServiceName: 
source.Name, @@ -438,7 +439,7 @@ func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, serv var resp []structs.ServiceName for _, t := range chain.Targets { - em := structs.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), t.Namespace) + em := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), t.Namespace) target := structs.NewServiceName(t.Service, &em) // TODO (freddy): Allow upstream DC and encode in response @@ -494,7 +495,7 @@ func (s *Store) discoveryChainSourcesTxn(tx ReadTxn, ws memdb.WatchSet, dc strin } for _, t := range chain.Targets { - em := structs.NewEnterpriseMetaWithPartition(sn.PartitionOrDefault(), t.Namespace) + em := acl.NewEnterpriseMetaWithPartition(sn.PartitionOrDefault(), t.Namespace) candidate := structs.NewServiceName(t.Service, &em) if !candidate.Matches(destination) { @@ -740,7 +741,7 @@ func testCompileDiscoveryChain( tx ReadTxn, chainName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (string, *structs.DiscoveryGraphNode, error) { _, speculativeEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, chainName, overrides, entMeta) if err != nil { @@ -770,7 +771,7 @@ func testCompileDiscoveryChain( func (s *Store) ServiceDiscoveryChain( ws memdb.WatchSet, serviceName string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest, ) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) { tx := s.db.ReadTxn() @@ -783,7 +784,7 @@ func (s *Store) serviceDiscoveryChainTxn( tx ReadTxn, ws memdb.WatchSet, serviceName string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest, ) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) { @@ -821,7 +822,7 @@ func (s *Store) serviceDiscoveryChainTxn( func (s *Store) ReadResolvedServiceConfigEntries( ws memdb.WatchSet, serviceName string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, upstreamIDs []structs.ServiceID, proxyMode structs.ProxyMode, ) (uint64, *configentry.ResolvedServiceConfigSet, error) { @@ -941,7 +942,7 @@ func (s *Store) ReadResolvedServiceConfigEntries( func (s *Store) ReadDiscoveryChainConfigEntries( ws memdb.WatchSet, serviceName string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *configentry.DiscoveryChainSet, error) { return s.readDiscoveryChainConfigEntries(ws, serviceName, nil, entMeta) } @@ -960,7 +961,7 @@ func (s *Store) readDiscoveryChainConfigEntries( ws memdb.WatchSet, serviceName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *configentry.DiscoveryChainSet, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -972,7 +973,7 @@ func readDiscoveryChainConfigEntriesTxn( ws memdb.WatchSet, serviceName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *configentry.DiscoveryChainSet, error) { res := configentry.NewDiscoveryChainSet() @@ -1179,7 +1180,7 @@ func getProxyConfigEntryTxn( ws memdb.WatchSet, name string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ProxyConfigEntry, error) { idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ProxyDefaults, name, overrides, entMeta) if err 
!= nil { @@ -1204,7 +1205,7 @@ func getServiceConfigEntryTxn( ws memdb.WatchSet, serviceName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceConfigEntry, error) { idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceDefaults, serviceName, overrides, entMeta) if err != nil { @@ -1229,7 +1230,7 @@ func getRouterConfigEntryTxn( ws memdb.WatchSet, serviceName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceRouterConfigEntry, error) { idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceRouter, serviceName, overrides, entMeta) if err != nil { @@ -1254,7 +1255,7 @@ func getSplitterConfigEntryTxn( ws memdb.WatchSet, serviceName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceSplitterConfigEntry, error) { idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceSplitter, serviceName, overrides, entMeta) if err != nil { @@ -1279,7 +1280,7 @@ func getResolverConfigEntryTxn( ws memdb.WatchSet, serviceName string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceResolverConfigEntry, error) { idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceResolver, serviceName, overrides, entMeta) if err != nil { @@ -1304,7 +1305,7 @@ func getServiceIntentionsConfigEntryTxn( ws memdb.WatchSet, name string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceIntentionsConfigEntry, error) { idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceIntentions, name, overrides, entMeta) if err != nil { @@ -1326,7 +1327,7 @@ func configEntryWithOverridesTxn( kind string, name string, overrides map[configentry.KindName]structs.ConfigEntry, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, ) (uint64, structs.ConfigEntry, error) { if len(overrides) > 0 { kn := configentry.NewKindName(kind, name, entMeta) @@ -1389,7 +1390,7 @@ func newConfigEntryQuery(c structs.ConfigEntry) configentry.KindName { // ConfigEntryKindQuery is used to lookup config entries by their kind. 
type ConfigEntryKindQuery struct { Kind string - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer diff --git a/agent/consul/state/config_entry_intention.go b/agent/consul/state/config_entry_intention.go index ad0c97694..27c4912e6 100644 --- a/agent/consul/state/config_entry_intention.go +++ b/agent/consul/state/config_entry_intention.go @@ -6,6 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -123,7 +124,7 @@ func (s *ServiceIntentionSourceIndex) FromArgs(args ...interface{}) ([]byte, err return []byte(arg.String() + "\x00"), nil } -func configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { +func configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { // unrolled part of configEntriesByKindTxn idx := maxIndexTxn(tx, tableConfigEntries) @@ -238,7 +239,7 @@ func configIntentionMatchOneTxn( } } -func readSourceIntentionsFromConfigEntriesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, error) { +func readSourceIntentionsFromConfigEntriesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.Intentions, error) { idx := maxIndexTxn(tx, tableConfigEntries) var ( @@ -262,7 +263,7 @@ func readSourceIntentionsFromConfigEntriesTxn(tx ReadTxn, ws memdb.WatchSet, ser return idx, results, nil } -func readSourceIntentionsFromConfigEntriesForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta, results structs.Intentions) (structs.Intentions, error) { +func readSourceIntentionsFromConfigEntriesForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, results structs.Intentions) (structs.Intentions, error) { sn := structs.NewServiceName(serviceName, entMeta) iter, err := tx.Get(tableConfigEntries, indexSource, sn) @@ -283,7 +284,7 @@ func readSourceIntentionsFromConfigEntriesForServiceTxn(tx ReadTxn, ws memdb.Wat return results, nil } -func readDestinationIntentionsFromConfigEntriesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, error) { +func readDestinationIntentionsFromConfigEntriesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.Intentions, error) { idx := maxIndexTxn(tx, tableConfigEntries) var results structs.Intentions diff --git a/agent/consul/state/config_entry_intention_oss.go b/agent/consul/state/config_entry_intention_oss.go index d6fafe621..c954c147c 100644 --- a/agent/consul/state/config_entry_intention_oss.go +++ b/agent/consul/state/config_entry_intention_oss.go @@ -4,10 +4,11 @@ package state import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) -func getIntentionPrecedenceMatchServiceNames(serviceName string, entMeta *structs.EnterpriseMeta) []structs.ServiceName { +func getIntentionPrecedenceMatchServiceNames(serviceName string, entMeta *acl.EnterpriseMeta) []structs.ServiceName { if serviceName == structs.WildcardSpecifier { return []structs.ServiceName{ structs.NewServiceName(structs.WildcardSpecifier, entMeta), diff --git a/agent/consul/state/config_entry_oss.go b/agent/consul/state/config_entry_oss.go index 9c3d6c7ea..66a47eb86 100644 
--- a/agent/consul/state/config_entry_oss.go +++ b/agent/consul/state/config_entry_oss.go @@ -9,6 +9,7 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" ) @@ -17,9 +18,9 @@ func indexFromConfigEntryKindName(arg interface{}) ([]byte, error) { var b indexBuilder switch n := arg.(type) { - case *structs.EnterpriseMeta: + case *acl.EnterpriseMeta: return nil, nil - case structs.EnterpriseMeta: + case acl.EnterpriseMeta: return b.Bytes(), nil case ConfigEntryKindQuery: b.String(strings.ToLower(n.Kind)) @@ -37,7 +38,7 @@ func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error { return nil } -func getAllConfigEntriesWithTxn(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func getAllConfigEntriesWithTxn(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableConfigEntries, indexID) } @@ -45,11 +46,11 @@ func getAllConfigEntriesByKindWithTxn(tx ReadTxn, kind string) (memdb.ResultIter return getConfigEntryKindsWithTxn(tx, kind, nil) } -func getConfigEntryKindsWithTxn(tx ReadTxn, kind string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func getConfigEntryKindsWithTxn(tx ReadTxn, kind string, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(tableConfigEntries, indexID+"_prefix", ConfigEntryKindQuery{Kind: kind}) } -func configIntentionsConvertToList(iter memdb.ResultIterator, _ *structs.EnterpriseMeta) structs.Intentions { +func configIntentionsConvertToList(iter memdb.ResultIterator, _ *acl.EnterpriseMeta) structs.Intentions { var results structs.Intentions for v := iter.Next(); v != nil; v = iter.Next() { entry := v.(*structs.ServiceIntentionsConfigEntry) diff --git a/agent/consul/state/config_entry_oss_test.go b/agent/consul/state/config_entry_oss_test.go index 4c6595192..13a56e18c 100644 --- a/agent/consul/state/config_entry_oss_test.go +++ b/agent/consul/state/config_entry_oss_test.go @@ -4,6 +4,7 @@ package state import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" ) @@ -24,7 +25,7 @@ func testIndexerTableConfigEntries() map[string]indexerTestCase { }, prefix: []indexValue{ { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { diff --git a/agent/consul/state/coordinate.go b/agent/consul/state/coordinate.go index f294adb7b..0cbccf25c 100644 --- a/agent/consul/state/coordinate.go +++ b/agent/consul/state/coordinate.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" ) @@ -66,7 +67,7 @@ type CoordinateQuery struct { } func (c CoordinateQuery) PartitionOrDefault() string { - return structs.PartitionOrDefault(c.Partition) + return acl.PartitionOrDefault(c.Partition) } // coordinatesTableSchema returns a new table schema used for storing @@ -128,7 +129,7 @@ func (s *Restore) Coordinates(idx uint64, updates structs.Coordinates) error { // Coordinate returns a map of coordinates for the given node, indexed by // network segment. 
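
The coordinate hunks above also show the second half of this refactor: the package-level string helpers move along with the type. As a rough illustration only (not part of the patch; the function name is hypothetical and OSS semantics are assumed unchanged), a call site now reads:

    package state // illustration only; not part of this patch

    import "github.com/hashicorp/consul/acl"

    // partitionLabel resolves the partition label for a coordinate query.
    // Values that previously went through structs.PartitionOrDefault now go
    // through acl.PartitionOrDefault; an empty input still resolves to the
    // default partition (assumption: OSS behavior is unchanged by the move).
    func partitionLabel(q CoordinateQuery) string {
    	return acl.PartitionOrDefault(q.Partition)
    }
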
-func (s *Store) Coordinate(ws memdb.WatchSet, node string, entMeta *structs.EnterpriseMeta) (uint64, lib.CoordinateSet, error) { +func (s *Store) Coordinate(ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta) (uint64, lib.CoordinateSet, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -157,7 +158,7 @@ func (s *Store) Coordinate(ws memdb.WatchSet, node string, entMeta *structs.Ente } // Coordinates queries for all nodes with coordinates. -func (s *Store) Coordinates(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Coordinates, error) { +func (s *Store) Coordinates(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Coordinates, error) { tx := s.db.Txn(false) defer tx.Abort() diff --git a/agent/consul/state/coordinate_oss.go b/agent/consul/state/coordinate_oss.go index d6b6042d7..8c86b768a 100644 --- a/agent/consul/state/coordinate_oss.go +++ b/agent/consul/state/coordinate_oss.go @@ -6,14 +6,15 @@ package state import ( "fmt" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) -func coordinatesMaxIndex(tx ReadTxn, entMeta *structs.EnterpriseMeta) uint64 { +func coordinatesMaxIndex(tx ReadTxn, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableCoordinates) } -func updateCoordinatesIndexes(tx WriteTxn, idx uint64, entMeta *structs.EnterpriseMeta) error { +func updateCoordinatesIndexes(tx WriteTxn, idx uint64, entMeta *acl.EnterpriseMeta) error { // Update the index. if err := indexUpdateMaxTxn(tx, idx, tableCoordinates); err != nil { return fmt.Errorf("failed updating index: %s", err) diff --git a/agent/consul/state/coordinate_oss_test.go b/agent/consul/state/coordinate_oss_test.go index 0bb08c1df..d5d15547b 100644 --- a/agent/consul/state/coordinate_oss_test.go +++ b/agent/consul/state/coordinate_oss_test.go @@ -3,7 +3,10 @@ package state -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) func testIndexerTableCoordinates() map[string]indexerTestCase { return map[string]indexerTestCase{ @@ -24,11 +27,11 @@ func testIndexerTableCoordinates() map[string]indexerTestCase { }, prefix: []indexValue{ { - source: (*structs.EnterpriseMeta)(nil), + source: (*acl.EnterpriseMeta)(nil), expected: nil, }, { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { diff --git a/agent/consul/state/delay_oss.go b/agent/consul/state/delay_oss.go index 41b9a0405..8167d6bfe 100644 --- a/agent/consul/state/delay_oss.go +++ b/agent/consul/state/delay_oss.go @@ -4,9 +4,10 @@ package state import ( - "github.com/hashicorp/consul/agent/structs" "sync" "time" + + "github.com/hashicorp/consul/acl" ) // Delay is used to mark certain locks as unacquirable. When a lock is @@ -36,7 +37,7 @@ func NewDelay() *Delay { // GetExpiration returns the expiration time of a key lock delay. This must be // checked on the leader node, and not in KVSLock due to the variability of // clocks. -func (d *Delay) GetExpiration(key string, entMeta *structs.EnterpriseMeta) time.Time { +func (d *Delay) GetExpiration(key string, entMeta *acl.EnterpriseMeta) time.Time { d.lock.RLock() expires := d.delay[key] d.lock.RUnlock() @@ -45,7 +46,7 @@ func (d *Delay) GetExpiration(key string, entMeta *structs.EnterpriseMeta) time. // SetExpiration sets the expiration time for the lock delay to the given // delay from the given now time. 
-func (d *Delay) SetExpiration(key string, now time.Time, delay time.Duration, entMeta *structs.EnterpriseMeta) { +func (d *Delay) SetExpiration(key string, now time.Time, delay time.Duration, entMeta *acl.EnterpriseMeta) { d.lock.Lock() defer d.lock.Unlock() diff --git a/agent/consul/state/graveyard.go b/agent/consul/state/graveyard.go index 89601ea21..705846881 100644 --- a/agent/consul/state/graveyard.go +++ b/agent/consul/state/graveyard.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/go-memdb" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) // Tombstone is the internal type used to track tombstones. @@ -13,7 +13,7 @@ type Tombstone struct { Key string Index uint64 - structs.EnterpriseMeta + acl.EnterpriseMeta } func (t Tombstone) IDValue() string { @@ -33,7 +33,7 @@ func NewGraveyard(gc *TombstoneGC) *Graveyard { } // InsertTxn adds a new tombstone. -func (g *Graveyard) InsertTxn(tx WriteTxn, key string, idx uint64, entMeta *structs.EnterpriseMeta) error { +func (g *Graveyard) InsertTxn(tx WriteTxn, key string, idx uint64, entMeta *acl.EnterpriseMeta) error { stone := &Tombstone{ Key: key, Index: idx, diff --git a/agent/consul/state/graveyard_oss.go b/agent/consul/state/graveyard_oss.go index 71b6bd90b..bccbe1ec7 100644 --- a/agent/consul/state/graveyard_oss.go +++ b/agent/consul/state/graveyard_oss.go @@ -6,6 +6,7 @@ package state import ( "fmt" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -28,7 +29,7 @@ func (g *Graveyard) insertTombstoneWithTxn(tx WriteTxn, _ string, stone *Tombsto // GetMaxIndexTxn returns the highest index tombstone whose key matches the // given context, using a prefix match. -func (g *Graveyard) GetMaxIndexTxn(tx ReadTxn, prefix string, _ *structs.EnterpriseMeta) (uint64, error) { +func (g *Graveyard) GetMaxIndexTxn(tx ReadTxn, prefix string, _ *acl.EnterpriseMeta) (uint64, error) { var lindex uint64 q := Query{Value: prefix, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()} stones, err := tx.Get(tableTombstones, indexID+"_prefix", q) diff --git a/agent/consul/state/indexer.go b/agent/consul/state/indexer.go index 7fa30a7d5..70b769c58 100644 --- a/agent/consul/state/indexer.go +++ b/agent/consul/state/indexer.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -196,7 +197,7 @@ func (b *indexBuilder) Bool(v bool) { type TimeQuery struct { Value time.Time - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index f2f64500f..2417f5741 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -150,7 +150,7 @@ func areIntentionsInConfigEntries(tx ReadTxn, ws memdb.WatchSet) (bool, error) { // LegacyIntentions is like Intentions() but only returns legacy intentions. // This is exposed for migration purposes. -func (s *Store) LegacyIntentions(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, error) { +func (s *Store) LegacyIntentions(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Intentions, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -159,7 +159,7 @@ func (s *Store) LegacyIntentions(ws memdb.WatchSet, entMeta *structs.EnterpriseM } // Intentions returns the list of all intentions. The boolean response value is true if it came from config entries. 
-func (s *Store) Intentions(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { +func (s *Store) Intentions(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -173,7 +173,7 @@ func (s *Store) Intentions(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) ( return configIntentionsListTxn(tx, ws, entMeta) } -func legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { +func legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { // Get the index idx := maxIndexTxn(tx, tableConnectIntentions) if idx < 1 { diff --git a/agent/consul/state/intention_oss.go b/agent/consul/state/intention_oss.go index e6872ab5b..6c99e6749 100644 --- a/agent/consul/state/intention_oss.go +++ b/agent/consul/state/intention_oss.go @@ -6,10 +6,10 @@ package state import ( memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) -func intentionListTxn(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { +func intentionListTxn(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { // Get all intentions return tx.Get(tableConnectIntentions, "id") } diff --git a/agent/consul/state/kvs.go b/agent/consul/state/kvs.go index 34639ace0..82aa842e8 100644 --- a/agent/consul/state/kvs.go +++ b/agent/consul/state/kvs.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -176,7 +177,7 @@ func kvsSetTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry, updateSession b } // KVSGet is used to retrieve a key/value pair from the state store. -func (s *Store) KVSGet(ws memdb.WatchSet, key string, entMeta *structs.EnterpriseMeta) (uint64, *structs.DirEntry, error) { +func (s *Store) KVSGet(ws memdb.WatchSet, key string, entMeta *acl.EnterpriseMeta) (uint64, *structs.DirEntry, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -191,7 +192,7 @@ func (s *Store) KVSGet(ws memdb.WatchSet, key string, entMeta *structs.Enterpris // kvsGetTxn is the inner method that gets a KVS entry inside an existing // transaction. func kvsGetTxn(tx ReadTxn, - ws memdb.WatchSet, key string, entMeta structs.EnterpriseMeta) (uint64, *structs.DirEntry, error) { + ws memdb.WatchSet, key string, entMeta acl.EnterpriseMeta) (uint64, *structs.DirEntry, error) { // Get the table index. idx := kvsMaxIndex(tx, entMeta) @@ -212,7 +213,7 @@ func kvsGetTxn(tx ReadTxn, // is the max index of the returned kvs entries or applicable tombstones, or // else it's the full table indexes for kvs and tombstones. func (s *Store) KVSList(ws memdb.WatchSet, - prefix string, entMeta *structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { + prefix string, entMeta *acl.EnterpriseMeta) (uint64, structs.DirEntries, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -228,7 +229,7 @@ func (s *Store) KVSList(ws memdb.WatchSet, // kvsListTxn is the inner method that gets a list of KVS entries matching a // prefix. func (s *Store) kvsListTxn(tx ReadTxn, - ws memdb.WatchSet, prefix string, entMeta structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { + ws memdb.WatchSet, prefix string, entMeta acl.EnterpriseMeta) (uint64, structs.DirEntries, error) { // Get the table indexes. 
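
From the caller's side, only the type of the tenancy argument changes in the hunks above; the payload types stay in agent/structs. A hedged sketch of a read against the updated signature (hypothetical wrapper; it assumes structs.DefaultEnterpriseMetaInDefaultPartition() remains assignable to *acl.EnterpriseMeta, as the kvs hunks nearby demonstrate):

    package state // illustration only; not part of this patch

    import (
    	memdb "github.com/hashicorp/go-memdb"

    	"github.com/hashicorp/consul/agent/structs"
    )

    // defaultTenancyIntentions lists intentions for the default partition and
    // namespace using the post-refactor signature of Store.Intentions.
    func defaultTenancyIntentions(s *Store) (structs.Intentions, bool, error) {
    	entMeta := structs.DefaultEnterpriseMetaInDefaultPartition()
    	ws := memdb.NewWatchSet()
    	_, ixns, fromConfigEntries, err := s.Intentions(ws, entMeta)
    	return ixns, fromConfigEntries, err
    }
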
idx := kvsMaxIndex(tx, entMeta) @@ -262,7 +263,7 @@ func (s *Store) kvsListTxn(tx ReadTxn, // KVSDelete is used to perform a shallow delete on a single key in the // the state store. -func (s *Store) KVSDelete(idx uint64, key string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) KVSDelete(idx uint64, key string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -276,7 +277,7 @@ func (s *Store) KVSDelete(idx uint64, key string, entMeta *structs.EnterpriseMet // kvsDeleteTxn is the inner method used to perform the actual deletion // of a key/value pair within an existing transaction. -func (s *Store) kvsDeleteTxn(tx WriteTxn, idx uint64, key string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) kvsDeleteTxn(tx WriteTxn, idx uint64, key string, entMeta *acl.EnterpriseMeta) error { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -303,7 +304,7 @@ func (s *Store) kvsDeleteTxn(tx WriteTxn, idx uint64, key string, entMeta *struc // raft index. If the CAS index specified is not equal to the last // observed index for the given key, then the call is a noop, otherwise // a normal KV delete is invoked. -func (s *Store) KVSDeleteCAS(idx, cidx uint64, key string, entMeta *structs.EnterpriseMeta) (bool, error) { +func (s *Store) KVSDeleteCAS(idx, cidx uint64, key string, entMeta *acl.EnterpriseMeta) (bool, error) { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -318,7 +319,7 @@ func (s *Store) KVSDeleteCAS(idx, cidx uint64, key string, entMeta *structs.Ente // kvsDeleteCASTxn is the inner method that does a CAS delete within an existing // transaction. -func (s *Store) kvsDeleteCASTxn(tx WriteTxn, idx, cidx uint64, key string, entMeta *structs.EnterpriseMeta) (bool, error) { +func (s *Store) kvsDeleteCASTxn(tx WriteTxn, idx, cidx uint64, key string, entMeta *acl.EnterpriseMeta) (bool, error) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } @@ -390,7 +391,7 @@ func kvsSetCASTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error // KVSDeleteTree is used to do a recursive delete on a key prefix // in the state store. If any keys are modified, the last index is // set, otherwise this is a no-op. -func (s *Store) KVSDeleteTree(idx uint64, prefix string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) KVSDeleteTree(idx uint64, prefix string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -403,7 +404,7 @@ func (s *Store) KVSDeleteTree(idx uint64, prefix string, entMeta *structs.Enterp // KVSLockDelay returns the expiration time for any lock delay associated with // the given key. -func (s *Store) KVSLockDelay(key string, entMeta *structs.EnterpriseMeta) time.Time { +func (s *Store) KVSLockDelay(key string, entMeta *acl.EnterpriseMeta) time.Time { return s.lockDelay.GetExpiration(key, entMeta) } @@ -527,7 +528,7 @@ func kvsUnlockTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error // kvsCheckSessionTxn checks to see if the given session matches the current // entry for a key. func kvsCheckSessionTxn(tx WriteTxn, - key string, session string, entMeta *structs.EnterpriseMeta) (*structs.DirEntry, error) { + key string, session string, entMeta *acl.EnterpriseMeta) (*structs.DirEntry, error) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -552,7 +553,7 @@ func kvsCheckSessionTxn(tx WriteTxn, // kvsCheckIndexTxn checks to see if the given modify index matches the current // entry for a key. 
func kvsCheckIndexTxn(tx WriteTxn, - key string, cidx uint64, entMeta structs.EnterpriseMeta) (*structs.DirEntry, error) { + key string, cidx uint64, entMeta acl.EnterpriseMeta) (*structs.DirEntry, error) { entry, err := tx.First(tableKVs, indexID, Query{Value: key, EnterpriseMeta: entMeta}) if err != nil { diff --git a/agent/consul/state/kvs_oss.go b/agent/consul/state/kvs_oss.go index 598ffc39d..3ded43255 100644 --- a/agent/consul/state/kvs_oss.go +++ b/agent/consul/state/kvs_oss.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -25,7 +26,7 @@ func prefixIndexForIDValue(arg interface{}) ([]byte, error) { // DeletePrefix always uses a string, pass it along unmodified case string: return []byte(v), nil - case structs.EnterpriseMeta: + case acl.EnterpriseMeta: return nil, nil case singleValueID: var b indexBuilder @@ -56,7 +57,7 @@ func insertKVTxn(tx WriteTxn, entry *structs.DirEntry, updateMax bool, _ bool) e return nil } -func kvsListEntriesTxn(tx ReadTxn, ws memdb.WatchSet, prefix string, entMeta structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { +func kvsListEntriesTxn(tx ReadTxn, ws memdb.WatchSet, prefix string, entMeta acl.EnterpriseMeta) (uint64, structs.DirEntries, error) { var ents structs.DirEntries var lindex uint64 @@ -79,7 +80,7 @@ func kvsListEntriesTxn(tx ReadTxn, ws memdb.WatchSet, prefix string, entMeta str // kvsDeleteTreeTxn is the inner method that does a recursive delete inside an // existing transaction. -func (s *Store) kvsDeleteTreeTxn(tx WriteTxn, idx uint64, prefix string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) kvsDeleteTreeTxn(tx WriteTxn, idx uint64, prefix string, entMeta *acl.EnterpriseMeta) error { // For prefix deletes, only insert one tombstone and delete the entire subtree deleted, err := tx.DeletePrefix(tableKVs, indexID+"_prefix", prefix) if err != nil { @@ -100,7 +101,7 @@ func (s *Store) kvsDeleteTreeTxn(tx WriteTxn, idx uint64, prefix string, entMeta return nil } -func kvsMaxIndex(tx ReadTxn, entMeta structs.EnterpriseMeta) uint64 { +func kvsMaxIndex(tx ReadTxn, entMeta acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, "kvs", "tombstones") } diff --git a/agent/consul/state/kvs_oss_test.go b/agent/consul/state/kvs_oss_test.go index 4ec7ac7a3..7cee36893 100644 --- a/agent/consul/state/kvs_oss_test.go +++ b/agent/consul/state/kvs_oss_test.go @@ -3,7 +3,10 @@ package state -import "github.com/hashicorp/consul/agent/structs" +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) func testIndexerTableKVs() map[string]indexerTestCase { return map[string]indexerTestCase{ @@ -22,7 +25,7 @@ func testIndexerTableKVs() map[string]indexerTestCase { expected: []byte("indexString"), }, { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { @@ -51,7 +54,7 @@ func testIndexerTableTombstones() map[string]indexerTestCase { expected: []byte("indexString"), }, { - source: structs.EnterpriseMeta{}, + source: acl.EnterpriseMeta{}, expected: nil, }, { diff --git a/agent/consul/state/operations_oss.go b/agent/consul/state/operations_oss.go index 7be71732d..c1a3300ad 100644 --- a/agent/consul/state/operations_oss.go +++ b/agent/consul/state/operations_oss.go @@ -6,11 +6,11 @@ package state import ( "github.com/hashicorp/go-memdb" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) func getCompoundWithTxn(tx ReadTxn, table, index string, - _ 
*structs.EnterpriseMeta, idxVals ...interface{}) (memdb.ResultIterator, error) { + _ *acl.EnterpriseMeta, idxVals ...interface{}) (memdb.ResultIterator, error) { return tx.Get(table, index, idxVals...) } diff --git a/agent/consul/state/query.go b/agent/consul/state/query.go index 7e0838448..b88fbe4fc 100644 --- a/agent/consul/state/query.go +++ b/agent/consul/state/query.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -12,7 +13,7 @@ import ( // enterprise identifier. type Query struct { Value string - structs.EnterpriseMeta + acl.EnterpriseMeta } func (q Query) IDValue() string { @@ -33,7 +34,7 @@ func (q Query) PartitionOrDefault() string { type MultiQuery struct { Value []string - structs.EnterpriseMeta + acl.EnterpriseMeta } func (q MultiQuery) IDValue() []string { @@ -118,7 +119,7 @@ func parseUUIDString(uuid string) ([]byte, error) { // enterprise identifier. type BoolQuery struct { Value bool - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer @@ -138,7 +139,7 @@ func (q BoolQuery) PartitionOrDefault() string { type KeyValueQuery struct { Key string Value string - structs.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer @@ -168,8 +169,8 @@ func indexFromKeyValueQuery(arg interface{}) ([]byte, error) { type AuthMethodQuery struct { Value string - AuthMethodEntMeta structs.EnterpriseMeta - structs.EnterpriseMeta + AuthMethodEntMeta acl.EnterpriseMeta + acl.EnterpriseMeta } // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer diff --git a/agent/consul/state/query_oss.go b/agent/consul/state/query_oss.go index 04fed3a6b..0f11dce5f 100644 --- a/agent/consul/state/query_oss.go +++ b/agent/consul/state/query_oss.go @@ -7,15 +7,15 @@ import ( "fmt" "strings" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) func prefixIndexFromQuery(arg interface{}) ([]byte, error) { var b indexBuilder switch v := arg.(type) { - case *structs.EnterpriseMeta: + case *acl.EnterpriseMeta: return nil, nil - case structs.EnterpriseMeta: + case acl.EnterpriseMeta: return nil, nil case Query: if v.Value == "" { diff --git a/agent/consul/state/schema_oss.go b/agent/consul/state/schema_oss.go index 758d22423..ea8e8a43e 100644 --- a/agent/consul/state/schema_oss.go +++ b/agent/consul/state/schema_oss.go @@ -3,12 +3,12 @@ package state -import "github.com/hashicorp/consul/agent/structs" +import "github.com/hashicorp/consul/acl" func partitionedIndexEntryName(entry string, _ string) string { return entry } -func partitionedAndNamespacedIndexEntryName(entry string, _ *structs.EnterpriseMeta) string { +func partitionedAndNamespacedIndexEntryName(entry string, _ *acl.EnterpriseMeta) string { return entry } diff --git a/agent/consul/state/session.go b/agent/consul/state/session.go index 876e67f50..cf2e78b6e 100644 --- a/agent/consul/state/session.go +++ b/agent/consul/state/session.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -291,7 +292,7 @@ func sessionCreateTxn(tx WriteTxn, idx uint64, sess *structs.Session) error { // SessionGet is used to retrieve an active session from the state store. 
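
The memdb query structs above keep their embedded field; only its package changes, so composite literals and the NamespaceOrDefault/PartitionOrDefault accessors compile unchanged. A hedged sketch (hypothetical helper; the OSS zero value is assumed to denote the default tenancy):

    package state // illustration only; not part of this patch

    import "github.com/hashicorp/consul/acl"

    // kvQueryFor builds the lookup wrapper used by the KV table indexers.
    // The embedded field keeps the name EnterpriseMeta, so literals like this
    // one are untouched by the package move.
    func kvQueryFor(key string, entMeta acl.EnterpriseMeta) Query {
    	q := Query{
    		Value:          key,
    		EnterpriseMeta: entMeta,
    	}
    	// Accessors behave as before; the zero value resolves to the default
    	// namespace/partition in OSS builds (assumption).
    	_ = q.NamespaceOrDefault()
    	return q
    }
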
func (s *Store) SessionGet(ws memdb.WatchSet, - sessionID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.Session, error) { + sessionID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.Session, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -318,7 +319,7 @@ func (s *Store) SessionGet(ws memdb.WatchSet, // NodeSessions returns a set of active sessions associated // with the given node ID. The returned index is the highest // index seen from the result set. -func (s *Store) NodeSessions(ws memdb.WatchSet, nodeID string, entMeta *structs.EnterpriseMeta) (uint64, structs.Sessions, error) { +func (s *Store) NodeSessions(ws memdb.WatchSet, nodeID string, entMeta *acl.EnterpriseMeta) (uint64, structs.Sessions, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -336,7 +337,7 @@ func (s *Store) NodeSessions(ws memdb.WatchSet, nodeID string, entMeta *structs. // SessionDestroy is used to remove an active session. This will // implicitly invalidate the session and invoke the specified // session destroy behavior. -func (s *Store) SessionDestroy(idx uint64, sessionID string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) SessionDestroy(idx uint64, sessionID string, entMeta *acl.EnterpriseMeta) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -350,7 +351,7 @@ func (s *Store) SessionDestroy(idx uint64, sessionID string, entMeta *structs.En // deleteSessionTxn is the inner method, which is used to do the actual // session deletion and handle session invalidation, etc. -func (s *Store) deleteSessionTxn(tx WriteTxn, idx uint64, sessionID string, entMeta *structs.EnterpriseMeta) error { +func (s *Store) deleteSessionTxn(tx WriteTxn, idx uint64, sessionID string, entMeta *acl.EnterpriseMeta) error { // Look up the session. if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() diff --git a/agent/consul/state/session_oss.go b/agent/consul/state/session_oss.go index d313fb5f9..96622387e 100644 --- a/agent/consul/state/session_oss.go +++ b/agent/consul/state/session_oss.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" ) @@ -121,7 +122,7 @@ func allNodeSessionsTxn(tx ReadTxn, node string, _ string) (structs.Sessions, er } func nodeSessionsTxn(tx ReadTxn, - ws memdb.WatchSet, node string, entMeta *structs.EnterpriseMeta) (structs.Sessions, error) { + ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta) (structs.Sessions, error) { sessions, err := tx.Get(tableSessions, indexNode, Query{Value: node}) if err != nil { @@ -136,7 +137,7 @@ func nodeSessionsTxn(tx ReadTxn, return result, nil } -func sessionMaxIndex(tx ReadTxn, entMeta *structs.EnterpriseMeta) uint64 { +func sessionMaxIndex(tx ReadTxn, entMeta *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, "sessions") } @@ -161,7 +162,7 @@ func validateSessionChecksTxn(tx ReadTxn, session *structs.Session) error { } // SessionList returns a slice containing all of the active sessions. 
-func (s *Store) SessionList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Sessions, error) { +func (s *Store) SessionList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Sessions, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -184,7 +185,7 @@ func (s *Store) SessionList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) return idx, result, nil } -func maxIndexTxnSessions(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 { +func maxIndexTxnSessions(tx *memdb.Txn, _ *acl.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableSessions) } diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index 2689ac142..39a4371ef 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -8,6 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" ) @@ -143,7 +144,7 @@ type sessionCheck struct { Session string CheckID structs.CheckID - structs.EnterpriseMeta + acl.EnterpriseMeta } // NewStateStore creates a new in-memory state storage layer. diff --git a/agent/consul/state/state_store_test.go b/agent/consul/state/state_store_test.go index b617e0e46..0047d2531 100644 --- a/agent/consul/state/state_store_test.go +++ b/agent/consul/state/state_store_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/types" ) @@ -229,7 +230,7 @@ func testRegisterConnectNativeService(t *testing.T, s *Store, idx uint64, nodeID require.NoError(t, s.EnsureService(idx, nodeID, svc)) } -func testSetKey(t *testing.T, s *Store, idx uint64, key, value string, entMeta *structs.EnterpriseMeta) { +func testSetKey(t *testing.T, s *Store, idx uint64, key, value string, entMeta *acl.EnterpriseMeta) { entry := &structs.DirEntry{ Key: key, Value: []byte(value), diff --git a/agent/consul/subscribe_backend.go b/agent/consul/subscribe_backend.go index 8dc2d3cb2..94b8671f4 100644 --- a/agent/consul/subscribe_backend.go +++ b/agent/consul/subscribe_backend.go @@ -18,7 +18,7 @@ type subscribeBackend struct { // the endpoints. 
func (s subscribeBackend) ResolveTokenAndDefaultMeta( token string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext, ) (acl.Authorizer, error) { return s.srv.ResolveTokenAndDefaultMeta(token, entMeta, authzContext) diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index 4f82c98d6..868ea3b81 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -7,9 +7,10 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/stretchr/testify/require" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" diff --git a/agent/consul/usagemetrics/usagemetrics_oss_test.go b/agent/consul/usagemetrics/usagemetrics_oss_test.go index 5ab34256f..9a25cb0ff 100644 --- a/agent/consul/usagemetrics/usagemetrics_oss_test.go +++ b/agent/consul/usagemetrics/usagemetrics_oss_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/serf/serf" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil" @@ -1028,8 +1029,8 @@ func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { require.NoError(t, s.KVSSet(5, &structs.DirEntry{Key: "b", Value: []byte{1}})) require.NoError(t, s.KVSSet(6, &structs.DirEntry{Key: "c", Value: []byte{1}})) require.NoError(t, s.KVSSet(7, &structs.DirEntry{Key: "d", Value: []byte{1}})) - require.NoError(t, s.KVSDelete(8, "d", &structs.EnterpriseMeta{})) - require.NoError(t, s.KVSDelete(9, "c", &structs.EnterpriseMeta{})) + require.NoError(t, s.KVSDelete(8, "d", &acl.EnterpriseMeta{})) + require.NoError(t, s.KVSDelete(9, "c", &acl.EnterpriseMeta{})) require.NoError(t, s.KVSSet(10, &structs.DirEntry{Key: "e", Value: []byte{1}})) require.NoError(t, s.KVSSet(11, &structs.DirEntry{Key: "f", Value: []byte{1}})) }, diff --git a/agent/delegate_mock_test.go b/agent/delegate_mock_test.go index 36b32f689..5498e5f04 100644 --- a/agent/delegate_mock_test.go +++ b/agent/delegate_mock_test.go @@ -38,16 +38,16 @@ func (m *delegateMock) AgentLocalMember() serf.Member { return m.Called().Get(0).(serf.Member) } -func (m *delegateMock) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (n int, err error) { +func (m *delegateMock) JoinLAN(addrs []string, entMeta *acl.EnterpriseMeta) (n int, err error) { ret := m.Called(addrs, entMeta) return ret.Int(0), ret.Error(1) } -func (m *delegateMock) RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error { +func (m *delegateMock) RemoveFailedNode(node string, prune bool, entMeta *acl.EnterpriseMeta) error { return m.Called(node, prune, entMeta).Error(0) } -func (m *delegateMock) ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) { +func (m *delegateMock) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) { ret := m.Called(token, entMeta, authzContext) return ret.Get(0).(consul.ACLResolveResult), ret.Error(1) } diff --git a/agent/discovery_chain_endpoint.go b/agent/discovery_chain_endpoint.go index 666841ef3..e9bb63185 100644 --- a/agent/discovery_chain_endpoint.go +++ b/agent/discovery_chain_endpoint.go @@ -7,6 +7,7 @@ import ( 
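
The test hunks above follow the same rule as production code: in OSS builds the zero acl.EnterpriseMeta stands in for the default partition and namespace, exactly as the zero structs.EnterpriseMeta did before. A hedged sketch of the pattern (hypothetical test helper, mirroring the KVSDelete calls in the usage-metrics test):

    package usagemetrics // illustration only; not part of this patch

    import (
    	"github.com/hashicorp/consul/acl"
    	"github.com/hashicorp/consul/agent/consul/state"
    )

    // deleteDefaultTenancyKeys deletes keys in the default tenancy, passing an
    // empty acl.EnterpriseMeta just as the updated test above does.
    func deleteDefaultTenancyKeys(s *state.Store, startIdx uint64, keys ...string) error {
    	for i, key := range keys {
    		if err := s.KVSDelete(startIdx+uint64(i), key, &acl.EnterpriseMeta{}); err != nil {
    			return err
    		}
    	}
    	return nil
    }
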
"github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/acl" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib/decode" @@ -28,7 +29,7 @@ func (s *HTTPHandlers) DiscoveryChainRead(resp http.ResponseWriter, req *http.Re } args.EvaluateInDatacenter = req.URL.Query().Get("compile-dc") - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } diff --git a/agent/dns.go b/agent/dns.go index 1deda3ebd..a973056a1 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/miekg/dns" + "github.com/hashicorp/consul/acl" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/config" agentdns "github.com/hashicorp/consul/agent/dns" @@ -103,7 +104,7 @@ type serviceLookup struct { MaxRecursionLevel int Connect bool Ingress bool - structs.EnterpriseMeta + acl.EnterpriseMeta } // DNSServer is used to wrap an Agent and expose various @@ -123,7 +124,7 @@ type DNSServer struct { // the recursor handler is only enabled if recursors are configured. This flag is used during config hot-reloading recursorEnabled uint32 - defaultEnterpriseMeta structs.EnterpriseMeta + defaultEnterpriseMeta acl.EnterpriseMeta } func NewDNSServer(a *Agent) (*DNSServer, error) { @@ -344,7 +345,7 @@ func serviceNodeCanonicalDNSName(sn *structs.ServiceNode, domain string) string return serviceCanonicalDNSName(sn.ServiceName, "service", sn.Datacenter, domain, &sn.EnterpriseMeta) } -func serviceIngressDNSName(service, datacenter, domain string, entMeta *structs.EnterpriseMeta) string { +func serviceIngressDNSName(service, datacenter, domain string, entMeta *acl.EnterpriseMeta) string { return serviceCanonicalDNSName(service, "ingress", datacenter, domain, entMeta) } diff --git a/agent/dns_oss.go b/agent/dns_oss.go index 1328195c8..9476e810f 100644 --- a/agent/dns_oss.go +++ b/agent/dns_oss.go @@ -6,8 +6,8 @@ package agent import ( "fmt" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/structs" ) type enterpriseDNSConfig struct{} @@ -16,7 +16,7 @@ func getEnterpriseDNSConfig(conf *config.RuntimeConfig) enterpriseDNSConfig { return enterpriseDNSConfig{} } -func (d *DNSServer) parseDatacenterAndEnterpriseMeta(labels []string, _ *dnsConfig, datacenter *string, _ *structs.EnterpriseMeta) bool { +func (d *DNSServer) parseDatacenterAndEnterpriseMeta(labels []string, _ *dnsConfig, datacenter *string, _ *acl.EnterpriseMeta) bool { switch len(labels) { case 1: *datacenter = labels[0] @@ -27,6 +27,6 @@ func (d *DNSServer) parseDatacenterAndEnterpriseMeta(labels []string, _ *dnsConf return false } -func serviceCanonicalDNSName(name, kind, datacenter, domain string, _ *structs.EnterpriseMeta) string { +func serviceCanonicalDNSName(name, kind, datacenter, domain string, _ *acl.EnterpriseMeta) string { return fmt.Sprintf("%s.%s.%s.%s", name, kind, datacenter, domain) } diff --git a/agent/grpc/private/services/subscribe/subscribe.go b/agent/grpc/private/services/subscribe/subscribe.go index 18372b200..c1b2f7e2d 100644 --- a/agent/grpc/private/services/subscribe/subscribe.go +++ b/agent/grpc/private/services/subscribe/subscribe.go @@ -36,7 +36,7 @@ type Logger interface { var _ pbsubscribe.StateChangeSubscriptionServer = (*Server)(nil) type Backend interface { - ResolveTokenAndDefaultMeta(token string, entMeta 
*structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error) + ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) } @@ -51,7 +51,7 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub logger.Trace("new subscription") defer logger.Trace("subscription closed") - entMeta := structs.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace) + entMeta := acl.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace) authz, err := h.Backend.ResolveTokenAndDefaultMeta(req.Token, &entMeta, nil) if err != nil { return err @@ -91,7 +91,7 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub } } -func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta structs.EnterpriseMeta) *stream.SubscribeRequest { +func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.EnterpriseMeta) *stream.SubscribeRequest { return &stream.SubscribeRequest{ Topic: req.Topic, Subject: state.EventSubjectService{ diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index a5a47a077..d9d8d162d 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -313,13 +313,13 @@ func getEvent(t *testing.T, ch chan eventOrError) *pbsubscribe.Event { type testBackend struct { store *state.Store - authorizer func(token string, entMeta *structs.EnterpriseMeta) acl.Authorizer + authorizer func(token string, entMeta *acl.EnterpriseMeta) acl.Authorizer forwardConn *gogrpc.ClientConn } func (b testBackend) ResolveTokenAndDefaultMeta( token string, - entMeta *structs.EnterpriseMeta, + entMeta *acl.EnterpriseMeta, _ *acl.AuthorizerContext, ) (acl.Authorizer, error) { return b.authorizer(token, entMeta), nil @@ -342,7 +342,7 @@ func newTestBackend() (*testBackend, error) { return nil, err } store := state.NewStateStoreWithEventPublisher(gc) - allowAll := func(string, *structs.EnterpriseMeta) acl.Authorizer { + allowAll := func(string, *acl.EnterpriseMeta) acl.Authorizer { return acl.AllowAll() } return &testBackend{store: store, authorizer: allowAll}, nil @@ -663,7 +663,7 @@ node "node1" { require.Equal(t, acl.Deny, authorizer.NodeRead("denied", nil)) // TODO: is there any easy way to do this with the acl package? - backend.authorizer = func(tok string, _ *structs.EnterpriseMeta) acl.Authorizer { + backend.authorizer = func(tok string, _ *acl.EnterpriseMeta) acl.Authorizer { if tok == token { return authorizer } @@ -859,7 +859,7 @@ node "node1" { require.Equal(t, acl.Deny, authorizer.NodeRead("denied", nil)) // TODO: is there any easy way to do this with the acl package? 
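
The Subscribe handler change above is the template for the ACL resolution sites touched in this commit: build the default meta with acl.NewEnterpriseMetaWithPartition and pass its address to ResolveTokenAndDefaultMeta. A hedged sketch (hypothetical helper in the subscribe package):

    package subscribe // illustration only; not part of this patch

    import "github.com/hashicorp/consul/acl"

    // resolveAuthorizer shows the post-refactor flow: the tenancy default is
    // built by the acl package itself, so a handler no longer needs
    // agent/structs just to say which partition/namespace to use.
    func resolveAuthorizer(backend Backend, token, partition, namespace string) (acl.Authorizer, error) {
    	entMeta := acl.NewEnterpriseMetaWithPartition(partition, namespace)
    	return backend.ResolveTokenAndDefaultMeta(token, &entMeta, nil)
    }
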
- backend.authorizer = func(tok string, _ *structs.EnterpriseMeta) acl.Authorizer { + backend.authorizer = func(tok string, _ *acl.EnterpriseMeta) acl.Authorizer { if tok == token { return authorizer } diff --git a/agent/grpc/public/services/connectca/mock_ACLResolver.go b/agent/grpc/public/services/connectca/mock_ACLResolver.go index bbc462c44..6b6a6a771 100644 --- a/agent/grpc/public/services/connectca/mock_ACLResolver.go +++ b/agent/grpc/public/services/connectca/mock_ACLResolver.go @@ -3,10 +3,9 @@ package connectca import ( - acl "github.com/hashicorp/consul/acl" mock "github.com/stretchr/testify/mock" - structs "github.com/hashicorp/consul/agent/structs" + acl "github.com/hashicorp/consul/acl" ) // MockACLResolver is an autogenerated mock type for the ACLResolver type @@ -15,11 +14,11 @@ type MockACLResolver struct { } // ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2 -func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *structs.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { +func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { ret := _m.Called(_a0, _a1, _a2) var r0 acl.Authorizer - if rf, ok := ret.Get(0).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { + if rf, ok := ret.Get(0).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { r0 = rf(_a0, _a1, _a2) } else { if ret.Get(0) != nil { @@ -28,7 +27,7 @@ func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *structs.E } var r1 error - if rf, ok := ret.Get(1).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) error); ok { + if rf, ok := ret.Get(1).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) error); ok { r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Error(1) diff --git a/agent/grpc/public/services/connectca/server.go b/agent/grpc/public/services/connectca/server.go index 64bced2dd..002f8e344 100644 --- a/agent/grpc/public/services/connectca/server.go +++ b/agent/grpc/public/services/connectca/server.go @@ -30,7 +30,7 @@ type StateStore interface { //go:generate mockery -name ACLResolver -inpkg type ACLResolver interface { - ResolveTokenAndDefaultMeta(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) + ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) } func NewServer(cfg Config) *Server { diff --git a/agent/grpc/public/services/dataplane/get_supported_features.go b/agent/grpc/public/services/dataplane/get_supported_features.go index 672e48f66..f9a817190 100644 --- a/agent/grpc/public/services/dataplane/get_supported_features.go +++ b/agent/grpc/public/services/dataplane/get_supported_features.go @@ -3,12 +3,13 @@ package dataplane import ( "context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + acl "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/grpc/public" structs "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbdataplane" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) func (d *Server) SupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.SupportedDataplaneFeaturesRequest) (*pbdataplane.SupportedDataplaneFeaturesResponse, error) { diff --git a/agent/grpc/public/services/dataplane/get_supported_features_test.go 
b/agent/grpc/public/services/dataplane/get_supported_features_test.go index 2b3c5e76d..36ac7400c 100644 --- a/agent/grpc/public/services/dataplane/get_supported_features_test.go +++ b/agent/grpc/public/services/dataplane/get_supported_features_test.go @@ -4,15 +4,16 @@ import ( "context" "testing" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/grpc/public" - "github.com/hashicorp/consul/agent/grpc/public/testutils" - "github.com/hashicorp/consul/proto-public/pbdataplane" "github.com/hashicorp/go-hclog" mock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/grpc/public/testutils" + "github.com/hashicorp/consul/proto-public/pbdataplane" ) const testACLToken = "acl-token" diff --git a/agent/grpc/public/services/dataplane/mock_ACLResolver.go b/agent/grpc/public/services/dataplane/mock_ACLResolver.go index 364e17e66..39d4b5477 100644 --- a/agent/grpc/public/services/dataplane/mock_ACLResolver.go +++ b/agent/grpc/public/services/dataplane/mock_ACLResolver.go @@ -3,10 +3,9 @@ package dataplane import ( - acl "github.com/hashicorp/consul/acl" mock "github.com/stretchr/testify/mock" - structs "github.com/hashicorp/consul/agent/structs" + acl "github.com/hashicorp/consul/acl" ) // MockACLResolver is an autogenerated mock type for the ACLResolver type @@ -15,11 +14,11 @@ type MockACLResolver struct { } // ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2 -func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *structs.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { +func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { ret := _m.Called(_a0, _a1, _a2) var r0 acl.Authorizer - if rf, ok := ret.Get(0).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { + if rf, ok := ret.Get(0).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { r0 = rf(_a0, _a1, _a2) } else { if ret.Get(0) != nil { @@ -28,7 +27,7 @@ func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *structs.E } var r1 error - if rf, ok := ret.Get(1).(func(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) error); ok { + if rf, ok := ret.Get(1).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) error); ok { r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Error(1) diff --git a/agent/grpc/public/services/dataplane/server.go b/agent/grpc/public/services/dataplane/server.go index 90a050e22..6c05a0d08 100644 --- a/agent/grpc/public/services/dataplane/server.go +++ b/agent/grpc/public/services/dataplane/server.go @@ -1,11 +1,11 @@ package dataplane import ( - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto-public/pbdataplane" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto-public/pbdataplane" ) type Server struct { @@ -19,7 +19,7 @@ type Config struct { //go:generate mockery -name ACLResolver -inpkg type ACLResolver interface { - ResolveTokenAndDefaultMeta(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) + ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) 
(acl.Authorizer, error) } func NewServer(cfg Config) *Server { diff --git a/agent/grpc/public/testutils/acl.go b/agent/grpc/public/testutils/acl.go index 0c640d266..3bea248ba 100644 --- a/agent/grpc/public/testutils/acl.go +++ b/agent/grpc/public/testutils/acl.go @@ -3,8 +3,9 @@ package testutils import ( "testing" - "github.com/hashicorp/consul/acl" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" ) func TestAuthorizer(t *testing.T) acl.Authorizer { diff --git a/agent/http_oss.go b/agent/http_oss.go index 797070ea1..c14c31d8d 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -8,10 +8,11 @@ import ( "net/http" "strings" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) -func (s *HTTPHandlers) parseEntMeta(req *http.Request, entMeta *structs.EnterpriseMeta) error { +func (s *HTTPHandlers) parseEntMeta(req *http.Request, entMeta *acl.EnterpriseMeta) error { if headerNS := req.Header.Get("X-Consul-Namespace"); headerNS != "" { return BadRequestError{Reason: "Invalid header: \"X-Consul-Namespace\" - Namespaces are a Consul Enterprise feature"} } @@ -46,7 +47,7 @@ func (s *HTTPHandlers) validateEnterpriseIntentionNamespace(logName, ns string, return BadRequestError{Reason: "Invalid " + logName + "(" + ns + ")" + ": Namespaces is a Consul Enterprise feature"} } -func (s *HTTPHandlers) parseEntMetaNoWildcard(req *http.Request, _ *structs.EnterpriseMeta) error { +func (s *HTTPHandlers) parseEntMetaNoWildcard(req *http.Request, _ *acl.EnterpriseMeta) error { return s.parseEntMeta(req, nil) } @@ -88,7 +89,7 @@ func (s *HTTPHandlers) uiTemplateDataTransform(data map[string]interface{}) erro return nil } -func (s *HTTPHandlers) parseEntMetaPartition(req *http.Request, meta *structs.EnterpriseMeta) error { +func (s *HTTPHandlers) parseEntMetaPartition(req *http.Request, meta *acl.EnterpriseMeta) error { if headerAP := req.Header.Get("X-Consul-Partition"); headerAP != "" { return BadRequestError{Reason: "Invalid header: \"X-Consul-Partition\" - Partitions are a Consul Enterprise feature"} } diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 4c326b4f1..b99911f7f 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + "github.com/hashicorp/consul/acl" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" @@ -51,11 +52,11 @@ func (s *HTTPHandlers) IntentionList(resp http.ResponseWriter, req *http.Request func (s *HTTPHandlers) IntentionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Method is tested in IntentionEndpoint - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } - if entMeta.PartitionOrDefault() != structs.PartitionOrDefault("") { + if entMeta.PartitionOrDefault() != acl.PartitionOrDefault("") { return nil, BadRequestError{Reason: "Cannot use a partition with this endpoint"} } @@ -114,7 +115,7 @@ func (s *HTTPHandlers) IntentionMatch(resp http.ResponseWriter, req *http.Reques return nil, nil } - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -207,7 +208,7 @@ func (s *HTTPHandlers) IntentionCheck(resp http.ResponseWriter, req *http.Reques return nil, nil } - var entMeta structs.EnterpriseMeta + var entMeta 
acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -275,7 +276,7 @@ func (s *HTTPHandlers) IntentionExact(resp http.ResponseWriter, req *http.Reques // GET /v1/connect/intentions/exact func (s *HTTPHandlers) IntentionGetExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -349,7 +350,7 @@ func (s *HTTPHandlers) IntentionGetExact(resp http.ResponseWriter, req *http.Req // PUT /v1/connect/intentions/exact func (s *HTTPHandlers) IntentionPutExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -391,7 +392,7 @@ func (s *HTTPHandlers) IntentionPutExact(resp http.ResponseWriter, req *http.Req // DELETE /v1/connect/intentions/exact func (s *HTTPHandlers) IntentionDeleteExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -427,7 +428,7 @@ func (s *HTTPHandlers) IntentionDeleteExact(resp http.ResponseWriter, req *http. // intentionCreateResponse is the response structure for creating an intention. type intentionCreateResponse struct{ ID string } -func parseIntentionQueryExact(req *http.Request, entMeta *structs.EnterpriseMeta) (*structs.IntentionQueryExact, error) { +func parseIntentionQueryExact(req *http.Request, entMeta *acl.EnterpriseMeta) (*structs.IntentionQueryExact, error) { q := req.URL.Query() // Extract the source/destination @@ -464,7 +465,7 @@ func parseIntentionQueryExact(req *http.Request, entMeta *structs.EnterpriseMeta return &exact, nil } -func parseIntentionStringComponent(input string, entMeta *structs.EnterpriseMeta) (string, string, string, error) { +func parseIntentionStringComponent(input string, entMeta *acl.EnterpriseMeta) (string, string, string, error) { ss := strings.Split(input, "/") switch len(ss) { case 1: // Name only @@ -547,11 +548,11 @@ func (s *HTTPHandlers) IntentionSpecificGet(id string, resp http.ResponseWriter, func (s *HTTPHandlers) IntentionSpecificUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Method is tested in IntentionEndpoint - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } - if entMeta.PartitionOrDefault() != structs.PartitionOrDefault("") { + if entMeta.PartitionOrDefault() != acl.PartitionOrDefault("") { return nil, BadRequestError{Reason: "Cannot use a partition with this endpoint"} } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index 5b7965c5c..62190cf9b 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" @@ -786,7 +787,7 @@ func TestParseIntentionStringComponent(t *testing.T) { for _, tc := range cases { t.Run(tc.TestName, func(t *testing.T) { - var entMeta structs.EnterpriseMeta + var entMeta 
acl.EnterpriseMeta ap, ns, name, err := parseIntentionStringComponent(tc.Input, &entMeta) if tc.Err { require.Error(t, err) diff --git a/agent/local/state.go b/agent/local/state.go index e0bc8ae11..9a2e00a94 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -154,7 +154,7 @@ func (c *CheckState) CriticalFor() time.Duration { type rpc interface { RPC(method string, args interface{}, reply interface{}) error - ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) + ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) } // State is used to represent the node's services, @@ -181,7 +181,7 @@ type State struct { // Config is the agent config config Config - agentEnterpriseMeta structs.EnterpriseMeta + agentEnterpriseMeta acl.EnterpriseMeta // nodeInfoInSync tracks whether the server has our correct top-level // node information in sync @@ -411,11 +411,11 @@ func (l *State) AllServices() map[structs.ServiceID]*structs.NodeService { // and are being kept in sync with the server // // Results are scoped to the provided namespace and partition. -func (l *State) Services(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService { +func (l *State) Services(entMeta *acl.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService { return l.listServices(true, entMeta) } -func (l *State) listServices(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService { +func (l *State) listServices(filtered bool, entMeta *acl.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService { l.RLock() defer l.RUnlock() @@ -487,7 +487,7 @@ func (l *State) setServiceStateLocked(s *ServiceState) { // ServiceStates returns a shallow copy of all service state records. // The service record still points to the original service record and // must not be modified. -func (l *State) ServiceStates(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*ServiceState { +func (l *State) ServiceStates(entMeta *acl.EnterpriseMeta) map[structs.ServiceID]*ServiceState { l.RLock() defer l.RUnlock() @@ -553,7 +553,7 @@ func (l *State) addCheckLocked(check *structs.HealthCheck, token string) error { // hard-set the node name and partition check.Node = l.config.NodeName - check.EnterpriseMeta = structs.NewEnterpriseMetaWithPartition( + check.EnterpriseMeta = acl.NewEnterpriseMetaWithPartition( l.agentEnterpriseMeta.PartitionOrEmpty(), check.NamespaceOrEmpty(), ) @@ -752,11 +752,11 @@ func (l *State) AllChecks() map[structs.CheckID]*structs.HealthCheck { // agent is aware of and are being kept in sync with the server // // Results are scoped to the provided namespace and partition. 
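
The agent/local hunks above change only the filter parameter; the returned maps keep their structs types. A hedged sketch of a caller (hypothetical helper; it assumes, as the state-store hunks do, that structs.DefaultEnterpriseMetaInDefaultPartition() yields the default *acl.EnterpriseMeta):

    package local // illustration only; not part of this patch

    import "github.com/hashicorp/consul/agent/structs"

    // defaultTenancyServiceNames lists the names of locally registered services
    // in the default partition/namespace via the post-refactor State.Services.
    func defaultTenancyServiceNames(l *State) []string {
    	entMeta := structs.DefaultEnterpriseMetaInDefaultPartition()
    	var names []string
    	for _, svc := range l.Services(entMeta) {
    		names = append(names, svc.Service)
    	}
    	return names
    }
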
-func (l *State) Checks(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { +func (l *State) Checks(entMeta *acl.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { return l.listChecks(true, entMeta) } -func (l *State) listChecks(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { +func (l *State) listChecks(filtered bool, entMeta *acl.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { m := make(map[structs.CheckID]*structs.HealthCheck) for id, c := range l.listCheckStates(filtered, entMeta) { m[id] = c.Check @@ -846,11 +846,11 @@ func (l *State) AllCheckStates() map[structs.CheckID]*CheckState { // The defer timers still point to the original values and must not be modified. // // Results are scoped to the provided namespace and partition. -func (l *State) CheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState { +func (l *State) CheckStates(entMeta *acl.EnterpriseMeta) map[structs.CheckID]*CheckState { return l.listCheckStates(true, entMeta) } -func (l *State) listCheckStates(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState { +func (l *State) listCheckStates(filtered bool, entMeta *acl.EnterpriseMeta) map[structs.CheckID]*CheckState { l.RLock() defer l.RUnlock() @@ -883,11 +883,11 @@ func (l *State) AllCriticalCheckStates() map[structs.CheckID]*CheckState { // The defer timers still point to the original values and must not be modified. // // Results are scoped to the provided namespace and partition. -func (l *State) CriticalCheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState { +func (l *State) CriticalCheckStates(entMeta *acl.EnterpriseMeta) map[structs.CheckID]*CheckState { return l.listCriticalCheckStates(true, entMeta) } -func (l *State) listCriticalCheckStates(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState { +func (l *State) listCriticalCheckStates(filtered bool, entMeta *acl.EnterpriseMeta) map[structs.CheckID]*CheckState { l.RLock() defer l.RUnlock() diff --git a/agent/local/state_test.go b/agent/local/state_test.go index be4cb6aa4..c75d0234c 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -2156,7 +2156,7 @@ func TestAgent_sendCoordinate(t *testing.T) { }) } -func servicesInSync(state *local.State, wantServices int, entMeta *structs.EnterpriseMeta) error { +func servicesInSync(state *local.State, wantServices int, entMeta *acl.EnterpriseMeta) error { services := state.ServiceStates(entMeta) if got, want := len(services), wantServices; got != want { return fmt.Errorf("got %d services want %d", got, want) @@ -2169,7 +2169,7 @@ func servicesInSync(state *local.State, wantServices int, entMeta *structs.Enter return nil } -func checksInSync(state *local.State, wantChecks int, entMeta *structs.EnterpriseMeta) error { +func checksInSync(state *local.State, wantChecks int, entMeta *acl.EnterpriseMeta) error { checks := state.CheckStates(entMeta) if got, want := len(checks), wantChecks; got != want { return fmt.Errorf("got %d checks want %d", got, want) @@ -2421,6 +2421,6 @@ func (f *fakeRPC) RPC(method string, args interface{}, reply interface{}) error return nil } -func (f *fakeRPC) ResolveTokenAndDefaultMeta(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) (consul.ACLResolveResult, error) { +func (f *fakeRPC) ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (consul.ACLResolveResult, error) { return consul.ACLResolveResult{}, nil } diff 
--git a/agent/operator_endpoint.go b/agent/operator_endpoint.go index e43302aef..4a33497a8 100644 --- a/agent/operator_endpoint.go +++ b/agent/operator_endpoint.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" ) @@ -175,7 +176,7 @@ func keyringErrorsOrNil(responses []*structs.KeyringResponse) error { } if response.Segment != "" { pool += " [segment: " + response.Segment + "]" - } else if !structs.IsDefaultPartition(response.Partition) { + } else if !acl.IsDefaultPartition(response.Partition) { pool += " [partition: " + response.Partition + "]" } errs = multierror.Append(errs, fmt.Errorf("%s error: %s", pool, response.Error)) diff --git a/agent/proxycfg/manager_test.go b/agent/proxycfg/manager_test.go index 61454a074..9b6289703 100644 --- a/agent/proxycfg/manager_test.go +++ b/agent/proxycfg/manager_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/time/rate" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/connect" @@ -251,7 +252,7 @@ func TestManager_BasicLifecycle(t *testing.T) { IntentionsSet: true, }, Datacenter: "dc1", - Locality: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")}, + Locality: GatewayKey{Datacenter: "dc1", Partition: acl.PartitionOrDefault("")}, }, }, { @@ -311,7 +312,7 @@ func TestManager_BasicLifecycle(t *testing.T) { IntentionsSet: true, }, Datacenter: "dc1", - Locality: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")}, + Locality: GatewayKey{Datacenter: "dc1", Partition: acl.PartitionOrDefault("")}, }, }, } diff --git a/agent/proxycfg/naming.go b/agent/proxycfg/naming.go index 5a5f20975..e222c8fe3 100644 --- a/agent/proxycfg/naming.go +++ b/agent/proxycfg/naming.go @@ -3,6 +3,7 @@ package proxycfg import ( "strings" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) @@ -10,7 +11,7 @@ type UpstreamID struct { Type string Name string Datacenter string - structs.EnterpriseMeta + acl.EnterpriseMeta } func NewUpstreamID(u *structs.Upstream) UpstreamID { @@ -18,7 +19,7 @@ func NewUpstreamID(u *structs.Upstream) UpstreamID { Type: u.DestinationType, Name: u.DestinationName, Datacenter: u.Datacenter, - EnterpriseMeta: structs.NewEnterpriseMetaWithPartition( + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition( u.DestinationPartition, u.DestinationNamespace, ), @@ -57,7 +58,7 @@ func NewUpstreamIDFromTargetID(tid string) UpstreamID { id := UpstreamID{ Name: split[0], - EnterpriseMeta: structs.NewEnterpriseMetaWithPartition(split[2], split[1]), + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), Datacenter: split[3], } id.normalize() @@ -96,7 +97,7 @@ func UpstreamIDFromString(input string) UpstreamID { const upstreamTypePreparedQueryPrefix = structs.UpstreamDestTypePreparedQuery + ":" -func ParseUpstreamIDString(input string) (typ, dc, name string, meta *structs.EnterpriseMeta) { +func ParseUpstreamIDString(input string) (typ, dc, name string, meta *acl.EnterpriseMeta) { if strings.HasPrefix(input, upstreamTypePreparedQueryPrefix) { typ = structs.UpstreamDestTypePreparedQuery input = strings.TrimPrefix(input, upstreamTypePreparedQueryPrefix) diff --git a/agent/proxycfg/naming_oss.go b/agent/proxycfg/naming_oss.go index bbcf1d0e8..2ba2d9996 100644 --- 
a/agent/proxycfg/naming_oss.go +++ b/agent/proxycfg/naming_oss.go @@ -4,10 +4,11 @@ package proxycfg import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) -func UpstreamIDString(typ, dc, name string, _ *structs.EnterpriseMeta) string { +func UpstreamIDString(typ, dc, name string, _ *acl.EnterpriseMeta) string { ret := name if dc != "" { @@ -21,7 +22,7 @@ func UpstreamIDString(typ, dc, name string, _ *structs.EnterpriseMeta) string { return typ + ":" + ret } -func parseInnerUpstreamIDString(input string) (string, *structs.EnterpriseMeta) { +func parseInnerUpstreamIDString(input string) (string, *acl.EnterpriseMeta) { return input, structs.DefaultEnterpriseMetaInDefaultPartition() } diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index cebf0b2e9..7cf669a89 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -86,7 +86,7 @@ type GatewayKey struct { func (k GatewayKey) String() string { resp := k.Datacenter - if !structs.IsDefaultPartition(k.Partition) { + if !acl.IsDefaultPartition(k.Partition) { resp = k.Partition + "." + resp } return resp @@ -97,7 +97,7 @@ func (k GatewayKey) IsEmpty() bool { } func (k GatewayKey) Matches(dc, partition string) bool { - return structs.EqualPartitions(k.Partition, partition) && k.Datacenter == dc + return acl.EqualPartitions(k.Partition, partition) && k.Datacenter == dc } func gatewayKeyFromString(s string) GatewayKey { diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 5a88c2880..29004eeaf 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/consul/discoverychain" @@ -2581,7 +2582,7 @@ func Test_hostnameEndpoints(t *testing.T) { cases := []testCase{ { name: "same locality and no LAN hostname endpoints", - localKey: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")}, + localKey: GatewayKey{Datacenter: "dc1", Partition: acl.PartitionOrDefault("")}, nodes: structs.CheckServiceNodes{ { Node: &structs.Node{ @@ -2608,7 +2609,7 @@ func Test_hostnameEndpoints(t *testing.T) { }, { name: "same locality and one LAN hostname endpoint", - localKey: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")}, + localKey: GatewayKey{Datacenter: "dc1", Partition: acl.PartitionOrDefault("")}, nodes: structs.CheckServiceNodes{ { Node: &structs.Node{ @@ -2646,7 +2647,7 @@ func Test_hostnameEndpoints(t *testing.T) { }, { name: "different locality and one WAN hostname endpoint", - localKey: GatewayKey{Datacenter: "dc2", Partition: structs.PartitionOrDefault("")}, + localKey: GatewayKey{Datacenter: "dc2", Partition: acl.PartitionOrDefault("")}, nodes: structs.CheckServiceNodes{ { Node: &structs.Node{ diff --git a/agent/proxycfg/testing_ingress_gateway.go b/agent/proxycfg/testing_ingress_gateway.go index 7686993ba..b0f09449d 100644 --- a/agent/proxycfg/testing_ingress_gateway.go +++ b/agent/proxycfg/testing_ingress_gateway.go @@ -6,6 +6,7 @@ import ( "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/discoverychain" @@ -882,13 +883,13 @@ func 
TestConfigSnapshotIngress_MultipleListenersDuplicateService(t testing.T) *C func TestConfigSnapshotIngressGatewayWithChain( t testing.T, variant string, - webEntMeta, fooEntMeta *structs.EnterpriseMeta, + webEntMeta, fooEntMeta *acl.EnterpriseMeta, ) *ConfigSnapshot { if webEntMeta == nil { - webEntMeta = &structs.EnterpriseMeta{} + webEntMeta = &acl.EnterpriseMeta{} } if fooEntMeta == nil { - fooEntMeta = &structs.EnterpriseMeta{} + fooEntMeta = &acl.EnterpriseMeta{} } var ( diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go index f8daf340f..e38ddeb63 100644 --- a/agent/proxycfg/upstreams.go +++ b/agent/proxycfg/upstreams.go @@ -8,6 +8,7 @@ import ( "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" @@ -283,7 +284,7 @@ func (s *handlerUpstreams) resetWatchesFromChain( // Outside of transparent mode we only watch the chain target, B, // since A is a virtual service and traffic will not be sent to it. if !watchedChainEndpoints && s.proxyCfg.Mode == structs.ProxyModeTransparent { - chainEntMeta := structs.NewEnterpriseMetaWithPartition(chain.Partition, chain.Namespace) + chainEntMeta := acl.NewEnterpriseMetaWithPartition(chain.Partition, chain.Namespace) opts := targetWatchOpts{ upstreamID: uid, @@ -356,7 +357,7 @@ type targetWatchOpts struct { service string filter string datacenter string - entMeta *structs.EnterpriseMeta + entMeta *acl.EnterpriseMeta } func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error { @@ -366,7 +367,7 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config "target", opts.chainID, ) - var finalMeta structs.EnterpriseMeta + var finalMeta acl.EnterpriseMeta finalMeta.Merge(opts.entMeta) correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String() diff --git a/agent/structs/acl_cache_test.go b/agent/structs/acl_cache_test.go index 2d527b7aa..9a5ba3707 100644 --- a/agent/structs/acl_cache_test.go +++ b/agent/structs/acl_cache_test.go @@ -3,8 +3,9 @@ package structs import ( "testing" - "github.com/hashicorp/consul/acl" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" ) func TestStructs_ACLCaches(t *testing.T) { diff --git a/agent/structs/check_definition.go b/agent/structs/check_definition.go index c6967d2fc..5e7768270 100644 --- a/agent/structs/check_definition.go +++ b/agent/structs/check_definition.go @@ -1,9 +1,10 @@ package structs import ( - "github.com/hashicorp/consul/acl" "time" + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/types" diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 1fe7cad73..fabb5dae9 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -6,11 +6,12 @@ import ( "strings" "time" - "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/lib" diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index 5203bcc00..90931907e 100644 --- a/agent/structs/config_entry_test.go +++ 
b/agent/structs/config_entry_test.go @@ -7,12 +7,13 @@ import ( "time" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/hcl" "github.com/mitchellh/copystructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/sdk/testutil" diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index 17b9ee77a..c2738f842 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -3,9 +3,10 @@ package structs import ( "encoding/json" "fmt" - "github.com/hashicorp/consul/acl" "time" + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/lib" ) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 46dbbe7c5..1ea8eb3ca 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -6,8 +6,6 @@ import ( "crypto/sha256" "encoding/json" "fmt" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" "math/rand" "reflect" "regexp" @@ -16,13 +14,18 @@ import ( "strings" "time" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/proto" - "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" "github.com/mitchellh/hashstructure" + "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" + ptypes "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/api" diff --git a/agent/submatview/store_integration_test.go b/agent/submatview/store_integration_test.go index 69dab7cfc..49cb67677 100644 --- a/agent/submatview/store_integration_test.go +++ b/agent/submatview/store_integration_test.go @@ -142,7 +142,7 @@ type backend struct { pub *stream.EventPublisher } -func (b backend) ResolveTokenAndDefaultMeta(string, *structs.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) { +func (b backend) ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) { return acl.AllowAll(), nil } diff --git a/agent/txn_endpoint.go b/agent/txn_endpoint.go index 58a1cd4b0..54338c86b 100644 --- a/agent/txn_endpoint.go +++ b/agent/txn_endpoint.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" @@ -147,7 +148,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( Value: in.KV.Value, Flags: in.KV.Flags, Session: in.KV.Session, - EnterpriseMeta: structs.NewEnterpriseMetaWithPartition( + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition( in.KV.Partition, in.KV.Namespace, ), @@ -211,7 +212,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( Warning: svc.Weights.Warning, }, EnableTagOverride: svc.EnableTagOverride, - EnterpriseMeta: structs.NewEnterpriseMetaWithPartition( + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition( svc.Partition, svc.Namespace, ), @@ -274,7 +275,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( Timeout: timeout, DeregisterCriticalServiceAfter: deregisterCriticalServiceAfter, }, - EnterpriseMeta: 
structs.NewEnterpriseMetaWithPartition( + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition( check.Partition, check.Namespace, ), diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index 1defb241b..dfe14e9d5 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -37,7 +37,7 @@ type ServiceSummary struct { transparentProxySet bool ConnectNative bool - structs.EnterpriseMeta + acl.EnterpriseMeta } func (s *ServiceSummary) LessThan(other *ServiceSummary) bool { @@ -582,7 +582,7 @@ func (s *HTTPHandlers) UIGatewayIntentions(resp http.ResponseWriter, req *http.R return nil, nil } - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { return nil, err } @@ -642,7 +642,7 @@ func (s *HTTPHandlers) UIMetricsProxy(resp http.ResponseWriter, req *http.Reques // Clear the token from the headers so we don't end up proxying it. s.clearTokenFromHeaders(req) - var entMeta structs.EnterpriseMeta + var entMeta acl.EnterpriseMeta if err := s.parseEntMetaPartition(req, &entMeta); err != nil { return nil, err } diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index b1a38f0cd..711f854b2 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -11,6 +11,7 @@ import ( "github.com/golang/protobuf/proto" bexpr "github.com/hashicorp/go-bexpr" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" @@ -564,7 +565,7 @@ func makeLoadAssignmentEndpointGroup( gatewayKey = localKey } - if gatewayKey.IsEmpty() || (structs.EqualPartitions(localKey.Partition, target.Partition) && localKey.Datacenter == target.Datacenter) { + if gatewayKey.IsEmpty() || (acl.EqualPartitions(localKey.Partition, target.Partition) && localKey.Datacenter == target.Datacenter) { // Gateways are not needed if the request isn't for a remote DC or partition. return loadAssignmentEndpointGroup{ Endpoints: realEndpoints, diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index bca152ad9..b9b7855b9 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/types" @@ -192,7 +193,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. // The virtualIPTag is used by consul-k8s to store the ClusterIP for a service. // We only match on this virtual IP if the upstream is in the proxy's partition. // This is because the IP is not guaranteed to be unique across k8s clusters. 
- if structs.EqualPartitions(e.Node.PartitionOrDefault(), cfgSnap.ProxyID.PartitionOrDefault()) { + if acl.EqualPartitions(e.Node.PartitionOrDefault(), cfgSnap.ProxyID.PartitionOrDefault()) { if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" { uniqueAddrs[vip.Address] = struct{}{} } diff --git a/agent/xds/server_oss.go b/agent/xds/server_oss.go index a9a01908d..dc5ab309c 100644 --- a/agent/xds/server_oss.go +++ b/agent/xds/server_oss.go @@ -6,9 +6,10 @@ package xds import ( envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" ) -func parseEnterpriseMeta(node *envoy_core_v3.Node) *structs.EnterpriseMeta { +func parseEnterpriseMeta(node *envoy_core_v3.Node) *acl.EnterpriseMeta { return structs.DefaultEnterpriseMetaInDefaultPartition() } diff --git a/command/acl/bindingrule/delete/bindingrule_delete.go b/command/acl/bindingrule/delete/bindingrule_delete.go index 49dac4e59..938699251 100644 --- a/command/acl/bindingrule/delete/bindingrule_delete.go +++ b/command/acl/bindingrule/delete/bindingrule_delete.go @@ -4,9 +4,10 @@ import ( "flag" "fmt" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/bindingrule/update/bindingrule_update.go b/command/acl/bindingrule/update/bindingrule_update.go index d3d0eb462..aa667f01e 100644 --- a/command/acl/bindingrule/update/bindingrule_update.go +++ b/command/acl/bindingrule/update/bindingrule_update.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/bindingrule" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/policy/create/policy_create.go b/command/acl/policy/create/policy_create.go index 00546ff18..46bc2b596 100644 --- a/command/acl/policy/create/policy_create.go +++ b/command/acl/policy/create/policy_create.go @@ -6,13 +6,14 @@ import ( "io" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/api" aclhelpers "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/policy" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/command/helpers" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/policy/delete/policy_delete.go b/command/acl/policy/delete/policy_delete.go index 109cdb9ac..69d23d2a8 100644 --- a/command/acl/policy/delete/policy_delete.go +++ b/command/acl/policy/delete/policy_delete.go @@ -4,9 +4,10 @@ import ( "flag" "fmt" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/policy/read/policy_read.go b/command/acl/policy/read/policy_read.go index c5be7f63b..455f5e5f7 100644 --- a/command/acl/policy/read/policy_read.go +++ b/command/acl/policy/read/policy_read.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/policy" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git 
a/command/acl/policy/update/policy_update.go b/command/acl/policy/update/policy_update.go index 4af67cfe2..5d0768a80 100644 --- a/command/acl/policy/update/policy_update.go +++ b/command/acl/policy/update/policy_update.go @@ -6,12 +6,13 @@ import ( "io" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/policy" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/command/helpers" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/role/create/role_create.go b/command/acl/role/create/role_create.go index c4f480464..9afbfe841 100644 --- a/command/acl/role/create/role_create.go +++ b/command/acl/role/create/role_create.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/role" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/role/delete/role_delete.go b/command/acl/role/delete/role_delete.go index d43858379..41fbb26a3 100644 --- a/command/acl/role/delete/role_delete.go +++ b/command/acl/role/delete/role_delete.go @@ -4,9 +4,10 @@ import ( "flag" "fmt" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/role/read/role_read.go b/command/acl/role/read/role_read.go index 5d46dbad2..c22c48477 100644 --- a/command/acl/role/read/role_read.go +++ b/command/acl/role/read/role_read.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/role" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/role/update/role_update.go b/command/acl/role/update/role_update.go index c9b116a30..d229fd47b 100644 --- a/command/acl/role/update/role_update.go +++ b/command/acl/role/update/role_update.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/role" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/rules/translate.go b/command/acl/rules/translate.go index cdd423943..b8053ba74 100644 --- a/command/acl/rules/translate.go +++ b/command/acl/rules/translate.go @@ -6,11 +6,12 @@ import ( "io" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/acl" aclhelpers "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/command/helpers" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/token/clone/token_clone.go b/command/acl/token/clone/token_clone.go index 129a451cf..c7c90435f 100644 --- a/command/acl/token/clone/token_clone.go +++ b/command/acl/token/clone/token_clone.go @@ -5,10 +5,11 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/token" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git 
a/command/acl/token/create/token_create.go b/command/acl/token/create/token_create.go index 15ac10cb9..8be472475 100644 --- a/command/acl/token/create/token_create.go +++ b/command/acl/token/create/token_create.go @@ -6,11 +6,12 @@ import ( "strings" "time" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/token" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/token/delete/token_delete.go b/command/acl/token/delete/token_delete.go index 94af0c1bc..4d265bcf6 100644 --- a/command/acl/token/delete/token_delete.go +++ b/command/acl/token/delete/token_delete.go @@ -4,9 +4,10 @@ import ( "flag" "fmt" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/token/formatter.go b/command/acl/token/formatter.go index a1eb050ba..cc5671002 100644 --- a/command/acl/token/formatter.go +++ b/command/acl/token/formatter.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" ) @@ -165,7 +166,7 @@ func (f *prettyFormatter) FormatTokenExpanded(token *api.ACLTokenExpanded) (stri } } - entMeta := structs.NewEnterpriseMetaWithPartition(token.Partition, token.Namespace) + entMeta := acl.NewEnterpriseMetaWithPartition(token.Partition, token.Namespace) formatServiceIdentity := func(svcIdentity *api.ACLServiceIdentity, indent string) { if len(svcIdentity.Datacenters) > 0 { buffer.WriteString(fmt.Sprintf(indent+"Name: %s (Datacenters: %s)\n", svcIdentity.ServiceName, strings.Join(svcIdentity.Datacenters, ", "))) diff --git a/command/acl/token/read/token_read.go b/command/acl/token/read/token_read.go index 885d7d916..4e66d9ea7 100644 --- a/command/acl/token/read/token_read.go +++ b/command/acl/token/read/token_read.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/token" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/acl/token/update/token_update.go b/command/acl/token/update/token_update.go index 4a8b97b4b..7c9b4f20f 100644 --- a/command/acl/token/update/token_update.go +++ b/command/acl/token/update/token_update.go @@ -5,11 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/command/acl/token" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { diff --git a/command/keyring/keyring.go b/command/keyring/keyring.go index 6c73c7429..b2f418c82 100644 --- a/command/keyring/keyring.go +++ b/command/keyring/keyring.go @@ -7,8 +7,8 @@ import ( "github.com/mitchellh/cli" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent" - "github.com/hashicorp/consul/agent/structs" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" ) @@ -202,7 +202,7 @@ func poolName(dc string, wan bool, partition, segment string) string { var suffix string if segment != "" { suffix = fmt.Sprintf(" [%s]", segment) - } else if !structs.IsDefaultPartition(partition) { + } else if 
!acl.IsDefaultPartition(partition) { suffix = fmt.Sprintf(" [partition: %s]", partition) } return fmt.Sprintf("%s%s:\n", pool, suffix) diff --git a/command/logout/logout_test.go b/command/logout/logout_test.go index 82e82bc03..92520976d 100644 --- a/command/logout/logout_test.go +++ b/command/logout/logout_test.go @@ -4,15 +4,16 @@ import ( "strings" "testing" + "github.com/hashicorp/go-uuid" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/consul/authmethod/kubeauth" "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/go-uuid" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" ) func TestLogout_noTabs(t *testing.T) { diff --git a/command/members/members.go b/command/members/members.go index b7ce700ab..2541799cb 100644 --- a/command/members/members.go +++ b/command/members/members.go @@ -12,7 +12,7 @@ import ( "github.com/mitchellh/cli" "github.com/ryanuber/columnize" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" ) @@ -94,7 +94,7 @@ func (c *cmd) Run(args []string) int { if member.Tags[consulapi.MemberTagKeyPartition] == "" { member.Tags[consulapi.MemberTagKeyPartition] = "default" } - if structs.IsDefaultPartition(member.Tags[consulapi.MemberTagKeyPartition]) { + if acl.IsDefaultPartition(member.Tags[consulapi.MemberTagKeyPartition]) { if c.segment == consulapi.AllSegments && member.Tags[consulapi.MemberTagKeyRole] == consulapi.MemberTagValueRoleServer { member.Tags[consulapi.MemberTagKeySegment] = "" } else if member.Tags[consulapi.MemberTagKeySegment] == "" { diff --git a/proto/pbcommon/common_oss.go b/proto/pbcommon/common_oss.go index 2dc2026e8..0df88ec20 100644 --- a/proto/pbcommon/common_oss.go +++ b/proto/pbcommon/common_oss.go @@ -3,21 +3,19 @@ package pbcommon -import ( - "github.com/hashicorp/consul/agent/structs" -) +import "github.com/hashicorp/consul/acl" var DefaultEnterpriseMeta = &EnterpriseMeta{} -func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *EnterpriseMeta { +func NewEnterpriseMetaFromStructs(_ acl.EnterpriseMeta) *EnterpriseMeta { return &EnterpriseMeta{} } -func EnterpriseMetaToStructs(s *EnterpriseMeta, t *structs.EnterpriseMeta) { +func EnterpriseMetaToStructs(s *EnterpriseMeta, t *acl.EnterpriseMeta) { if s == nil { return } } -func EnterpriseMetaFromStructs(t *structs.EnterpriseMeta, s *EnterpriseMeta) { +func EnterpriseMetaFromStructs(t *acl.EnterpriseMeta, s *EnterpriseMeta) { if s == nil { return } diff --git a/proto/pbconnect/connect.go b/proto/pbconnect/connect.go index 2b13a12b3..ae279b31a 100644 --- a/proto/pbconnect/connect.go +++ b/proto/pbconnect/connect.go @@ -1,6 +1,7 @@ package pbconnect import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbcommon" ) @@ -29,14 +30,14 @@ func RaftIndexTo(f *pbcommon.RaftIndex) structs.RaftIndex { return *t } -func EnterpriseMetaFrom(f structs.EnterpriseMeta) *pbcommon.EnterpriseMeta { +func EnterpriseMetaFrom(f acl.EnterpriseMeta) *pbcommon.EnterpriseMeta { t := new(pbcommon.EnterpriseMeta) pbcommon.EnterpriseMetaFromStructs(&f, t) return t } -func EnterpriseMetaTo(f *pbcommon.EnterpriseMeta) structs.EnterpriseMeta { - t := new(structs.EnterpriseMeta) +func 
EnterpriseMetaTo(f *pbcommon.EnterpriseMeta) acl.EnterpriseMeta { + t := new(acl.EnterpriseMeta) pbcommon.EnterpriseMetaToStructs(f, t) return *t } diff --git a/proto/pbservice/convert_oss.go b/proto/pbservice/convert_oss.go index 4efb78bef..5ecd2f7f4 100644 --- a/proto/pbservice/convert_oss.go +++ b/proto/pbservice/convert_oss.go @@ -4,14 +4,14 @@ package pbservice import ( - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/proto/pbcommon" ) -func EnterpriseMetaToStructs(_ *pbcommon.EnterpriseMeta) structs.EnterpriseMeta { - return structs.EnterpriseMeta{} +func EnterpriseMetaToStructs(_ *pbcommon.EnterpriseMeta) acl.EnterpriseMeta { + return acl.EnterpriseMeta{} } -func NewEnterpriseMetaFromStructs(_ structs.EnterpriseMeta) *pbcommon.EnterpriseMeta { +func NewEnterpriseMetaFromStructs(_ acl.EnterpriseMeta) *pbcommon.EnterpriseMeta { return &pbcommon.EnterpriseMeta{} } diff --git a/proto/pbservice/convert_oss_test.go b/proto/pbservice/convert_oss_test.go index 17717f058..253da69e5 100644 --- a/proto/pbservice/convert_oss_test.go +++ b/proto/pbservice/convert_oss_test.go @@ -6,8 +6,8 @@ package pbservice import ( fuzz "github.com/google/gofuzz" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/acl" ) -func randEnterpriseMeta(_ *structs.EnterpriseMeta, _ fuzz.Continue) { +func randEnterpriseMeta(_ *acl.EnterpriseMeta, _ fuzz.Continue) { } From b3db499c746564bec63eef12e1db787b4deae1d3 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 1 Apr 2022 14:31:15 -0700 Subject: [PATCH 092/785] Add a field to disable following redirects on http checks --- .changelog/12685.txt | 3 + agent/agent.go | 25 +- agent/checks/check.go | 30 +- agent/checks/check_test.go | 40 +++ agent/config/builder.go | 1 + agent/config/config.go | 1 + agent/config/runtime.go | 1 + agent/config/runtime_test.go | 7 + .../TestRuntimeConfig_Sanitize.golden | 4 +- agent/config/testdata/full-config.hcl | 8 + agent/config/testdata/full-config.json | 8 + agent/structs/check_definition.go | 6 + agent/structs/check_type.go | 1 + agent/structs/structs.go | 7 +- proto/pbservice/healthcheck.gen.go | 4 + proto/pbservice/healthcheck.pb.go | 271 ++++++++++-------- proto/pbservice/healthcheck.proto | 2 + website/content/api-docs/agent/check.mdx | 4 + website/content/docs/discovery/checks.mdx | 5 + 19 files changed, 276 insertions(+), 152 deletions(-) create mode 100644 .changelog/12685.txt diff --git a/.changelog/12685.txt b/.changelog/12685.txt new file mode 100644 index 000000000..727d3556e --- /dev/null +++ b/.changelog/12685.txt @@ -0,0 +1,3 @@ +```release-note:security +agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. 
+``` diff --git a/agent/agent.go b/agent/agent.go index 7a313cb4f..a23700ebd 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2683,18 +2683,19 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName) http := &checks.CheckHTTP{ - CheckID: cid, - ServiceID: sid, - HTTP: chkType.HTTP, - Header: chkType.Header, - Method: chkType.Method, - Body: chkType.Body, - Interval: chkType.Interval, - Timeout: chkType.Timeout, - Logger: a.logger, - OutputMaxSize: maxOutputSize, - TLSClientConfig: tlsClientConfig, - StatusHandler: statusHandler, + CheckID: cid, + ServiceID: sid, + HTTP: chkType.HTTP, + Header: chkType.Header, + Method: chkType.Method, + Body: chkType.Body, + DisableRedirects: chkType.DisableRedirects, + Interval: chkType.Interval, + Timeout: chkType.Timeout, + Logger: a.logger, + OutputMaxSize: maxOutputSize, + TLSClientConfig: tlsClientConfig, + StatusHandler: statusHandler, } if proxy != nil && proxy.Proxy.Expose.Checks { diff --git a/agent/checks/check.go b/agent/checks/check.go index 8f910901f..3e3ce44f8 100644 --- a/agent/checks/check.go +++ b/agent/checks/check.go @@ -334,18 +334,19 @@ func (c *CheckTTL) SetStatus(status, output string) string { // or if the request returns an error // Supports failures_before_critical and success_before_passing. type CheckHTTP struct { - CheckID structs.CheckID - ServiceID structs.ServiceID - HTTP string - Header map[string][]string - Method string - Body string - Interval time.Duration - Timeout time.Duration - Logger hclog.Logger - TLSClientConfig *tls.Config - OutputMaxSize int - StatusHandler *StatusHandler + CheckID structs.CheckID + ServiceID structs.ServiceID + HTTP string + Header map[string][]string + Method string + Body string + Interval time.Duration + Timeout time.Duration + Logger hclog.Logger + TLSClientConfig *tls.Config + OutputMaxSize int + StatusHandler *StatusHandler + DisableRedirects bool httpClient *http.Client stop bool @@ -392,6 +393,11 @@ func (c *CheckHTTP) Start() { Timeout: 10 * time.Second, Transport: trans, } + if c.DisableRedirects { + c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + } if c.Timeout > 0 { c.httpClient.Timeout = c.Timeout } diff --git a/agent/checks/check_test.go b/agent/checks/check_test.go index ba61aeeab..caddee424 100644 --- a/agent/checks/check_test.go +++ b/agent/checks/check_test.go @@ -459,6 +459,46 @@ func TestCheckHTTP_NotProxied(t *testing.T) { }) } +func TestCheckHTTP_DisableRedirects(t *testing.T) { + t.Parallel() + + server1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "server1") + })) + defer server1.Close() + + server2 := httptest.NewServer(http.RedirectHandler(server1.URL, 301)) + defer server2.Close() + + notif := mock.NewNotify() + logger := testutil.Logger(t) + statusHandler := NewStatusHandler(notif, logger, 0, 0, 0) + cid := structs.NewCheckID("foo", nil) + + check := &CheckHTTP{ + CheckID: cid, + HTTP: server2.URL, + Method: "GET", + OutputMaxSize: DefaultBufSize, + Interval: 10 * time.Millisecond, + DisableRedirects: true, + Logger: logger, + StatusHandler: statusHandler, + } + check.Start() + defer check.Stop() + + retry.Run(t, func(r *retry.R) { + output := notif.Output(cid) + if !strings.Contains(output, "Moved Permanently") { + r.Fatalf("should have returned 301 body instead of redirecting") + } + if 
strings.Contains(output, "server1") { + r.Fatalf("followed redirect") + } + }) +} + func TestCheckHTTPTCP_BigTimeout(t *testing.T) { testCases := []struct { timeoutIn, intervalIn, timeoutWant time.Duration diff --git a/agent/config/builder.go b/agent/config/builder.go index b3bdc46f3..d9b0aff91 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1546,6 +1546,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition { Header: v.Header, Method: stringVal(v.Method), Body: stringVal(v.Body), + DisableRedirects: boolVal(v.DisableRedirects), TCP: stringVal(v.TCP), Interval: b.durationVal(fmt.Sprintf("check[%s].interval", id), v.Interval), DockerContainerID: stringVal(v.DockerContainerID), diff --git a/agent/config/config.go b/agent/config/config.go index 42d43f1d9..c5936afa7 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -399,6 +399,7 @@ type CheckDefinition struct { Header map[string][]string `mapstructure:"header"` Method *string `mapstructure:"method"` Body *string `mapstructure:"body"` + DisableRedirects *bool `mapstructure:"disable_redirects"` OutputMaxSize *int `mapstructure:"output_max_size"` TCP *string `mapstructure:"tcp"` Interval *string `mapstructure:"interval"` diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 99c51f335..eb59edc84 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -424,6 +424,7 @@ type RuntimeConfig struct { // http = string // header = map[string][]string // method = string + // disable_redirects = (true|false) // tcp = string // h2ping = string // interval = string diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 408241e40..44b10008a 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -5743,6 +5743,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "aldrIQ4l", Body: "wSjTy7dg", + DisableRedirects: true, TCP: "RJQND605", H2PING: "9N1cSb5B", H2PingUseTLS: false, @@ -5770,6 +5771,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "gLrztrNw", Body: "0jkKgGUC", + DisableRedirects: false, OutputMaxSize: checks.DefaultBufSize, TCP: "4jG5casb", H2PING: "HCHU7gEb", @@ -5797,6 +5799,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "Dou0nGT5", Body: "5PBQd2OT", + DisableRedirects: true, OutputMaxSize: checks.DefaultBufSize, TCP: "JY6fTTcw", H2PING: "rQ8eyCSF", @@ -6008,6 +6011,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "X5DrovFc", Body: "WeikigLh", + DisableRedirects: true, OutputMaxSize: checks.DefaultBufSize, TCP: "ICbxkpSF", H2PING: "7s7BbMyb", @@ -6204,6 +6208,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "T66MFBfR", Body: "OwGjTFQi", + DisableRedirects: true, OutputMaxSize: checks.DefaultBufSize, TCP: "bNnNfx2A", H2PING: "qC1pidiW", @@ -6229,6 +6234,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "ciYHWors", Body: "lUVLGYU7", + DisableRedirects: false, OutputMaxSize: checks.DefaultBufSize, TCP: "FfvCwlqH", H2PING: "spI3muI3", @@ -6254,6 +6260,7 @@ func TestLoad_FullConfig(t *testing.T) { }, Method: "9afLm3Mj", Body: "wVVL2V6f", + DisableRedirects: true, OutputMaxSize: checks.DefaultBufSize, TCP: "fjiLFqVd", H2PING: "5NbNWhan", diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 5356761e4..ae0be95cb 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -90,6 +90,7 @@ "AliasService": "", "Body": "", 
"DeregisterCriticalServiceAfter": "0s", + "DisableRedirects": false, "DockerContainerID": "", "EnterpriseMeta": {}, "FailuresBeforeCritical": 0, @@ -297,6 +298,7 @@ "Body": "", "CheckID": "", "DeregisterCriticalServiceAfter": "0s", + "DisableRedirects": false, "DockerContainerID": "", "FailuresBeforeCritical": 0, "FailuresBeforeWarning": 0, @@ -456,4 +458,4 @@ "Version": "", "VersionPrerelease": "", "Watches": [] -} +} \ No newline at end of file diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index 48b6e9a1a..c52488751 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -110,6 +110,7 @@ check = { } method = "Dou0nGT5" body = "5PBQd2OT" + disable_redirects = true tcp = "JY6fTTcw" h2ping = "rQ8eyCSF" h2ping_use_tls = false @@ -138,6 +139,7 @@ checks = [ } method = "aldrIQ4l" body = "wSjTy7dg" + disable_redirects = true tcp = "RJQND605" h2ping = "9N1cSb5B" h2ping_use_tls = false @@ -165,6 +167,7 @@ checks = [ } method = "gLrztrNw" body = "0jkKgGUC" + disable_redirects = false tcp = "4jG5casb" h2ping = "HCHU7gEb" h2ping_use_tls = false @@ -389,6 +392,7 @@ service = { } method = "9afLm3Mj" body = "wVVL2V6f" + disable_redirects = true tcp = "fjiLFqVd" h2ping = "5NbNWhan" h2ping_use_tls = false @@ -414,6 +418,7 @@ service = { } method = "T66MFBfR" body = "OwGjTFQi" + disable_redirects = true tcp = "bNnNfx2A" h2ping = "qC1pidiW" h2ping_use_tls = false @@ -439,6 +444,7 @@ service = { } method = "ciYHWors" body = "lUVLGYU7" + disable_redirects = false tcp = "FfvCwlqH" h2ping = "spI3muI3" h2ping_use_tls = false @@ -478,6 +484,7 @@ services = [ } method = "X5DrovFc" body = "WeikigLh" + disable_redirects = true tcp = "ICbxkpSF" h2ping = "7s7BbMyb" h2ping_use_tls = false @@ -520,6 +527,7 @@ services = [ } method = "5wkAxCUE" body = "7CRjCJyz" + disable_redirects = false tcp = "MN3oA9D2" h2ping = "OV6Q2XEg" h2ping_use_tls = false diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 1c92d6d02..f051b9d81 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -111,6 +111,7 @@ }, "method": "Dou0nGT5", "body": "5PBQd2OT", + "disable_redirects": true, "output_max_size": 4096, "tcp": "JY6fTTcw", "h2ping": "rQ8eyCSF", @@ -139,6 +140,7 @@ }, "method": "aldrIQ4l", "body": "wSjTy7dg", + "disable_redirects": true, "tcp": "RJQND605", "h2ping": "9N1cSb5B", "h2ping_use_tls": false, @@ -166,6 +168,7 @@ }, "method": "gLrztrNw", "body": "0jkKgGUC", + "disable_redirects": false, "tcp": "4jG5casb", "h2ping": "HCHU7gEb", "h2ping_use_tls": false, @@ -385,6 +388,7 @@ }, "method": "9afLm3Mj", "body": "wVVL2V6f", + "disable_redirects": true, "tcp": "fjiLFqVd", "h2ping": "5NbNWhan", "h2ping_use_tls": false, @@ -411,6 +415,7 @@ }, "method": "T66MFBfR", "body": "OwGjTFQi", + "disable_redirects": true, "tcp": "bNnNfx2A", "h2ping": "qC1pidiW", "h2ping_use_tls": false, @@ -436,6 +441,7 @@ }, "method": "ciYHWors", "body": "lUVLGYU7", + "disable_redirects": false, "tcp": "FfvCwlqH", "h2ping": "spI3muI3", "h2ping_use_tls": false, @@ -475,6 +481,7 @@ }, "method": "X5DrovFc", "body": "WeikigLh", + "disable_redirects": true, "tcp": "ICbxkpSF", "h2ping": "7s7BbMyb", "h2ping_use_tls": false, @@ -517,6 +524,7 @@ }, "method": "5wkAxCUE", "body": "7CRjCJyz", + "disable_redirects": false, "tcp": "MN3oA9D2", "h2ping": "OV6Q2XEg", "h2ping_use_tls": false, diff --git a/agent/structs/check_definition.go b/agent/structs/check_definition.go index 434f35e65..e650122fc 100644 
--- a/agent/structs/check_definition.go +++ b/agent/structs/check_definition.go @@ -29,6 +29,7 @@ type CheckDefinition struct { Header map[string][]string Method string Body string + DisableRedirects bool TCP string Interval time.Duration DockerContainerID string @@ -71,6 +72,7 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) { GRPCUseTLSSnake bool `json:"grpc_use_tls"` ServiceIDSnake string `json:"service_id"` H2PingUseTLSSnake bool `json:"h2ping_use_tls"` + DisableRedirectsSnake bool `json:"disable_redirects"` *Alias }{ @@ -116,6 +118,9 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) { if t.ServiceID == "" { t.ServiceID = aux.ServiceIDSnake } + if aux.DisableRedirectsSnake { + t.DisableRedirects = aux.DisableRedirectsSnake + } if (aux.H2PING != "" && !aux.H2PingUseTLSSnake) || (aux.H2PING == "" && aux.H2PingUseTLSSnake) { t.H2PingUseTLS = aux.H2PingUseTLSSnake @@ -205,6 +210,7 @@ func (c *CheckDefinition) CheckType() *CheckType { Header: c.Header, Method: c.Method, Body: c.Body, + DisableRedirects: c.DisableRedirects, OutputMaxSize: c.OutputMaxSize, TCP: c.TCP, Interval: c.Interval, diff --git a/agent/structs/check_type.go b/agent/structs/check_type.go index 7f3b58370..0c89a00f2 100644 --- a/agent/structs/check_type.go +++ b/agent/structs/check_type.go @@ -37,6 +37,7 @@ type CheckType struct { Header map[string][]string Method string Body string + DisableRedirects bool TCP string Interval time.Duration AliasNode string diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 1a678f5c0..9b32f6cb9 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -6,8 +6,6 @@ import ( "crypto/sha256" "encoding/json" "fmt" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" "math/rand" "reflect" "regexp" @@ -16,6 +14,9 @@ import ( "strings" "time" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/proto" "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-multierror" @@ -1572,6 +1573,7 @@ type HealthCheckDefinition struct { Header map[string][]string `json:",omitempty"` Method string `json:",omitempty"` Body string `json:",omitempty"` + DisableRedirects bool `json:",omitempty"` TCP string `json:",omitempty"` H2PING string `json:",omitempty"` H2PingUseTLS bool `json:",omitempty"` @@ -1720,6 +1722,7 @@ func (c *HealthCheck) CheckType() *CheckType { Header: c.Definition.Header, Method: c.Definition.Method, Body: c.Definition.Body, + DisableRedirects: c.Definition.DisableRedirects, TCP: c.Definition.TCP, H2PING: c.Definition.H2PING, H2PingUseTLS: c.Definition.H2PingUseTLS, diff --git a/proto/pbservice/healthcheck.gen.go b/proto/pbservice/healthcheck.gen.go index 6bdb63b4b..a38fd30c2 100644 --- a/proto/pbservice/healthcheck.gen.go +++ b/proto/pbservice/healthcheck.gen.go @@ -19,6 +19,7 @@ func CheckTypeToStructs(s *CheckType, t *structs.CheckType) { t.Header = MapHeadersToStructs(s.Header) t.Method = s.Method t.Body = s.Body + t.DisableRedirects = s.DisableRedirects t.TCP = s.TCP t.Interval = structs.DurationFromProto(s.Interval) t.AliasNode = s.AliasNode @@ -54,6 +55,7 @@ func CheckTypeFromStructs(t *structs.CheckType, s *CheckType) { s.Header = NewMapHeadersFromStructs(t.Header) s.Method = t.Method s.Body = t.Body + s.DisableRedirects = t.DisableRedirects s.TCP = t.TCP s.Interval = structs.DurationToProto(t.Interval) s.AliasNode = t.AliasNode @@ -132,6 +134,7 @@ func 
HealthCheckDefinitionToStructs(s *HealthCheckDefinition, t *structs.HealthC t.Header = MapHeadersToStructs(s.Header) t.Method = s.Method t.Body = s.Body + t.DisableRedirects = s.DisableRedirects t.TCP = s.TCP t.H2PING = s.H2PING t.H2PingUseTLS = s.H2PingUseTLS @@ -158,6 +161,7 @@ func HealthCheckDefinitionFromStructs(t *structs.HealthCheckDefinition, s *Healt s.Header = NewMapHeadersFromStructs(t.Header) s.Method = t.Method s.Body = t.Body + s.DisableRedirects = t.DisableRedirects s.TCP = t.TCP s.H2PING = t.H2PING s.H2PingUseTLS = t.H2PingUseTLS diff --git a/proto/pbservice/healthcheck.pb.go b/proto/pbservice/healthcheck.pb.go index a0dbe715c..d28ed185c 100644 --- a/proto/pbservice/healthcheck.pb.go +++ b/proto/pbservice/healthcheck.pb.go @@ -268,10 +268,11 @@ type HealthCheckDefinition struct { TLSServerName string `protobuf:"bytes,19,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"` TLSSkipVerify bool `protobuf:"varint,2,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs - Header map[string]*HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Method string `protobuf:"bytes,4,opt,name=Method,proto3" json:"Method,omitempty"` - Body string `protobuf:"bytes,18,opt,name=Body,proto3" json:"Body,omitempty"` - TCP string `protobuf:"bytes,5,opt,name=TCP,proto3" json:"TCP,omitempty"` + Header map[string]*HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Method string `protobuf:"bytes,4,opt,name=Method,proto3" json:"Method,omitempty"` + Body string `protobuf:"bytes,18,opt,name=Body,proto3" json:"Body,omitempty"` + DisableRedirects bool `protobuf:"varint,22,opt,name=DisableRedirects,proto3" json:"DisableRedirects,omitempty"` + TCP string `protobuf:"bytes,5,opt,name=TCP,proto3" json:"TCP,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto Interval *durationpb.Duration `protobuf:"bytes,6,opt,name=Interval,proto3" json:"Interval,omitempty"` // mog: func-to=uint func-from=uint32 @@ -367,6 +368,13 @@ func (x *HealthCheckDefinition) GetBody() string { return "" } +func (x *HealthCheckDefinition) GetDisableRedirects() bool { + if x != nil { + return x.DisableRedirects + } + return false +} + func (x *HealthCheckDefinition) GetTCP() string { if x != nil { return x.TCP @@ -497,10 +505,11 @@ type CheckType struct { ScriptArgs []string `protobuf:"bytes,5,rep,name=ScriptArgs,proto3" json:"ScriptArgs,omitempty"` HTTP string `protobuf:"bytes,6,opt,name=HTTP,proto3" json:"HTTP,omitempty"` // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs - Header map[string]*HeaderValue `protobuf:"bytes,20,rep,name=Header,proto3" json:"Header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Method string `protobuf:"bytes,7,opt,name=Method,proto3" json:"Method,omitempty"` - Body string `protobuf:"bytes,26,opt,name=Body,proto3" json:"Body,omitempty"` - TCP string `protobuf:"bytes,8,opt,name=TCP,proto3" json:"TCP,omitempty"` + Header map[string]*HeaderValue `protobuf:"bytes,20,rep,name=Header,proto3" json:"Header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Method string `protobuf:"bytes,7,opt,name=Method,proto3" json:"Method,omitempty"` + 
Body string `protobuf:"bytes,26,opt,name=Body,proto3" json:"Body,omitempty"` + DisableRedirects bool `protobuf:"varint,31,opt,name=DisableRedirects,proto3" json:"DisableRedirects,omitempty"` + TCP string `protobuf:"bytes,8,opt,name=TCP,proto3" json:"TCP,omitempty"` // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto Interval *durationpb.Duration `protobuf:"bytes,9,opt,name=Interval,proto3" json:"Interval,omitempty"` AliasNode string `protobuf:"bytes,10,opt,name=AliasNode,proto3" json:"AliasNode,omitempty"` @@ -630,6 +639,13 @@ func (x *CheckType) GetBody() string { return "" } +func (x *CheckType) GetDisableRedirects() bool { + if x != nil { + return x.DisableRedirects + } + return false +} + func (x *CheckType) GetTCP() string { if x != nil { return x.TCP @@ -822,7 +838,7 @@ var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x23, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x86, 0x07, 0x0a, 0x15, 0x48, 0x65, + 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x07, 0x0a, 0x15, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, @@ -837,127 +853,132 @@ var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, - 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 
0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, + 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, + 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, + 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, + 0x65, 0x54, 0x4c, 0x53, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, + 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, + 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x1c, 0x0a, 0x09, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2b, + 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, 0x51, 0x0a, 0x0b, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 
0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd4, + 0x09, 0x0a, 0x09, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x38, 0x0a, 0x06, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, + 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, + 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, + 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, + 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, + 0x32, 0x50, 0x49, 
0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, + 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, + 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, + 0x43, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, + 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, + 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, + 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, + 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, + 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, + 0x34, 0x0a, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, + 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, + 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 
0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, - 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, - 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, - 0x65, 0x6c, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, - 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, - 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, - 0x47, 0x52, 0x50, 0x43, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, - 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, - 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, - 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, - 0x51, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xa8, 0x09, 0x0a, 0x09, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, - 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x48, 0x54, 0x54, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x48, 0x54, 0x54, 0x50, - 0x12, 0x38, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, - 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, - 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, - 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, - 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, - 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x1e, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, - 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, - 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, - 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, - 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, - 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, - 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, - 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, - 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, - 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, - 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, - 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, - 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, - 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, - 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, - 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, - 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, - 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x4f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x51, 0x0a, 0x0b, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, - 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, + 0x7a, 0x65, 0x1a, 0x51, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/pbservice/healthcheck.proto b/proto/pbservice/healthcheck.proto index 6d709d874..67629ba98 100644 --- a/proto/pbservice/healthcheck.proto +++ b/proto/pbservice/healthcheck.proto @@ -62,6 +62,7 @@ message HealthCheckDefinition { map Header = 3; string Method = 4; string Body = 18; + bool DisableRedirects = 22; string TCP = 5; // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto google.protobuf.Duration Interval = 6; @@ -110,6 +111,7 @@ message CheckType { map Header = 20; string Method = 7; string Body = 26; + bool DisableRedirects = 31; string TCP = 8; // mog: func-to=structs.DurationFromProto func-from=structs.DurationToProto google.protobuf.Duration Interval = 9; diff --git a/website/content/api-docs/agent/check.mdx b/website/content/api-docs/agent/check.mdx index e0b69258f..e3f50f2c3 100644 --- a/website/content/api-docs/agent/check.mdx +++ b/website/content/api-docs/agent/check.mdx @@ -206,6 +206,9 @@ The table below shows this endpoint's support for - `Body` `(string: "")` - Specifies a body that should be sent with `HTTP` checks. +- `DisableRedirects` `(bool: false)` - Specifies whether to disable following HTTP + redirects when performing an HTTP check. + - `Header` `(map[string][]string: {})` - Specifies a set of headers that should be set for `HTTP` checks. Each header can have multiple values. @@ -273,6 +276,7 @@ The table below shows this endpoint's support for "Method": "POST", "Header": { "Content-Type": ["application/json"] }, "Body": "{\"check\":\"mem\"}", + "DisableRedirects": true, "TCP": "example.com:22", "Interval": "10s", "Timeout": "5s", diff --git a/website/content/docs/discovery/checks.mdx b/website/content/docs/discovery/checks.mdx index 61fec921a..64dc3de11 100644 --- a/website/content/docs/discovery/checks.mdx +++ b/website/content/docs/discovery/checks.mdx @@ -57,6 +57,7 @@ There are several different kinds of checks: fields can be set through the `header` field which is a map of lists of strings, e.g. `{"x-foo": ["bar", "baz"]}`. By default, HTTP checks will be configured with a request timeout equal to 10 seconds. + It is possible to configure a custom HTTP check timeout value by specifying the `timeout` field in the check definition. The output of the check is limited to roughly 4KB. Responses larger than this will be truncated. @@ -66,6 +67,9 @@ There are several different kinds of checks: automatically from the URL if it uses a hostname (as opposed to an IP address); the value can be overridden by setting `tls_server_name`. + Consul follows HTTP redirects by default. Set the `disable_redirects` field to + `true` to disable redirects. 
+ - `TCP + Interval` - These checks make a TCP connection attempt to the specified IP/hostname and port, waiting `interval` amount of time between attempts (e.g. 30 seconds). If no hostname @@ -185,6 +189,7 @@ check = { Content-Type = ["application/json"] } body = "{\"method\":\"health\"}" + disable_redirects = true interval = "10s" timeout = "1s" } From d3c71d3e01cb89d87f782d10ae102d534b93268c Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Tue, 5 Apr 2022 10:48:26 -0700 Subject: [PATCH 093/785] Update branch list Signed-off-by: Mark Anderson --- .release/ci.hcl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.release/ci.hcl b/.release/ci.hcl index 746e7476c..8452ef3b2 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -11,10 +11,10 @@ project "consul" { repository = "consul" release_branches = [ "main", - "release/1.8.x", "release/1.9.x", "release/1.10.x", - "release/1.11.x" + "release/1.11.x", + "release/1.12.x", ] } } @@ -227,4 +227,4 @@ event "promote-production-packaging" { notification { on = "always" } -} \ No newline at end of file +} From 6bdde40d5e3949b407e1f2cf7655c53f898e3c88 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Wed, 6 Apr 2022 11:37:08 -0700 Subject: [PATCH 094/785] lower log to trace (#12708) --- agent/rpc/middleware/interceptors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/rpc/middleware/interceptors.go b/agent/rpc/middleware/interceptors.go index c7ac72f35..e57ab647d 100644 --- a/agent/rpc/middleware/interceptors.go +++ b/agent/rpc/middleware/interceptors.go @@ -54,7 +54,7 @@ func (r *RequestRecorder) Record(requestName string, rpcType string, start time. // math.MaxInt64 < math.MaxFloat32 is true so we should be good! r.recorderFunc(metricRPCRequest, float32(elapsed), labels) - r.Logger.Debug(requestLogName, + r.Logger.Trace(requestLogName, "method", requestName, "errored", respErrored, "request_type", reqType, From 0f68bf879a869f93861c48084732c2db3efc1482 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Wed, 6 Apr 2022 14:33:05 -0700 Subject: [PATCH 095/785] [rpc/middleware][consul] plumb intercept off, add server level happy test (#12692) --- agent/consul/client_test.go | 7 +- agent/consul/options.go | 8 + agent/consul/server.go | 48 +++-- agent/consul/server_test.go | 226 +++++++++++++++++++++- agent/rpc/middleware/interceptors.go | 6 +- agent/rpc/middleware/interceptors_test.go | 14 +- agent/setup.go | 4 + 7 files changed, 287 insertions(+), 26 deletions(-) diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index 06ef80efe..d593f5aa9 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/consul/agent/grpc/private/resolver" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/sdk/freeport" @@ -542,8 +543,10 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { DialingFromServer: true, DialingFromDatacenter: c.Datacenter, }), - LeaderForwarder: builder, - EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c), + LeaderForwarder: builder, + NewRequestRecorderFunc: middleware.NewRequestRecorder, + GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, + EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c), } } diff --git a/agent/consul/options.go b/agent/consul/options.go index 3440b0245..e253864a5 100644 --- 
a/agent/consul/options.go +++ b/agent/consul/options.go @@ -1,11 +1,13 @@ package consul import ( + "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/tlsutil" ) @@ -18,6 +20,12 @@ type Deps struct { ConnPool *pool.ConnPool GRPCConnPool GRPCClientConner LeaderForwarder LeaderForwarder + // GetNetRPCInterceptorFunc, if not nil, sets the net/rpc rpc.ServerServiceCallInterceptor on + // the server side to record metrics around the RPC requests. If nil, no interceptor is added to + // the rpc server. + GetNetRPCInterceptorFunc func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor + // NewRequestRecorderFunc provides a middleware.RequestRecorder for the server to use; it cannot be nil + NewRequestRecorderFunc func(logger hclog.Logger) *middleware.RequestRecorder EnterpriseDeps } diff --git a/agent/consul/server.go b/agent/consul/server.go index c48204bb5..3ec3d61dd 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -379,24 +379,40 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve serverLogger := flat.Logger.NamedIntercept(logging.ConsulServer) loggers := newLoggerStore(serverLogger) - recorder := middleware.NewRequestRecorder(serverLogger) + var recorder *middleware.RequestRecorder + if flat.NewRequestRecorderFunc == nil { + return nil, fmt.Errorf("cannot initialize server without an RPC request recorder provider") + } + recorder = flat.NewRequestRecorderFunc(serverLogger) + if recorder == nil { + return nil, fmt.Errorf("cannot initialize server without a non nil RPC request recorder") + } + + var rpcServer, insecureRPCServer *rpc.Server + if flat.GetNetRPCInterceptorFunc == nil { + rpcServer = rpc.NewServer() + insecureRPCServer = rpc.NewServer() + } else { + rpcServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) + insecureRPCServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) + } + // Create server. 
s := &Server{ - config: config, - tokens: flat.Tokens, - connPool: flat.ConnPool, - grpcConnPool: flat.GRPCConnPool, - eventChLAN: make(chan serf.Event, serfEventChSize), - eventChWAN: make(chan serf.Event, serfEventChSize), - logger: serverLogger, - loggers: loggers, - leaveCh: make(chan struct{}), - reconcileCh: make(chan serf.Member, reconcileChSize), - router: flat.Router, - rpcRecorder: recorder, - // TODO(rpc-metrics-improv): consider pulling out the interceptor from config in order to isolate testing - rpcServer: rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(middleware.GetNetRPCInterceptor(recorder))), - insecureRPCServer: rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(middleware.GetNetRPCInterceptor(recorder))), + config: config, + tokens: flat.Tokens, + connPool: flat.ConnPool, + grpcConnPool: flat.GRPCConnPool, + eventChLAN: make(chan serf.Event, serfEventChSize), + eventChWAN: make(chan serf.Event, serfEventChSize), + logger: serverLogger, + loggers: loggers, + leaveCh: make(chan struct{}), + reconcileCh: make(chan serf.Member, reconcileChSize), + router: flat.Router, + rpcRecorder: recorder, + rpcServer: rpcServer, + insecureRPCServer: insecureRPCServer, tlsConfigurator: flat.TLSConfigurator, publicGRPCServer: publicGRPCServer, reassertLeaderCh: make(chan chan error), diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 6f953dd1c..5c06fb4d9 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -5,16 +5,20 @@ import ( "fmt" "net" "os" + "reflect" "strings" "sync/atomic" "testing" "time" + "github.com/armon/go-metrics" "github.com/google/tcpproxy" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/memberlist" "github.com/hashicorp/raft" "google.golang.org/grpc" + "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/go-uuid" @@ -254,6 +258,10 @@ func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToke } func newServer(t *testing.T, c *Config) (*Server, error) { + return newServerWithDeps(t, c, newDefaultDeps(t, c)) +} + +func newServerWithDeps(t *testing.T, c *Config, deps Deps) (*Server, error) { // chain server up notification oldNotify := c.NotifyListen up := make(chan struct{}) @@ -264,7 +272,8 @@ func newServer(t *testing.T, c *Config) (*Server, error) { } } - srv, err := NewServer(c, newDefaultDeps(t, c), grpc.NewServer()) + srv, err := NewServer(c, deps, grpc.NewServer()) + if err != nil { return nil, err } @@ -1130,6 +1139,221 @@ func TestServer_RPC(t *testing.T) { } } +// TestServer_RPC_MetricsIntercept_Off proves that we can turn off net/rpc interceptors all together. 
+func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + storage := make(map[string]float32) + keyMakingFunc := func(key []string, labels []metrics.Label) string { + allKey := strings.Join(key, "+") + + for _, label := range labels { + if label.Name == "method" { + allKey = allKey + "+" + label.Value + } + } + + return allKey + } + + simpleRecorderFunc := func(key []string, val float32, labels []metrics.Label) { + storage[keyMakingFunc(key, labels)] = val + } + + t.Run("test no net/rpc interceptor metric with nil func", func(t *testing.T) { + _, conf := testServerConfig(t) + deps := newDefaultDeps(t, conf) + + // "disable" metrics net/rpc interceptor + deps.GetNetRPCInterceptorFunc = nil + // "hijack" the rpc recorder for asserts; + // note that there will be "internal" net/rpc calls made + // that will still show up; those don't go thru the net/rpc interceptor; + // see consul.agent.rpc.middleware.RPCTypeInternal for context + deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + return &middleware.RequestRecorder{ + Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), + RecorderFunc: simpleRecorderFunc, + } + } + + s1, err := NewServer(conf, deps, grpc.NewServer()) + if err != nil { + t.Fatalf("err: %v", err) + } + t.Cleanup(func() { s1.Shutdown() }) + + var out struct{} + if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil { + t.Fatalf("err: %v", err) + } + + key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, []metrics.Label{{Name: "method", Value: "Status.Ping"}}) + + if _, ok := storage[key]; ok { + t.Fatalf("Did not expect to find key %s in the metrics log, ", key) + } + }) + + t.Run("test no net/rpc interceptor metric with func that gives nil", func(t *testing.T) { + _, conf := testServerConfig(t) + deps := newDefaultDeps(t, conf) + + // "hijack" the rpc recorder for asserts; + // note that there will be "internal" net/rpc calls made + // that will still show up; those don't go thru the net/rpc interceptor; + // see consul.agent.rpc.middleware.RPCTypeInternal for context + deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + return &middleware.RequestRecorder{ + Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), + RecorderFunc: simpleRecorderFunc, + } + } + + deps.GetNetRPCInterceptorFunc = func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor { + return nil + } + + s2, err := NewServer(conf, deps, grpc.NewServer()) + if err != nil { + t.Fatalf("err: %v", err) + } + t.Cleanup(func() { s2.Shutdown() }) + if err != nil { + t.Fatalf("err: %v", err) + } + + var out struct{} + if err := s2.RPC("Status.Ping", struct{}{}, &out); err != nil { + t.Fatalf("err: %v", err) + } + + key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, []metrics.Label{{Name: "method", Value: "Status.Ping"}}) + + if _, ok := storage[key]; ok { + t.Fatalf("Did not expect to find key %s in the metrics log, ", key) + } + }) +} + +// TestServer_RPC_RequestRecorder proves that we cannot make a server without a valid RequestRecorder provider func +// or a non nil RequestRecorder. 
+func TestServer_RPC_RequestRecorder(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Run("test nil func provider", func(t *testing.T) { + _, conf := testServerConfig(t) + deps := newDefaultDeps(t, conf) + deps.NewRequestRecorderFunc = nil + + s1, err := NewServer(conf, deps, grpc.NewServer()) + + require.Error(t, err, "need err when provider func is nil") + require.Equal(t, err.Error(), "cannot initialize server without an RPC request recorder provider") + + t.Cleanup(func() { + if s1 != nil { + s1.Shutdown() + } + }) + }) + + t.Run("test nil RequestRecorder", func(t *testing.T) { + _, conf := testServerConfig(t) + deps := newDefaultDeps(t, conf) + deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + return nil + } + + s2, err := NewServer(conf, deps, grpc.NewServer()) + + require.Error(t, err, "need err when RequestRecorder is nil") + require.Equal(t, err.Error(), "cannot initialize server without a non nil RPC request recorder") + + t.Cleanup(func() { + if s2 != nil { + s2.Shutdown() + } + }) + }) +} + +// TestServer_RPC_MetricsIntercept mocks a request recorder and asserts that RPC calls are observed. +func TestServer_RPC_MetricsIntercept(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + _, conf := testServerConfig(t) + deps := newDefaultDeps(t, conf) + + // The method used to record metric observations here is similar to that used in + // interceptors_test.go; at present, we don't have a need to lock yet but if we do + // we can imitate that set up further or just factor it out as a "mock" metrics backend + storage := make(map[string]float32) + keyMakingFunc := func(key []string, labels []metrics.Label) string { + allKey := strings.Join(key, "+") + + for _, label := range labels { + allKey = allKey + "+" + label.Value + } + + return allKey + } + + simpleRecorderFunc := func(key []string, val float32, labels []metrics.Label) { + storage[keyMakingFunc(key, labels)] = val + } + deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + return &middleware.RequestRecorder{ + Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), + RecorderFunc: simpleRecorderFunc, + } + } + + deps.GetNetRPCInterceptorFunc = func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor { + return func(reqServiceMethod string, argv, replyv reflect.Value, handler func() error) { + reqStart := time.Now() + + err := handler() + + recorder.Record(reqServiceMethod, "test", reqStart, argv.Interface(), err != nil) + } + } + + s, err := newServerWithDeps(t, conf, deps) + if err != nil { + t.Fatalf("err: %v", err) + } + defer s.Shutdown() + testrpc.WaitForTestAgent(t, s.RPC, "dc1") + + // asserts + t.Run("test happy path for metrics interceptor", func(t *testing.T) { + var out struct{} + if err := s.RPC("Status.Ping", struct{}{}, &out); err != nil { + t.Fatalf("err: %v", err) + } + + expectedLabels := []metrics.Label{ + {Name: "method", Value: "Status.Ping"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "read"}, + {Name: "rpc_type", Value: "test"}, + } + + key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, expectedLabels) + + if _, ok := storage[key]; !ok { + t.Fatalf("Did not find key %s in the metrics log, ", key) + } + }) +} + func TestServer_JoinLAN_TLS(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/rpc/middleware/interceptors.go b/agent/rpc/middleware/interceptors.go index 
e57ab647d..ba6747c3a 100644 --- a/agent/rpc/middleware/interceptors.go +++ b/agent/rpc/middleware/interceptors.go @@ -34,11 +34,11 @@ var OneTwelveRPCSummary = []prometheus.SummaryDefinition{ type RequestRecorder struct { Logger hclog.Logger - recorderFunc func(key []string, val float32, labels []metrics.Label) + RecorderFunc func(key []string, val float32, labels []metrics.Label) } func NewRequestRecorder(logger hclog.Logger) *RequestRecorder { - return &RequestRecorder{Logger: logger, recorderFunc: metrics.AddSampleWithLabels} + return &RequestRecorder{Logger: logger, RecorderFunc: metrics.AddSampleWithLabels} } func (r *RequestRecorder) Record(requestName string, rpcType string, start time.Time, request interface{}, respErrored bool) { @@ -53,7 +53,7 @@ func (r *RequestRecorder) Record(requestName string, rpcType string, start time. } // math.MaxInt64 < math.MaxFloat32 is true so we should be good! - r.recorderFunc(metricRPCRequest, float32(elapsed), labels) + r.RecorderFunc(metricRPCRequest, float32(elapsed), labels) r.Logger.Trace(requestLogName, "method", requestName, "errored", respErrored, diff --git a/agent/rpc/middleware/interceptors_test.go b/agent/rpc/middleware/interceptors_test.go index 23d764962..63fbefecb 100644 --- a/agent/rpc/middleware/interceptors_test.go +++ b/agent/rpc/middleware/interceptors_test.go @@ -18,7 +18,7 @@ type obs struct { labels []metrics.Label } -// recorderStore acts as an in-mem mock storage for all the RequestRecorder.Record() recorderFunc calls. +// recorderStore acts as an in-mem mock storage for all the RequestRecorder.Record() RecorderFunc calls. type recorderStore struct { lock sync.Mutex store map[string]obs @@ -59,9 +59,11 @@ func (wr writeRequest) IsRead() bool { // TestRequestRecorder_SimpleOK tests that the RequestRecorder can record a simple request. func TestRequestRecorder_SimpleOK(t *testing.T) { + t.Parallel() + r := RequestRecorder{ Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), - recorderFunc: simpleRecorderFunc, + RecorderFunc: simpleRecorderFunc, } start := time.Now() @@ -82,9 +84,11 @@ func TestRequestRecorder_SimpleOK(t *testing.T) { // TestRequestRecorder_ReadRequest tests that RequestRecorder can record a read request AND a responseErrored arg. func TestRequestRecorder_ReadRequest(t *testing.T) { + t.Parallel() + r := RequestRecorder{ Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), - recorderFunc: simpleRecorderFunc, + RecorderFunc: simpleRecorderFunc, } start := time.Now() @@ -104,9 +108,11 @@ func TestRequestRecorder_ReadRequest(t *testing.T) { // TestRequestRecorder_WriteRequest tests that RequestRecorder can record a write request. 
func TestRequestRecorder_WriteRequest(t *testing.T) {
+ t.Parallel()
+
r := RequestRecorder{
Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}),
- recorderFunc: simpleRecorderFunc,
+ RecorderFunc: simpleRecorderFunc,
}
start := time.Now()
diff --git a/agent/setup.go b/agent/setup.go
index 0799c472a..322f170b2 100644
--- a/agent/setup.go
+++ b/agent/setup.go
@@ -25,6 +25,7 @@ import (
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
+ "github.com/hashicorp/consul/agent/rpc/middleware"
"github.com/hashicorp/consul/agent/submatview"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/agent/xds"
@@ -151,6 +152,9 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
return d, err
}
+ d.NewRequestRecorderFunc = middleware.NewRequestRecorder
+ d.GetNetRPCInterceptorFunc = middleware.GetNetRPCInterceptor
+
return d, nil
}

From 6bf3de8e52145319ccbffaa644b8e3b41e670170 Mon Sep 17 00:00:00 2001
From: John Murret
Date: Wed, 6 Apr 2022 15:54:27 -0600
Subject: [PATCH 096/785] k8s docs - ACLs refactor - Updating terminating gateway documentation to call out updating the role rather than the token with the policy (#12612)

* k8s docs - ACLs refactor - Updating terminating gateway documentation to call out updating the role rather than the token with the policy

* Modifying role and policy names based on naming convention change.
---
.../docs/k8s/connect/terminating-gateways.mdx | 23 ++++++++-----------
1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx
index c4a90a923..1522035dd 100644
--- a/website/content/docs/k8s/connect/terminating-gateways.mdx
+++ b/website/content/docs/k8s/connect/terminating-gateways.mdx
@@ -138,16 +138,13 @@ $ curl --request PUT --header "X-Consul-Token: $CONSUL_HTTP_TOKEN" --data @exter
true
```

-### Update terminating gateway ACL token if ACLs are enabled
+### Update terminating gateway ACL role if ACLs are enabled

-If ACLs are enabled, update the terminating gateway acl token to have `service: write` permissions on all of the services
+If ACLs are enabled, update the terminating gateway acl role to have `service: write` permissions on all of the services
being represented by the gateway:

- Create a new policy that includes these permissions
-- Update the existing token to include the new policy
-
-~> The CLI command should be run with the `-merge-policies`, `-merge-roles` and `-merge-service-identities` so
-nothing is removed from the terminating gateway token
+- Update the existing role to include the new policy



@@ -174,28 +171,28 @@ service "example-https" {
Now fetch the ID of the terminating gateway token

```shell-session
-consul acl token list | grep -B 6 -- "- terminating-gateway-terminating-gateway-token" | grep AccessorID
+consul acl role list | grep -B 6 -- "- RELEASE_NAME-terminating-gateway-policy" | grep ID

-AccessorID:
+ID:
```

Update the terminating gateway acl token with the new policy

```shell-session
-$ consul acl token update -id -policy-name example-https-write-policy -merge-policies -merge-roles -merge-service-identities
-AccessorID:
+$ consul acl role update -id -policy-name example-https-write-policy
+AccessorID:
SecretID:
-Description: terminating-gateway-terminating-gateway-token Token
+Description: RELEASE_NAME-terminating-gateway-acl-role
Local: true
Create Time: 2021-01-08 21:18:47.957450486 +0000
UTC Policies: - 63bf1d9b-a87d-8672-ddcb-d25e2d88adb8 - terminating-gateway-terminating-gateway-token + 63bf1d9b-a87d-8672-ddcb-d25e2d88adb8 - RELEASE_NAME-terminating-gateway-policy f63d1ae6-ffe7-44bd-bf7a-704a86939a63 - example-https-write-policy ``` ### Create the configuration entry for the terminating gateway -Once the tokens have been updated, create the [TerminatingGateway](/docs/connect/config-entries/terminating-gateway) +Once the roles have been updated, create the [TerminatingGateway](/docs/connect/config-entries/terminating-gateway) resource to configure the terminating gateway: From 264a3ed39e4172c20941fe998287085e58601d27 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 7 Apr 2022 11:34:24 +0100 Subject: [PATCH 097/785] ui: Amend UI branching docs (#12705) --- ui/packages/consul-ui/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ui/packages/consul-ui/README.md b/ui/packages/consul-ui/README.md index 88e79cba4..c87923c83 100644 --- a/ui/packages/consul-ui/README.md +++ b/ui/packages/consul-ui/README.md @@ -109,7 +109,11 @@ See [./docs/index.mdx](./docs/index.mdx#environment-variables) ### Branching -Follow a `ui/**/**` branch naming pattern. This branch naming pattern allows front-end focused builds, such as FE tests, to run automatically in Pull Requests. It also adds the `theme/ui` label to Pull Requests. +We follow a `ui/**/**` branch naming pattern. This branch naming pattern allows +front-end focused builds, such as FE tests, to run automatically in Pull +Requests. Please note this only works if you are a member of the HashiCorp +GitHub Org. If you are an external contributor these tests won't run and will +instead be run by a member of our team during review. Examples: - `ui/feature/add...` From 9516b96d9272ace457178baa75e7ca4ed84d7b74 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 7 Apr 2022 11:35:49 +0100 Subject: [PATCH 098/785] ui: Ignore Service/Node permissions for Overview just use operator (#12693) --- ui/packages/consul-ui/app/abilities/overview.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ui/packages/consul-ui/app/abilities/overview.js b/ui/packages/consul-ui/app/abilities/overview.js index 0b9ee6461..eba021772 100644 --- a/ui/packages/consul-ui/app/abilities/overview.js +++ b/ui/packages/consul-ui/app/abilities/overview.js @@ -1,8 +1,9 @@ import BaseAbility from './base'; export default class OverviewAbility extends BaseAbility { + resource = 'operator'; + segmented = false; get canAccess() { - return ['read services', 'read nodes', 'read license'] - .some(item => this.permissions.can(item)) + return this.canRead; } } From 34478800912b089cb2334e8b04784f6281196e15 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Thu, 7 Apr 2022 10:48:48 -0400 Subject: [PATCH 099/785] Enable running autopilot state updates on all servers (#12617) * Fixes a lint warning about t.Errorf not supporting %w * Enable running autopilot on all servers On the non-leader servers all they do is update the state and do not attempt any modifications. * Fix the RPC conn limiting tests Technically they were relying on racey behavior before. Now they should be reliable. 
--- .changelog/12617.txt | 9 ++++ agent/consul/autopilot.go | 47 ++++++++++++--------- agent/consul/leader.go | 6 +-- agent/consul/operator_autopilot_endpoint.go | 9 +--- agent/consul/rpc_test.go | 9 ++-- agent/consul/server.go | 4 ++ agent/metrics_test.go | 4 +- go.mod | 2 +- go.sum | 4 +- website/content/docs/agent/telemetry.mdx | 6 +-- 10 files changed, 56 insertions(+), 44 deletions(-) create mode 100644 .changelog/12617.txt diff --git a/.changelog/12617.txt b/.changelog/12617.txt new file mode 100644 index 000000000..25ae7f9ec --- /dev/null +++ b/.changelog/12617.txt @@ -0,0 +1,9 @@ +```release-note:improvement +autopilot: Autopilot state is now tracked on Raft followers in addition to the leader. +Stale queries may be used to query for the non-leaders state. +``` + +```release-note:improvement +autopilot: The `autopilot.healthy` and `autopilot.failure_tolerance` metrics are now +regularly emitted by all servers. +``` diff --git a/agent/consul/autopilot.go b/agent/consul/autopilot.go index 8d17e4948..27471b533 100644 --- a/agent/consul/autopilot.go +++ b/agent/consul/autopilot.go @@ -9,10 +9,10 @@ import ( "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" "github.com/hashicorp/serf/serf" - "math" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/types" ) @@ -33,7 +33,7 @@ type AutopilotDelegate struct { } func (d *AutopilotDelegate) AutopilotConfig() *autopilot.Config { - return d.server.getOrCreateAutopilotConfig().ToAutopilotLibraryConfig() + return d.server.getAutopilotConfigOrDefault().ToAutopilotLibraryConfig() } func (d *AutopilotDelegate) KnownServers() map[raft.ServerID]*autopilot.Server { @@ -45,23 +45,11 @@ func (d *AutopilotDelegate) FetchServerStats(ctx context.Context, servers map[ra } func (d *AutopilotDelegate) NotifyState(state *autopilot.State) { - // emit metrics if we are the leader regarding overall healthiness and the failure tolerance - if d.server.raft.State() == raft.Leader { - metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(state.FailureTolerance)) - if state.Healthy { - metrics.SetGauge([]string{"autopilot", "healthy"}, 1) - } else { - metrics.SetGauge([]string{"autopilot", "healthy"}, 0) - } + metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(state.FailureTolerance)) + if state.Healthy { + metrics.SetGauge([]string{"autopilot", "healthy"}, 1) } else { - - // if we are not a leader, emit NaN per - // https://www.consul.io/docs/agent/telemetry#autopilot - metrics.SetGauge([]string{"autopilot", "healthy"}, float32(math.NaN())) - - // also emit NaN for failure tolerance to be backwards compatible - metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(math.NaN())) - + metrics.SetGauge([]string{"autopilot", "healthy"}, 0) } } @@ -84,10 +72,8 @@ func (s *Server) initAutopilot(config *Config) { autopilot.WithReconcileInterval(config.AutopilotInterval), autopilot.WithUpdateInterval(config.ServerHealthInterval), autopilot.WithPromoter(s.autopilotPromoter()), + autopilot.WithReconciliationDisabled(), ) - - metrics.SetGauge([]string{"autopilot", "healthy"}, float32(math.NaN())) - metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(math.NaN())) } func (s *Server) autopilotServers() map[raft.ServerID]*autopilot.Server { @@ -154,3 +140,22 @@ func (s *Server) autopilotServerFromMetadata(srv *metadata.Server) (*autopilot.S return server, nil } + +func (s *Server) 
getAutopilotConfigOrDefault() *structs.AutopilotConfig { + logger := s.loggers.Named(logging.Autopilot) + state := s.fsm.State() + _, config, err := state.AutopilotConfig() + if err != nil { + logger.Error("failed to get config", "error", err) + return nil + } + + if config != nil { + return config + } + + // autopilot may start running prior to there ever being a leader + // and having an autopilot configuration created. In that case + // use the one from the local configuration for now. + return s.config.AutopilotConfig +} diff --git a/agent/consul/leader.go b/agent/consul/leader.go index f40faed42..456fbec1e 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -297,7 +297,7 @@ func (s *Server) establishLeadership(ctx context.Context) error { } s.getOrCreateAutopilotConfig() - s.autopilot.Start(ctx) + s.autopilot.EnableReconciliation() s.startConfigReplication(ctx) @@ -350,9 +350,7 @@ func (s *Server) revokeLeadership() { s.resetConsistentReadReady() - // Stop returns a chan and we want to block until it is closed - // which indicates that autopilot is actually stopped. - <-s.autopilot.Stop() + s.autopilot.DisableReconciliation() } // initializeACLs is used to setup the ACLs if we are the leader diff --git a/agent/consul/operator_autopilot_endpoint.go b/agent/consul/operator_autopilot_endpoint.go index 0b3aee53f..babbb7956 100644 --- a/agent/consul/operator_autopilot_endpoint.go +++ b/agent/consul/operator_autopilot_endpoint.go @@ -2,6 +2,7 @@ package consul import ( "fmt" + autopilot "github.com/hashicorp/raft-autopilot" "github.com/hashicorp/serf/serf" @@ -75,10 +76,6 @@ func (op *Operator) AutopilotSetConfiguration(args *structs.AutopilotSetConfigRe // ServerHealth is used to get the current health of the servers. func (op *Operator) ServerHealth(args *structs.DCSpecificRequest, reply *structs.AutopilotHealthReply) error { - // This must be sent to the leader, so we fix the args since we are - // re-using a structure where we don't support all the options. - args.RequireConsistent = true - args.AllowStale = false if done, err := op.srv.ForwardRPC("Operator.ServerHealth", args, reply); done { return err } @@ -143,10 +140,6 @@ func (op *Operator) ServerHealth(args *structs.DCSpecificRequest, reply *structs } func (op *Operator) AutopilotState(args *structs.DCSpecificRequest, reply *autopilot.State) error { - // This must be sent to the leader, so we fix the args since we are - // re-using a structure where we don't support all the options. 
- args.RequireConsistent = true - args.AllowStale = false if done, err := op.srv.ForwardRPC("Operator.AutopilotState", args, reply); done { return err } diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index 0e236eed5..5e1323a1e 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -817,7 +817,8 @@ func TestRPC_RPCMaxConnsPerClient(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.RPCMaxConnsPerClient = 2 + // we have to set this to 3 because autopilot is going to keep a connection open + c.RPCMaxConnsPerClient = 3 if tc.tlsEnabled { c.TLSConfig.InternalRPC.CAFile = "../../test/hostname/CertAuth.crt" c.TLSConfig.InternalRPC.CertFile = "../../test/hostname/Alice.crt" @@ -831,6 +832,8 @@ func TestRPC_RPCMaxConnsPerClient(t *testing.T) { defer os.RemoveAll(dir1) defer s1.Shutdown() + waitForLeaderEstablishment(t, s1) + // Connect to the server with bare TCP conn1 := connectClient(t, s1, tc.magicByte, tc.tlsEnabled, true, "conn1") defer conn1.Close() @@ -847,7 +850,7 @@ func TestRPC_RPCMaxConnsPerClient(t *testing.T) { addr := conn1.RemoteAddr() conn1.Close() retry.Run(t, func(r *retry.R) { - if n := s1.rpcConnLimiter.NumOpen(addr); n >= 2 { + if n := s1.rpcConnLimiter.NumOpen(addr); n >= 3 { r.Fatal("waiting for open conns to drop") } }) @@ -1736,7 +1739,7 @@ func rpcBlockingQueryTestHarness( return case err := <-errCh: if err != nil { - t.Errorf("[%d] unexpected error: %w", i, err) + t.Errorf("[%d] unexpected error: %v", i, err) return } } diff --git a/agent/consul/server.go b/agent/consul/server.go index 3ec3d61dd..a3effba97 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -674,6 +674,10 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve go s.listen(listener) } + // start autopilot - this must happen after the RPC listeners get setup + // or else it may block + s.autopilot.Start(&lib.StopChannelContext{StopCh: s.shutdownCh}) + // Start the metrics handlers. 
go s.updateMetrics() diff --git a/agent/metrics_test.go b/agent/metrics_test.go index 2aedc0180..448694e3e 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -250,8 +250,8 @@ func TestHTTPHandlers_AgentMetrics_ConsulAutopilot_Prometheus(t *testing.T) { respRec := httptest.NewRecorder() recordPromMetrics(t, a, respRec) - assertMetricExistsWithValue(t, respRec, "agent_2_autopilot_healthy", "NaN") - assertMetricExistsWithValue(t, respRec, "agent_2_autopilot_failure_tolerance", "NaN") + assertMetricExistsWithValue(t, respRec, "agent_2_autopilot_healthy", "1") + assertMetricExistsWithValue(t, respRec, "agent_2_autopilot_failure_tolerance", "0") }) } diff --git a/go.mod b/go.mod index e99a098ba..47b494c46 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 github.com/hashicorp/memberlist v0.3.1 github.com/hashicorp/raft v1.3.6 - github.com/hashicorp/raft-autopilot v0.1.5 + github.com/hashicorp/raft-autopilot v0.1.6 github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 // indirect github.com/hashicorp/raft-boltdb/v2 v2.2.2 github.com/hashicorp/serf v0.9.7 diff --git a/go.sum b/go.sum index bf61a6bf0..fb093ee1e 100644 --- a/go.sum +++ b/go.sum @@ -363,8 +363,8 @@ github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7 github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto= github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft-autopilot v0.1.5 h1:onEfMH5uHVdXQqtas36zXUHEZxLdsJVu/nXHLcLdL1I= -github.com/hashicorp/raft-autopilot v0.1.5/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= +github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 h1:Ye8SofeDHJzu9xvvaMmpMkqHELWW7rTcXwdUR0CWW48= diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 4f4ef8983..7296ed208 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -94,7 +94,7 @@ These are some metrics emitted that can help you understand the health of your c | Metric Name | Description | Unit | Type | | :------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------- | :---- | -| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. All non-leader servers will report `NaN`. | health state | gauge | +| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | health state | gauge | **Why it's important:** Autopilot can expose the overall health of your cluster with a simple boolean. 
@@ -592,8 +592,8 @@ These metrics give insight into the health of the cluster as a whole. | `consul.serf.member.left` | Increments when an agent leaves the cluster. | leaves / interval | counter | | `consul.serf.events` | Increments when an agent processes an [event](/commands/event). Consul uses events internally so there may be additional events showing in telemetry. There are also a per-event counters emitted as `consul.serf.events.`. | events / interval | counter | | `consul.serf.msgs.sent` | This metric is sample of the number of bytes of messages broadcast to the cluster. In a given time interval, the sum of this metric is the total number of bytes sent and the count is the number of messages sent. | message bytes / interval | counter | -| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers | gauge | -| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. All non-leader servers will report `NaN`. | boolean | gauge | +| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers   | gauge | +| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | boolean   | gauge | | `consul.session_ttl.active` | Tracks the active number of sessions being tracked. | sessions | gauge | | `consul.catalog.service.query.` | Increments for each catalog query for the given service. | queries | counter | | `consul.catalog.service.query-tag..` | Increments for each catalog query for the given service with the given tag. | queries | counter | From a20eed261e18c4c7abec4576e1f508dbf465cea7 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Thu, 7 Apr 2022 11:25:10 -0500 Subject: [PATCH 100/785] ci: run envoy integration tests on a real machine (#12715) --- .circleci/config.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c39652d7b..df724af17 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -840,11 +840,10 @@ jobs: - run: *notify-slack-failure envoy-integration-test-1_17_4: &ENVOY_TESTS - docker: - # We only really need bash and docker-compose which is installed on all - # Circle images but pick Go since we have to pick one of them. - - image: *GOLANG_IMAGE - parallelism: 2 + machine: + image: ubuntu-2004:202101-01 + parallelism: 4 + resource_class: medium environment: ENVOY_VERSION: "1.17.4" steps: &ENVOY_INTEGRATION_TEST_STEPS @@ -852,7 +851,7 @@ jobs: # Get go binary from workspace - attach_workspace: at: . - - setup_remote_docker + - run: *install-gotestsum # Build the consul-dev image from the already built binary - run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile . - run: From 2f7a30b393aac30a7937a1ca725409628128a654 Mon Sep 17 00:00:00 2001 From: John Murret Date: Thu, 7 Apr 2022 12:16:24 -0600 Subject: [PATCH 101/785] Update k8s docs for Vault as a Secrets Backend (#12691) * Updating k8s Vault as a Secrets Backend docs * Moving files in data-integration folder * Updating routes to moved files * Removing known limitations since we have delivered them. 
* Revise overview page to point towards the System Integration and Data Integration pages. * Updating Systems Overview page * Making corrections to Overview and Systems Integration page * Updating Data Integration page * Gossip page * Enterprise Licensepage * Bootstrap Token * Replication Token * Revisions to bootrap, replication, and enterprise license * snapshot agent page. revisiions to other data integration pages * Consul Service Mesh TLS Provider page * ServerTLS page * Spelling, grammar errors * Update website/content/docs/k8s/installation/vault/index.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/index.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/gossip.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx Co-authored-by: David Yu * Updating data center to datacenter * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * interim changes * more formatting changes * adding additional formatting changes * more formatting on systems integration page * remove TODO * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: David Yu * Update website/content/docs/k8s/installation/vault/index.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/index.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna 
Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Update website/content/docs/k8s/installation/vault/systems-integration.mdx Co-authored-by: Iryna Shustava * Adding partition token * removing dangling word * Adding missing navlink for partitions page * Adding VAULT_TOKEN documentation and a note to VAULT_ADDR about https and the possible need for the VAULT_CACERT. * Fixing broken links and ordering lists * Fixing broken links. Changing pre-requisites to prerequisites. Co-authored-by: David Yu Co-authored-by: Iryna Shustava --- package-lock.json | 5 +- .../data-integration/bootstrap-token.mdx | 102 +++++++++ .../{ => data-integration}/connect-ca.mdx | 32 ++- .../data-integration/enterprise-license.mdx | 115 ++++++++++ .../vault/{ => data-integration}/gossip.mdx | 35 ++- .../vault/data-integration/index.mdx | 160 ++++++++++++++ .../data-integration/partition-token.mdx | 101 +++++++++ .../data-integration/replication-token.mdx | 101 +++++++++ .../{ => data-integration}/server-tls.mdx | 69 +++--- .../snapshot-agent-config.mdx | 104 +++++++++ .../installation/vault/enterprise-license.mdx | 98 --------- .../docs/k8s/installation/vault/index.mdx | 129 +++-------- .../vault/systems-integration.mdx | 202 ++++++++++++++++++ website/data/docs-nav-data.json | 54 +++-- website/package-lock.json | 36 +++- 15 files changed, 1088 insertions(+), 255 deletions(-) create mode 100644 website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx rename website/content/docs/k8s/installation/vault/{ => data-integration}/connect-ca.mdx (64%) create mode 100644 website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx rename website/content/docs/k8s/installation/vault/{ => data-integration}/gossip.mdx (57%) create mode 100644 website/content/docs/k8s/installation/vault/data-integration/index.mdx create mode 100644 website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx create mode 100644 website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx rename website/content/docs/k8s/installation/vault/{ => data-integration}/server-tls.mdx (65%) create mode 100644 website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx delete mode 100644 website/content/docs/k8s/installation/vault/enterprise-license.mdx create mode 100644 website/content/docs/k8s/installation/vault/systems-integration.mdx diff --git a/package-lock.json b/package-lock.json index 48e341a09..29027cb00 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,3 +1,6 @@ { - "lockfileVersion": 1 + "name": "consul", + "lockfileVersion": 2, + "requires": true, + "packages": {} } diff --git a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx new file mode 100644 index 000000000..a3145d775 --- /dev/null +++ b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx @@ -0,0 +1,102 @@ +--- +layout: docs +page_title: Storing the ACL Bootstrap Token in Vault +description: >- + Configuring the Consul Helm chart to use an ACL bootstrap token stored in Vault. +--- + +# Storing the ACL Bootstrap Token in Vault + +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. 
Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). + +## Overview +To use an ACL bootstrap token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +## One time setup in Vault +### Store the Secret in Vault + +First, store the ACL bootstrap token in Vault: + +```shell-session +$ vault kv put secret/consul/bootstrap-token token="" +``` + +### Create a Vault policy that authorizes the desired level of access to the secret + +-> **Note:** The secret path referenced by the Vault Policy below will be your `global.acls.bootstrapToken.secretName` Helm value. + +Next, you will need to create a Vault policy that allows read access to this secret: + + + +```HCL +path "secret/data/consul/bootstrap-token" { + capabilities = ["read"] +} +``` + + + +Apply the Vault policy by issuing the `vault policy write` CLI command: + +```shell-session +$ vault policy write bootstrap-token-policy bootstrap-token-policy.hcl +``` + +## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access + +Next, you will create Kubernetes auth roles for the Consul `server-acl-init` container that runs as part of the Consul server statefulset: + +```shell-session +$ vault write auth/kubernetes/role/consul-server-acl-init \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=bootstrap-token-policy \ + ttl=1h +``` + +To find out the service account name of the Consul server-acl-init job (i.e. the Consul server service account name), +you can run the following `helm template` command with your Consul on Kubernetes values file: + +```shell-session +$ helm template --release-name ${RELEASE_NAME} -s templates/server-acl-init-serviceaccount.yaml hashicorp/consul +``` + +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart + +Now that you have configured Vault, you can configure the Consul Helm chart to +use the ACL bootstrap token in Vault: + + + +```yaml +global: + secretsBackend: + vault: + enabled: true + manageSystemACLsRole: consul-server-acl-init + acls: + bootstrapToken: + secretName: secret/data/consul/bootstrap-token + secretKey: token +``` + + + +Note that `global.acls.bootstrapToken.secretName` is the path of the secret in Vault. +This should be the same path as the one you included in your Vault policy. +`global.acls.bootstrapToken.secretKey` is the key inside the secret data. This should be the same +as the key you passed when creating the ACL bootstrap token secret in Vault.
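If you want to confirm the token was stored as expected before wiring it into the Helm chart, a quick read of the same KV path should return it. This is a sketch, not part of the guide above; the `-field=token` flag assumes the key name `token` used in the `vault kv put` command:

```shell-session
$ vault kv get -field=token secret/consul/bootstrap-token
```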
diff --git a/website/content/docs/k8s/installation/vault/connect-ca.mdx b/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx similarity index 64% rename from website/content/docs/k8s/installation/vault/connect-ca.mdx rename to website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx index bf0c707cf..ad7b9c863 100644 --- a/website/content/docs/k8s/installation/vault/connect-ca.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx @@ -11,14 +11,39 @@ description: >- Consul allows using Kubernetes auth methods to configure Connect CA. This allows for automatic token rotation once the renewal is no longer possible. -In order to create Vault auth roles for the Consul servers for this feature, ensure that the Vault Kubernetes auth method is enabled as described in [Vault Kubernetes Auth Method](/docs/k8s/installation/vault#vault-kubernetes-auth-method). -To configure [Vault as the provider](/docs/connect/ca/vault) for the Consul service certificates, +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). + + +## Overview +To use Vault as the Service Mesh Certificate Provider on Kubernetes, we will need to modify the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +## One time setup in Vault +### Store the secret in Vault + +This step does not apply to this use case, as we are not storing any secrets for the service mesh certificates; instead, we are leveraging the Vault CA as a provider to mint certificates on an ongoing basis. + +### Create a Vault policy that authorizes the desired level of access to the secret + +To configure [Vault as the provider](/docs/connect/ca/vault) for the Consul service mesh certificates, you will first need to decide on the type of policy that is suitable for you. To see the permissions that Consul would need in Vault, please see [Vault ACL policies](/docs/connect/ca/vault#vault-acl-policies) documentation. -Once you have a policy, you will need to link that policy to the Consul server service account.
+## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access + +Next, you will create Kubernetes auth roles for the Consul servers: ```shell-session $ vault write auth/kubernetes/role/consul-server \ @@ -35,6 +60,7 @@ you can run: $ helm template --release-name ${RELEASE_NAME} --show-only templates/server-serviceaccount.yaml hashicorp/consul ``` +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart Now you can configure the Consul Helm chart to use Vault as the Connect CA provider: diff --git a/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx b/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx new file mode 100644 index 000000000..8f89e4f54 --- /dev/null +++ b/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx @@ -0,0 +1,115 @@ +--- +layout: docs +page_title: Storing the Enterprise License in Vault +description: >- + Configuring the Consul Helm chart to use an enterprise license stored in Vault. +--- + +# Storing the Enterprise License in Vault + +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). + +## Overview +To use an enterprise license stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +## One time setup in Vault +### Store the Secret in Vault + +First, store the enterprise license in Vault: + +```shell-session +$ vault kv put secret/consul/license key="" +``` + +### Create a Vault policy that authorizes the desired level of access to the secret + +-> **Note:** The secret path referenced by the Vault Policy below will be your `global.enterpriseLicense.secretName` Helm value. 
+ +Next, you will need to create a policy that allows read access to this secret: + + + +```HCL +path "secret/data/consul/license" { + capabilities = ["read"] +} +``` + + + +Apply the Vault policy by issuing the `vault policy write` CLI command: + +```shell-session +$ vault policy write license-policy license-policy.hcl +``` + +## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access + +Next, you will create Kubernetes auth roles for the Consul server and client: + +```shell-session +$ vault write auth/kubernetes/role/consul-server \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=license-policy \ + ttl=1h +``` + +```shell-session +$ vault write auth/kubernetes/role/consul-client \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=license-policy \ + ttl=1h +``` + +To find out the service account names of the Consul server and client, +you can run the following `helm template` commands with your Consul on Kubernetes values file: + +- Generate Consul server service account name + ```shell-session + $ helm template --release-name ${RELEASE_NAME} -s templates/server-serviceaccount.yaml hashicorp/consul + ``` + +- Generate Consul client service account name + ```shell-session + $ helm template --release-name ${RELEASE_NAME} -s templates/client-serviceaccount.yaml hashicorp/consul + ``` + +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart + +Now that you have configured Vault, you can configure the Consul Helm chart to +use the enterprise license in Vault: + + + +```yaml +global: + secretsBackend: + vault: + enabled: true + consulServerRole: consul-server + consulClientRole: consul-client + enterpriseLicense: + secretName: secret/data/consul/license + secretKey: key +``` + + + +Note that `global.enterpriseLicense.secretName` is the path of the secret in Vault. +This should be the same path as the one you included in your Vault policy. +`global.enterpriseLicense.secretKey` is the key inside the secret data. This should be the same +as the key you passed when creating the enterprise license secret in Vault. diff --git a/website/content/docs/k8s/installation/vault/gossip.mdx b/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx similarity index 57% rename from website/content/docs/k8s/installation/vault/gossip.mdx rename to website/content/docs/k8s/installation/vault/data-integration/gossip.mdx index 184bb2d2e..e0360cc3c 100644 --- a/website/content/docs/k8s/installation/vault/gossip.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx @@ -1,29 +1,41 @@ --- layout: docs -page_title: Storing Gossip Encryption Key in Vault +page_title: Storing the Gossip Encryption Key in Vault description: >- - Configuring the Consul Helm chart to use gossip encryption key stored in Vault. + Configuring the Consul Helm chart to use a gossip encryption key stored in Vault. --- # Storing Gossip Encryption Key in Vault -To use a gossip encryption key stored in Vault we need the following: +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2.
Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). -1. Generate and store an encryption key in Vault. -1. Create policies that will allow Consul client and server to access that key. -1. Create a Kubernetes auth roles that link policies from step 2 to Kubernetes service accounts of the Consul servers and clients. +## Overview +To use a gossip encryption key stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: -## Configuring Vault +### One time setup in Vault + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. +## One time setup in Vault +### Store the Secret in Vault First, generate and store the gossip key in Vault: ```shell-session $ vault kv put secret/consul/gossip key="$(consul keygen)" ``` +### Create a Vault policy that authorizes the desired level of access to the secret + +-> **Note:** The secret path referenced by the Vault Policy below will be your `global.gossipEncryption.secretName` Helm value. Next, we will need to create a policy that allows read access to this secret: - ```HCL @@ -34,11 +46,14 @@ path "secret/data/consul/gossip" { +Apply the Vault policy by issuing the `vault policy write` CLI command: + ```shell-session $ vault policy write gossip-policy gossip-policy.hcl ``` -Prior to creating Vault auth roles for the Consul servers and clients, ensure that the Vault Kubernetes auth method is enabled as described in [Vault Kubernetes Auth Method](/docs/k8s/installation/vault#vault-kubernetes-auth-method). +## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, we will create Kubernetes auth roles for the Consul server and client: @@ -71,7 +86,7 @@ you can run the following `helm template` commands with your Consul on Kubernete $ helm template --release-name ${RELEASE_NAME} -s templates/client-serviceaccount.yaml hashicorp/consul ``` -## Deploying the Consul Helm chart +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart Now that we've configured Vault, you can configure the Consul Helm chart to use the gossip key in Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/index.mdx b/website/content/docs/k8s/installation/vault/data-integration/index.mdx new file mode 100644 index 000000000..e8dea2183 --- /dev/null +++ b/website/content/docs/k8s/installation/vault/data-integration/index.mdx @@ -0,0 +1,160 @@ +--- +layout: docs +page_title: Vault as the Secrets Backend Data Integration Overview +description: >- + Overview of the data integration aspects to using Vault as the secrets backend for Consul on Kubernetes. +--- + +# Vault as the Secrets Backend - Data Integration + +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 
+## Overview + +### General Integration Steps + +Generally, for each secret you wish to store in Vault, the process to integrate the data between Vault and Consul on Kubernetes is: + +#### One time setup in Vault + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +#### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +### Example - Gossip Encryption Key Integration + +Following the general integration steps, a more detailed workflow for integration of the [Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip) with the Vault Secrets backend would look like the following: + + +#### One time setup in Vault + 1. Store the secret in Vault. + - Save the gossip encryption key in Vault at the path `secret/consul/gossip`. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + - Create a Vault policy that you name `gossip-policy` which allows `read` access to the path `secret/consul/gossip`. + +#### setup per Consul datacenter + + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + - Both Consul servers and Consul clients need access to the gossip encryption key, so you create two Vault Kubernetes auth roles: + - A role called `consul-server` that maps the Kubernetes namespace and service account name for your consul servers to the `gossip-policy` created in [step 2](#one-time-setup-in-vault) of One time setup in Vault. + - A role called `consul-client` that maps the Kubernetes namespace and service account name for your consul clients to the `gossip-policy` created in [step 2](#one-time-setup-in-vault) of One time setup in Vault. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + - Configure the Vault Kubernetes auth roles created for the gossip encryption key: + - [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole) is set to the `consul-server` Vault Kubernetes auth role created previously. + - [`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole) is set to the `consul-client` Vault Kubernetes auth role created previously. + +## Secrets to Service Account Mapping + +At the most basic level, the goal of this configuration is to authorize a Consul on Kubernetes service account to access a secret in Vault. +Below is a mapping of Vault secrets and the Consul on Kubernetes service accounts that need to access them. +(NOTE: `Consul components` refers to all other services and jobs that are not Consul servers or clients. +It includes things like terminating gateways, ingress gateways, etc.)
+
+### Primary Datacenter
+| Secret | Service Account For | Configurable Role in Consul k8s Helm |
+| ------ | ------------------- | ------------------------------------ |
+|[ACL Bootstrap token](/docs/k8s/installation/vault/data-integration/bootstrap-token) | Consul server-acl-init job | [`global.secretsBackend.vault.manageSystemACLsRole`](/docs/k8s/helm#v-global-secretsbackend-vault-managesystemaclsrole)|
+|[ACL Partition token](/docs/k8s/installation/vault/data-integration/partition-token) | Consul server-acl-init job | [`global.secretsBackend.vault.manageSystemACLsRole`](/docs/k8s/helm#v-global-secretsbackend-vault-managesystemaclsrole)|
+|[ACL Replication token](/docs/k8s/installation/vault/data-integration/replication-token) | Consul server-acl-init job | [`global.secretsBackend.vault.manageSystemACLsRole`](/docs/k8s/helm#v-global-secretsbackend-vault-managesystemaclsrole)|
+|[Enterprise license](/docs/k8s/installation/vault/data-integration/enterprise-license) | Consul servers<br/>Consul clients | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)<br/>[`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole)|
+|[Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip) | Consul servers<br/>Consul clients | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)<br/>[`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole)|
+|[Snapshot Agent config](/docs/k8s/installation/vault/data-integration/snapshot-agent-config) | Consul snapshot agent | [`global.secretsBackend.vault.consulSnapshotAgentRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulsnapshotagentrole)|
+|[Server TLS credentials](/docs/k8s/installation/vault/data-integration/server-tls) | Consul servers<br/>Consul clients<br/>Consul components | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)<br/>[`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole)<br/>[`global.secretsBackend.vault.consulCARole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulcarole)|
+|[Service Mesh and Consul client TLS credentials](/docs/k8s/installation/vault/data-integration/connect-ca) | Consul servers | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)|
+
+### Secondary Datacenters
+The mapping for secondary datacenters is similar with the following differences:
+- There is no use of bootstrap token because ACLs would have been bootstrapped in the primary datacenter.
+- ACL Partition token is mapped to both the `server-acl-init` job and the `partition-init` job service accounts.
+- ACL Replication token is mapped to both the `server-acl-init` job and the Consul server service accounts.
+
+| Secret | Service Account For | Configurable Role in Consul k8s Helm |
+| ------ | ------------------- | ------------------------------------ |
+|[ACL Partition token](/docs/k8s/installation/vault/data-integration/partition-token) | Consul server-acl-init job<br/>Consul partition-init job | [`global.secretsBackend.vault.manageSystemACLsRole`](/docs/k8s/helm#v-global-secretsbackend-vault-managesystemaclsrole)<br/>[`global.secretsBackend.vault.adminPartitionsRole`](/docs/k8s/helm#v-global-secretsbackend-vault-adminpartitionsrole)|
+|[ACL Replication token](/docs/k8s/installation/vault/data-integration/replication-token) | Consul server-acl-init job<br/>Consul servers | [`global.secretsBackend.vault.manageSystemACLsRole`](/docs/k8s/helm#v-global-secretsbackend-vault-managesystemaclsrole)<br/>[`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)|
+|[Enterprise license](/docs/k8s/installation/vault/data-integration/enterprise-license) | Consul servers<br/>Consul clients | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)<br/>[`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole)|
+|[Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip) | Consul servers<br/>Consul clients | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)<br/>[`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole)|
+|[Snapshot Agent config](/docs/k8s/installation/vault/data-integration/snapshot-agent-config) | Consul snapshot agent | [`global.secretsBackend.vault.consulSnapshotAgentRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulsnapshotagentrole)|
+|[Server TLS credentials](/docs/k8s/installation/vault/data-integration/server-tls) | Consul servers<br/>Consul clients<br/>Consul components | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)<br/>[`global.secretsBackend.vault.consulClientRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulclientrole)<br/>[`global.secretsBackend.vault.consulCARole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulcarole)|
+|[Service Mesh and Consul client TLS credentials](/docs/k8s/installation/vault/data-integration/connect-ca) | Consul servers | [`global.secretsBackend.vault.consulServerRole`](/docs/k8s/helm#v-global-secretsbackend-vault-consulserverrole)|
+
+### Combining policies within roles
+As you can see in the table above, depending upon your needs, a Consul on Kubernetes service account may need to request more than one secret. In these cases, you will want to create one role for the Consul on Kubernetes service account that is mapped to multiple policies, each of which allows it access to a given secret.
+
+For example, if your Consul on Kubernetes servers need access to [Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip), [Consul Server TLS credentials](/docs/k8s/installation/vault/data-integration/server-tls), and [Enterprise license](/docs/k8s/installation/vault/data-integration/enterprise-license), assuming you have already saved the secrets in Vault, you would:
+1. Create a policy for each secret.
+ 1. Gossip encryption key + + + + ```HCL + path "secret/data/consul/gossip" { + capabilities = ["read"] + } + ``` + + + + ```shell-session + $ vault policy write gossip-policy gossip-policy.hcl + ``` + + 1. Consul Server TLS credentials + + + + ```HCL + path "pki/cert/ca" { + capabilities = ["read"] + } + ``` + + + + ```shell-session + $ vault policy write ca-policy ca-policy.hcl + ``` + + 1. Enterprise License + + + + ```HCL + path "secret/data/consul/license" { + capabilities = ["read"] + } + ``` + + + + ```shell-session + $ vault policy write license-policy license-policy.hcl + ``` +
+1. Create one role that maps the Consul on Kubernetes service account to the 3 policies.
+ ```shell-session + $ vault write auth/kubernetes/role/consul-server \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=gossip-policy,ca-policy,license-policy \ + ttl=1h + ``` +
+## Detailed data integration guides
+The following secrets can be stored in Vault KV secrets engine, which is meant to handle arbitrary secrets:
+- [ACL Bootstrap token](/docs/k8s/installation/vault/data-integration/bootstrap-token)
+- [ACL Partition token](/docs/k8s/installation/vault/data-integration/partition-token)
+- [ACL Replication token](/docs/k8s/installation/vault/data-integration/replication-token)
+- [Enterprise license](/docs/k8s/installation/vault/data-integration/enterprise-license)
+- [Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip)
+- [Snapshot Agent config](/docs/k8s/installation/vault/data-integration/snapshot-agent-config)
+
+The following TLS certificates and keys can be generated and managed by the Vault PKI Engine, which is meant to handle things like certificate expiration and rotation:
+- [Server TLS credentials](/docs/k8s/installation/vault/data-integration/server-tls)
+- [Service Mesh and Consul client TLS credentials](/docs/k8s/installation/vault/data-integration/connect-ca)
+
+## Secrets to Service Account Mapping
+Read through the [detailed data integration guides](#detailed-data-integration-guides) that are pertinent to your environment.
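To illustrate how a combined role like the one above ties back into the chart, here is a rough sketch of the corresponding Helm values, assuming the role is named `consul-server` and the secret paths from the individual guides (`secret/data/consul/gossip`, `secret/data/consul/license`, and the PKI CA path) are used; the detailed data integration guides remain the authoritative reference for each secret:

```yaml
global:
  secretsBackend:
    vault:
      enabled: true
      consulServerRole: consul-server
  gossipEncryption:
    secretName: secret/data/consul/gossip
    secretKey: key
  enterpriseLicense:
    secretName: secret/data/consul/license
    secretKey: key
  tls:
    enabled: true
    enableAutoEncrypt: true
    caCert:
      secretName: pki/cert/ca
```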
diff --git a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx new file mode 100644 index 000000000..0d0f9bb84 --- /dev/null +++ b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx @@ -0,0 +1,101 @@ +--- +layout: docs +page_title: Storing the ACL Partition Token in Vault +description: >- + Configuring the Consul Helm chart to use an ACL partition token stored in Vault. +--- + +# Storing the ACL Partition Token in Vault + +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). + +## Overview +To use an ACL partition token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +## One time setup in Vault +### Store the Secret in Vault + +First, store the ACL partition token in Vault: + +```shell-session +$ vault kv put secret/consul/partition-token token="" +``` + +### Create a Vault policy that authorizes the desired level of access to the secret + +-> **Note:** The secret path referenced by the Vault Policy below will be your `global.acls.partitionToken.secretName` Helm value. 
+ +Next, you will need to create a policy that allows read access to this secret: + + + +```HCL +path "secret/data/consul/partition-token" { + capabilities = ["read"] +} +``` + + + +Apply the Vault policy by issuing the `vault policy write` CLI command: + +```shell-session +$ vault policy write partition-token-policy partition-token-policy.hcl +``` + +## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access + +Next, you will create Kubernetes auth roles for the Consul `server-acl-init` job: + +```shell-session +$ vault write auth/kubernetes/role/consul-server-acl-init \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=partition-token-policy \ + ttl=1h +``` + +To find out the service account name of the Consul server, +you can run the following `helm template` command with your Consul on Kubernetes values file: + +```shell-session +$ helm template --release-name ${RELEASE_NAME} -s templates/server-acl-init-serviceaccount.yaml hashicorp/consul +``` + +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart + +Now that you have configured Vault, you can configure the Consul Helm chart to +use the ACL partition token key in Vault: + + + +```yaml +global: + secretsBackend: + vault: + enabled: true + manageSystemACLsRole: consul-server-acl-init + acls: + partitionToken: + secretName: secret/data/consul/partition-token + secretKey: token +``` + + + +Note that `global.acls.partitionToken.secretName` is the path of the secret in Vault. +This should be the same path as the one you included in your Vault policy. +`global.acls.partitionToken.secretKey` is the key inside the secret data. This should be the same +as the key you passed when creating the ACL partition token secret in Vault. diff --git a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx new file mode 100644 index 000000000..74b854748 --- /dev/null +++ b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx @@ -0,0 +1,101 @@ +--- +layout: docs +page_title: Storing the ACL Replication Token in Vault +description: >- + Configuring the Consul Helm chart to use an ACL replication token stored in Vault. +--- + +# Storing the ACL Replication Token in Vault + +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). + +## Overview +To use an ACL replication token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. 
+ +## One time setup in Vault +### Store the Secret in Vault + +First, store the ACL replication token in Vault: + +```shell-session +$ vault kv put secret/consul/replication-token token="" +``` + +### Create a Vault policy that authorizes the desired level of access to the secret + +-> **Note:** The secret path referenced by the Vault Policy below will be your `global.acls.replicationToken.secretName` Helm value. + +Next, you will need to create a policy that allows read access to this secret: + + + +```HCL +path "secret/data/consul/replication-token" { + capabilities = ["read"] +} +``` + + + +Apply the Vault policy by issuing the `vault policy write` CLI command: + +```shell-session +$ vault policy write replication-token-policy replication-token-policy.hcl +``` + +## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access + +Next, you will create Kubernetes auth roles for the Consul `server-acl-init` job: + +```shell-session +$ vault write auth/kubernetes/role/consul-server-acl-init \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=replication-token-policy \ + ttl=1h +``` + +To find out the service account name of the Consul server, +you can run the following `helm template` command with your Consul on Kubernetes values file: + +```shell-session +$ helm template --release-name ${RELEASE_NAME} -s templates/server-acl-init-serviceaccount.yaml hashicorp/consul +``` + +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart + +Now that you have configured Vault, you can configure the Consul Helm chart to +use the ACL replication token key in Vault: + + + +```yaml +global: + secretsBackend: + vault: + enabled: true + manageSystemACLsRole: consul-server-acl-init + acls: + replicationToken: + secretName: secret/data/consul/replication-token + secretKey: token +``` + + + +Note that `global.acls.replicationToken.secretName` is the path of the secret in Vault. +This should be the same path as the one you included in your Vault policy. +`global.acls.replicationToken.secretKey` is the key inside the secret data. This should be the same +as the key you passed when creating the ACL replication token secret in Vault. diff --git a/website/content/docs/k8s/installation/vault/server-tls.mdx b/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx similarity index 65% rename from website/content/docs/k8s/installation/vault/server-tls.mdx rename to website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx index d669d97b9..35b187f00 100644 --- a/website/content/docs/k8s/installation/vault/server-tls.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx @@ -1,18 +1,16 @@ --- layout: docs -page_title: Storing Server TLS certificates in Vault +page_title: Vault as the Server TLS Certificate Provider on Kubernetes description: >- Configuring the Consul Helm chart to use TLS certificates issued by Vault for the Consul server. --- -# Storing Server TLS certificates in Vault - -To use Vault to issue Server TLS certificates the following will be needed: - -1. Bootstrap the Vault PKI engine and bootstrap it with any configuration required for your infrastructure. -1. Create Vault Policies that will allow the Consul server to access the certificate issuing url. -1. Create Vault Policies that will allow the Consul components, e.g. ingress gateways, controller, to access the CA url. -1. 
Create Kubernetes auth roles that link these policies to the Kubernetes service accounts of the Consul components. +# Vault as the Server TLS Certificate Provider on Kubernetes +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +3. Complete the [Bootstrapping the PKI Engine](#bootstrapping-the-pki-engine) section. ### Bootstrapping the PKI Engine @@ -32,7 +30,9 @@ which also uses an intermediate signing authority. $ vault secrets tune -max-lease-ttl=87600h pki ``` -* Generate the root CA +* Generate the root CA: + + -> **Note:** The `common_name` value is comprised of combining `global.datacenter` dot `global.domain`. ```shell-session $ vault write -field=certificate pki/root/generate/internal \ @@ -40,9 +40,32 @@ which also uses an intermediate signing authority. ttl=87600h ``` - -> **Note:** Where `common_name` is comprised of combining `global.datacenter` dot `global.domain`. -### Create Vault Policies for the Server TLS Certificates +## Overview +To use Vault as the Server TLS Certificate Provider on Kubernetes, we will need to modify the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + 1. (Added) Create a Vault PKI role that establishes the domains that it is allowed to issue certificates for. + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +## One time setup in Vault +### Store the secret in Vault + +This step does not apply to this use case because we are not storing a single secret. We are configuring Vault as a provider to mint certificates on an ongoing basis. + +### Create a Vault policy that authorizes the desired level of access to the secret +To use Vault to issue Server TLS certificates, you will need to create the following: + +1. Vault Policies that will allow the Consul server to access the certificate issuing url. +1. Vault Policies that will allow the Consul components, e.g. ingress gateways, controller, to access the CA url. + +#### Create Vault Policies for the Server TLS Certificates + +-> **Note:** The PKI secret path referenced by the Vault Policy below will be your `server.serverCert.secretName` Helm value. Next we will create a policy that allows `["create", "update"]` access to the [certificate issuing URL](https://www.vaultproject.io/api/secret/pki#generate-certificate) so the Consul servers can @@ -58,13 +81,13 @@ path "pki/issue/consul-server" {
+Apply the Vault policy by issuing the `vault policy write` CLI command: + ```shell-session $ vault policy write consul-server consul-server-policy.hcl ``` --> **Note:** The PKI secret path referenced by the above Policy will be your `server.serverCert.secretName` Helm value. - -### Create Vault Policies for the CA URL +#### Create Vault Policies for the CA URL Next, we will create a policy that allows `["read"]` access to the [CA URL](https://www.vaultproject.io/api/secret/pki#read-certificate), this is required for the Consul components to communicate with the Consul servers in order to fetch their auto-encryption certificates. @@ -85,7 +108,8 @@ $ vault policy write ca-policy ca-policy.hcl -> **Note:** The PKI secret path referenced by the above Policy will be your `global.tls.caCert.secretName` Helm value. -### Create Vault Roles for the PKI engine, Consul servers and components +## setup per Consul datacenter +### Create a Vault PKI role that establishes the domains that it is allowed to issue certificates for Next, a Vault role for the PKI engine will set the default certificate issuance parameters: @@ -114,9 +138,8 @@ export DATACENTER=dc1 echo allowed_domains=\"$DATACENTER.consul, $NAME-server, $NAME-server.$NAMESPACE, $NAME-server.$NAMESPACE.svc\" ``` -Prior to creating Vault auth roles for the Consul server and the Consul components, ensure that the Vault Kubernetes auth method is enabled as described in [Vault Kubernetes Auth Method](/docs/k8s/installation/vault#vault-kubernetes-auth-method). - -Finally, three Kubernetes auth roles need to be created, one for the Consul servers, one for the Consul clients, and one for the Consul components. +### Create Vault auth roles that link the policy to each Consul on Kubernetes service account that requires access +Finally, three Kubernetes auth roles need to be created, one for the Consul servers, one for the Consul clients, and one for Consul components. Role for Consul servers: ```shell-session @@ -134,9 +157,6 @@ you can run: $ helm template --release-name ${RELEASE_NAME} --show-only templates/server-serviceaccount.yaml hashicorp/consul ``` --> **Note:** Should you enable other supported features such as gossip-encryption be sure to append additional policies to -the Kube auth role in a comma separated value e.g. `policies=consul-server,consul-gossip` - Role for Consul clients: ```shell-session @@ -152,9 +172,6 @@ To find out the service account name of the Consul client, use the command below $ helm template --release-name ${RELEASE_NAME} --show-only templates/client-serviceaccount.yaml hashicorp/consul ``` --> **Note:** Should you enable other supported features such as gossip-encryption, ensure you append additional policies to -the Kube auth role in a comma separated value e.g. `policies=ca-policy,consul-gossip` - Role for CA components: ```shell-session $ vault write auth/kubernetes/role/consul-ca \ @@ -167,7 +184,7 @@ $ vault write auth/kubernetes/role/consul-ca \ The above Vault Roles will now be your Helm values for `global.secretsBackend.vault.consulServerRole` and `global.secretsBackend.vault.consulCARole` respectively.
-## Deploying the Consul Helm chart +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart Now that we've configured Vault, you can configure the Consul Helm chart to use the Server TLS certificates from Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx b/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx new file mode 100644 index 000000000..fee102dcb --- /dev/null +++ b/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx @@ -0,0 +1,104 @@ +--- +layout: docs +page_title: Storing the Snapshot Agent Config in Vault +description: >- + Configuring the Consul Helm chart to use a snapshot agent config stored in Vault. +--- + +# Storing the Snapshot Agent Config in Vault + +## Prerequisites +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). +2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). + +## Overview +To use a snapshot agent config stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: + +### One time setup in Vault + + 1. Store the secret in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secret. + +### setup per Consul datacenter + + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +## One time setup in Vault +### Store the Secret in Vault + +First, store the snapshot agent config in Vault: + +```shell-session +$ vault kv put secret/consul/snapshot-agent-config key="" +``` + +### Create a Vault policy that authorizes the desired level of access to the secret + +-> **Note:** The secret path referenced by the Vault Policy below will be your `client.snapshotAgent.configSecret.secretName` Helm value.
+ +Next, you will need to create a policy that allows read access to this secret: + + + +```HCL +path "secret/data/consul/snapshot-agent-config" { + capabilities = ["read"] +} +``` + + + +Apply the Vault policy by issuing the `vault policy write` CLI command: + +```shell-session +$ vault policy write snapshot-agent-config-policy snapshot-agent-config-policy.hcl +``` + +## setup per Consul datacenter +### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access + +Next, you will create a Kubernetes auth role for the Consul snapshot agent: + +```shell-session +$ vault write auth/kubernetes/role/snapshot-agent \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=snapshot-agent-config-policy \ + ttl=1h +``` + +To find out the service account name of the Consul snapshot agent, +you can run the following `helm template` command with your Consul on Kubernetes values file: + +```shell-session +$ helm template --release-name ${RELEASE_NAME} -s templates/client-snapshot-agent-serviceaccount.yaml hashicorp/consul +``` + +### Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart + +Now that you have configured Vault, you can configure the Consul Helm chart to +use the snapshot agent config in Vault: + + + +```yaml +global: + secretsBackend: + vault: + enabled: true + consulSnapshotAgentRole: snapshot-agent +client: + snapshotAgent: + configSecret: + secretName: secret/data/consul/snapshot-agent-config + secretKey: key +``` + + + +Note that `client.snapshotAgent.configSecret.secretName` is the path of the secret in Vault. +This should be the same path as the one you included in your Vault policy. +`client.snapshotAgent.configSecret.secretKey` is the key inside the secret data. This should be the same +as the key you passed when creating the snapshot agent config secret in Vault. diff --git a/website/content/docs/k8s/installation/vault/enterprise-license.mdx b/website/content/docs/k8s/installation/vault/enterprise-license.mdx deleted file mode 100644 index 8cdd9f233..000000000 --- a/website/content/docs/k8s/installation/vault/enterprise-license.mdx +++ /dev/null @@ -1,98 +0,0 @@ ---- -layout: docs -page_title: Storing Enterprise License in Vault -description: >- - Configuring the Consul Helm chart to use enterprise license stored in Vault. ---- - -# Storing the Enterprise License in Vault - -To use an enterprise license stored in Vault, the steps will be similar to [Storing Gossip Encryption Key in Vault](/docs/k8s/installation/vault/gossip). You need to do the following: - -1. Store an enterprise license key in Vault's KV2 secrets engine. -1. Create Vault Policies that allow read access to the key. -1. Create a Vault Kubernetes Auth Role that links policies from step 2 to the Kubernetes service accounts of the Consul servers and clients.
- -## Configuring Vault - -First, store the license key in Vault: - -```shell-session -$ vault kv put secret/consul/enterpriselicense key="" -``` - -Next, you will need to create a policy that allows read access to this secret: - - - - -```HCL -path "secret/data/consul/enterpriselicense" { - capabilities = ["read"] -} -``` - - - -```shell-session -$ vault policy write enterpriselicense-policy enterpriselicense-policy.hcl -``` - -Prior to creating Vault auth roles for the Consul servers and clients, ensure that the Vault Kubernetes auth method is enabled as described in [Vault Kubernetes Auth Method](/docs/k8s/installation/vault#vault-kubernetes-auth-method). - -Next, you will create Kubernetes auth roles for the Consul server and client: - -```shell-session -$ vault write auth/kubernetes/role/consul-server \ - bound_service_account_names= \ - bound_service_account_namespaces= \ - policies=enterpriselicense-policy \ - ttl=1h -``` - -```shell-session -$ vault write auth/kubernetes/role/consul-client \ - bound_service_account_names= \ - bound_service_account_namespaces= \ - policies=enterpriselicense-policy \ - ttl=1h -``` - -To find out the service account names of the Consul server and client, -you can run the following `helm template` commands with your Consul on Kubernetes values file: - -- Generate Consul server service account name - ```shell-session - $ helm template --release-name ${RELEASE_NAME} -s templates/server-serviceaccount.yaml hashicorp/consul - ``` - -- Generate Consul client service account name - ```shell-session - $ helm template --release-name ${RELEASE_NAME} -s templates/client-serviceaccount.yaml hashicorp/consul - ``` - -## Deploying the Consul Helm chart - -Now that you have configured Vault, you can configure the Consul Helm chart to -use the enterprise license key in Vault: - - - -```yaml -global: - secretsBackend: - vault: - enabled: true - consulServerRole: consul-server - consulClientRole: consul-client - enterpriseLicense: - secretName: secret/data/consul/enterpriselicense - secretKey: key -``` - - - -Note that `global.enterpriseLicense.secretName` is the path of the secret in Vault. -This should be the same path as the one you included in your Vault policy. -`global.enterpriseLicense.secretKey` is the key inside the secret data. This should be the same -as the key you passed when creating the enterprise license secret in Vault. diff --git a/website/content/docs/k8s/installation/vault/index.mdx b/website/content/docs/k8s/installation/vault/index.mdx index 84dd345fc..bd51f43b7 100644 --- a/website/content/docs/k8s/installation/vault/index.mdx +++ b/website/content/docs/k8s/installation/vault/index.mdx @@ -1,115 +1,54 @@ --- layout: docs -page_title: Vault as Secrets Backend Overview +page_title: Vault as the Secrets Backend Overview description: >- - Using Vault as secrets backend for Consul on Kubernetes. + Using Vault as the secrets backend for Consul on Kubernetes. --- -# Vault as Secrets Backend Overview +# Vault as the Secrets Backend Overview By default, Consul Helm chart will expect that any credentials it needs are stored as Kubernetes secrets. As of Consul 1.11 and Consul Helm chart v0.38.0, we integrate more natively with Vault making it easier to use Consul Helm chart with Vault as the secrets storage backend. -At a high level, there are two points of integration with Vault: -- **Gossip encryption** - The encryption key for gossip communication is stored in Vault. 
-- **TLS certificates and keys**:
-  - **Consul Server TLS credentials** - TLS certificate and key for the Consul server is stored in Vault and issued from Vault.
-  - **Service Mesh and Consul client TLS credentials** - Consul uses Vault as the provider for mTLS certificates and keys for the service mesh services
-    and TLS certificates and keys for the Consul clients.
+## Secrets Overview
+
+By default, Consul on Kubernetes leverages Kubernetes secrets which are base64 encoded and unencrypted. In addition, the following limitations exist with managing sensitive data within Kubernetes secrets:
+
+- There are no lease or time-to-live properties associated with these secrets.
+- Kubernetes can only manage resources, such as secrets, within a cluster boundary. If you have sets of clusters, the resources across them need to be managed separately.
+
+By leveraging Vault as a secrets backend for Consul on Kubernetes, you can now manage and store Consul-related secrets within a centralized Vault cluster to use across one or many Consul on Kubernetes datacenters.
+
+### Secrets stored in the Vault KV Secrets Engine
+
+The following secrets can be stored in the Vault KV secrets engine, which is meant to handle arbitrary secrets:
+- ACL Bootstrap token
+- ACL Partition token
+- ACL Replication token
+- Enterprise license
+- Gossip encryption key
+- Snapshot Agent config
+
+
+### Secrets generated and managed by the Vault PKI Engine
+
+The following TLS certificates and keys can be generated and managed by the Vault PKI Engine, which is meant to handle things like certificate expiration and rotation:
+- Server TLS credentials
+- Service Mesh and Consul client TLS credentials
 
 ## Requirements
 
 1. Vault 1.9+ and Vault-k8s 0.14+ is required.
 1. Vault must be installed and accessible to the Consul on Kubernetes installation.
 1. `global.tls.enableAutoencrypt=true` is required if TLS is enabled for the Consul installation when using the Vault secrets backend.
-1. The Vault installation must have been initialized, unsealed and the KV2 and PKI secrets engines enabled and the Kubernetes Auth Method enabled.
-
-### Vault Helm Config
-
-A minimal valid installation of Vault Kubernetes must include the Agent Injector which is utilized for accessing secrets from Vault. Vault servers could be deployed
-external to Vault on Kubernetes as described via the [`externalvaultaddr`](https://www.vaultproject.io/docs/platform/k8s/helm/configuration#externalvaultaddr) value in the Vault Helm Configuration
-
-
-
-```yaml
-injector:
-  enabled: "true"
-```
-
-
-
-### Vault Kubernetes Auth Method
-
-Prior to creating Vault auth roles for the Consul servers and clients, ensure that the Vault Kubernetes auth method is enabled:
-
-```shell-session
-$ vault auth enable kubernetes
-```
-
-After enabling the Kubernetes auth method, in Vault, ensure that you have configured the Kubernetes Auth method properly as described in [Kubernetes Auth Method Configuration](https://www.vaultproject.io/docs/auth/kubernetes#configuration). The command should look similar to the following with a custom `kubernetes_host` config provided from the information provided via `kubectl cluster-info`.
-
-```shell-session
-$ vault write auth/kubernetes/config \
-    token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
-    kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443" \
-    kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-```
-
-### Vault KV Secrets Engine - Version 2
-
-In order to utilize Vault as a secrets backend, we must enable the [Vault KV secrets engine - Version 2](https://www.vaultproject.io/docs/secrets/kv/kv-v2).
-
-```shell-session
-$ vault secrets enable -path=consul kv-v2
-```
-
-### Vault PKI Engine
-
-The Vault PKI Engine must be enabled in order to leverage Vault for issuing Consul Server TLS certificates. More details for configuring the PKI Engine is found in [Bootstrapping the PKI Engine](/docs/k8s/installation/vault/server-tls#bootstrapping-the-pki-engine) under the Server TLS section.
-
-```shell-session
-$ vault secrets enable pki
-```
-
-## Known Limitations
-
-- TLS
-  - Mesh gateway is not currently supported.
-- Multi-DC Federation is not currently supported.
-- Certificate rotation for Server TLS certs is not currently supported through the Helm chart. Ensure the TTL for your Server TLS certificates are sufficiently long. Should your certificates expire it will be necessary to issue a `consul reload` on each server after issuing new Server TLS certs from Vault.
-- CA rotation is not currently supported through the Helm chart and must be manually rotated.
-
+1. The Vault installation must have been initialized and unsealed, with the KV2 and PKI secrets engines and the Kubernetes Auth Method enabled.
 ## Next Steps
 
-To utilize Vault as a secrets backend with Consul it is necessary to add several configuration fields to the Vault installation
-which bootstrap Vault Auth roles and Policies for Consul to use. For the supported Vault secrets please see the individual secret
-guides and ensure to, when combining the secrets, append the Vault Policies to your Vault Kube Auth Roles via a comma separated value (i.e. `policies=gossip-policy,consul-ca,consul-server,custom-policy`).
-Ex:
-```shell-session
-$ vault write auth/kubernetes/role/consul-server \
-    bound_service_account_names= \
-    bound_service_account_namespaces= \
-    policies=gossip-policy,consul-ca,consul-server \
-    ttl=1h
-```
+The Vault integration with Consul on Kubernetes has two aspects or phases:
+- [Systems Integration](/docs/k8s/installation/vault/systems-integration) - Configure Vault and Consul on Kubernetes systems to leverage Vault as the secrets store.
+- [Data Integration](/docs/k8s/installation/vault/data-integration) - Configure specific secrets to be stored and
+retrieved from Vault for use with Consul on Kubernetes.
+As a next step, please proceed to the [Systems Integration](/docs/k8s/installation/vault/systems-integration) overview to understand how to first set up Vault and Consul on Kubernetes to leverage Vault as a secrets backend.
 
-## Troubleshooting
-
-The Vault integration with Consul on Kubernetes makes use of the Vault Agent Injectors. Kubernetes annotations are added to the
-deployments of the Consul components which cause the Vault Agent Injector to be added as an init-container that will then attach
-Vault secrets to Consul's pods at startup. Additionally the Vault Agent sidecar is added to the Consul component pods which
-is responsible for synchronizing and reissuing secrets at runtime.
-As a result of these additional sidecar containers the typical location for logging is expanded in the Consul components.
-
-As a general rule the best way to troubleshoot startup issues for your Consul installation when using the Vault integration
-is to establish if the `vault-agent-init` container has completed or not via `kubectl logs -f -c vault-agent-int`
-and checking to see if the secrets have completed rendering.
-* If the secrets are not properly rendered the underlying problem will be logged in `vault-agent-init` init-container
-  and generally is related to the Vault Kube Auth Role not having the correct policies for the specific secret
-  e.g. `global.secretsBackend.vault.consulServerRole` not having the correct policies for TLS.
-* If the secrets are rendered and the `vault-agent-init` container has completed AND the Consul component has not become `Ready`,
-  this generally points to an issue with Consul being unable to utilize the Vault secret. This can occur if, for example, the Vault Role
-  created for the PKI engine does not have the correct `alt_names` or otherwise is not properly configured. The best logs for this
-  circumstance are the Consul container logs: `kubectl logs -f -c consul`.
diff --git a/website/content/docs/k8s/installation/vault/systems-integration.mdx b/website/content/docs/k8s/installation/vault/systems-integration.mdx
new file mode 100644
index 000000000..a34b3f0f2
--- /dev/null
+++ b/website/content/docs/k8s/installation/vault/systems-integration.mdx
@@ -0,0 +1,202 @@
+---
+layout: docs
+page_title: Vault as the Secrets Backend Systems Integration Overview
+description: >-
+  Overview of the systems integration aspects of using Vault as the secrets backend for Consul on Kubernetes.
+---
+
+# Vault as the Secrets Backend - Systems Integration
+
+## Overview
+At a high level, configuring a systems integration of Vault with Consul on Kubernetes consists of 1) a one time setup on Vault and 2) a setup of the secrets backend per Consul datacenter via Helm.
+
+### One time setup on Vault
+  - Enabling the Vault KV Secrets Engine - Version 2 to store arbitrary secrets
+  - Enabling the Vault PKI Engine if you are choosing to store and manage either [Consul Server TLS credentials](/docs/k8s/installation/vault/data-integration/server-tls) or [Service Mesh and Consul client TLS credentials](/docs/k8s/installation/vault/data-integration/connect-ca)
+
+### Setup per Consul datacenter
+  - Installing the Vault Injector within the Consul datacenter installation
+  - Configuring a Kubernetes Auth Method in Vault to authenticate and authorize operations from the Consul datacenter
+  - Enabling Vault as the Secrets Backend in the Consul datacenter
+
+## One time setup on Vault
+
+A one time setup on a Vault deployment is necessary to enable both the Vault KV Secrets Engine and the Vault PKI Engine. These docs assume that you have already set up a Vault cluster for use with Consul on Kubernetes.
+
+Please read [Run Vault on Kubernetes](https://www.vaultproject.io/docs/platform/k8s/helm/run) if instructions on setting up a Vault cluster are needed.
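+
+If you only need a throwaway Vault cluster to experiment with this integration, the commands below are a minimal sketch of installing a dev-mode Vault server with the HashiCorp Vault Helm chart. The release name `vault` and the use of dev mode are illustrative assumptions for local testing only and are not suitable for production.
+
+```shell-session
+$ helm repo add hashicorp https://helm.releases.hashicorp.com
+$ helm install vault hashicorp/vault --set server.dev.enabled=true
+```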
+
+### Vault KV Secrets Engine - Version 2
+
+The following secrets can be stored in the Vault KV secrets engine, which is meant to handle arbitrary secrets:
+- ACL Bootstrap token ([`global.acls.bootstrapToken`](/docs/k8s/helm#v-global-acls-bootstraptoken))
+- ACL Partition token ([`global.acls.partitionToken`](/docs/k8s/helm#v-global-acls-partitiontoken))
+- ACL Replication token ([`global.acls.replicationToken`](/docs/k8s/helm#v-global-acls-replicationtoken))
+- Gossip encryption key ([`global.gossipEncryption`](/docs/k8s/helm#v-global-gossipencryption))
+- Enterprise license ([`global.enterpriseLicense`](/docs/k8s/helm#v-global-enterpriselicense))
+- Snapshot Agent config ([`client.snapshotAgent.configSecret`](/docs/k8s/helm#v-client-snapshotagent-configsecret))
+
+In order to store any of these secrets, we must enable the [Vault KV secrets engine - Version 2](https://www.vaultproject.io/docs/secrets/kv/kv-v2).
+
+```shell-session
+$ vault secrets enable -path=consul kv-v2
+```
+
+### Vault PKI Engine
+
+The Vault PKI Engine must be enabled in order to leverage Vault for issuing Consul Server TLS certificates. More details for configuring the PKI Engine can be found in [Bootstrapping the PKI Engine](/docs/k8s/installation/vault/data-integration/server-tls#bootstrapping-the-pki-engine) under the Server TLS section.
+
+```shell-session
+$ vault secrets enable pki
+```
+
+## Setup per Consul datacenter
+
+After configuring Vault, Consul datacenters on Kubernetes must be deployed with the Vault Agent injector and configured to leverage the Vault Kubernetes Auth Method to read secrets from a Vault cluster.
+
+### Set Environment Variables to ensure integration consistency
+
+Before installing the Vault Injector and configuring the Vault Kubernetes Auth Method, some environment variables need to be set to better ensure consistent mapping between Vault and Consul on Kubernetes.
+
+#### DATACENTER
+
+  - **Recommended value:** value of `global.datacenter` in your Consul Helm values file.
+  ```shell-session
+  $ export DATACENTER=dc1
+  ```
+#### VAULT_AUTH_METHOD_NAME
+
+  - **Recommended value:** a concatenation of a `kubernetes-` prefix (to denote the auth method type) with the `DATACENTER` environment variable.
+  ```shell-session
+  $ export VAULT_AUTH_METHOD_NAME=kubernetes-${DATACENTER}
+  ```
+
+#### VAULT_SERVER_HOST
+
+  - **Recommended value:** find the external IP address of your Vault cluster.
+    - If Vault is installed in a Kubernetes cluster, get the external IP or DNS name of the Vault server load balancer.
+      - On GKE or AKS, it'll be an IP:
+      ```shell-session
+      $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+      ```
+      - On EKS, it'll be a hostname:
+      ```shell-session
+      $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+      ```
+    - If Vault is not running on Kubernetes, utilize the `api_addr` as defined in the Vault [High Availability Parameters](https://www.vaultproject.io/docs/configuration#high-availability-parameters) configuration:
+      ```shell-session
+      $ export VAULT_SERVER_HOST=
+      ```
+
+#### VAULT_ADDR
+
+  - **Recommended value:** Connecting to port 8200 of the Vault server
+  ```shell-session
+  $ export VAULT_ADDR=http://${VAULT_SERVER_HOST}:8200
+  ```
+-> **Note:** If your Vault installation is currently exposed using SSL, this address will need to use `https` instead of `http`.
You will also need to set the [`VAULT_CACERT`](https://www.vaultproject.io/docs/commands#vault_cacert) environment variable.
+
+#### VAULT_TOKEN
+
+  - **Recommended value:** Your allocated Vault token. If running Vault in dev mode, this can be set to `root`.
+  ```shell-session
+  $ export VAULT_TOKEN=
+  ```
+
+### Install Vault Injector in your Consul k8s cluster
+
+A minimal valid installation of Vault Kubernetes must include the Agent Injector which is utilized for accessing secrets from Vault. Vault servers could be deployed external to Vault on Kubernetes with the [`injector.externalVaultAddr`](https://www.vaultproject.io/docs/platform/k8s/helm/configuration#externalvaultaddr) value in the Vault Helm Configuration.
+
+```shell-session
+$ cat <<EOF >> vault-injector.yaml
+# vault-injector.yaml
+server:
+  enabled: false
+injector:
+  enabled: true
+  externalVaultAddr: ${VAULT_ADDR}
+  authPath: auth/${VAULT_AUTH_METHOD_NAME}
+EOF
+```
+
+Issue the Helm `install` command to install the Vault agent injector using the HashiCorp Vault Helm chart.
+
+```shell-session
+$ helm install vault-${DATACENTER} -f vault-injector.yaml hashicorp/vault --wait
+```
+
+### Configure the Kubernetes Auth Method in Vault for the datacenter
+
+#### Enable the Auth Method
+
+Ensure that the Vault Kubernetes Auth method is enabled.
+
+```shell-session
+$ vault auth enable -path=kubernetes-${DATACENTER} kubernetes
+```
+
+#### Configure Auth Method with JWT token of service account
+
+After enabling the Kubernetes auth method, in Vault, ensure that you have configured the Kubernetes Auth method properly as described in [Kubernetes Auth Method Configuration](https://www.vaultproject.io/docs/auth/kubernetes#configuration).
+
+First, while targeting your Consul cluster, get the externally reachable address of the Consul Kubernetes cluster.
+
+```shell-session
+$ export KUBE_API_URL=$(kubectl config view -o jsonpath="{.clusters[?(@.name == \"$(kubectl config current-context)\")].cluster.server}")
+```
+
+Next, you will configure the Vault Kubernetes Auth Method for the datacenter. You will need to provide it with:
+- `token_reviewer_jwt` - this is a JWT token from the Consul datacenter cluster that the Vault Kubernetes Auth Method will use to query the Consul datacenter Kubernetes API when services in the Consul datacenter request data from Vault.
+- `kubernetes_host` - this is the URL of the Consul datacenter's Kubernetes API that Vault will query to authenticate the service account of an incoming request from a Consul datacenter Kubernetes service.
+- `kubernetes_ca_cert` - this is the CA certificate that is currently being used by the Consul datacenter Kubernetes cluster.
+
+```shell-session
+$ vault write auth/${VAULT_AUTH_METHOD_NAME}/config \
+    token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
+    kubernetes_host="${KUBE_API_URL}" \
+    kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+```
+
+#### Enable Vault as the Secrets Backend in the Consul datacenter
+Finally, you will configure the Consul on Kubernetes Helm chart for the datacenter to retrieve the following secrets (if you have configured them) from Vault:
+- ACL Bootstrap token ([`global.acls.bootstrapToken`](/docs/k8s/helm#v-global-acls-bootstraptoken))
+- ACL Partition token ([`global.acls.partitionToken`](/docs/k8s/helm#v-global-acls-partitiontoken))
+- ACL Replication token ([`global.acls.replicationToken`](/docs/k8s/helm#v-global-acls-replicationtoken))
+- Enterprise license ([`global.enterpriseLicense`](/docs/k8s/helm#v-global-enterpriselicense))
+- Gossip encryption key ([`global.gossipEncryption`](/docs/k8s/helm#v-global-gossipencryption))
+- Snapshot Agent config ([`client.snapshotAgent.configSecret`](/docs/k8s/helm#v-client-snapshotagent-configsecret))
+- TLS CA certificates ([`global.tls.caCert`](/docs/k8s/helm#v-global-tls-cacert))
+- Server TLS certificates ([`server.serverCert`](/docs/k8s/helm#v-server-servercert))
+
+
+
+```yaml
+global:
+  secretsBackend:
+    vault:
+      enabled: true
+```
+
+
+
+## Next Steps
+
+As a next step, please proceed to the [Data Integration](/docs/k8s/installation/vault/data-integration) overview for the Vault integration with Consul on Kubernetes.
+
+## Troubleshooting
+
+The Vault integration with Consul on Kubernetes makes use of the Vault Agent Injector. Kubernetes annotations are added to the
+deployments of the Consul components which cause the Vault Agent Injector to be added as an init-container that will then attach
+Vault secrets to Consul's pods at startup. Additionally the Vault Agent sidecar is added to the Consul component pods which
+is responsible for synchronizing and reissuing secrets at runtime.
+As a result of these additional sidecar containers the typical location for logging is expanded in the Consul components.
+
+As a general rule the best way to troubleshoot startup issues for your Consul installation when using the Vault integration
+is to establish if the `vault-agent-init` container has completed or not via `kubectl logs -f -c vault-agent-init`
+and checking to see if the secrets have completed rendering.
+* If the secrets are not properly rendered the underlying problem will be logged in the `vault-agent-init` init-container
+  and generally is related to the Vault Kube Auth Role not having the correct policies for the specific secret
+  e.g. `global.secretsBackend.vault.consulServerRole` not having the correct policies for TLS.
+* If the secrets are rendered and the `vault-agent-init` container has completed AND the Consul component has not become `Ready`,
+  this generally points to an issue with Consul being unable to utilize the Vault secret. This can occur if, for example, the Vault Role
+  created for the PKI engine does not have the correct `alt_names` or otherwise is not properly configured. The best logs for this
+  circumstance are the Consul container logs: `kubectl logs -f -c consul`.
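+
+As a quick reference, the commands below sketch that log-inspection flow. The pod name `consul-server-0` is only an illustrative assumption; substitute the Consul component pod that is failing to become `Ready`.
+
+```shell-session
+$ kubectl get pods
+$ kubectl logs -f consul-server-0 -c vault-agent-init
+$ kubectl logs -f consul-server-0 -c consul
+```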
diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index f6fc6bbd1..d8cf955f1 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -496,20 +496,49 @@ "path": "k8s/installation/vault" }, { - "title": "Gossip Encryption Key", - "path": "k8s/installation/vault/gossip" + "title": "Systems Integration", + "path": "k8s/installation/vault/systems-integration" }, { - "title": "Enterprise License", - "path": "k8s/installation/vault/enterprise-license" - }, - { - "title": "Server TLS", - "path": "k8s/installation/vault/server-tls" - }, - { - "title": "Service Mesh Certificates", - "path": "k8s/installation/vault/connect-ca" + "title": "Data Integration", + "routes": [ + { + "title": "Overview", + "path": "k8s/installation/vault/data-integration" + }, + { + "title": "Bootstrap Token", + "path": "k8s/installation/vault/data-integration/bootstrap-token" + }, + { + "title": "Enterprise License", + "path": "k8s/installation/vault/data-integration/enterprise-license" + }, + { + "title": "Gossip Encryption Key", + "path": "k8s/installation/vault/data-integration/gossip" + }, + { + "title": "Partition Token", + "path": "k8s/installation/vault/data-integration/partition-token" + }, + { + "title": "Replication Token", + "path": "k8s/installation/vault/data-integration/replication-token" + }, + { + "title": "Server TLS", + "path": "k8s/installation/vault/data-integration/server-tls" + }, + { + "title": "Service Mesh Certificates", + "path": "k8s/installation/vault/data-integration/connect-ca" + }, + { + "title": "Snapshot Agent Config", + "path": "k8s/installation/vault/data-integration/snapshot-agent-config" + } + ] } ] }, @@ -787,7 +816,6 @@ "title": "Overview", "path": "nia/enterprise" }, - { "title": "License", "path": "nia/enterprise/license" diff --git a/website/package-lock.json b/website/package-lock.json index ec5069512..9be17cbe7 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -2225,9 +2225,13 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-11.1.2.tgz", "integrity": "sha512-hZuwOlGOwBZADA8EyDYyjx3+4JGIGjSHDHWrmpI7g5rFmQNltjlbaefAbiU5Kk7j3BUSDwt30quJRFv3nyJQ0w==", - "cpu": ["arm64"], + "cpu": [ + "arm64" + ], "optional": true, - "os": ["darwin"], + "os": [ + "darwin" + ], "engines": { "node": ">= 10" } @@ -2236,9 +2240,13 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-11.1.2.tgz", "integrity": "sha512-PGOp0E1GisU+EJJlsmJVGE+aPYD0Uh7zqgsrpD3F/Y3766Ptfbe1lEPPWnRDl+OzSSrSrX1lkyM/Jlmh5OwNvA==", - "cpu": ["x64"], + "cpu": [ + "x64" + ], "optional": true, - "os": ["darwin"], + "os": [ + "darwin" + ], "engines": { "node": ">= 10" } @@ -2247,9 +2255,13 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-11.1.2.tgz", "integrity": "sha512-YcDHTJjn/8RqvyJVB6pvEKXihDcdrOwga3GfMv/QtVeLphTouY4BIcEUfrG5+26Nf37MP1ywN3RRl1TxpurAsQ==", - "cpu": ["x64"], + "cpu": [ + "x64" + ], "optional": true, - "os": ["linux"], + "os": [ + "linux" + ], "engines": { "node": ">= 10" } @@ -2258,9 +2270,13 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-11.1.2.tgz", "integrity": "sha512-e/pIKVdB+tGQYa1cW3sAeHm8gzEri/HYLZHT4WZojrUxgWXqx8pk7S7Xs47uBcFTqBDRvK3EcQpPLf3XdVsDdg==", - "cpu": ["x64"], + "cpu": [ + "x64" + ], "optional": true, - "os": ["win32"], + "os": [ + "win32" + ], "engines": { "node": ">= 10" } @@ -8415,7 
+8431,9 @@ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "hasInstallScript": true, "optional": true, - "os": ["darwin"], + "os": [ + "darwin" + ], "engines": { "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } From 6feb7abe572aec9e7ae2eb930240faeb47ade2a4 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Thu, 7 Apr 2022 11:35:06 -0700 Subject: [PATCH 102/785] add changelog for rpc metric improv project (#12709) Co-authored-by: Dhia Ayachi --- .changelog/12311.txt | 3 +++ .changelog/12573.txt | 3 +++ 2 files changed, 6 insertions(+) create mode 100644 .changelog/12311.txt create mode 100644 .changelog/12573.txt diff --git a/.changelog/12311.txt b/.changelog/12311.txt new file mode 100644 index 000000000..42bcc8f0f --- /dev/null +++ b/.changelog/12311.txt @@ -0,0 +1,3 @@ +```release-note:note +Forked net/rpc to add middleware support: https://github.com/hashicorp/consul-net-rpc/ . +``` \ No newline at end of file diff --git a/.changelog/12573.txt b/.changelog/12573.txt new file mode 100644 index 000000000..28c9825aa --- /dev/null +++ b/.changelog/12573.txt @@ -0,0 +1,3 @@ +```release-note:feature +rpc: (beta): add a new metric `consul.rpc.server.call` with labels +for `method`, `errored`, `rpc_type`, `request_type`. \ No newline at end of file From a125e12c785403cc227b2e7222d49ac1435faebd Mon Sep 17 00:00:00 2001 From: Chris Thain <32781396+cthain@users.noreply.github.com> Date: Thu, 7 Apr 2022 11:43:12 -0700 Subject: [PATCH 103/785] Consul on ECS 0.4.0 (#12694) Update website docs for Consul on ECS 0.4.0 --- website/content/docs/ecs/architecture.mdx | 23 ++++- website/content/docs/ecs/enterprise.mdx | 98 +++++++++++++++---- website/content/docs/ecs/index.mdx | 2 +- website/content/docs/ecs/manual/install.mdx | 10 +- website/content/docs/ecs/requirements.mdx | 19 ++-- .../ecs/terraform/secure-configuration.mdx | 2 +- 6 files changed, 120 insertions(+), 34 deletions(-) diff --git a/website/content/docs/ecs/architecture.mdx b/website/content/docs/ecs/architecture.mdx index 03d8bf7bc..83e46783d 100644 --- a/website/content/docs/ecs/architecture.mdx +++ b/website/content/docs/ecs/architecture.mdx @@ -73,7 +73,13 @@ This diagram shows an example timeline of a task shutting down: - Updates about this task have reached the rest of the Consul cluster, so downstream proxies have been updated to stopped sending traffic to this task. - **T4**: At this point task shutdown should be complete. Otherwise, ECS will send a KILL signal to any containers still running. The KILL signal cannot be ignored and will forcefully stop containers. This will interrupt in-progress operations and possibly cause errors. -## Automatic ACL Token Provisioning +## ACL Controller + +The ACL controller performs the following operations: +* Provisions Consul ACL tokens for Consul clients and service mesh services. +* Manages Consul admin partitions and namespaces. + +### Automatic ACL Token Provisioning Consul ACL tokens secure communication between agents and services. The following containers in a task require an ACL token: @@ -92,6 +98,21 @@ token does not yet exist. The ACL controller stores all ACL tokens in AWS Secrets Manager, and tasks are configured to pull these tokens from AWS Secrets Manager when they start. +### Admin Partitions and Namespaces + +When [admin partitions and namespaces](/docs/ecs/enterprise#admin-partitions-and-namespaces) are enabled, +the ACL controller is assigned to its configured admin partition. 
The ACL controller provisions ACL +tokens for tasks in a single admin partition and supports one ACL controller instance per ECS +cluster. This results in an architecture with one admin partition per ECS cluster. + +The ACL controller automatically performs the following actions: +* Creates its admin partition at startup if it does not exist. +* Inspects ECS task tags for the task's intended partition and namespace. + The ACL controller ignores tasks that do not match the `partition` tag. +* Creates namespaces when tasks start up. Namespaces are only created if they do not exist. +* Provisions ACL tokens and ACL policies that are scoped to the applicable admin partition and namespace. +* Provision ACL tokens that allow services to communicate with upstreams across admin partitions and namespaces. + ## ECS Health Check Syncing If the following conditions apply, ECS health checks automatically sync with Consul health checks for all application containers: diff --git a/website/content/docs/ecs/enterprise.mdx b/website/content/docs/ecs/enterprise.mdx index b181b5e13..6ee42d5be 100644 --- a/website/content/docs/ecs/enterprise.mdx +++ b/website/content/docs/ecs/enterprise.mdx @@ -38,28 +38,86 @@ run Consul Enterprise clients then you must enable ACLs. ## Running Open Source Consul Clients -Consul supports running Consul Enterprise servers with Consul OSS (Open Source) clients. Since -currently no Consul Enterprise features are supported that require Consul client support, -you can run Consul OSS clients with Consul Enterprise servers without issue. +You can operate Consul Enterprise servers with Consul OSS (open source) clients as long as the features you are using do not require Consul Enterprise client support. Admin partitions and namespaces, for example, require Consul Enterprise clients and are not supported with Consul OSS. ## Feature Support -Consul on ECS does not currently support any Consul Enterprise features that require -support from Consul clients. That being said, there are many enterprise features that -are activated only on Consul servers and so Consul on ECS will run fine with those -features. +Consul on ECS supports the following Consul Enterprise features. +If you are only using features that run on Consul servers, then you can use an OSS client in your service mesh tasks on ECS. +If client support is required for any of the features, then you must use a Consul Enterprise client in your `mesh-tasks`. -| Feature | Supported | Description | -|-----------------------------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Automated Backups/Snapshot Agent | Yes* | Running the snapshot agent on ECS is not currently supported but you are able to run the snapshot agent alongside your Consul servers on VMs. | -| Automated Upgrades | Yes (servers) | This feature runs on Consul servers. | -| Enhanced Read Scalability | Yes (servers) | This feature runs on Consul servers. | -| Single Sign-On/OIDC | Yes (servers) | This feature runs on Consul servers. | -| Redundancy Zones | Yes (servers) | This feature runs on Consul servers. | -| Advanced Federation/Network Areas | Yes (servers) | This feature runs on Consul servers. | -| Sentinel | Yes (servers) | This feature runs on Consul servers. | -| Network Segments | No | Currently there is no capability to configure the network segment Consul clients on ECS run in. 
|
-| Namespaces                        | No            | Currently there is no capability to configure the Consul namespace for a service on ECS.                                                                                |
-| Admin Partitions                  | No*           | Supported if Consul ECS clients run in the default partition. Otherwise there is currently no capability to configure the admin partition Consul clients in ECS run in. |
-| Audit Logging                     | No*           | Audit logging can be enabled on Consul servers that run outside of ECS but is not currently supported on the Consul clients that run inside ECS.                        |
+| Feature                           | Supported     | Description                                                                                                                                                                                  |
+|-----------------------------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Automated Backups/Snapshot Agent  | Yes*          | Running the snapshot agent on ECS is not currently supported but you are able to run the snapshot agent alongside your Consul servers on VMs.                                                |
+| Automated Upgrades                | Yes (servers) | This feature runs on Consul servers.                                                                                                                                                         |
+| Enhanced Read Scalability         | Yes (servers) | This feature runs on Consul servers.                                                                                                                                                         |
+| Single Sign-On/OIDC               | Yes (servers) | This feature runs on Consul servers.                                                                                                                                                         |
+| Redundancy Zones                  | Yes (servers) | This feature runs on Consul servers.                                                                                                                                                         |
+| Advanced Federation/Network Areas | Yes (servers) | This feature runs on Consul servers.                                                                                                                                                         |
+| Sentinel                          | Yes (servers) | This feature runs on Consul servers.                                                                                                                                                         |
+| Network Segments                  | No            | Currently there is no capability to configure the network segment Consul clients on ECS run in.                                                                                             |
+| Namespaces                        | Yes           | This feature requires Consul Enterprise servers. OSS clients can register into the `default` namespace. Registration into a non-default namespace requires a Consul Enterprise client.      |
+| Admin Partitions                  | Yes           | This feature requires Consul Enterprise servers. OSS clients can register into the `default` admin partition. Registration into a non-default partition requires a Consul Enterprise client. |
+| Audit Logging                     | No*           | Audit logging can be enabled on Consul servers that run outside of ECS but is not currently supported on the Consul clients that run inside ECS.                                            |
+
+### Admin Partitions and Namespaces
+
+Consul on ECS supports [admin partitions](/docs/enterprise/admin-partitions) and [namespaces](/docs/enterprise/namespaces) when Consul Enterprise servers and clients are used.
+These features have the following requirements:
+* ACLs must be enabled.
+* ACL controller must run in the ECS cluster.
+* `mesh-tasks` must use a Consul Enterprise client image.
+
+The ACL controller automatically manages ACL policies and token provisioning for clients and services on the service mesh.
+It also creates admin partitions and namespaces if they do not already exist.
+
+-> **NOTE:** The ACL controller does not delete admin partitions or namespaces once they are created.
+
+Each ACL controller manages a single admin partition. Consul on ECS supports one ACL controller per ECS cluster;
+therefore, the administrative boundary for admin partitions is one admin partition per ECS cluster.
+
+The following example demonstrates how to configure the ACL controller to enable admin partitions
+and manage an admin partition named `my-partition`. The `consul_partition` field is optional; if it
+is not provided when `consul_partitions_enabled = true`, it defaults to the `default` admin partition.
+
+
+
+```hcl
+module "acl_controller" {
+  source = "hashicorp/consul-ecs/aws//modules/acl-controller"
+
+  ...
+
+  consul_partitions_enabled = true
+  consul_partition = "my-partition"
+}
+```
+
+
+
+Services are assigned to admin partitions and namespaces through the use of [task tags](/docs/ecs/manual/install#task-tags).
+The `mesh-task` module automatically adds the necessary tags to the task definition.
+If the ACL controller is configured for admin partitions, services on the mesh will
+always be assigned to an admin partition and namespace. If the `mesh-task` does not define
+the partition, it will default to the `default` admin partition. Similarly, if a `mesh-task` does
+not define the namespace, it will default to the `default` namespace.
+
+The following example demonstrates how to create a `mesh-task` assigned to the admin partition named
+`my-partition`, in the `my-namespace` namespace.
+
+
+
+```hcl
+module "my_task" {
+  source = "hashicorp/consul-ecs/aws//modules/mesh-task"
+  family = "my_task"
+
+  ...
+
+  consul_image = "hashicorp/consul-enterprise:-ent"
+  consul_partition = "my-partition"
+  consul_namespace = "my-namespace"
+}
+```
+
+
diff --git a/website/content/docs/ecs/index.mdx b/website/content/docs/ecs/index.mdx
index b62f2175d..e5c1f2e42 100644
--- a/website/content/docs/ecs/index.mdx
+++ b/website/content/docs/ecs/index.mdx
@@ -24,7 +24,7 @@ traffic policy, and more.
 Consul on ECS follows an [architecture](/docs/internals/architecture) similar to
 other platforms, but each ECS task is a Consul node. An ECS task runs the user
 application container(s), as well as a Consul client container for control plane
-communication and an [Envoy](https://envoyproxy.io/) sidecar proxy container to faciliate data plane communication for
+communication and an [Envoy](https://envoyproxy.io/) sidecar proxy container to facilitate data plane communication for
 [Consul Connect](/docs/connect).
 
 For a detailed architecture overview, see the [Architecture](/docs/ecs/architecture) page.
diff --git a/website/content/docs/ecs/manual/install.mdx b/website/content/docs/ecs/manual/install.mdx
index 1f6f5239f..6b01e08e1 100644
--- a/website/content/docs/ecs/manual/install.mdx
+++ b/website/content/docs/ecs/manual/install.mdx
@@ -72,10 +72,12 @@ during task startup.
 The `tags` list must include the following if you are using the ACL controller in a [secure configuration](/docs/manual/secure-configuration).
 Without these tags, the ACL controller will be unable to provision a service token for the task.
 
-| Tag Key                             | Tag Value           | Description                                                                                                                  |
-| ----------------------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
-| `consul.hashicorp.com/mesh`         | `true` (string)     | The ACL controller ignores tasks without this tag set to `true`.                                                             |
-| `consul.hashicorp.com/service-name` | Consul service name | Specifies the Consul service associated with this task. Required if the service name is different than the task `family`.   |
+| Tag Key                             | Tag Value              | Description                                                                                                                                       |
+| ----------------------------------- | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `consul.hashicorp.com/mesh`         | `true` (string)        | The ACL controller ignores tasks without this tag set to `true`.                                                                                  |
+| `consul.hashicorp.com/service-name` | Consul service name    | Specifies the Consul service associated with this task. Required if the service name is different than the task `family`.
| +| `consul.hashicorp.com/partition` | Consul admin partition | Specifies the Consul admin partition associated with this task. Defaults to the `default` admin partition if omitted. | +| `consul.hashicorp.com/namespace` | Consul namespace | Specifies the Consul namespace associated with this task. Defaults to the `default` namespace if omitted. | ## Application container diff --git a/website/content/docs/ecs/requirements.mdx b/website/content/docs/ecs/requirements.mdx index 972b545a5..e7c0a8ea8 100644 --- a/website/content/docs/ecs/requirements.mdx +++ b/website/content/docs/ecs/requirements.mdx @@ -9,18 +9,23 @@ description: >- The following requirements must be met in order to install Consul on ECS: -1. **Launch Type:** Fargate and EC2 launch types are supported. -1. **Subnets:** ECS Tasks can run in private or public subnets. Tasks must have [network access](https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/) to Amazon ECR or other public container registries to pull images. -1. **Consul Servers:** You can use your own Consul servers running on virtual machines or use [HashiCorp Cloud Platform Consul](https://www.hashicorp.com/cloud-platform) to host the servers for you. For development purposes or testing, you may use the `dev-server` [Terraform module](https://github.com/hashicorp/terraform-aws-consul-ecs/tree/main) that runs the Consul server as an ECS task. The `dev-server` does not support persistent storage. -1. **ACL Controller:** If you are running a secure Consul installation with ACLs enabled, configure the ACL controller. -1. **Sidecar containers:** Consul on ECS requires two sidecar containers to run in each ECS task: a +* **Launch Type:** Fargate and EC2 launch types are supported. +* **Subnets:** ECS Tasks can run in private or public subnets. Tasks must have [network access](https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/) to Amazon ECR or other public container registries to pull images. +* **Consul Servers:** You can use your own Consul servers running on virtual machines or use [HashiCorp Cloud Platform Consul](https://www.hashicorp.com/cloud-platform) to host the servers for you. For development purposes or testing, you may use the `dev-server` [Terraform module](https://github.com/hashicorp/terraform-aws-consul-ecs/tree/main) that runs the Consul server as an ECS task. The `dev-server` does not support persistent storage. +* **ACL Controller:** If you are running a secure Consul installation with ACLs enabled, configure the ACL controller. + * **Admin Partitions:** Consul on ECS supports [admin partitions](/docs/enterprise/admin-partitions) when ACLs are enabled and the ACL controller is configured. + The ACL controller manages one admin partition and each ECS cluster requires an ACL controller. + Each `mesh-task` must also be configured to use a Consul Enterprise client. + * **Namespaces:** [Namespaces](/docs/enterprise/namespaces) are supported when ACLs are enabled and the ACL controller is configured. + Each `mesh-task` must also be configured to use a Consul Enterprise client. +* **Sidecar containers:** Consul on ECS requires two sidecar containers to run in each ECS task: a Consul agent container and a sidecar proxy container. These additional sidecar containers must be included in the ECS task definition. The [Consul ECS Terraform module](/docs/ecs/terraform/install) will include these sidecar containers for you. 
If you do not use Terraform, you can construct the task definition yourself by following [our documentation](/docs/ecs/manual/install). -1. **Routing:** With your application running in tasks as part of the mesh, you must specify the +* **Routing:** With your application running in tasks as part of the mesh, you must specify the upstream services that your application calls. You will also need to change the URLs your application uses to ensure the application is making requests through the service mesh. -1. **Bind Address:** Once all communication is flowing through the service mesh, you should change +* **Bind Address:** Once all communication is flowing through the service mesh, you should change the address your application is listening on to `127.0.0.1` so that it only receives requests through the sidecar proxy. diff --git a/website/content/docs/ecs/terraform/secure-configuration.mdx b/website/content/docs/ecs/terraform/secure-configuration.mdx index 0d6e9ef48..6a932960e 100644 --- a/website/content/docs/ecs/terraform/secure-configuration.mdx +++ b/website/content/docs/ecs/terraform/secure-configuration.mdx @@ -21,7 +21,7 @@ A secure Consul cluster should include the following: Before deploying your service, you will need to deploy the [ACL controller](https://registry.terraform.io/modules/hashicorp/consul-ecs/aws/latest/submodules/acl-controller) so that it can provision the necessary tokens for tasks on the service mesh. To learn more about the ACL Controller, please see [Automatic ACL Token Provisioning](/docs/ecs/architecture#automatic-acl-token-provisioning). -To deploy the controller, you will first need store an ACL token with `acl:write` privileges +To deploy the controller, you will first need to store an ACL token with `acl:write` and `operator:write` privileges, and a CA certificate for the Consul server in AWS Secrets Manager. ```hcl From 9e06543a4f1ef88e9185a1749f8b819808fbda34 Mon Sep 17 00:00:00 2001 From: John Murret Date: Thu, 7 Apr 2022 13:41:42 -0600 Subject: [PATCH 104/785] docs: Updating Gossip EncryptionKey Rotation page with Vault use case (#12720) * docs: Updating Gossip EncryptionKey Rotation page with Vault use case * Adding a note to the vault instructions linking to the gossip key encryption using Vault page. * Correcting Vault guide for storing the rotated gossip key. * adding $ to shell sessions where it is missing on the gossip rotation page * adding $ to more shell sessions where it is missing on the gossip rotation page --- .../gossip-encryption-key-rotation.mdx | 52 +++++++++++++++---- 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/website/content/docs/k8s/operations/gossip-encryption-key-rotation.mdx b/website/content/docs/k8s/operations/gossip-encryption-key-rotation.mdx index 0161ec323..6270f926f 100644 --- a/website/content/docs/k8s/operations/gossip-encryption-key-rotation.mdx +++ b/website/content/docs/k8s/operations/gossip-encryption-key-rotation.mdx @@ -15,7 +15,7 @@ The following steps need only be performed once in any single datacenter if your 1. (Optional) If Consul is installed in a dedicated namespace, set the kubeConfig context to the consul namespace. Otherwise, subsequent commands will need to include -n consul. ```shell-session - kubectl config set-context --current --namespace=consul + $ kubectl config set-context --current --namespace=consul ``` 1. 
Generate a new key and store in safe place for retrieval in the future ([Vault KV Secrets Engine](https://www.vaultproject.io/docs/secrets/kv/kv-v2#usage) is a recommended option). @@ -31,7 +31,7 @@ The following steps need only be performed once in any single datacenter if your 1. `kubectl exec` into a Consul Agent pod (Server or Client) and add the new key to the Consul Keyring. This can be performed by running the following command: ```shell-session - kubectl exec -it consul-server-0 -- /bin/sh + $ kubectl exec -it consul-server-0 -- /bin/sh ``` 1. **Note:** If ACLs are enabled, export the bootstrap token as the CONSUL_HTTP_TOKEN to perform all `consul keyring` operations. The bootstrap token can be found in the Kubernetes secret `consul-bootstrap-acl-token` of the primary datacenter. @@ -43,7 +43,7 @@ The following steps need only be performed once in any single datacenter if your 1. Install the new Gossip encryption key with the `consul keyring` command: ```shell-session - consul keyring -install="Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=" + $ consul keyring -install="Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=" ==> Installing new gossip encryption key... ``` Consul automatically propagates this encryption key across all clients and servers across the cluster and the federation if Consul federation is enabled. @@ -51,7 +51,7 @@ The following steps need only be performed once in any single datacenter if your 1. List the keys in the keyring to verify the new key has been installed successfully. ```shell-session - consul keyring -list + $ consul keyring -list ==> Gathering installed encryption keys... WAN: @@ -72,14 +72,14 @@ The following steps need only be performed once in any single datacenter if your 1. After the new key has been added to the keychain, you can install it as the new gossip encryption key. Run the following command in the Consul Agent pod using `kubectl exec`: ```shell-session - consul keyring -use="Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=" + $ consul keyring -use="Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=" ==> Changing primary gossip encryption key... ``` 1. You can ensure that the key has been propagated to all agents by verifying the number of agents that recognize the key over the number of total agents in the datacenter. Listing them provides that information. ```shell-session - consul keyring -list + $ consul keyring -list ==> Gathering installed encryption keys... WAN: @@ -95,7 +95,9 @@ The following steps need only be performed once in any single datacenter if your Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w= [4/4] ``` -1. Update the Kubernetes secrets with the latest gossip encryption key. +1. Update the Kubernetes or Vault secrets with the latest gossip encryption key. + + Update the gossip encryption Kubernetes Secret with the value of the new gossip encryption key to ensure that subsequent `helm upgrades` commands execute successfully. The name of the secret that stores the value of the gossip encryption key can be found in the Helm values file: @@ -122,6 +124,38 @@ The following steps need only be performed once in any single datacenter if your ```shell-session $ kubectl patch secret consul-federation --patch='{"stringData":{"gossipEncryptionKey": "Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w="}}' ``` + + + + -> **Note:** These Vault instructions assume that you have integrated your [Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip) using [Vault as a Secrets Backend](/docs/k8s/installation/vault). 
+ + Update the gossip encryption Vault Secret with the value of the new gossip encryption key to ensure that subsequent `helm upgrades` commands execute successfully. + The name of the secret that stores the value of the gossip encryption key can be found in the Helm values file: + ```yaml + global: + gossipEncryption: + secretName: secret/data/consul/gossip-encryption + secretKey: key + ``` + + ```shell-session + $ vault kv put secret/consul/gossip-encryption key="Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=" + ``` + + **Note:** In the case of federated Consul clusters, update the federation-secret value for the gossip encryption key. The name of the secret and key can be found in the values file of the secondary datacenter. + + ```yaml + global: + gossipEncryption: + secretName: consul-federation + secretKey: gossip-key + ``` + + ```shell-session + $ vault kv put secret/consul/consul-federation gossip-key="Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=" + ``` + + 1. Remove the old key once the new one has been installed successfully. @@ -132,10 +166,10 @@ The following steps need only be performed once in any single datacenter if your 1. **Note:** If ACLs are enabled, export the bootstrap token as the CONSUL_HTTP_TOKEN to perform all `consul keyring` operations. ```shell-session - export CONSUL_HTTP_TOKEN= + $ export CONSUL_HTTP_TOKEN= ``` 1. Remove old Gossip encryption key with the `consul keyring` command: ```shell-session - consul keyring -remove="CL6M+jKj3630CZLXI0IRVeyci1jgIAveiZKvdtTybbA=" + $ consul keyring -remove="CL6M+jKj3630CZLXI0IRVeyci1jgIAveiZKvdtTybbA=" ==> Removing gossip encryption key... ``` From 1835e761fd9a8156d85b0df7346dd087fe569c1a Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Thu, 7 Apr 2022 13:10:20 -0700 Subject: [PATCH 105/785] improve error msg for deregister critical service If a service is automatically registered because it has a critical health check for longer than deregister_critical_service_after, the error message will now include: - mention of the deregister_critical_service_after option - the value of deregister_critical_service_after for that check --- .changelog/12725.txt | 3 +++ agent/agent.go | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 .changelog/12725.txt diff --git a/.changelog/12725.txt b/.changelog/12725.txt new file mode 100644 index 000000000..3cba2d250 --- /dev/null +++ b/.changelog/12725.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: improve log messages when a service with a critical health check is deregistered due to exceeding the deregister_critical_service_after timeout +``` \ No newline at end of file diff --git a/agent/agent.go b/agent/agent.go index c08316fd8..818587407 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1813,14 +1813,17 @@ func (a *Agent) reapServicesInternal() { if timeout > 0 && cs.CriticalFor() > timeout { reaped[serviceID] = true if err := a.RemoveService(serviceID); err != nil { - a.logger.Error("unable to deregister service after check has been critical for too long", + a.logger.Error("failed to deregister service with critical health that exceeded health check's 'deregister_critical_service_after' timeout", "service", serviceID.String(), "check", checkID.String(), - "error", err) + "timeout", timeout.String(), + "error", err, + ) } else { - a.logger.Info("Check for service has been critical for too long; deregistered service", + a.logger.Info("deregistered service with critical health due to exceeding health check's 'deregister_critical_service_after' 
timeout", "service", serviceID.String(), "check", checkID.String(), + "timeout", timeout.String(), ) } } From f4eac06b21b176bac168f7d75ee97761326da976 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Thu, 7 Apr 2022 16:58:21 -0500 Subject: [PATCH 106/785] xds: ensure that all connect timeout configs can apply equally to tproxy direct dial connections (#12711) Just like standard upstreams the order of applicability in descending precedence: 1. caller's `service-defaults` upstream override for destination 2. caller's `service-defaults` upstream defaults 3. destination's `service-resolver` ConnectTimeout 4. system default of 5s Co-authored-by: mrspanishviking --- .changelog/12711.txt | 3 ++ agent/consul/discovery_chain_endpoint_test.go | 11 ++++- agent/consul/discoverychain/compile.go | 3 ++ agent/consul/discoverychain/compile_test.go | 31 ++++++++++-- agent/discovery_chain_endpoint_test.go | 26 ++++++++-- agent/proxycfg/connect_proxy.go | 2 +- agent/proxycfg/testing_tproxy.go | 8 +++- agent/structs/discovery_chain.go | 38 +++++++++++++++ agent/xds/clusters.go | 17 +++++-- ...ial-instances-directly.envoy-1-20-x.golden | 4 +- api/discovery_chain.go | 47 +++++++++++++++++-- api/discovery_chain_test.go | 44 +++++++++-------- .../connect/l7-traffic/discovery-chain.mdx | 4 ++ .../docs/connect/transparent-proxy.mdx | 5 +- 14 files changed, 198 insertions(+), 45 deletions(-) create mode 100644 .changelog/12711.txt diff --git a/.changelog/12711.txt b/.changelog/12711.txt new file mode 100644 index 000000000..3d1400550 --- /dev/null +++ b/.changelog/12711.txt @@ -0,0 +1,3 @@ +```release-note:improvement +xds: ensure that all connect timeout configs can apply equally to tproxy direct dial connections +``` diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 1f9a82f14..97ae5b124 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -59,6 +59,12 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul") t.Name = t.SNI + t.ConnectTimeout = 5 * time.Second // default + return t + } + + targetWithConnectTimeout := func(t *structs.DiscoveryTarget, connectTimeout time.Duration) *structs.DiscoveryTarget { + t.ConnectTimeout = connectTimeout return t } @@ -237,7 +243,10 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": targetWithConnectTimeout( + newTarget("web", "", "default", "default", "dc1"), + 33*time.Second, + ), }, }, } diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index 0567b8b90..ed664878b 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -928,6 +928,9 @@ RESOLVE_AGAIN: } } + // Expose a copy of this on the targets for ease of access. + target.ConnectTimeout = connectTimeout + // Build node. 
node := &structs.DiscoveryGraphNode{ Type: structs.DiscoveryGraphNodeTypeResolver, diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 9a3dde647..221ac757f 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -293,7 +293,10 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": targetWithConnectTimeout( + newTarget("main", "", "default", "default", "dc1", nil), + 33*time.Second, + ), }, } @@ -494,7 +497,10 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": targetWithConnectTimeout( + newTarget("main", "", "default", "default", "dc1", nil), + 33*time.Second, + ), }, } @@ -687,7 +693,10 @@ func testcase_NoopSplit_WithResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": targetWithConnectTimeout( + newTarget("main", "", "default", "default", "dc1", nil), + 33*time.Second, + ), }, } @@ -1847,8 +1856,14 @@ func testcase_MultiDatacenterCanary() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil), - "main.default.default.dc3": newTarget("main", "", "default", "default", "dc3", nil), + "main.default.default.dc2": targetWithConnectTimeout( + newTarget("main", "", "default", "default", "dc2", nil), + 33*time.Second, + ), + "main.default.default.dc3": targetWithConnectTimeout( + newTarget("main", "", "default", "default", "dc3", nil), + 33*time.Second, + ), }, } return compileTestCase{entries: entries, expect: expect} @@ -2780,8 +2795,14 @@ func newTarget(service, serviceSubset, namespace, partition, datacenter string, t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) t.SNI = connect.TargetSNI(t, "trustdomain.consul") t.Name = t.SNI + t.ConnectTimeout = 5 * time.Second // default if modFn != nil { modFn(t) } return t } + +func targetWithConnectTimeout(t *structs.DiscoveryTarget, connectTimeout time.Duration) *structs.DiscoveryTarget { + t.ConnectTimeout = connectTimeout + return t +} diff --git a/agent/discovery_chain_endpoint_test.go b/agent/discovery_chain_endpoint_test.go index 3db87ba52..86ef96617 100644 --- a/agent/discovery_chain_endpoint_test.go +++ b/agent/discovery_chain_endpoint_test.go @@ -31,6 +31,12 @@ func TestDiscoveryChainRead(t *testing.T) { t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul") t.Name = t.SNI + t.ConnectTimeout = 5 * time.Second // default + return t + } + + targetWithConnectTimeout := func(t *structs.DiscoveryTarget, connectTimeout time.Duration) *structs.DiscoveryTarget { + t.ConnectTimeout = connectTimeout return t } @@ -258,8 +264,14 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), - "web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"), 
+ "web.default.default.dc1": targetWithConnectTimeout( + newTarget("web", "", "default", "default", "dc1"), + 33*time.Second, + ), + "web.default.default.dc2": targetWithConnectTimeout( + newTarget("web", "", "default", "default", "dc2"), + 33*time.Second, + ), }, } if !reflect.DeepEqual(expect, value.Chain) { @@ -268,12 +280,18 @@ func TestDiscoveryChainRead(t *testing.T) { }) })) - expectTarget_DC1 := newTarget("web", "", "default", "default", "dc1") + expectTarget_DC1 := targetWithConnectTimeout( + newTarget("web", "", "default", "default", "dc1"), + 33*time.Second, + ) expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeLocal, } - expectTarget_DC2 := newTarget("web", "", "default", "default", "dc2") + expectTarget_DC2 := targetWithConnectTimeout( + newTarget("web", "", "default", "default", "dc2"), + 33*time.Second, + ) expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeLocal, } diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index d0849a01e..e23ae0662 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -116,12 +116,12 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e continue } + snap.ConnectProxy.UpstreamConfig[uid] = &u // This can be true if the upstream is a synthetic entry populated from centralized upstream config. // Watches should not be created for them. if u.CentrallyConfigured { continue } - snap.ConnectProxy.UpstreamConfig[uid] = &u dc := s.source.Datacenter if u.Datacenter != "" { diff --git a/agent/proxycfg/testing_tproxy.go b/agent/proxycfg/testing_tproxy.go index 4c04c9346..e593a51e9 100644 --- a/agent/proxycfg/testing_tproxy.go +++ b/agent/proxycfg/testing_tproxy.go @@ -1,6 +1,8 @@ package proxycfg import ( + "time" + "github.com/mitchellh/go-testing-interface" "github.com/hashicorp/consul/agent/cache" @@ -322,7 +324,11 @@ func TestConfigSnapshotTransparentProxyDialDirectly(t testing.T) *ConfigSnapshot mongo = structs.NewServiceName("mongo", nil) mongoUID = NewUpstreamIDFromServiceName(mongo) - mongoChain = discoverychain.TestCompileConfigEntries(t, "mongo", "default", "default", "dc1", connect.TestClusterID+".consul", nil) + mongoChain = discoverychain.TestCompileConfigEntries(t, "mongo", "default", "default", "dc1", connect.TestClusterID+".consul", nil, &structs.ServiceResolverConfigEntry{ + Kind: structs.ServiceResolver, + Name: "mongo", + ConnectTimeout: 33 * time.Second, + }) db = structs.NewServiceName("db", nil) ) diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index c2738f842..046ec1c4d 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -208,6 +208,8 @@ type DiscoveryTarget struct { MeshGateway MeshGatewayConfig `json:",omitempty"` Subset ServiceResolverSubset `json:",omitempty"` + ConnectTimeout time.Duration `json:",omitempty"` + // External is true if this target is outside of this consul cluster. 
External bool `json:",omitempty"` @@ -221,6 +223,42 @@ type DiscoveryTarget struct { Name string `json:",omitempty"` } +func (t *DiscoveryTarget) MarshalJSON() ([]byte, error) { + type Alias DiscoveryTarget + exported := struct { + ConnectTimeout string `json:",omitempty"` + *Alias + }{ + ConnectTimeout: t.ConnectTimeout.String(), + Alias: (*Alias)(t), + } + if t.ConnectTimeout == 0 { + exported.ConnectTimeout = "" + } + + return json.Marshal(exported) +} + +func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error { + type Alias DiscoveryTarget + aux := &struct { + ConnectTimeout string + *Alias + }{ + Alias: (*Alias)(t), + } + if err := lib.UnmarshalJSON(data, &aux); err != nil { + return err + } + var err error + if aux.ConnectTimeout != "" { + if t.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { + return err + } + } + return nil +} + func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget { t := &DiscoveryTarget{ Service: service, diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 63442eb51..bbbad4a8b 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -173,9 +173,15 @@ func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, }) } - for _, target := range cfgSnap.ConnectProxy.PassthroughUpstreams { - for tid := range target { - uid := proxycfg.NewUpstreamIDFromTargetID(tid) + for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { + targetMap, ok := cfgSnap.ConnectProxy.PassthroughUpstreams[uid] + if !ok { + continue + } + + for targetID := range targetMap { + + uid := proxycfg.NewUpstreamIDFromTargetID(targetID) sni := connect.ServiceSNI( uid.Name, "", uid.NamespaceOrDefault(), uid.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) @@ -190,10 +196,13 @@ func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, }, LbPolicy: envoy_cluster_v3.Cluster_CLUSTER_PROVIDED, - // TODO(tproxy) This should use the connection timeout configured on the upstream's config entry ConnectTimeout: ptypes.DurationProto(5 * time.Second), } + if discoTarget, ok := chain.Targets[targetID]; ok && discoTarget.ConnectTimeout > 0 { + c.ConnectTimeout = ptypes.DurationProto(discoTarget.ConnectTimeout) + } + spiffeID := connect.SpiffeIDService{ Host: cfgSnap.Roots.TrustDomain, Partition: uid.PartitionOrDefault(), diff --git a/agent/xds/testdata/clusters/transparent-proxy-dial-instances-directly.envoy-1-20-x.golden b/agent/xds/testdata/clusters/transparent-proxy-dial-instances-directly.envoy-1-20-x.golden index d920c11a4..ff729a66d 100644 --- a/agent/xds/testdata/clusters/transparent-proxy-dial-instances-directly.envoy-1-20-x.golden +++ b/agent/xds/testdata/clusters/transparent-proxy-dial-instances-directly.envoy-1-20-x.golden @@ -210,7 +210,7 @@ "resourceApiVersion": "V3" } }, - "connectTimeout": "5s", + "connectTimeout": "33s", "circuitBreakers": { }, @@ -305,7 +305,7 @@ "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", "name": "passthrough~mongo.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", "type": "ORIGINAL_DST", - "connectTimeout": "5s", + "connectTimeout": "33s", "lbPolicy": "CLUSTER_PROVIDED", "transportSocket": { "name": "tls", diff --git a/api/discovery_chain.go b/api/discovery_chain.go index d198b2bb5..4217603cf 100644 --- a/api/discovery_chain.go +++ b/api/discovery_chain.go @@ -234,9 +234,46 @@ type DiscoveryTarget struct { Namespace string Datacenter string - MeshGateway MeshGatewayConfig - 
Subset ServiceResolverSubset - External bool - SNI string - Name string + MeshGateway MeshGatewayConfig + Subset ServiceResolverSubset + ConnectTimeout time.Duration + External bool + SNI string + Name string +} + +func (t *DiscoveryTarget) MarshalJSON() ([]byte, error) { + type Alias DiscoveryTarget + exported := &struct { + ConnectTimeout string `json:",omitempty"` + *Alias + }{ + ConnectTimeout: t.ConnectTimeout.String(), + Alias: (*Alias)(t), + } + if t.ConnectTimeout == 0 { + exported.ConnectTimeout = "" + } + + return json.Marshal(exported) +} + +func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error { + type Alias DiscoveryTarget + aux := &struct { + ConnectTimeout string + *Alias + }{ + Alias: (*Alias)(t), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.ConnectTimeout != "" { + if t.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { + return err + } + } + return nil } diff --git a/api/discovery_chain_test.go b/api/discovery_chain_test.go index 049ce3963..dd4fc0181 100644 --- a/api/discovery_chain_test.go +++ b/api/discovery_chain_test.go @@ -47,12 +47,13 @@ func TestAPI_DiscoveryChain_Get(t *testing.T) { }, Targets: map[string]*DiscoveryTarget{ "web.default.default.dc1": { - ID: "web.default.default.dc1", - Service: "web", - Namespace: "default", - Datacenter: "dc1", - SNI: "web.default.dc1.internal." + testClusterID + ".consul", - Name: "web.default.dc1.internal." + testClusterID + ".consul", + ID: "web.default.default.dc1", + Service: "web", + Namespace: "default", + Datacenter: "dc1", + ConnectTimeout: 5 * time.Second, + SNI: "web.default.dc1.internal." + testClusterID + ".consul", + Name: "web.default.dc1.internal." + testClusterID + ".consul", }, }, }, @@ -88,12 +89,13 @@ func TestAPI_DiscoveryChain_Get(t *testing.T) { }, Targets: map[string]*DiscoveryTarget{ "web.default.default.dc2": { - ID: "web.default.default.dc2", - Service: "web", - Namespace: "default", - Datacenter: "dc2", - SNI: "web.default.dc2.internal." + testClusterID + ".consul", - Name: "web.default.dc2.internal." + testClusterID + ".consul", + ID: "web.default.default.dc2", + Service: "web", + Namespace: "default", + Datacenter: "dc2", + ConnectTimeout: 5 * time.Second, + SNI: "web.default.dc2.internal." + testClusterID + ".consul", + Name: "web.default.dc2.internal." + testClusterID + ".consul", }, }, }, @@ -134,12 +136,13 @@ func TestAPI_DiscoveryChain_Get(t *testing.T) { }, Targets: map[string]*DiscoveryTarget{ "web.default.default.dc1": { - ID: "web.default.default.dc1", - Service: "web", - Namespace: "default", - Datacenter: "dc1", - SNI: "web.default.dc1.internal." + testClusterID + ".consul", - Name: "web.default.dc1.internal." + testClusterID + ".consul", + ID: "web.default.default.dc1", + Service: "web", + Namespace: "default", + Datacenter: "dc1", + ConnectTimeout: 33 * time.Second, + SNI: "web.default.dc1.internal." + testClusterID + ".consul", + Name: "web.default.dc1.internal." + testClusterID + ".consul", }, }, }, @@ -186,8 +189,9 @@ func TestAPI_DiscoveryChain_Get(t *testing.T) { MeshGateway: MeshGatewayConfig{ Mode: MeshGatewayModeLocal, }, - SNI: "web.default.dc2.internal." + testClusterID + ".consul", - Name: "web.default.dc2.internal." + testClusterID + ".consul", + ConnectTimeout: 22 * time.Second, + SNI: "web.default.dc2.internal." + testClusterID + ".consul", + Name: "web.default.dc2.internal." 
+ testClusterID + ".consul", }, }, }, diff --git a/website/content/docs/connect/l7-traffic/discovery-chain.mdx b/website/content/docs/connect/l7-traffic/discovery-chain.mdx index dc9594e61..39ea48df5 100644 --- a/website/content/docs/connect/l7-traffic/discovery-chain.mdx +++ b/website/content/docs/connect/l7-traffic/discovery-chain.mdx @@ -237,6 +237,10 @@ A single node in the compiled discovery chain. - `External` `(bool: false)` - True if this target is outside of this consul cluster. +- `ConnectTimeout` `(duration)` - Copy of the underlying `service-resolver` + [`ConnectTimeout`](/docs/connect/config-entries/service-resolver#connecttimeout) + field. If one is not defined the default of `5s` is returned. + - `SNI` `(string)` - This value should be used as the [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) value when connecting to this set of endpoints over TLS. diff --git a/website/content/docs/connect/transparent-proxy.mdx b/website/content/docs/connect/transparent-proxy.mdx index 750c62e0f..1091091cf 100644 --- a/website/content/docs/connect/transparent-proxy.mdx +++ b/website/content/docs/connect/transparent-proxy.mdx @@ -169,8 +169,9 @@ transparent proxy's datacenter. Services can also dial explicit upstreams in oth in the datacenter `dc2`. * In the deployment configuration where a [single Consul datacenter spans multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. An example would be `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, where `my-service` is the service that exists in another Kubernetes cluster and is exposed on port `1234`. Although Transparent Proxy is enabled, KubeDNS is not utilized when communicating between services existing on separate Kubernetes clusters. -* When dialing headless services the request will be proxied using a plain TCP proxy with a 5s connection timeout. -Currently the upstream's protocol and connection timeout are not considered. + +* When dialing headless services, the request will be proxied using a plain TCP + proxy. The upstream's protocol is not considered. ## Using Transparent Proxy From 365f1c866f229fb469fb729eedbfa303aca6adc0 Mon Sep 17 00:00:00 2001 From: Mark Anderson Date: Fri, 8 Apr 2022 15:33:45 -0700 Subject: [PATCH 107/785] Update vault to 1.9.4 Vault hasn't been updated for a while, and we should be testing against a newer version. I'd update to 1.10.0, but we would run afoul of https://github.com/hashicorp/vault/issues/14863. We should update to 1.10.1 as soon as it comes our, or better yet move to using latest. 
Signed-off-by: Mark Anderson --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index df724af17..0226bffbf 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,7 +31,7 @@ references: GIT_COMMITTER_NAME: circleci-consul S3_ARTIFACT_BUCKET: consul-dev-artifacts-v2 BASH_ENV: .circleci/bash_env.sh - VAULT_BINARY_VERSION: 1.2.2 + VAULT_BINARY_VERSION: 1.9.4 steps: install-gotestsum: &install-gotestsum From e4d8c3b1d02125ad2b4e5fb1d9af54122a3bb50f Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 11 Apr 2022 10:04:26 +0100 Subject: [PATCH 108/785] Fallback icons to currentColor --- .../consul-ui/app/styles/base/icons/base-keyframes.scss | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/packages/consul-ui/app/styles/base/icons/base-keyframes.scss b/ui/packages/consul-ui/app/styles/base/icons/base-keyframes.scss index dab94ecee..37d328afc 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/base-keyframes.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/base-keyframes.scss @@ -8,12 +8,12 @@ *::before { animation-name: var(--icon-name-start, var(--icon-name)), var(--icon-size-start, var(--icon-size, icon-000)); - background-color: var(--icon-color-start, var(--icon-color)); + background-color: var(--icon-color-start, var(--icon-color, currentColor)); } *::after { animation-name: var(--icon-name-end, var(--icon-name)), var(--icon-size-end, var(--icon-size, icon-000)); - background-color: var(--icon-color-end, var(--icon-color)); + background-color: var(--icon-color-end, var(--icon-color, currentColor)); } [style*='--icon-color-start']::before { From 42a0f204ac5c9798288675461ed20f497041f034 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 11 Apr 2022 10:05:02 +0100 Subject: [PATCH 109/785] Fixup any psuedo elements that don't need currentColor --- ui/packages/consul-ui/app/components/breadcrumbs/skin.scss | 1 + ui/packages/consul-ui/app/components/csv-list/index.scss | 1 + ui/packages/consul-ui/app/components/empty-state/skin.scss | 2 +- .../consul-ui/app/components/main-nav-vertical/skin.scss | 1 + .../consul-ui/app/components/secret-button/layout.scss | 1 + ui/packages/consul-ui/app/components/tooltip-panel/skin.scss | 2 +- ui/packages/consul-ui/app/components/tooltip/index.scss | 5 +++++ 7 files changed, 11 insertions(+), 2 deletions(-) diff --git a/ui/packages/consul-ui/app/components/breadcrumbs/skin.scss b/ui/packages/consul-ui/app/components/breadcrumbs/skin.scss index 966cf886e..55a32e5c9 100644 --- a/ui/packages/consul-ui/app/components/breadcrumbs/skin.scss +++ b/ui/packages/consul-ui/app/components/breadcrumbs/skin.scss @@ -8,6 +8,7 @@ } %crumbs::before { text-decoration: none; + background-color: var(--transparent); } %breadcrumb-milestone::before { @extend %with-chevron-left-mask, %as-pseudo; diff --git a/ui/packages/consul-ui/app/components/csv-list/index.scss b/ui/packages/consul-ui/app/components/csv-list/index.scss index 4c64a7c87..a5141fe18 100644 --- a/ui/packages/consul-ui/app/components/csv-list/index.scss +++ b/ui/packages/consul-ui/app/components/csv-list/index.scss @@ -14,4 +14,5 @@ content: var(--csv-list-separator); vertical-align: initial; margin-right: 0.3em; + background-color: var(--transparent); } diff --git a/ui/packages/consul-ui/app/components/empty-state/skin.scss b/ui/packages/consul-ui/app/components/empty-state/skin.scss index 9e09d9368..22d9fcae4 100644 --- a/ui/packages/consul-ui/app/components/empty-state/skin.scss +++ 
b/ui/packages/consul-ui/app/components/empty-state/skin.scss @@ -23,7 +23,7 @@ %empty-state[class*='status-'] header::before { @extend %as-pseudo; } -%empty-state header::before { +%empty-state[class*='status-'] header::before { @extend %with-alert-circle-outline-mask; } %empty-state.status-404 header::before { diff --git a/ui/packages/consul-ui/app/components/main-nav-vertical/skin.scss b/ui/packages/consul-ui/app/components/main-nav-vertical/skin.scss index e1930d8bf..abd048df9 100644 --- a/ui/packages/consul-ui/app/components/main-nav-vertical/skin.scss +++ b/ui/packages/consul-ui/app/components/main-nav-vertical/skin.scss @@ -47,6 +47,7 @@ display: block; margin-top: -0.5rem; margin-bottom: 0.5rem; + background-color: var(--transparent); } %main-nav-vertical-popover-menu-trigger { border: var(--decor-border-100); diff --git a/ui/packages/consul-ui/app/components/secret-button/layout.scss b/ui/packages/consul-ui/app/components/secret-button/layout.scss index 1393e5efe..fe223180e 100644 --- a/ui/packages/consul-ui/app/components/secret-button/layout.scss +++ b/ui/packages/consul-ui/app/components/secret-button/layout.scss @@ -17,6 +17,7 @@ display: inline; visibility: visible; content: '■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■'; + background-color: var(--transparent); } %secret-button input:checked ~ em::before { display: none; diff --git a/ui/packages/consul-ui/app/components/tooltip-panel/skin.scss b/ui/packages/consul-ui/app/components/tooltip-panel/skin.scss index 619da5172..7e65c6292 100644 --- a/ui/packages/consul-ui/app/components/tooltip-panel/skin.scss +++ b/ui/packages/consul-ui/app/components/tooltip-panel/skin.scss @@ -6,7 +6,7 @@ @extend %as-pseudo; width: 12px; height: 12px; - background: white; + background-color: rgb(var(--tone-gray-000)); border-top: 1px solid rgb(var(--tone-gray-300)); border-right: 1px solid rgb(var(--tone-gray-300)); transform: rotate(-45deg); diff --git a/ui/packages/consul-ui/app/components/tooltip/index.scss b/ui/packages/consul-ui/app/components/tooltip/index.scss index 333faed35..1f8802a11 100644 --- a/ui/packages/consul-ui/app/components/tooltip/index.scss +++ b/ui/packages/consul-ui/app/components/tooltip/index.scss @@ -45,6 +45,7 @@ &::before { border-color: var(--transparent); border-style: solid; + background-color: var(--transparent); } } @@ -52,23 +53,27 @@ &::before { border-width: var(--size) var(--size) 0; border-top-color: initial; + background-color: var(--transparent); } } %tooltip-tail-bottom { &::before { border-width: 0 var(--size) var(--size); border-bottom-color: initial; + background-color: var(--transparent); } } %tooltip-tail-left { &::before { border-width: var(--size) 0 var(--size) var(--size); border-left-color: initial; + background-color: var(--transparent); } } %tooltip-tail-right { &::before { border-width: var(--size) var(--size) var(--size) 0; border-right-color: initial; + background-color: var(--transparent); } } From de234b3aa74e3e85f4c82f1224936d85053ae663 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 11 Apr 2022 12:49:59 +0100 Subject: [PATCH 110/785] ui: Add more explanatory texts for empty states (#12354) * ui: Add more explanatory texts for empty states * Change all template "Read the guide"s * Add missing htmlSafe * Remove the stuff I commented out to try and grok the hairy rebase * Changelog * More rebased yaml weirdness plus added node:read --- .changelog/12354.txt | 3 + .../app/templates/dc/nodes/show/sessions.hbs | 25 +- .../templates/dc/acls/auth-methods/index.hbs | 19 +- 
.../acls/auth-methods/show/binding-rules.hbs | 10 +- .../acls/auth-methods/show/nspace-rules.hbs | 10 +- .../app/templates/dc/acls/policies/index.hbs | 21 +- .../app/templates/dc/acls/roles/index.hbs | 19 +- .../app/templates/dc/acls/tokens/index.hbs | 19 +- .../app/templates/dc/intentions/index.hbs | 21 +- .../consul-ui/app/templates/dc/kv/index.hbs | 21 +- .../app/templates/dc/nodes/index.hbs | 15 +- .../templates/dc/nodes/show/healthchecks.hbs | 8 +- .../app/templates/dc/nodes/show/services.hbs | 7 +- .../app/templates/dc/services/index.hbs | 23 +- .../dc/services/instance/exposedpaths.hbs | 8 +- .../dc/services/instance/upstreams.hbs | 24 +- .../templates/dc/services/show/instances.hbs | 7 +- .../dc/services/show/intentions/index.hbs | 21 +- .../templates/dc/services/show/services.hbs | 14 +- .../app/templates/dc/services/show/tags.hbs | 11 +- .../templates/dc/services/show/upstreams.hbs | 7 +- .../consul-ui/translations/routes/en-us.yaml | 268 ++++++++++++++++-- 22 files changed, 385 insertions(+), 196 deletions(-) create mode 100644 .changelog/12354.txt diff --git a/.changelog/12354.txt b/.changelog/12354.txt new file mode 100644 index 000000000..a81bb29bc --- /dev/null +++ b/.changelog/12354.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Include details on ACL policy dispositions required for unauthorized views +``` diff --git a/ui/packages/consul-lock-sessions/app/templates/dc/nodes/show/sessions.hbs b/ui/packages/consul-lock-sessions/app/templates/dc/nodes/show/sessions.hbs index b868871f7..b6cb1107a 100644 --- a/ui/packages/consul-lock-sessions/app/templates/dc/nodes/show/sessions.hbs +++ b/ui/packages/consul-lock-sessions/app/templates/dc/nodes/show/sessions.hbs @@ -70,17 +70,18 @@ as |route|> - -

- Welcome to Lock Sessions -

-
- - -

- Consul provides a session mechanism which can be used to build distributed locks. Sessions act as a binding layer between nodes, health checks, and key/value data. There are currently no lock sessions present, or you may not have permission to view lock sessions. -

-
+ +

+ {{t 'routes.dc.nodes.show.sessions.empty.header' + items=items.length + }} +

+
+ + {{t 'routes.dc.nodes.show.sessions.empty.body' + htmlSafe=true + }} + diff --git a/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/index.hbs b/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/index.hbs index d68a2ec88..f61a94347 100644 --- a/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/index.hbs @@ -90,21 +90,16 @@ as |route|> >

- {{#if (gt items.length 0)}} - No auth methods found - {{else}} - Welcome to Auth Methods - {{/if}} + {{t 'routes.dc.auth-methods.index.empty.header' + items=items.length + }}

-

- {{#if (gt items.length 0)}} - No auth methods where found matching that search, or you may not have access to view the auth methods you are searching for. - {{else}} - There don't seem to be any auth methods, or you may not have access to view auth methods yet. - {{/if}} -

+ {{t 'routes.dc.auth-methods.index.empty.body' + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/acls/roles/index.hbs b/ui/packages/consul-ui/app/templates/dc/acls/roles/index.hbs index 17d398429..a1b76fb63 100644 --- a/ui/packages/consul-ui/app/templates/dc/acls/roles/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/acls/roles/index.hbs @@ -91,21 +91,16 @@ as |route|> >

- {{#if (gt items.length 0)}} - No roles found - {{else}} - Welcome to Roles - {{/if}} + {{t 'routes.dc.acls.roles.index.empty.header' + items=items.length + }}

-

- {{#if (gt items.length 0)}} - No roles where found matching that search, or you may not have access to view the roles you are searching for. - {{else}} - There don't seem to be any roles, or you may not have access to view roles yet. - {{/if}} -

+ {{t 'routes.dc.acls.roles.index.empty.body' + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/kv/index.hbs b/ui/packages/consul-ui/app/templates/dc/kv/index.hbs index 100e6144d..97858ca95 100644 --- a/ui/packages/consul-ui/app/templates/dc/kv/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/kv/index.hbs @@ -178,28 +178,23 @@ as |sort filters parent items|}} >

- {{#if (gt items.length 0)}} - No K/V pairs found - {{else}} - Welcome to Key/Value - {{/if}} + {{t 'routes.dc.kv.index.empty.header' + items=items.length + }}

-

- {{#if (gt items.length 0)}} - No K/V pairs where found matching that search, or you may not have access to view the K/V pairs you are searching for. - {{else}} - You don't have any K/V pairs, or you may not have access to view K/V pairs yet. - {{/if}} -

+ {{t 'routes.dc.kv.index.empty.body' + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs b/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs index 7c75724d1..d5ba6306a 100644 --- a/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs @@ -95,17 +95,16 @@ as |route|> >

- {{#if (gt items.length 0)}} - No nodes found - {{else}} - Welcome to Nodes - {{/if}} + {{t 'routes.dc.nodes.index.empty.header' + items=items.length + }}

-

- There don't seem to be any registered nodes, or you may not have access to view nodes yet. -

+ {{t 'routes.dc.nodes.index.empty.body' + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/nodes/show/healthchecks.hbs b/ui/packages/consul-ui/app/templates/dc/nodes/show/healthchecks.hbs index ca2cb185a..8a8f9b876 100644 --- a/ui/packages/consul-ui/app/templates/dc/nodes/show/healthchecks.hbs +++ b/ui/packages/consul-ui/app/templates/dc/nodes/show/healthchecks.hbs @@ -82,10 +82,10 @@ as |route|> - {{t "routes.dc.nodes.show.healthchecks.empty" - items=items.length - htmlSafe=true - }} + {{t "routes.dc.nodes.show.healthchecks.empty" + items=items.length + htmlSafe=true + }} diff --git a/ui/packages/consul-ui/app/templates/dc/nodes/show/services.hbs b/ui/packages/consul-ui/app/templates/dc/nodes/show/services.hbs index 770386b37..2c7c67a18 100644 --- a/ui/packages/consul-ui/app/templates/dc/nodes/show/services.hbs +++ b/ui/packages/consul-ui/app/templates/dc/nodes/show/services.hbs @@ -63,9 +63,10 @@ as |route|> -

- This node has no service instances{{#if (gt items.length 0)}} matching that search{{/if}}. -

+ {{t "routes.dc.nodes.show.services.empty" + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/services/index.hbs b/ui/packages/consul-ui/app/templates/dc/services/index.hbs index 3d6e04933..433211d0c 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/index.hbs @@ -105,21 +105,16 @@ as |sort filters items partition nspace|}} >

- {{#if (gt items.length 0)}} - No services found - {{else}} - Welcome to Services - {{/if}} + {{t 'routes.dc.services.index.empty.header' + items=items.length + }}

-

- {{#if (gt items.length 0)}} - No services where found matching that search, or you may not have access to view the services you are searching for. - {{else}} - There don't seem to be any registered services, or you may not have access to view services yet. - {{/if}} -

+ {{t 'routes.dc.services.index.empty.body' + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/services/instance/exposedpaths.hbs b/ui/packages/consul-ui/app/templates/dc/services/instance/exposedpaths.hbs index df7e723d6..c044d97dd 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/instance/exposedpaths.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/instance/exposedpaths.hbs @@ -7,9 +7,7 @@ as |route|> as |item proxy|}}
{{#if (gt proxy.ServiceProxy.Expose.Paths.length 0)}} -

- The following list shows individual HTTP paths exposed through Envoy for external services like Prometheus. Read more about this in our documentation. -

+ {{t 'routes.dc.services.instance.exposedpaths.intro' htmlSafe=true}} -

- There are no individual HTTP paths exposed through Envoy for external services like Prometheus. Read more about this in our documentation. -

+ {{t 'routes.dc.services.instance.exposedpaths.empty.body' htmlSafe=true}}
{{/if}} diff --git a/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs b/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs index 30830ca4c..c5282482f 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs @@ -53,19 +53,14 @@ as |route|> -

- {{t "routes.dc.services.instance.upstreams.tproxy-mode.body"}} -

+ {{t "routes.dc.services.instance.upstreams.tproxy-mode.body" + htmlSafe=true + }}
-

- - {{t "routes.dc.services.instance.upstreams.tproxy-mode.footer"}} - -

+ {{t "routes.dc.services.instance.upstreams.tproxy-mode.footer" + htmlSafe=true + }}
{{/if}} @@ -87,9 +82,10 @@ as |route|> -

- This service has no upstreams{{#if (gt items.length 0)}} matching that search{{/if}}. -

+ {{t "routes.dc.services.instance.upstreams.empty" + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/instances.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/instances.hbs index 5cd8d2297..7ee9df362 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/instances.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/instances.hbs @@ -75,9 +75,10 @@ as |sort filters items proxyMeta|}} -

- There are no instances{{#if (gt items.length 0)}} matching that search{{/if}}. -

+ {{t "routes.dc.services.show.instances.empty" + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/intentions/index.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/intentions/index.hbs index 003d915f6..2b127daa3 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/intentions/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/intentions/index.hbs @@ -94,28 +94,23 @@ as |route|> >

- {{#if (gt items.length 0)}} - No intentions found - {{else}} - Welcome to Intentions - {{/if}} + {{t 'routes.dc.services.intentions.index.empty.header' + items=items.length + }}

-

- {{#if (gt items.length 0)}} - No intentions where found matching that search, or you may not have access to view the intentions you are searching for. - {{else}} - There don't seem to be any intentions, or you may not have access to view intentions yet. - {{/if}} -

+ {{t 'routes.dc.services.intentions.index.empty.body' + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/services.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/services.hbs index 33d782863..04d568e8e 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/services.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/services.hbs @@ -56,10 +56,6 @@ as |route|> @filter={{filters}} /> {{/if}} -

- The following services may receive traffic from external services through this gateway. Learn more about configuring gateways in our - step-by-step guide. -

@items={{items}} as |collection|> + {{t "routes.dc.services.show.services.intro" + htmlSafe=true + }} -

- There are no linked services{{#if (gt items.length 0)}} matching that search{{/if}}. -

+ {{t "routes.dc.services.show.services.empty" + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/tags.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/tags.hbs index e94891f87..5aae2f633 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/tags.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/tags.hbs @@ -7,10 +7,15 @@ as |route|> {{else}} + +

+ {{t 'routes.dc.services.show.tags.empty.header'}} +

+
-

- There are no tags. -

+ {{t 'routes.dc.services.show.tags.empty.body' + htmlSafe=true + }}
{{/if}} diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs index 72d86d0f7..1f4c8efbc 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs @@ -82,9 +82,10 @@ as |route|> -

- There are no upstreams{{#if (gt items.length 0)}} matching that search{{/if}}. -

+ {{t "routes.dc.services.show.upstreams.empty" + items=items.length + htmlSafe=true + }}
diff --git a/ui/packages/consul-ui/translations/routes/en-us.yaml b/ui/packages/consul-ui/translations/routes/en-us.yaml index 59dd94ff1..e1b7d3559 100644 --- a/ui/packages/consul-ui/translations/routes/en-us.yaml +++ b/ui/packages/consul-ui/translations/routes/en-us.yaml @@ -18,12 +18,46 @@ dc: title: License nodes: + index: + empty: + header: | + {items, select, + 0 {Welcome to Nodes} + other {No Nodes found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any registered Nodes in this Consul cluster} + other {No Nodes were found matching your search} + }, or you may not have service:read and node:read access to this view. +

show: + rtt: + title: Round Trip Time + metadata: + title: Metadata + sessions: + title: Lock Sessions + empty: + header: Welcome to Lock Sessions + body: | +

+ Consul provides a session mechanism which can be used to build distributed locks. Sessions act as a binding layer between Nodes, Health Checks, and Key/Value data. There are currently no Lock Sessions present, or you may not have key:read or session:read permissions. +

+ services: + title: Service Instances + empty: | +

+ This Node has no Service Instances{items, select, + 0 {} + other { matching that search} + }. +

healthchecks: title: Health Checks empty: |

- This node has no health checks{items, select, + This Node has no Health Checks{items, select, 0 {} other { matching that search} }. @@ -34,15 +68,65 @@ dc:

This node has a failing serf node check. The health statuses shown on this page are the statuses as they were known before the node became unreachable.

- services: - title: Service Instances - rtt: - title: Round Trip Time - sessions: - title: Lock Sessions - metadata: - title: Metadata services: + index: + empty: + header: | + {items, select, + 0 {Welcome to Services} + other {No Services found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any registered services in this Consul cluster} + other {No Services were found matching your search} + }, or you may not have service:read and node:read access to this view. Use Terraform, Kubernetes CRDs, Vault, or the Consul CLI to register Services. +

+ instance: + exposedpaths: + intro: | +

+ The following list shows individual HTTP paths exposed through Envoy for external services like Prometheus. Read more about this in our documentation. +

+ + empty: + body: | +

+ There are no individual HTTP paths exposed through Envoy for external services like Prometheus. Read more about this in our documentation. +

+ healthchecks: + empty: | +

+ This instance has no health checks{items, select, + 0 {} + other { matching that search} + }. +

+ critical-serf-notice: + header: Failing serf check + body: | +

+ This instance has a failing serf node check. The health statuses shown on this page are the statuses as they were known before the node became unreachable. +

+ upstreams: + tproxy-mode: + header: Transparent proxy mode + body: | +

+ The upstreams listed on this page have been defined in a proxy registration. There may be more upstreams, though, as "transparent" mode is enabled on this proxy. +

+ footer: | +

+ Read the documentation +

+ empty: | +

+ This Service Instance has no Upstreams{items, select, + 0 {} + other { matching that search} + }. +

show: topology: notices: @@ -87,30 +171,168 @@ dc:

Read the documentation

+ intentions: + index: + empty: + header: | + {items, select, + 0 {Welcome to Intentions} + other {No Intentions found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any Intentions in this Consul cluster} + other {No Intentions were found matching your search} + }, or you may not have intentions:read permissions access to this view. +

+ + instances: + empty: | +

+ This Service has no Instances{items, select, + 0 {} + other { matching that search} + }. +

+ services: + intro: | +

+ The following services may receive traffic from external services through this gateway. Learn more about configuring gateways in our step-by-step guide. +

+ empty: | +

+ There are no Services{items, select, + 0 {} + other { matching that search} + }. +

+ tags: + empty: + header: Welcome to Tags + body: | +

+ There are no tags for this Service. +

upstreams: intro: |

Upstreams are services that may receive traffic from this gateway. If you are not using Consul DNS, please make sure your Host: header uses the correct domain name for the gateway to correctly proxy to its upstreams. Learn more about configuring gateways in our documentation.

- instance: - healthchecks: empty: |

- This instance has no health checks{items, select, + This Service has no Upstreams{items, select, 0 {} other { matching that search} }.

- critical-serf-notice: - header: Failing serf check - body: | -

- This instance has a failing serf node check. The health statuses shown on this page are the statuses as they were known before the node became unreachable. -

- upstreams: - tproxy-mode: - header: Transparent proxy mode - body: The upstreams listed on this page have been defined in a proxy registration. There may be more upstreams, though, as "transparent" mode is enabled on this proxy. - footer: Read the documentation + routing-config: source: Routing Configuration + intentions: + index: + empty: + header: | + {items, select, + 0 {Welcome to Intentions} + other {No Intentions found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any Intentions in this Consul cluster} + other {No Intentions were found matching your search} + }, or you may not have intentions:read access to this view. +

+ kv: + index: + empty: + header: | + {items, select, + 0 {Welcome to Key/Value} + other {No K/V pairs found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any K/V pairs in this Consul cluster yet} + other {No K/V pairs were found matching your search} + }, or you may not have key:read access to this view. +

+ acls: + tokens: + index: + empty: + header: | + {items, select, + 0 {Welcome to Tokens} + other {No Tokens found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any Tokens} + other {No Tokens were found matching your search} + }, or you may not have acl:read permissions to view Tokens yet. +

+ policies: + index: + empty: + header: | + {items, select, + 0 {Welcome to Policies} + other {No Policies found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any Policies} + other {No Policies were found matching your search} + }, or you may not have acl:read permissions to view Policies yet. +

+ roles: + index: + empty: + header: | + {items, select, + 0 {Welcome to Roles} + other {No Roles found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any Roles} + other {No Roles were found matching your search} + }, or you may not have acl:read permissions to view Roles yet. +

+ auth-methods: + show: + binding-rules: + empty: + header: No Binding Rules + body: | +

+ Binding rules allow an operator to express a systematic way of automatically linking roles and service identities to newly created tokens without operator intervention. +

+ nspace-rules: + empty: + header: No Namespace Rules + body: | +

+ A set of rules that can control which namespace tokens created via this auth method will be created within. Unlike binding rules, the first matching namespace rule wins. +

+ + index: + empty: + header: | + {items, select, + 0 {Welcome to Auth Methods} + other {No Auth Methods found} + } + body: | +

+ {items, select, + 0 {There don't seem to be any Auth Methods} + other {No Auth Methods were found matching your search} + }, or you may not have acl:read permissions to view Auth Methods yet. +

From 13ab14e60c528d1b9cf48d40d46f3f9fe3787212 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 11 Apr 2022 10:49:44 -0500 Subject: [PATCH 111/785] test: use docker buildkit backend for envoy integration tests (#12726) --- test/integration/connect/envoy/run-tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index db008d5a5..bd33af513 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -13,6 +13,8 @@ DEBUG=${DEBUG:-} ENVOY_VERSION=${ENVOY_VERSION:-"1.20.2"} export ENVOY_VERSION +export DOCKER_BUILDKIT=1 + if [ ! -z "$DEBUG" ] ; then set -x fi From f5a882f66c26991fdf5c63f923e9d3939dea5bcc Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 11 Apr 2022 10:56:57 -0500 Subject: [PATCH 112/785] fix broken test (#12741) --- agent/discovery_chain_endpoint_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/agent/discovery_chain_endpoint_test.go b/agent/discovery_chain_endpoint_test.go index 86ef96617..b93cd45c9 100644 --- a/agent/discovery_chain_endpoint_test.go +++ b/agent/discovery_chain_endpoint_test.go @@ -282,7 +282,7 @@ func TestDiscoveryChainRead(t *testing.T) { expectTarget_DC1 := targetWithConnectTimeout( newTarget("web", "", "default", "default", "dc1"), - 33*time.Second, + 22*time.Second, ) expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeLocal, @@ -290,7 +290,7 @@ func TestDiscoveryChainRead(t *testing.T) { expectTarget_DC2 := targetWithConnectTimeout( newTarget("web", "", "default", "default", "dc2"), - 33*time.Second, + 22*time.Second, ) expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeLocal, From 07893afbdb30e3eb12725dd4247dab455c9d53fd Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 11 Apr 2022 11:41:35 -0700 Subject: [PATCH 113/785] docs: Upgrade Consul K8s update link to combat matrix (#12744) --- website/content/docs/k8s/upgrade/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/k8s/upgrade/index.mdx b/website/content/docs/k8s/upgrade/index.mdx index 7ce00caf3..3f119200d 100644 --- a/website/content/docs/k8s/upgrade/index.mdx +++ b/website/content/docs/k8s/upgrade/index.mdx @@ -118,7 +118,7 @@ to update to the new version. 1. Ensure you've read the [Upgrading Consul](/docs/upgrading) documentation. 1. Ensure you've read any [specific instructions](/docs/upgrading/upgrade-specific) for the version you're upgrading to and the Consul [changelog](https://github.com/hashicorp/consul/blob/main/CHANGELOG.md) for that version. -1. Read our [Compatibility Matrix](/docs/k8s/upgrade/compatibility) to ensure +1. Read our [Compatibility Matrix](/docs/k8s/installation/compatibility) to ensure your current Helm chart version supports this Consul version. If it does not, you may need to also upgrade your Helm chart version at the same time. 1. Set `global.image` in your `values.yaml` to the desired version: From 0422512ca17c4272ecdbc81bc933da50263eb912 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 11 Apr 2022 14:40:57 -0500 Subject: [PATCH 114/785] ci: upsize many slow-running circleci builds (#12742) --- .circleci/config.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index df724af17..381bc5594 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -177,6 +177,7 @@ jobs: default: "" docker: - image: *GOLANG_IMAGE + resource_class: large environment: GOTAGS: "" # No tags for OSS but there are for enterprise GOARCH: "<>" @@ -241,7 +242,7 @@ jobs: go-test-arm64: machine: image: ubuntu-2004:202101-01 - resource_class: arm.medium + resource_class: arm.large parallelism: 4 environment: <<: *ENVIRONMENT @@ -272,6 +273,7 @@ jobs: go-test: docker: - image: *GOLANG_IMAGE + resource_class: large parallelism: 4 environment: <<: *ENVIRONMENT @@ -328,6 +330,7 @@ jobs: go-test-32bit: docker: - image: *GOLANG_IMAGE + resource_class: large environment: <<: *ENVIRONMENT GOTAGS: "" # No tags for OSS but there are for enterprise @@ -401,6 +404,7 @@ jobs: build-distros: &build-distros docker: - image: *GOLANG_IMAGE + resource_class: large environment: &build-env <<: *ENVIRONMENT steps: @@ -438,6 +442,7 @@ jobs: build-arm: docker: - image: *GOLANG_IMAGE + resource_class: large environment: <<: *ENVIRONMENT CGO_ENABLED: 1 @@ -470,6 +475,7 @@ jobs: dev-build: docker: - image: *GOLANG_IMAGE + resource_class: large environment: <<: *ENVIRONMENT steps: From fd3f797dd5ec5f118250cadbbfc9c632a63a5f73 Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 11 Apr 2022 13:51:21 -0700 Subject: [PATCH 115/785] website: redirect change consul k8s compatibility matrix link (#12751) --- website/redirects.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/redirects.js b/website/redirects.js index 3517dc5e3..3ccff92be 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1089,7 +1089,7 @@ module.exports = [ }, { source: '/docs/compatibility', - destination: '/docs/upgrading/compatibility', + destination: '/docs/installation/compatibility', permanent: true, }, { From 1ba6678f26123988546796f325b2bf04c780cbcc Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 11 Apr 2022 15:45:18 -0700 Subject: [PATCH 116/785] redirect.js: fixing redirect to new compatibility matrix for k8s (#12755) --- website/redirects.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/website/redirects.js b/website/redirects.js index 3ccff92be..ea95d1803 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1089,7 +1089,12 @@ module.exports = [ }, { source: '/docs/compatibility', - destination: '/docs/installation/compatibility', + destination: '/docs/upgrading/compatibility', + permanent: true, + }, + { + source: '/docs/k8s/upgrade/compatibility', + destination: '/docs/k8s/installation/compatibility', permanent: true, }, { From f7edcdc6b91bc527d0f292028041294db4015dc6 Mon Sep 17 00:00:00 2001 From: Blake Covarrubias Date: Mon, 11 Apr 2022 16:05:21 -0700 Subject: [PATCH 117/785] docs: move agent/options.mdx into agent/config/index.mdx and add placeholder .mdx files for cli/files Also update nav data --- .../docs/agent/config/agent-config-cli.mdx | 0 .../docs/agent/config/agent-config-files.mdx | 0 .../docs/agent/{options.mdx => config/index.mdx} | 0 website/data/docs-nav-data.json | 15 ++++++++++++++- 4 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 website/content/docs/agent/config/agent-config-cli.mdx create mode 100644 
website/content/docs/agent/config/agent-config-files.mdx rename website/content/docs/agent/{options.mdx => config/index.mdx} (100%) diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/agent-config-cli.mdx new file mode 100644 index 000000000..e69de29bb diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx new file mode 100644 index 000000000..e69de29bb diff --git a/website/content/docs/agent/options.mdx b/website/content/docs/agent/config/index.mdx similarity index 100% rename from website/content/docs/agent/options.mdx rename to website/content/docs/agent/config/index.mdx diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index d8cf955f1..f23305dbb 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -888,7 +888,20 @@ }, { "title": "Configuration", - "path": "agent/options" + "routes": [ + { + "title": "General", + "path": "agent/config" + }, + { + "title": "CLI Reference", + "path": "agent/config/agent-config-cli" + }, + { + "title": "Configuration Reference", + "path": "agent/config/agent-config-files" + } + ] }, { "title": "Configuration Entries", From 84123368db01c8af2f8fa4df3d1c512a6e55f8d9 Mon Sep 17 00:00:00 2001 From: Blake Covarrubias Date: Mon, 11 Apr 2022 16:05:48 -0700 Subject: [PATCH 118/785] docs: move cli content from agent/config/index to agent/config/agent-config-cli And add sections for logical groupings of options --- .../docs/agent/config/agent-config-cli.mdx | 517 ++++++++++++++++ website/content/docs/agent/config/index.mdx | 550 +----------------- 2 files changed, 518 insertions(+), 549 deletions(-) diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/agent-config-cli.mdx index e69de29bb..ff19b147f 100644 --- a/website/content/docs/agent/config/agent-config-cli.mdx +++ b/website/content/docs/agent/config/agent-config-cli.mdx @@ -0,0 +1,517 @@ +--- +layout: docs +page_title: Consul Agent CLI Reference +description: >- + This topic describes the supported options for configuring Consul agents on the command line. +--- + +# Command-line Options ((#commandline_options)) + +-> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](#config_key_reference) for equivalent HCL Keys. + +The options below are all specified on the command-line. + +## Environment Variables + +Environment variables **cannot** be used to configure the Consul client. They +_can_ be used when running other `consul` CLI commands that connect with a +running agent, e.g. `CONSUL_HTTP_ADDR=192.168.0.1:8500 consul members`. + +See [Consul Commands](/docs/commands#environment-variables) for more +information. + +## General + +- `-check_output_max_size` - Override the default + limit of 4k for maximum size of checks, this is a positive value. By limiting this + size, it allows to put less pressure on Consul servers when many checks are having + a very large output in their checks. In order to completely disable check output + capture, it is possible to use [`discard_check_output`](#discard_check_output). + +- `-client` ((#\_client)) - The address to which Consul will bind client + interfaces, including the HTTP and DNS servers. By default, this is "127.0.0.1", + allowing only loopback connections. 
In Consul 1.0 and later this can be set to + a space-separated list of addresses to bind to, or a [go-sockaddr] + template that can potentially resolve to multiple addresses. + +- `-data-dir` ((#\_data_dir)) - This flag provides a data directory for + the agent to store state. This is required for all agents. The directory should + be durable across reboots. This is especially critical for agents that are running + in server mode as they must be able to persist cluster state. Additionally, the + directory must support the use of filesystem locking, meaning some types of mounted + folders (e.g. VirtualBox shared folders) may not be suitable. + + **Note:** both server and non-server agents may store ACL tokens in the state in this directory so read access may grant access to any tokens on servers and to any tokens used during service registration on non-servers. On Unix-based platforms the files are written with 0600 permissions so you should ensure only trusted processes can execute as the same user as Consul. On Windows, you should ensure the directory has suitable permissions configured as these will be inherited. + +- `-datacenter` ((#\_datacenter)) - This flag controls the datacenter in + which the agent is running. If not provided, it defaults to "dc1". Consul has first-class + support for multiple datacenters, but it relies on proper configuration. Nodes + in the same datacenter should be on a single LAN. + +- `-dev` ((#\_dev)) - Enable development server mode. This is useful for + quickly starting a Consul agent with all persistence options turned off, enabling + an in-memory server which can be used for rapid prototyping or developing against + the API. In this mode, [Connect is enabled](/docs/connect/configuration) and + will by default create a new root CA certificate on startup. This mode is **not** + intended for production use as it does not write any data to disk. The gRPC port + is also defaulted to `8502` in this mode. + +- `-disable-host-node-id` ((#\_disable_host_node_id)) - Setting this to + true will prevent Consul from using information from the host to generate a deterministic + node ID, and will instead generate a random node ID which will be persisted in + the data directory. This is useful when running multiple Consul agents on the same + host for testing. This defaults to false in Consul prior to version 0.8.5 and in + 0.8.5 and later defaults to true, so you must opt-in for host-based IDs. Host-based + IDs are generated using [gopsutil](https://github.com/shirou/gopsutil/tree/master/v3/host), which + is shared with HashiCorp's [Nomad](https://www.nomadproject.io/), so if you opt-in + to host-based IDs then Consul and Nomad will use information on the host to automatically + assign the same ID in both systems. + +- `-disable-keyring-file` ((#\_disable_keyring_file)) - If set, the keyring + will not be persisted to a file. Any installed keys will be lost on shutdown, and + only the given `-encrypt` key will be available on startup. This defaults to false. + +- `-enable-script-checks` ((#\_enable_script_checks)) This controls whether + [health checks that execute scripts](/docs/agent/checks) are enabled on this + agent, and defaults to `false` so operators must opt-in to allowing these. This + was added in Consul 0.9.0. + + ~> **Security Warning:** Enabling script checks in some configurations may + introduce a remote execution vulnerability which is known to be targeted by + malware. We strongly recommend `-enable-local-script-checks` instead. 
See [this + blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) + for more details. + +- `-enable-local-script-checks` ((#\_enable_local_script_checks)) + Like [`enable_script_checks`](#_enable_script_checks), but only enable them when + they are defined in the local configuration files. Script checks defined in HTTP + API registrations will still not be allowed. + + +- `-encrypt` ((#\_encrypt)) - Specifies the secret key to use for encryption + of Consul network traffic. This key must be 32-bytes that are Base64-encoded. The + easiest way to create an encryption key is to use [`consul keygen`](/commands/keygen). + All nodes within a cluster must share the same encryption key to communicate. The + provided key is automatically persisted to the data directory and loaded automatically + whenever the agent is restarted. This means that to encrypt Consul's gossip protocol, + this option only needs to be provided once on each agent's initial startup sequence. + If it is provided after Consul has been initialized with an encryption key, then + the provided key is ignored and a warning will be displayed. + +- `-grpc-port` ((#\_grpc_port)) - the gRPC API port to listen on. Default + -1 (gRPC disabled). See [ports](#ports) documentation for more detail. + +- `-hcl` ((#\_hcl)) - A HCL configuration fragment. This HCL configuration + fragment is appended to the configuration and allows to specify the full range + of options of a config file on the command line. This option can be specified multiple + times. This was added in Consul 1.0. + +- `-http-port` ((#\_http_port)) - the HTTP API port to listen on. This overrides + the default port 8500. This option is very useful when deploying Consul to an environment + which communicates the HTTP port through the environment e.g. PaaS like CloudFoundry, + allowing you to set the port directly via a Procfile. + +- `-https-port` ((#\_https_port)) - the HTTPS API port to listen on. Default + -1 (https disabled). See [ports](#ports) documentation for more detail. + +- `-default-query-time` ((#\_default_query_time)) - This flag controls the + amount of time a blocking query will wait before Consul will force a response. + This value can be overridden by the `wait` query parameter. Note that Consul applies + some jitter on top of this time. Defaults to 300s. + +- `-max-query-time` ((#\_max_query_time)) - this flag controls the maximum + amount of time a blocking query can wait before Consul will force a response. Consul + applies jitter to the wait time. The jittered time will be capped to this time. + Defaults to 600s. + +- `-pid-file` ((#\_pid_file)) - This flag provides the file path for the + agent to store its PID. This is useful for sending signals (for example, `SIGINT` + to close the agent or `SIGHUP` to update check definitions) to the agent. + +- `-protocol` ((#\_protocol)) - The Consul protocol version to use. Consul + agents speak protocol 2 by default, however agents will automatically use protocol > 2 when speaking to compatible agents. This should be set only when [upgrading](/docs/upgrading). You can view the protocol versions supported by Consul by running `consul -v`. + +- `-raft-protocol` ((#\_raft_protocol)) - This controls the internal version + of the Raft consensus protocol used for server communications. This must be set + to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](#cleanup_dead_servers). 
Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/docs/upgrade-specific#raft-protocol-version-compatibility) for more details. + +- `-segment` ((#\_segment)) - This flag is used to set + the name of the network segment the agent belongs to. An agent can only join and + communicate with other agents within its network segment. Ensure the [join + operation uses the correct port for this segment](/docs/enterprise/network-segments#join_a_client_to_a_segment). + Review the [Network Segments documentation](/docs/enterprise/network-segments) + for more details. By default, this is an empty string, which is the `` + network segment. + + ~> **Warning:** The `segment` flag cannot be used with the [`partition`](#partition-1) option. + +## Advertise Address Options + +- `-advertise` ((#\_advertise)) - The advertise address is used to change + the address that we advertise to other nodes in the cluster. By default, the [`-bind`](#_bind) + address is advertised. However, in some cases, there may be a routable address + that cannot be bound. This flag enables gossiping a different address to support + this. If this address is not routable, the node will be in a constant flapping + state as other nodes will treat the non-routability as a failure. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that is resolved at runtime. + +- `-advertise-wan` ((#\_advertise-wan)) - The advertise WAN address is used + to change the address that we advertise to server nodes joining through the WAN. + This can also be set on client agents when used in combination with the [`translate_wan_addrs`](#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address + is advertised. However, in some cases all members of all datacenters cannot be + on the same physical or virtual network, especially on hybrid setups mixing cloud + and private datacenters. This flag enables server nodes gossiping through the public + network for the WAN while using private VLANs for gossiping to each other and their + client agents, and it allows client agents to be reached at this address when being + accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that is resolved at runtime. + +## Bind Options + +- `-bind` ((#\_bind)) - The address that should be bound to for internal + cluster communications. This is an IP address that should be reachable by all other + nodes in the cluster. By default, this is "0.0.0.0", meaning Consul will bind to + all addresses on the local machine and will [advertise](/docs/agent/options#_advertise) + the private IPv4 address to the rest of the cluster. If there are multiple private + IPv4 addresses available, Consul will exit with an error at startup. If you specify + `"[::]"`, Consul will [advertise](/docs/agent/options#_advertise) the public + IPv6 address. If there are multiple public IPv6 addresses available, Consul will + exit with an error at startup. Consul uses both TCP and UDP and the same port for + both. If you have any firewalls, be sure to allow both protocols. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that must resolve at runtime to a single address. 
Some example templates: + + ```shell + # Using address within a specific CIDR + $ consul agent -bind '{{ GetPrivateInterfaces | include "network" "10.0.0.0/8" | attr "address" }}' + ``` + + ```shell + # Using a static network interface name + $ consul agent -bind '{{ GetInterfaceIP "eth0" }}' + ``` + + ```shell + # Using regular expression matching for network interface name that is forwardable and up + $ consul agent -bind '{{ GetAllInterfaces | include "name" "^eth" | include "flags" "forwardable|up" | attr "address" }}' + ``` + +- `-serf-wan-bind` ((#\_serf_wan_bind)) - The address that should be bound + to for Serf WAN gossip communications. By default, the value follows the same rules + as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind` + option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that is resolved at runtime. + +- `-serf-lan-bind` ((#\_serf_lan_bind)) - The address that should be bound + to for Serf LAN gossip communications. This is an IP address that should be reachable + by all other LAN nodes in the cluster. By default, the value follows the same rules + as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind` + option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that is resolved at runtime. + +## Bootstrap Options + +- `-bootstrap` ((#\_bootstrap)) - This flag is used to control if a server + is in "bootstrap" mode. It is important that no more than one server **per** datacenter + be running in this mode. Technically, a server in bootstrap mode is allowed to + self-elect as the Raft leader. It is important that only a single node is in this + mode; otherwise, consistency cannot be guaranteed as multiple nodes are able to + self-elect. It is not recommended to use this flag after a cluster has been bootstrapped. + +- `-bootstrap-expect` ((#\_bootstrap_expect)) - This flag provides the number + of expected servers in the datacenter. Either this value should not be provided + or the value must agree with other servers in the cluster. When provided, Consul + waits until the specified number of servers are available and then bootstraps the + cluster. This allows an initial leader to be elected automatically. This cannot + be used in conjunction with the legacy [`-bootstrap`](#_bootstrap) flag. This flag + requires [`-server`](#_server) mode. + +## Configuration File Options + +- `-config-file` ((#\_config_file)) - A configuration file to load. For + more information on the format of this file, read the [Configuration Files](#configuration_files) + section. This option can be specified multiple times to load multiple configuration + files. If it is specified multiple times, configuration files loaded later will + merge with configuration files loaded earlier. During a config merge, single-value + keys (string, int, bool) will simply have their values replaced while list types + will be appended together. + +- `-config-dir` ((#\_config_dir)) - A directory of configuration files to + load. Consul will load all files in this directory with the suffix ".json" or ".hcl". + The load order is alphabetical, and the the same merge routine is used as with + the [`config-file`](#_config_file) option above. This option can be specified multiple + times to load multiple directories. Sub-directories of the config directory are + not loaded. 
For more information on the format of the configuration files, see + the [Configuration Files](#configuration_files) section. + +- `-config-format` ((#\_config_format)) - The format of the configuration + files to load. Normally, Consul detects the format of the config files from the + ".json" or ".hcl" extension. Setting this option to either "json" or "hcl" forces + Consul to interpret any file with or without extension to be interpreted in that + format. + +## DNS and Domain Options + +- `-dns-port` ((#\_dns_port)) - the DNS port to listen on. This overrides + the default port 8600. This is available in Consul 0.7 and later. + +- `-domain` ((#\_domain)) - By default, Consul responds to DNS queries in + the "consul." domain. This flag can be used to change that domain. All queries + in this domain are assumed to be handled by Consul and will not be recursively + resolved. + +- `-alt-domain` ((#\_alt_domain)) - This flag allows Consul to respond to + DNS queries in an alternate domain, in addition to the primary domain. If unset, + no alternate domain is used. + + In Consul 1.10.4 and later, Consul DNS responses will use the same domain as in the query (`-domain` or `-alt-domain`) where applicable. + PTR query responses will always use `-domain`, since the desired domain cannot be included in the query. + +- `-recursor` ((#\_recursor)) - Specifies the address of an upstream DNS + server. This option may be provided multiple times, and is functionally equivalent + to the [`recursors` configuration option](#recursors). + +## Join Options + +- `-join` ((#\_join)) - Address of another agent to join upon starting up. + This can be specified multiple times to specify multiple agents to join. If Consul + is unable to join with any of the specified addresses, agent startup will fail. + By default, the agent won't join any nodes when it starts up. Note that using [`retry_join`](#retry_join) could be more appropriate to help mitigate node startup race conditions when automating + a Consul cluster deployment. + + In Consul 1.1.0 and later this can be dynamically defined with a + [go-sockaddr] + template that is resolved at runtime. + + If using Enterprise network segments, see [additional documentation on + joining a client to a segment](/docs/enterprise/network-segments#join_a_client_to_a_segment). + +- `-retry-join` ((#\_retry_join)) - Similar to [`-join`](#_join) but allows retrying a join until + it is successful. Once it joins successfully to a member in a list of members + it will never attempt to join again. Agents will then solely maintain their + membership via gossip. This is useful for cases where you know the address will + eventually be available. This option can be specified multiple times to + specify multiple agents to join. The value can contain IPv4, IPv6, or DNS + addresses. IPv6 must use the "bracketed" syntax. If multiple values + are given, they are tried and retried in the order listed until the first + succeeds. + + In Consul 1.1.0 and later this can be dynamically defined with a + [go-sockaddr] + template that is resolved at runtime. + + If Consul is running on the non-default Serf LAN port, the port must + be specified in the join address, or configured as the agent's default Serf port + using the [`ports.serf_lan`](#serf_lan_port) configuration option or + [`-serf-lan-port`](#_serf_lan_port) command line flag. 
+ + If using network segments (Enterprise), see [additional documentation on + joining a client to a segment](/docs/enterprise/network-segments#join_a_client_to_a_segment). + + Here are some examples of using `-retry-join`: + + ```shell + # Using a DNS entry + $ consul agent -retry-join "consul.domain.internal" + ``` + + ```shell + # Using IPv4 + $ consul agent -retry-join "10.0.4.67" + ``` + + ```shell + # Using a non-default Serf LAN port + $ consul agent -retry-join "192.0.2.10:8304" + ``` + + ```shell + # Using IPv6 + $ consul agent -retry-join "[::1]:8301" + ``` + + ```shell + # Using multiple addresses + $ consul agent -retry-join "consul.domain.internal" -retry-join "10.0.4.67" + ``` + + ### Cloud Auto-Joining + + As of Consul 0.9.1, `retry-join` accepts a unified interface using the + [go-discover](https://github.com/hashicorp/go-discover) library for doing + automatic cluster joining using cloud metadata. For more information, see + the [Cloud Auto-join page](/docs/agent/cloud-auto-join). + + ```shell + # Using Cloud Auto-Joining + $ consul agent -retry-join "provider=aws tag_key=..." + ``` + +- `-retry-interval` ((#\_retry_interval)) - Time to wait between join attempts. + Defaults to 30s. + +- `-retry-max` ((#\_retry_max)) - The maximum number of [`-join`](#_join) + attempts to be made before exiting with return code 1. By default, this is set + to 0 which is interpreted as infinite retries. + +- `-join-wan` ((#\_join_wan)) - Address of another wan agent to join upon + starting up. This can be specified multiple times to specify multiple WAN agents + to join. If Consul is unable to join with any of the specified addresses, agent + startup will fail. By default, the agent won't [`-join-wan`](#_join_wan) any nodes + when it starts up. + + In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that is resolved at runtime. + +- `-retry-join-wan` ((#\_retry_join_wan)) - Similar to [`retry-join`](#_retry_join) + but allows retrying a wan join if the first attempt fails. This is useful for cases + where we know the address will become available eventually. As of Consul 0.9.3 + [Cloud Auto-Joining](#cloud-auto-joining) is supported as well. + + In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + template that is resolved at runtime. + +- `-primary-gateway` ((#\_primary_gateway)) - Similar to [`retry-join-wan`](#_retry_join_wan) + but allows retrying discovery of fallback addresses for the mesh gateways in the + primary datacenter if the first attempt fails. This is useful for cases where we + know the address will become available eventually. [Cloud Auto-Joining](#cloud-auto-joining) + is supported as well as [go-sockaddr] + templates. This was added in Consul 1.8.0. + +- `-retry-interval-wan` ((#\_retry_interval_wan)) - Time to wait between + [`-join-wan`](#_join_wan) attempts. Defaults to 30s. + +- `-retry-max-wan` ((#\_retry_max_wan)) - The maximum number of [`-join-wan`](#_join_wan) + attempts to be made before exiting with return code 1. By default, this is set + to 0 which is interpreted as infinite retries. + +- `-rejoin` ((#\_rejoin)) - When provided, Consul will ignore a previous + leave and attempt to rejoin the cluster when starting. By default, Consul treats + leave as a permanent intent and does not attempt to join the cluster again when + starting. This flag allows the previous state to be used to rejoin the cluster. 
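+
+The WAN join flags above (`-join-wan`, `-retry-join-wan`) accept the same address
+formats as `-retry-join`. As a rough sketch, a server in a secondary datacenter
+might combine them as follows (the hostname shown is a placeholder, not a real
+address):
+
+```shell
+# Hypothetical example: retry joining the WAN pool through a placeholder hostname
+$ consul agent -server -retry-join-wan "consul.dc1.internal" -retry-interval-wan "60s"
+```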
+ +## Log Options + +- `-log-file` ((#\_log_file)) - writes all the Consul agent log messages + to a file. This value is used as a prefix for the log file name. The current timestamp + is appended to the file name. If the value ends in a path separator, `consul-` + will be appended to the value. If the file name is missing an extension, `.log` + is appended. For example, setting `log-file` to `/var/log/` would result in a log + file path of `/var/log/consul-{timestamp}.log`. `log-file` can be combined with + [`-log-rotate-bytes`](#_log_rotate_bytes) and [-log-rotate-duration](#_log_rotate_duration) + for a fine-grained log rotation experience. + +- `-log-rotate-bytes` ((#\_log_rotate_bytes)) - to specify the number of + bytes that should be written to a log before it needs to be rotated. Unless specified, + there is no limit to the number of bytes that can be written to a log file. + +- `-log-rotate-duration` ((#\_log_rotate_duration)) - to specify the maximum + duration a log should be written to before it needs to be rotated. Must be a duration + value such as 30s. Defaults to 24h. + +- `-log-rotate-max-files` ((#\_log_rotate_max_files)) - to specify the maximum + number of older log file archives to keep. Defaults to 0 (no files are ever deleted). + Set to -1 to discard old log files when a new one is created. + +- `-log-level` ((#\_log_level)) - The level of logging to show after the + Consul agent has started. This defaults to "info". The available log levels are + "trace", "debug", "info", "warn", and "err". You can always connect to an agent + via [`consul monitor`](/commands/monitor) and use any log level. Also, + the log level can be changed during a config reload. + +- `-log-json` ((#\_log_json)) - This flag enables the agent to output logs + in a JSON format. By default this is false. + +- `-syslog` ((#\_syslog)) - This flag enables logging to syslog. This is + only supported on Linux and OSX. It will result in an error if provided on Windows. + +## Node Options + +- `-node` ((#\_node)) - The name of this node in the cluster. This must + be unique within the cluster. By default this is the hostname of the machine. + +- `-node-id` ((#\_node_id)) - Available in Consul 0.7.3 and later, this + is a unique identifier for this node across all time, even if the name of the node + or address changes. This must be in the form of a hex string, 36 characters long, + such as `adf4238a-882b-9ddc-4a9d-5b6758e4159e`. If this isn't supplied, which is + the most common case, then the agent will generate an identifier at startup and + persist it in the [data directory](#_data_dir) so that it will remain the same + across agent restarts. Information from the host will be used to generate a deterministic + node ID if possible, unless [`-disable-host-node-id`](#_disable_host_node_id) is + set to true. + +- `-node-meta` ((#\_node_meta)) - Available in Consul 0.7.3 and later, this + specifies an arbitrary metadata key/value pair to associate with the node, of the + form `key:value`. This can be specified multiple times. Node metadata pairs have + the following restrictions: + + - A maximum of 64 key/value pairs can be registered per node. + - Metadata keys must be between 1 and 128 characters (inclusive) in length + - Metadata keys must contain only alphanumeric, `-`, and `_` characters. + - Metadata keys must not begin with the `consul-` prefix; that is reserved for internal use by Consul. + - Metadata values must be between 0 and 512 (inclusive) characters in length. 
+ - Metadata values for keys beginning with `rfc1035-` are encoded verbatim in DNS TXT requests, otherwise + the metadata kv-pair is encoded according [RFC1464](https://www.ietf.org/rfc/rfc1464.txt). + +## Serf Options + +- `-serf-lan-allowed-cidrs` ((#\_serf_lan_allowed_cidrs)) - The Serf LAN allowed CIDRs allow to accept incoming + connections for Serf only from several networks (multiple values are supported). + Those networks are specified with CIDR notation (eg: 192.168.1.0/24). + This is available in Consul 1.8 and later. + +- `-serf-lan-port` ((#\_serf_lan_port)) - the Serf LAN port to listen on. + This overrides the default Serf LAN port 8301. This is available in Consul 1.2.2 + and later. + +- `-serf-wan-allowed-cidrs` ((#\_serf_wan_allowed_cidrs)) - The Serf WAN allowed CIDRs allow to accept incoming + connections for Serf only from several networks (multiple values are supported). + Those networks are specified with CIDR notation (eg: 192.168.1.0/24). + This is available in Consul 1.8 and later. + +- `-serf-wan-port` ((#\_serf_wan_port)) - the Serf WAN port to listen on. + This overrides the default Serf WAN port 8302. This is available in Consul 1.2.2 + and later. + +## Server Options + +- `-server` ((#\_server)) - This flag is used to control if an agent is + in server or client mode. When provided, an agent will act as a Consul server. + Each Consul cluster must have at least one server and ideally no more than 5 per + datacenter. All servers participate in the Raft consensus algorithm to ensure that + transactions occur in a consistent, linearizable manner. Transactions modify cluster + state, which is maintained on all server nodes to ensure availability in the case + of node failure. Server nodes also participate in a WAN gossip pool with server + nodes in other datacenters. Servers act as gateways to other datacenters and forward + traffic as appropriate. + +- `-server-port` ((#\_server_port)) - the server RPC port to listen on. + This overrides the default server RPC port 8300. This is available in Consul 1.2.2 + and later. + +- `-non-voting-server` ((#\_non_voting_server)) - **This field + is deprecated in Consul 1.9.1. See the [`-read-replica`](#_read_replica) flag instead.** + +- `-read-replica` ((#\_read_replica)) - This + flag is used to make the server not participate in the Raft quorum, and have it + only receive the data replication stream. This can be used to add read scalability + to a cluster in cases where a high volume of reads to servers are needed. + +## UI Options + +- `-ui` ((#\_ui)) - Enables the built-in web UI server and the required + HTTP routes. This eliminates the need to maintain the Consul web UI files separately + from the binary. + +- `-ui-dir` ((#\_ui_dir)) - This flag provides the directory containing + the Web UI resources for Consul. This will automatically enable the Web UI. The + directory must be readable to the agent. Starting with Consul version 0.7.0 and + later, the Web UI assets are included in the binary so this flag is no longer necessary; + specifying only the `-ui` flag is enough to enable the Web UI. Specifying both + the '-ui' and '-ui-dir' flags will result in an error. + + +- `-ui-content-path` ((#\_ui\_content\_path)) - This flag provides the option + to change the path the Consul UI loads from and will be displayed in the browser. + By default, the path is `/ui/`, for example `http://localhost:8500/ui/`. 
Only alphanumerics, + `-`, and `_` are allowed in a custom path.`/v1/` is not allowed as it would overwrite + the API endpoint. \ No newline at end of file diff --git a/website/content/docs/agent/config/index.mdx b/website/content/docs/agent/config/index.mdx index 19cc786e9..064165879 100644 --- a/website/content/docs/agent/config/index.mdx +++ b/website/content/docs/agent/config/index.mdx @@ -43,554 +43,6 @@ You can test the following configuration options by following the [Getting Started](https://learn.hashicorp.com/tutorials/consul/get-started-install?utm_source=consul.io&utm_medium=docs) tutorials to install a local agent. -## Environment Variables - -Environment variables **cannot** be used to configure the Consul client. They -_can_ be used when running other `consul` CLI commands that connect with a -running agent, e.g. `CONSUL_HTTP_ADDR=192.168.0.1:8500 consul members`. - -See [Consul Commands](/commands#environment-variables) for more -information. - -## Command-line Options ((#commandline_options)) - --> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](#config_key_reference) for equivalent HCL Keys. - -The agent configuration options below are all specified on the command-line. - -- `-advertise` ((#\_advertise)) - The advertise address is used to change - the address that we advertise to other nodes in the cluster. By default, the [`-bind`](#_bind) - address is advertised. However, in some cases, there may be a routable address - that cannot be bound. This flag enables gossiping a different address to support - this. If this address is not routable, the node will be in a constant flapping - state as other nodes will treat the non-routability as a failure. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that is resolved at runtime. - - - - ```shell-session - $ consul agent -advertise '{{ GetInterfaceIP "eth0" }}' - ``` - - - -- `-advertise-wan` ((#\_advertise-wan)) - The advertise WAN address is used - to change the address that we advertise to server nodes joining through the WAN. - This can also be set on client agents when used in combination with the [`translate_wan_addrs`](#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address - is advertised. However, in some cases all members of all datacenters cannot be - on the same physical or virtual network, especially on hybrid setups mixing cloud - and private datacenters. This flag enables server nodes gossiping through the public - network for the WAN while using private VLANs for gossiping to each other and their - client agents, and it allows client agents to be reached at this address when being - accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that is resolved at runtime. - -- `-bootstrap` ((#\_bootstrap)) - This flag is used to control if a server - is in "bootstrap" mode. It is important that no more than one server **per** datacenter - be running in this mode. Technically, a server in bootstrap mode is allowed to - self-elect as the Raft leader. It is important that only a single node is in this - mode; otherwise, consistency cannot be guaranteed as multiple nodes are able to - self-elect. It is not recommended to use this flag after a cluster has been bootstrapped. 
- -- `-bootstrap-expect` ((#\_bootstrap_expect)) - This flag provides the number - of expected servers in the datacenter. Either this value should not be provided - or the value must agree with other servers in the cluster. When provided, Consul - waits until the specified number of servers are available and then bootstraps the - cluster. This allows an initial leader to be elected automatically. This cannot - be used in conjunction with the legacy [`-bootstrap`](#_bootstrap) flag. This flag - requires [`-server`](#_server) mode. - -- `-bind` ((#\_bind)) - The address that should be bound to for internal - cluster communications. This is an IP address that should be reachable by all other - nodes in the cluster. By default, this is "0.0.0.0", meaning Consul will bind to - all addresses on the local machine and will [advertise](/docs/agent/options#_advertise) - the private IPv4 address to the rest of the cluster. If there are multiple private - IPv4 addresses available, Consul will exit with an error at startup. If you specify - `"[::]"`, Consul will [advertise](/docs/agent/options#_advertise) the public - IPv6 address. If there are multiple public IPv6 addresses available, Consul will - exit with an error at startup. Consul uses both TCP and UDP and the same port for - both. If you have any firewalls, be sure to allow both protocols. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that must resolve at runtime to a single address. Some example templates: - - - - ```shell-session - $ consul agent -bind '{{ GetPrivateInterfaces | include "network" "10.0.0.0/8" | attr "address" }}' - ``` - - - - - - ```shell-session - $ consul agent -bind '{{ GetInterfaceIP "eth0" }}' - ``` - - - - - - ```shell-session - $ consul agent -bind '{{ GetAllInterfaces | include "name" "^eth" | include "flags" "forwardable|up" | attr "address" }}' - ``` - - - -- `-serf-wan-bind` ((#\_serf_wan_bind)) - The address that should be bound - to for Serf WAN gossip communications. By default, the value follows the same rules - as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind` - option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that is resolved at runtime. - -- `-serf-lan-bind` ((#\_serf_lan_bind)) - The address that should be bound - to for Serf LAN gossip communications. This is an IP address that should be reachable - by all other LAN nodes in the cluster. By default, the value follows the same rules - as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind` - option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that is resolved at runtime. - -- `-check_output_max_size` - Override the default - limit of 4k for maximum size of checks, this is a positive value. By limiting this - size, it allows to put less pressure on Consul servers when many checks are having - a very large output in their checks. In order to completely disable check output - capture, it is possible to use [`discard_check_output`](#discard_check_output). - -- `-client` ((#\_client)) - The address to which Consul will bind client - interfaces, including the HTTP, HTTPS, gRPC and DNS servers. By default, this is "127.0.0.1", - allowing only loopback connections. 
In Consul 1.0 and later this can be set to - a space-separated list of addresses to bind to, or a [go-sockaddr] - template that can potentially resolve to multiple addresses. - - - - ```shell - $ consul agent -dev -client '{{ GetPrivateInterfaces | exclude "type" "ipv6" | join "address" " " }}' - ``` - - - - - - ```shell - $ consul agent -dev -client '{{ GetPrivateInterfaces | join "address" " " }} {{ GetAllInterfaces | include "flags" "loopback" | join "address" " " }}' - ``` - - - - - - ```shell - $ consul agent -dev -client '{{ GetPrivateInterfaces | exclude "name" "br.*" | join "address" " " }}' - ``` - - - -- `-config-file` ((#\_config_file)) - A configuration file to load. For - more information on the format of this file, read the [Configuration Files](#configuration_files) - section. This option can be specified multiple times to load multiple configuration - files. If it is specified multiple times, configuration files loaded later will - merge with configuration files loaded earlier. During a config merge, single-value - keys (string, int, bool) will simply have their values replaced while list types - will be appended together. - -- `-config-dir` ((#\_config_dir)) - A directory of configuration files to - load. Consul will load all files in this directory with the suffix ".json" or ".hcl". - The load order is alphabetical, and the the same merge routine is used as with - the [`config-file`](#_config_file) option above. This option can be specified multiple - times to load multiple directories. Sub-directories of the config directory are - not loaded. For more information on the format of the configuration files, see - the [Configuration Files](#configuration_files) section. - -- `-config-format` ((#\_config_format)) - The format of the configuration - files to load. Normally, Consul detects the format of the config files from the - ".json" or ".hcl" extension. Setting this option to either "json" or "hcl" forces - Consul to interpret any file with or without extension to be interpreted in that - format. - -- `-data-dir` ((#\_data_dir)) - This flag provides a data directory for - the agent to store state. This is required for all agents. The directory should - be durable across reboots. This is especially critical for agents that are running - in server mode as they must be able to persist cluster state. Additionally, the - directory must support the use of filesystem locking, meaning some types of mounted - folders (e.g. VirtualBox shared folders) may not be suitable. - - **Note:** both server and non-server agents may store ACL tokens in the state in this directory so read access may grant access to any tokens on servers and to any tokens used during service registration on non-servers. On Unix-based platforms the files are written with 0600 permissions so you should ensure only trusted processes can execute as the same user as Consul. On Windows, you should ensure the directory has suitable permissions configured as these will be inherited. - -- `-datacenter` ((#\_datacenter)) - This flag controls the datacenter in - which the agent is running. If not provided, it defaults to "dc1". Consul has first-class - support for multiple datacenters, but it relies on proper configuration. Nodes - in the same datacenter should be on a single LAN. - -- `-dev` ((#\_dev)) - Enable development server mode. 
This is useful for - quickly starting a Consul agent with all persistence options turned off, enabling - an in-memory server which can be used for rapid prototyping or developing against - the API. In this mode, [Connect is enabled](/docs/connect/configuration) and - will by default create a new root CA certificate on startup. This mode is **not** - intended for production use as it does not write any data to disk. The gRPC port - is also defaulted to `8502` in this mode. - -- `-disable-host-node-id` ((#\_disable_host_node_id)) - Setting this to - true will prevent Consul from using information from the host to generate a deterministic - node ID, and will instead generate a random node ID which will be persisted in - the data directory. This is useful when running multiple Consul agents on the same - host for testing. This defaults to false in Consul prior to version 0.8.5 and in - 0.8.5 and later defaults to true, so you must opt-in for host-based IDs. Host-based - IDs are generated using [gopsutil](https://github.com/shirou/gopsutil/tree/master/v3/host), which - is shared with HashiCorp's [Nomad](https://www.nomadproject.io/), so if you opt-in - to host-based IDs then Consul and Nomad will use information on the host to automatically - assign the same ID in both systems. - -- `-disable-keyring-file` ((#\_disable_keyring_file)) - If set, the keyring - will not be persisted to a file. Any installed keys will be lost on shutdown, and - only the given `-encrypt` key will be available on startup. This defaults to false. - -- `-dns-port` ((#\_dns_port)) - the DNS port to listen on. This overrides - the default port 8600. This is available in Consul 0.7 and later. - -- `-domain` ((#\_domain)) - By default, Consul responds to DNS queries in - the "consul." domain. This flag can be used to change that domain. All queries - in this domain are assumed to be handled by Consul and will not be recursively - resolved. - -- `-alt-domain` ((#\_alt_domain)) - This flag allows Consul to respond to - DNS queries in an alternate domain, in addition to the primary domain. If unset, - no alternate domain is used. - - In Consul 1.10.4 and later, Consul DNS responses will use the same domain as in the query (`-domain` or `-alt-domain`) where applicable. - PTR query responses will always use `-domain`, since the desired domain cannot be included in the query. - -- `-enable-script-checks` ((#\_enable_script_checks)) This controls whether - [health checks that execute scripts](/docs/discovery/checks) are enabled on this - agent, and defaults to `false` so operators must opt-in to allowing these. This - was added in Consul 0.9.0. - - ~> **Security Warning:** Enabling script checks in some configurations may - introduce a remote execution vulnerability which is known to be targeted by - malware. We strongly recommend `-enable-local-script-checks` instead. See [this - blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) - for more details. - -- `-enable-local-script-checks` ((#\_enable_local_script_checks)) - Like [`enable_script_checks`](#_enable_script_checks), but only enable them when - they are defined in the local configuration files. Script checks defined in HTTP - API registrations will still not be allowed. - -- `-encrypt` ((#\_encrypt)) - Specifies the secret key to use for encryption - of Consul network traffic. This key must be 32-bytes that are Base64-encoded. The - easiest way to create an encryption key is to use [`consul keygen`](/commands/keygen). 
- All nodes within a cluster must share the same encryption key to communicate. The - provided key is automatically persisted to the data directory and loaded automatically - whenever the agent is restarted. This means that to encrypt Consul's gossip protocol, - this option only needs to be provided once on each agent's initial startup sequence. - If it is provided after Consul has been initialized with an encryption key, then - the provided key is ignored and a warning will be displayed. - -- `-grpc-port` ((#\_grpc_port)) - the gRPC API port to listen on. Default - -1 (gRPC disabled). See [ports](#ports) documentation for more detail. - -- `-hcl` ((#\_hcl)) - A HCL configuration fragment. This HCL configuration - fragment is appended to the configuration and allows to specify the full range - of options of a config file on the command line. This option can be specified multiple - times. This was added in Consul 1.0. - -- `-http-port` ((#\_http_port)) - the HTTP API port to listen on. This overrides - the default port 8500. This option is very useful when deploying Consul to an environment - which communicates the HTTP port through the environment e.g. PaaS like CloudFoundry, - allowing you to set the port directly via a Procfile. - -- `-https-port` ((#\_https_port)) - the HTTPS API port to listen on. Default - -1 (https disabled). See [ports](#ports) documentation for more detail. - -- `-log-file` ((#\_log_file)) - writes all the Consul agent log messages - to a file. This value is used as a prefix for the log file name. The current timestamp - is appended to the file name. If the value ends in a path separator, `consul-` - will be appended to the value. If the file name is missing an extension, `.log` - is appended. For example, setting `log-file` to `/var/log/` would result in a log - file path of `/var/log/consul-{timestamp}.log`. `log-file` can be combined with - [`-log-rotate-bytes`](#_log_rotate_bytes) and [-log-rotate-duration](#_log_rotate_duration) - for a fine-grained log rotation experience. - -- `-log-rotate-bytes` ((#\_log_rotate_bytes)) - to specify the number of - bytes that should be written to a log before it needs to be rotated. Unless specified, - there is no limit to the number of bytes that can be written to a log file. - -- `-log-rotate-duration` ((#\_log_rotate_duration)) - to specify the maximum - duration a log should be written to before it needs to be rotated. Must be a duration - value such as 30s. Defaults to 24h. - -- `-log-rotate-max-files` ((#\_log_rotate_max_files)) - to specify the maximum - number of older log file archives to keep. Defaults to 0 (no files are ever deleted). - Set to -1 to discard old log files when a new one is created. - -- `-default-query-time` ((#\_default_query_time)) - This flag controls the - amount of time a blocking query will wait before Consul will force a response. - This value can be overridden by the `wait` query parameter. Note that Consul applies - some jitter on top of this time. Defaults to 300s. - -- `-max-query-time` ((#\_max_query_time)) - this flag controls the maximum - amount of time a blocking query can wait before Consul will force a response. Consul - applies jitter to the wait time. The jittered time will be capped to this time. - Defaults to 600s. - -- `-join` ((#\_join)) - Address of another agent to join upon starting up. - This can be specified multiple times to specify multiple agents to join. If Consul - is unable to join with any of the specified addresses, agent startup will fail. 
- By default, the agent won't join any nodes when it starts up. Note that using [`retry_join`](#retry_join) could be more appropriate to help mitigate node startup race conditions when automating - a Consul cluster deployment. - - In Consul 1.1.0 and later this can be dynamically defined with a - [go-sockaddr] - template that is resolved at runtime. - - If using Enterprise network segments, see [additional documentation on - joining a client to a segment](/docs/enterprise/network-segments#join_a_client_to_a_segment). - -- `-retry-join` ((#\_retry_join)) - Similar to [`-join`](#_join) but allows retrying a join until - it is successful. Once it joins successfully to a member in a list of members - it will never attempt to join again. Agents will then solely maintain their - membership via gossip. This is useful for cases where you know the address will - eventually be available. This option can be specified multiple times to - specify multiple agents to join. The value can contain IPv4, IPv6, or DNS - addresses. IPv6 must use the "bracketed" syntax. If multiple values - are given, they are tried and retried in the order listed until the first - succeeds. - - In Consul 1.1.0 and later this can be dynamically defined with a - [go-sockaddr] - template that is resolved at runtime. - - If Consul is running on the non-default Serf LAN port, the port must - be specified in the join address, or configured as the agent's default Serf port - using the [`ports.serf_lan`](#serf_lan_port) configuration option or - [`-serf-lan-port`](#_serf_lan_port) command line flag. - - If using network segments (Enterprise), see [additional documentation on - joining a client to a segment](/docs/enterprise/network-segments#join_a_client_to_a_segment). - - Here are some examples of using `-retry-join`: - - - - ```shell-session - $ consul agent -retry-join "consul.domain.internal" - ``` - - - - - - ```shell-session - $ consul agent -retry-join "10.0.4.67" - ``` - - - - - - ```shell-session - $ consul agent -retry-join "192.0.2.10:8304" - ``` - - - - - - ```shell-session - $ consul agent -retry-join "[::1]:8301" - ``` - - - - - - ```shell-session - $ consul agent -retry-join "consul.domain.internal" -retry-join "10.0.4.67" - ``` - - - - ### Cloud Auto-Joining - - As of Consul 0.9.1, `retry-join` accepts a unified interface using the - [go-discover](https://github.com/hashicorp/go-discover) library for doing - automatic cluster joining using cloud metadata. For more information, see - the [Cloud Auto-join page](/docs/install/cloud-auto-join). - - - - ```shell-session - $ consul agent -retry-join "provider=aws tag_key=..." - ``` - - - -- `-retry-interval` ((#\_retry_interval)) - Time to wait between join attempts. - Defaults to 30s. - -- `-retry-max` ((#\_retry_max)) - The maximum number of [`-join`](#_join) - attempts to be made before exiting with return code 1. By default, this is set - to 0 which is interpreted as infinite retries. - -- `-join-wan` ((#\_join_wan)) - Address of another wan agent to join upon - starting up. This can be specified multiple times to specify multiple WAN agents - to join. If Consul is unable to join with any of the specified addresses, agent - startup will fail. By default, the agent won't [`-join-wan`](#_join_wan) any nodes - when it starts up. - - In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that is resolved at runtime. 
- -- `-retry-join-wan` ((#\_retry_join_wan)) - Similar to [`retry-join`](#_retry_join) - but allows retrying a wan join if the first attempt fails. This is useful for cases - where we know the address will become available eventually. As of Consul 0.9.3 - [Cloud Auto-Joining](#cloud-auto-joining) is supported as well. - - In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] - template that is resolved at runtime. - -- `-retry-interval-wan` ((#\_retry_interval_wan)) - Time to wait between - [`-join-wan`](#_join_wan) attempts. Defaults to 30s. - -- `-retry-max-wan` ((#\_retry_max_wan)) - The maximum number of [`-join-wan`](#_join_wan) - attempts to be made before exiting with return code 1. By default, this is set - to 0 which is interpreted as infinite retries. - -- `-log-level` ((#\_log_level)) - The level of logging to show after the - Consul agent has started. This defaults to "info". The available log levels are - "trace", "debug", "info", "warn", and "err". You can always connect to an agent - via [`consul monitor`](/commands/monitor) and use any log level. Also, - the log level can be changed during a config reload. -- `-auto-reload-config` ((#\_auto_reload_config)) - This flag set Consul to automatically reload - [Reloadable Configuration](#reloadable-configuration) when configuration files change. - Consul will also watch certificate and key files set in `cert_file` and `key_file` and reload the configuration - if updated. -- `-log-json` ((#\_log_json)) - This flag enables the agent to output logs - in a JSON format. By default this is false. - -- `-node` ((#\_node)) - The name of this node in the cluster. This must - be unique within the cluster. By default this is the hostname of the machine. - -- `-node-id` ((#\_node_id)) - Available in Consul 0.7.3 and later, this - is a unique identifier for this node across all time, even if the name of the node - or address changes. This must be in the form of a hex string, 36 characters long, - such as `adf4238a-882b-9ddc-4a9d-5b6758e4159e`. If this isn't supplied, which is - the most common case, then the agent will generate an identifier at startup and - persist it in the [data directory](#_data_dir) so that it will remain the same - across agent restarts. Information from the host will be used to generate a deterministic - node ID if possible, unless [`-disable-host-node-id`](#_disable_host_node_id) is - set to true. - -- `-node-meta` ((#\_node_meta)) - Available in Consul 0.7.3 and later, this - specifies an arbitrary metadata key/value pair to associate with the node, of the - form `key:value`. This can be specified multiple times. Node metadata pairs have - the following restrictions: - - - A maximum of 64 key/value pairs can be registered per node. - - Metadata keys must be between 1 and 128 characters (inclusive) in length - - Metadata keys must contain only alphanumeric, `-`, and `_` characters. - - Metadata keys must not begin with the `consul-` prefix; that is reserved for internal use by Consul. - - Metadata values must be between 0 and 512 (inclusive) characters in length. - - Metadata values for keys beginning with `rfc1035-` are encoded verbatim in DNS TXT requests, otherwise - the metadata kv-pair is encoded according [RFC1464](https://www.ietf.org/rfc/rfc1464.txt). - -- `-pid-file` ((#\_pid_file)) - This flag provides the file path for the - agent to store its PID. This is useful for sending signals (for example, `SIGINT` - to close the agent or `SIGHUP` to update check definitions) to the agent. 
- -- `-protocol` ((#\_protocol)) - The Consul protocol version to use. Consul - agents speak protocol 2 by default, however agents will automatically use protocol > 2 when speaking to compatible agents. This should be set only when [upgrading](/docs/upgrading). You can view the protocol versions supported by Consul by running `consul -v`. - -- `-primary-gateway` ((#\_primary_gateway)) - Similar to [`retry-join-wan`](#_retry_join_wan) - but allows retrying discovery of fallback addresses for the mesh gateways in the - primary datacenter if the first attempt fails. This is useful for cases where we - know the address will become available eventually. [Cloud Auto-Joining](#cloud-auto-joining) - is supported as well as [go-sockaddr] - templates. This was added in Consul 1.8.0. - -- `-raft-protocol` ((#\_raft_protocol)) - This controls the internal version - of the Raft consensus protocol used for server communications. This must be set - to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/docs/upgrading/upgrade-specific#raft-protocol-version-compatibility) for more details. - -- `-recursor` ((#\_recursor)) - Specifies the address of an upstream DNS - server. This option may be provided multiple times, and is functionally equivalent - to the [`recursors` configuration option](#recursors). - -- `-rejoin` ((#\_rejoin)) - When provided, Consul will ignore a previous - leave and attempt to rejoin the cluster when starting. By default, Consul treats - leave as a permanent intent and does not attempt to join the cluster again when - starting. This flag allows the previous state to be used to rejoin the cluster. - -- `-segment` ((#\_segment)) - This flag is used to set - the name of the network segment the agent belongs to. An agent can only join and - communicate with other agents within its network segment. Ensure the [join - operation uses the correct port for this segment](/docs/enterprise/network-segments#join_a_client_to_a_segment). - Review the [Network Segments documentation](/docs/enterprise/network-segments) - for more details. By default, this is an empty string, which is the `` - network segment. - - ~> **Warning:** The `segment` flag cannot be used with the [`partition`](#partition-1) option. - -- `-serf-lan-allowed-cidrs` ((#\_serf_lan_allowed_cidrs)) - The Serf LAN allowed CIDRs allow to accept incoming - connections for Serf only from several networks (multiple values are supported). - Those networks are specified with CIDR notation (eg: 192.168.1.0/24). - This is available in Consul 1.8 and later. - -- `-serf-lan-port` ((#\_serf_lan_port)) - the Serf LAN port to listen on. - This overrides the default Serf LAN port 8301. This is available in Consul 1.2.2 - and later. - -- `-serf-wan-allowed-cidrs` ((#\_serf_wan_allowed_cidrs)) - The Serf WAN allowed CIDRs allow to accept incoming - connections for Serf only from several networks (multiple values are supported). - Those networks are specified with CIDR notation (eg: 192.168.1.0/24). - This is available in Consul 1.8 and later. - -- `-serf-wan-port` ((#\_serf_wan_port)) - the Serf WAN port to listen on. - This overrides the default Serf WAN port 8302. This is available in Consul 1.2.2 - and later. - -- `-server` ((#\_server)) - This flag is used to control if an agent is - in server or client mode. When provided, an agent will act as a Consul server. 
- Each Consul cluster must have at least one server and ideally no more than 5 per - datacenter. All servers participate in the Raft consensus algorithm to ensure that - transactions occur in a consistent, linearizable manner. Transactions modify cluster - state, which is maintained on all server nodes to ensure availability in the case - of node failure. Server nodes also participate in a WAN gossip pool with server - nodes in other datacenters. Servers act as gateways to other datacenters and forward - traffic as appropriate. - -- `-server-port` ((#\_server_port)) - the server RPC port to listen on. - This overrides the default server RPC port 8300. This is available in Consul 1.2.2 - and later. - -- `-non-voting-server` ((#\_non_voting_server)) - **This field - is deprecated in Consul 1.9.1. See the [`-read-replica`](#_read_replica) flag instead.** - -- `-read-replica` ((#\_read_replica)) - This - flag is used to make the server not participate in the Raft quorum, and have it - only receive the data replication stream. This can be used to add read scalability - to a cluster in cases where a high volume of reads to servers are needed. - -- `-syslog` ((#\_syslog)) - This flag enables logging to syslog. This is - only supported on Linux and OSX. It will result in an error if provided on Windows. - -- `-ui` ((#\_ui)) - Enables the built-in web UI server and the required - HTTP routes. This eliminates the need to maintain the Consul web UI files separately - from the binary. - -- `-ui-dir` ((#\_ui_dir)) - This flag provides the directory containing - the Web UI resources for Consul. This will automatically enable the Web UI. The - directory must be readable to the agent. Starting with Consul version 0.7.0 and - later, the Web UI assets are included in the binary so this flag is no longer necessary; - specifying only the `-ui` flag is enough to enable the Web UI. Specifying both - the '-ui' and '-ui-dir' flags will result in an error. - - -- `-ui-content-path` ((#\_ui\_content\_path)) - This flag provides the option - to change the path the Consul UI loads from and will be displayed in the browser. - By default, the path is `/ui/`, for example `http://localhost:8500/ui/`. Only alphanumerics, - `-`, and `_` are allowed in a custom path.`/v1/` is not allowed as it would overwrite - the API endpoint. - ## Configuration Files ((#configuration_files)) In addition to the command-line options, configuration for the Consul agent can be put into @@ -1416,7 +868,7 @@ There are also a number of common configuration options supported by all provide certificate will be requested by a proxy before this limit is reached. This is also the effective limit on how long a server outage can last (with no leader) before network connections will start being rejected. Defaults to `72h`. - + You can specify a range from one hour (minimum) up to one year (maximum) using the following units: `h`, `m`, `s`, `ms`, `us` (or `µs`), `ns`, or a combination of those units, e.g. `1h5m`. 
From 7a1d4f0ec5b24510d95692dbc7d54187a22275ab Mon Sep 17 00:00:00 2001 From: Blake Covarrubias Date: Mon, 11 Apr 2022 16:06:20 -0700 Subject: [PATCH 119/785] docs: move configuration files content from agent/config/index to agent/config/agent-config-files --- .../docs/agent/config/agent-config-files.mdx | 1884 +++++++++++++++ website/content/docs/agent/config/index.mdx | 2152 ----------------- 2 files changed, 1884 insertions(+), 2152 deletions(-) diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx index e69de29bb..a454adb0e 100644 --- a/website/content/docs/agent/config/agent-config-files.mdx +++ b/website/content/docs/agent/config/agent-config-files.mdx @@ -0,0 +1,1884 @@ +--- +layout: docs +page_title: Consul Agent Configuration Reference +description: >- + This topic describes the supported parameters for configuring Consul agents in HCL and JSON configuration files. +--- + +# Configuration Files ((#configuration_files)) + +In addition to the command-line options, configuration can be put into +files. This may be easier in certain situations, for example when Consul is +being configured using a configuration management system. + +The configuration files are JSON formatted, making them easily readable +and editable by both humans and computers. The configuration is formatted +as a single JSON object with configuration within it. + +Configuration files are used for more than just setting up the agent, +they are also used to provide check and service definitions. These are used +to announce the availability of system servers to the rest of the cluster. +They are documented separately under [check configuration](/docs/agent/checks) and +[service configuration](/docs/agent/services) respectively. The service and check +definitions support being updated during a reload. + +#### Example Configuration File + +```json +{ + "datacenter": "east-aws", + "data_dir": "/opt/consul", + "log_level": "INFO", + "node_name": "foobar", + "server": true, + "watches": [ + { + "type": "checks", + "handler": "/usr/bin/health-check-handler.sh" + } + ], + "telemetry": { + "statsite_address": "127.0.0.1:2180" + } +} +``` + +#### Configuration Key Reference + +-> **Note:** All the TTL values described below are parsed by Go's `time` package, and have the following +[formatting specification](https://golang.org/pkg/time/#ParseDuration): "A +duration string is a possibly signed sequence of decimal numbers, each with +optional fraction and a unit suffix, such as '300ms', '-1.5h' or '2h45m'. +Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." + +- `acl` ((#acl)) - This object allows a number of sub-keys to be set which + controls the ACL system. Configuring the ACL system within the ACL stanza was added + in Consul 1.4.0 + + The following sub-keys are available: + + - `enabled` ((#acl_enabled)) - Enables ACLs. + + - `policy_ttl` ((#acl_policy_ttl)) - Used to control Time-To-Live caching + of ACL policies. By default, this is 30 seconds. This setting has a major performance + impact: reducing it will cause more frequent refreshes while increasing it reduces + the number of refreshes. However, because the caches are not actively invalidated, + ACL policy may be stale up to the TTL value. + + - `role_ttl` ((#acl_role_ttl)) - Used to control Time-To-Live caching + of ACL roles. By default, this is 30 seconds. 
This setting has a major performance + impact: reducing it will cause more frequent refreshes while increasing it reduces + the number of refreshes. However, because the caches are not actively invalidated, + ACL role may be stale up to the TTL value. + + - `token_ttl` ((#acl_token_ttl)) - Used to control Time-To-Live caching + of ACL tokens. By default, this is 30 seconds. This setting has a major performance + impact: reducing it will cause more frequent refreshes while increasing it reduces + the number of refreshes. However, because the caches are not actively invalidated, + ACL token may be stale up to the TTL value. + + - `down_policy` ((#acl_down_policy)) - Either "allow", "deny", "extend-cache" + or "async-cache"; "extend-cache" is the default. In the case that a policy or + token cannot be read from the [`primary_datacenter`](#primary_datacenter) or + leader node, the down policy is applied. In "allow" mode, all actions are permitted, + "deny" restricts all operations, and "extend-cache" allows any cached objects + to be used, ignoring the expiry time of the cached entry. If the request uses an + ACL that is not in the cache, "extend-cache" falls back to the behavior of + `default_policy`. + The value "async-cache" acts the same way as "extend-cache" + but performs updates asynchronously when an ACL is present but its TTL is expired; + thus, if latency is bad between the primary and secondary datacenters, latency + of operations is not impacted. + + - `default_policy` ((#acl_default_policy)) - Either "allow" or "deny"; + defaults to "allow" but this will be changed in a future major release. The default + policy controls the behavior of a token when there is no matching rule. In "allow" + mode, ACLs are a denylist: any operation not specifically prohibited is allowed. + In "deny" mode, ACLs are an allowlist: any operation not specifically + allowed is blocked. **Note**: this will not take effect until you've enabled ACLs. + + - `enable_key_list_policy` ((#acl_enable_key_list_policy)) - Boolean value, defaults to false. + When true, the `list` permission will be required on the prefix being recursively read from the KV store. + Regardless of being enabled, the full set of KV entries under the prefix will be filtered + to remove any entries that the request's ACL token does not grant at least read + permissions. This option is only available in Consul 1.0 and newer. + + - `enable_token_replication` ((#acl_enable_token_replication)) - By default + secondary Consul datacenters will perform replication of only ACL policies and + roles. Setting this configuration will enable ACL token replication and + allow for the creation of both [local tokens](/api/acl/tokens#local) and + [auth methods](/docs/acl/auth-methods) in connected secondary datacenters. + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + global tokens already present in the secondary datacenter will be lost. For + production environments, consider configuring ACL replication in your initial + datacenter bootstrapping process. + + - `enable_token_persistence` ((#acl_enable_token_persistence)) - Either + `true` or `false`. When `true`, tokens set using the API will be persisted to + disk and reloaded when an agent restarts. + + - `tokens` ((#acl_tokens)) - This object holds all of the configured + ACL tokens for the agent's usage. + + - `initial_management` ((#acl_tokens_initial_management)) - This is available in + Consul 1.11 and later.
In prior versions, use [`acl.tokens.master`](#acl_tokens_master). + + Only used for servers in the [`primary_datacenter`](#primary_datacenter). + This token will be created with management-level permissions if it does not exist. + It allows operators to bootstrap the ACL system with a token Secret ID that is + well-known. + + The `initial_management` token is only installed when a server acquires cluster + leadership. If you would like to install or change it, set the new value for + `initial_management` in the configuration for all servers. Once this is done, + restart the current leader to force a leader election. If the `initial_management` + token is not supplied, then the servers do not create an initial management token. + When you provide a value, it should be a UUID. To maintain backwards compatibility + and an upgrade path this restriction is not currently enforced but will be in a + future major Consul release. + + - `master` ((#acl_tokens_master)) **Renamed in Consul 1.11 to + [`acl.tokens.initial_management`](#acl_tokens_initial_management).** + + - `default` ((#acl_tokens_default)) - When provided, the agent will + use this token when making requests to the Consul servers. Clients can override + this token on a per-request basis by providing the "?token" query parameter. + When not provided, the empty token, which maps to the 'anonymous' ACL token, + is used. + + - `agent` ((#acl_tokens_agent)) - Used for clients and servers to perform + internal operations. If this isn't specified, then the + [`default`](#acl_tokens_default) will be used. + + This token must at least have write access to the node name it will + register as in order to set any of the node-level information in the + catalog such as metadata, or the node's tagged addresses. + + - `agent_recovery` ((#acl_tokens_agent_recovery)) - This is available in Consul 1.11 + and later. In prior versions, use [`acl.tokens.agent_master`](#acl_tokens_agent_master). + + Used to access [agent endpoints](/api/agent) that require agent read or write privileges, + or node read privileges, even if Consul servers aren't present to validate any tokens. + This should only be used by operators during outages, regular ACL tokens should normally + be used by applications. + + - `agent_master` ((#acl_tokens_agent_master)) **Renamed in Consul 1.11 to + [`acl.tokens.agent_recovery`](#acl_tokens_agent_recovery).** + + - `replication` ((#acl_tokens_replication)) - The ACL token used to + authorize secondary datacenters with the primary datacenter for replication + operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + policies and roles already present in the secondary datacenter will be lost. For + production environments, consider configuring ACL replication in your initial + datacenter bootstrapping process. + + - `managed_service_provider` ((#acl_tokens_managed_service_provider)) - An + array of ACL tokens used by Consul managed service providers for cluster operations. 
+ + ```json + "managed_service_provider": [ + { + "accessor_id": "ed22003b-0832-4e48-ac65-31de64e5c2ff", + "secret_id": "cb6be010-bba8-4f30-a9ed-d347128dde17" + } + ] + ``` + +- `acl_datacenter` - **This field is deprecated in Consul 1.4.0. See the [`primary_datacenter`](#primary_datacenter) field instead.** + + This designates the datacenter which is authoritative for ACL information. It must be provided to enable ACLs. All servers and datacenters must agree on the ACL datacenter. Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients, + it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement + of ACLs. Please review the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) for more details. + +- `acl_default_policy` ((#acl_default_policy_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.default_policy`](#acl_default_policy) field instead.** + Either "allow" or "deny"; defaults to "allow". The default policy controls the + behavior of a token when there is no matching rule. In "allow" mode, ACLs are a + denylist: any operation not specifically prohibited is allowed. In "deny" mode, + ACLs are an allowlist: any operation not specifically allowed is blocked. **Note**: + this will not take effect until you've set `primary_datacenter` to enable ACL support. + +- `acl_down_policy` ((#acl_down_policy_legacy)) - **Deprecated in Consul + 1.4.0. See the [`acl.down_policy`](#acl_down_policy) field instead.** Either "allow", + "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the + case that the policy for a token cannot be read from the [`primary_datacenter`](#primary_datacenter) + or leader node, the down policy is applied. In "allow" mode, all actions are permitted, + "deny" restricts all operations, and "extend-cache" allows any cached ACLs to be + used, ignoring their TTL values. If a non-cached ACL is used, "extend-cache" acts + like "deny". The value "async-cache" acts the same way as "extend-cache" but performs + updates asynchronously when ACL is present but its TTL is expired, thus, if latency + is bad between ACL authoritative and other datacenters, latency of operations is + not impacted. + +- `acl_agent_master_token` ((#acl_agent_master_token_legacy)) - **Deprecated + in Consul 1.4.0. See the [`acl.tokens.agent_master`](#acl_tokens_agent_master) + field instead.** Used to access [agent endpoints](/api/agent) that + require agent read or write privileges, or node read privileges, even if Consul + servers aren't present to validate any tokens. This should only be used by operators + during outages, regular ACL tokens should normally be used by applications. This + was added in Consul 0.7.2 and is only used when [`acl_enforce_version_8`](#acl_enforce_version_8) is set to true. + +- `acl_agent_token` ((#acl_agent_token_legacy)) - **Deprecated in Consul + 1.4.0. See the [`acl.tokens.agent`](#acl_tokens_agent) field instead.** Used for + clients and servers to perform internal operations. If this isn't specified, then + the [`acl_token`](#acl_token) will be used. This was added in Consul 0.7.2. + + This token must at least have write access to the node name it will register as in order to set any + of the node-level information in the catalog such as metadata, or the node's tagged addresses. 
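To tie the token-related settings above together, here is a minimal sketch of the modern `acl` stanza that replaces the deprecated top-level `acl_*` fields. The UUIDs are placeholders rather than real secrets, and the policy choices are only illustrative.

```json
{
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "down_policy": "extend-cache",
    "enable_token_persistence": true,
    "tokens": {
      "default": "00000000-0000-0000-0000-000000000001",
      "agent": "00000000-0000-0000-0000-000000000002"
    }
  }
}
```

Keeping `tokens.agent` separate from `tokens.default` lets the agent's node-registration privileges stay distinct from the token handed out for application requests.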
+ +- `acl_enforce_version_8` - **Deprecated in + Consul 1.4.0 and removed in 1.8.0.** Used for clients and servers to determine if enforcement should + occur for new ACL policies being previewed before Consul 0.8. Added in Consul 0.7.2, + this defaults to false in versions of Consul prior to 0.8, and defaults to true + in Consul 0.8 and later. This helps ease the transition to the new ACL features + by allowing policies to be in place before enforcement begins. + +- `acl_master_token` ((#acl_master_token_legacy)) - **Deprecated in Consul + 1.4.0. See the [`acl.tokens.master`](#acl_tokens_master) field instead.** + +- `acl_replication_token` ((#acl_replication_token_legacy)) - **Deprecated + in Consul 1.4.0. See the [`acl.tokens.replication`](#acl_tokens_replication) field + instead.** Only used for servers outside the [`primary_datacenter`](#primary_datacenter) + running Consul 0.7 or later. When provided, this will enable [ACL replication](https://learn.hashicorp.com/tutorials/consul/access-control-replication-multiple-datacenters), + using this token to retrieve and replicate the ACLs + to the non-authoritative local datacenter. In Consul 0.9.1 and later, you can enable + ACL replication using [`acl.enable_token_replication`](#acl_enable_token_replication) and then + set the token later using the [agent token API](/api/agent#update-acl-tokens) + on each server. If the `acl_replication_token` is set in the config, it will automatically + set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility. + + If there's a partition or other outage affecting the authoritative datacenter, and the + [`acl_down_policy`](/docs/agent/options#acl_down_policy) is set to "extend-cache", tokens not + in the cache can be resolved during the outage using the replicated set of ACLs. + +- `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See + the [`acl.tokens.default`](#acl_tokens_default) field instead.** When provided, + the agent will use this token when making requests to the Consul servers. Clients + can override this token on a per-request basis by providing the "?token" query + parameter. When not provided, the empty token, which maps to the 'anonymous' ACL + policy, is used. + +- `acl_ttl` ((#acl_ttl_legacy)) - **Deprecated in Consul 1.4.0. See the + [`acl.token_ttl`](#acl_token_ttl) field instead.** Used to control Time-To-Live + caching of ACLs. By default, this is 30 seconds. This setting has a major performance + impact: reducing it will cause more frequent refreshes while increasing it reduces + the number of refreshes. However, because the caches are not actively invalidated, + ACL policy may be stale up to the TTL value. + +- `addresses` - This is a nested object that allows setting + bind addresses. In Consul 1.0 and later these can be set to a space-separated list + of addresses to bind to, or a [go-sockaddr] template that can potentially resolve to multiple addresses. + + `http`, `https` and `grpc` all support binding to a Unix domain socket. A + socket can be specified in the form `unix:///path/to/socket`. A new domain + socket will be created at the given path. If the specified file path already + exists, Consul will attempt to clear the file and create the domain socket + in its place. The permissions of the socket file are tunable via the + [`unix_sockets` config construct](#unix_sockets).
+ + When running Consul agent commands against Unix socket interfaces, use the + `-http-addr` argument to specify the path to the socket. You can also place + the desired values in the `CONSUL_HTTP_ADDR` environment variable. + + For TCP addresses, the environment variable value should be an IP address + _with the port_. For example: `10.0.0.1:8500` and not `10.0.0.1`. However, + ports are set separately in the [`ports`](#ports) structure when + defining them in a configuration file. + + The following keys are valid: + + - `dns` - The DNS server. Defaults to `client_addr` + - `http` - The HTTP API. Defaults to `client_addr` + - `https` - The HTTPS API. Defaults to `client_addr` + - `grpc` - The gRPC API. Defaults to `client_addr` + +- `advertise_addr` Equivalent to the [`-advertise` command-line flag](#_advertise). + +- `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](#_advertise-wan). + +- `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_wan_ipv6` This was added together with [`advertise_addr_wan_ipv4`](#advertise_addr_wan_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_reconnect_timeout` This is a per-agent setting of the [`reconnect_timeout`](#reconnect_timeout) parameter. + This agent will advertise to all other nodes in the cluster that after this timeout, the node may be completely + removed from the cluster. This may only be set on client agents and if unset then other nodes will use the main + `reconnect_timeout` setting when determining when this node may be removed from the cluster. + +- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](#_alt_domain) + +- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](#_serf_lan_bind). + This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port). + +- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](#_serf_lan_allowed_cidrs). + +- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](#_serf_wan_bind). + +- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](#_serf_wan_allowed_cidrs). + +- `audit` - Added in Consul 1.8, the audit object allow users to enable auditing + and configure a sink and filters for their audit logs. For more information, review the [audit log tutorial](https://learn.hashicorp.com/tutorials/consul/audit-logging). 
+ + ```hcl + audit { + enabled = true + sink "My sink" { + type = "file" + format = "json" + path = "data/audit/audit.json" + delivery_guarantee = "best-effort" + rotate_duration = "24h" + rotate_max_files = 15 + rotate_bytes = 25165824 + } + } + ``` + + The following sub-keys are available: + + - `enabled` - Controls whether Consul logs out each time a user + performs an operation. ACLs must be enabled to use this feature. Defaults to `false`. + + - `sink` - This object provides configuration for the destination to which + Consul will log auditing events. Sink is an object containing keys to sink objects, where the key is the name of the sink. + + - `type` - Type specifies what kind of sink this is. + The following keys are valid: + - `file` - Currently only file sinks are available, they take the following keys. + - `format` - Format specifies what format the events will + be emitted with. + The following keys are valid: + - `json` - Currently only json events are offered. + - `path` - The directory and filename to write audit events to. + - `delivery_guarantee` - Specifies + the rules governing how audit events are written. + The following keys are valid: + - `best-effort` - Consul only supports `best-effort` event delivery. + - `mode` - The permissions to set on the audit log files. + - `rotate_duration` - Specifies the + interval by which the system rotates to a new log file. At least one of `rotate_duration` or `rotate_bytes` + must be configured to enable audit logging. + - `rotate_max_files` - Defines the + limit that Consul should follow before it deletes old log files. + - `rotate_bytes` - Specifies how large an + individual log file can grow before Consul rotates to a new file. At least one of `rotate_bytes` or + `rotate_duration` must be configured to enable audit logging. + +- `autopilot` Added in Consul 0.8, this object allows a + number of sub-keys to be set which can configure operator-friendly settings for + Consul servers. When these keys are provided as configuration, they will only be + respected on bootstrapping. If they are not provided, the defaults will be used. + In order to change the value of these options after bootstrapping, you will need + to use the [Consul Operator Autopilot](/commands/operator/autopilot) + command. For more information about Autopilot, review the [Autopilot tutorial](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations). + + The following sub-keys are available: + + - `cleanup_dead_servers` - This controls the + automatic removal of dead server nodes periodically and whenever a new server + is added to the cluster. Defaults to `true`. + + - `last_contact_threshold` - Controls the + maximum amount of time a server can go without contact from the leader before + being considered unhealthy. Must be a duration value such as `10s`. Defaults + to `200ms`. + + - `max_trailing_logs` - Controls the maximum number + of log entries that a server can trail the leader by before being considered + unhealthy. Defaults to 250. + + - `min_quorum` - Sets the minimum number of servers necessary + in a cluster before autopilot can prune dead servers. There is no default. + + - `server_stabilization_time` - Controls + the minimum amount of time a server must be stable in the 'healthy' state before + being added to the cluster. Only takes effect if all servers are running Raft + protocol version 3 or higher. Must be a duration value such as `30s`. Defaults + to `10s`. 
+ + - `redundancy_zone_tag` - + This controls the [`-node-meta`](#_node_meta) key to use when Autopilot is separating + servers into zones for redundancy. Only one server in each zone can be a voting + member at one time. If left blank (the default), this feature will be disabled. + + - `disable_upgrade_migration` - + If set to `true`, this setting will disable Autopilot's upgrade migration strategy + in Consul Enterprise of waiting until enough newer-versioned servers have been + added to the cluster before promoting any of them to voters. Defaults to `false`. + + - `upgrade_version_tag` - + The node_meta tag to use for version info when performing upgrade migrations. + If this is not set, the Consul version will be used. + +- `auto_config` This object allows setting options for the `auto_config` feature. + + The following sub-keys are available: + + - `enabled` (Defaults to `false`) This option enables `auto_config` on a client + agent. When starting up but before joining the cluster, the client agent will + make an RPC to the configured server addresses to request configuration settings, + such as its `agent` ACL token, TLS certificates, Gossip encryption key as well + as other configuration settings. These configurations get merged in as defaults + with any user-supplied configuration on the client agent able to override them. + The initial RPC uses a JWT specified with either `intro_token`, + `intro_token_file` or the `CONSUL_INTRO_TOKEN` environment variable to authorize + the request. How the JWT token is verified is controlled by the `auto_config.authorizer` + object available for use on Consul servers. Enabling this option also turns + on Connect because it is vital for `auto_config`, more specifically the CA + and certificates infrastructure. + + ~> **Warning:** Enabling `auto_config` conflicts with the [`auto_encrypt.tls`](#tls) feature. + Only one option may be specified. + + - `intro_token` (Defaults to `""`) This specifies the JWT to use for the initial + `auto_config` RPC to the Consul servers. This can be overridden with the + `CONSUL_INTRO_TOKEN` environment variable + + - `intro_token_file` (Defaults to `""`) This specifies a file containing the JWT + to use for the initial `auto_config` RPC to the Consul servers. This token + from this file is only loaded if the `intro_token` configuration is unset as + well as the `CONSUL_INTRO_TOKEN` environment variable + + - `server_addresses` (Defaults to `[]`) This specifies the addresses of servers in + the local datacenter to use for the initial RPC. These addresses support + [Cloud Auto-Joining](#cloud-auto-joining) and can optionally include a port to + use when making the outbound connection. If not port is provided the `server_port` + will be used. + + - `dns_sans` (Defaults to `[]`) This is a list of extra DNS SANs to request in the + client agent's TLS certificate. The `localhost` DNS SAN is always requested. + + - `ip_sans` (Defaults to `[]`) This is a list of extra IP SANs to request in the + client agent's TLS certificate. The `::1` and `127.0.0.1` IP SANs are always requested. + + - `authorization` This object controls how a Consul server will authorize `auto_config` + requests and in particular how to verify the JWT intro token. + + - `enabled` (Defaults to `false`) This option enables `auto_config` authorization + capabilities on the server. + + - `static` This object controls configuring the static authorizer setup in the Consul + configuration file. 
Almost all sub-keys are identical to those provided by the [JWT + Auth Method](/docs/acl/auth-methods/jwt). + + - `jwt_validation_pub_keys` (Defaults to `[]`) A list of PEM-encoded public keys + to use to authenticate signatures locally. + + Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. + + - `oidc_discovery_url` (Defaults to `""`) The OIDC Discovery URL, without any + .well-known component (base path). + + Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. + + - `oidc_discovery_ca_cert` (Defaults to `""`) PEM encoded CA cert for use by the TLS + client used to talk with the OIDC Discovery URL. NOTE: Every line must end + with a newline (`\n`). If not set, system certificates are used. + + - `jwks_url` (Defaults to `""`) The JWKS URL to use to authenticate signatures. + + Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. + + - `jwks_ca_cert` (Defaults to `""`) PEM encoded CA cert for use by the TLS client + used to talk with the JWKS URL. NOTE: Every line must end with a newline + (`\n`). If not set, system certificates are used. + + - `claim_mappings` (Defaults to `(map[string]string)` Mappings of claims (key) that + will be copied to a metadata field (value). Use this if the claim you are capturing + is singular (such as an attribute). + + When mapped, the values can be any of a number, string, or boolean and will + all be stringified when returned. + + - `list_claim_mappings` (Defaults to `(map[string]string)`) Mappings of claims (key) + will be copied to a metadata field (value). Use this if the claim you are capturing + is list-like (such as groups). + + When mapped, the values in each list can be any of a number, string, or + boolean and will all be stringified when returned. + + - `jwt_supported_algs` (Defaults to `["RS256"]`) JWTSupportedAlgs is a list of + supported signing algorithms. + + - `bound_audiences` (Defaults to `[]`) List of `aud` claims that are valid for + login; any match is sufficient. + + - `bound_issuer` (Defaults to `""`) The value against which to match the `iss` + claim in a JWT. + + - `expiration_leeway` (Defaults to `"0s"`) Duration of leeway when + validating expiration of a token to account for clock skew. Defaults to 150s + (2.5 minutes) if set to 0s and can be disabled if set to -1ns. + + - `not_before_leeway` (Defaults to `"0s"`) Duration of leeway when + validating not before values of a token to account for clock skew. Defaults + to 150s (2.5 minutes) if set to 0s and can be disabled if set to -1. + + - `clock_skew_leeway` (Defaults to `"0s"`) Duration of leeway when + validating all claims to account for clock skew. Defaults to 60s (1 minute) + if set to 0s and can be disabled if set to -1ns. + + - `claim_assertions` (Defaults to []) List of assertions about the mapped + claims required to authorize the incoming RPC request. The syntax uses + github.com/hashicorp/go-bexpr which is shared with the + [API filtering feature](/api/features/filtering). For example, the following + configurations when combined will ensure that the JWT `sub` matches the node + name requested by the client. + + ``` + claim_mappings { + sub = "node_name" + } + claim_assertions = [ + "value.node_name == \"${node}\"" + ] + ``` + + The assertions are lightly templated using [HIL syntax](https://github.com/hashicorp/hil) + to interpolate some values from the RPC request. 
The list of variables that can be interpolated + are: + + - `node` - The node name the client agent is requesting. + + - `segment` - The network segment name the client is requesting. + + - `partition` - The admin partition name the client is requesting. + +- `auto_encrypt` This object allows setting options for the `auto_encrypt` feature. + + The following sub-keys are available: + + - `allow_tls` (Defaults to `false`) This option enables + `auto_encrypt` on the servers and allows them to automatically distribute certificates + from the Connect CA to the clients. If enabled, the server can accept incoming + connections from both the built-in CA and the Connect CA, as well as their certificates. + Note, the server will only present the built-in CA and certificate, which the + client can verify using the CA it received from `auto_encrypt` endpoint. If disabled, + a client configured with `auto_encrypt.tls` will be unable to start. + + - `tls` (Defaults to `false`) Allows the client to request the + Connect CA and certificates from the servers, for encrypting RPC communication. + The client will make the request to any servers listed in the `-join` or `-retry-join` + option. This requires that every server to have `auto_encrypt.allow_tls` enabled. + When both `auto_encrypt` options are used, it allows clients to receive certificates + that are generated on the servers. If the `-server-port` is not the default one, + it has to be provided to the client as well. Usually this is discovered through + LAN gossip, but `auto_encrypt` provision happens before the information can be + distributed through gossip. The most secure `auto_encrypt` setup is when the + client is provided with the built-in CA, `verify_server_hostname` is turned on, + and when an ACL token with `node.write` permissions is setup. It is also possible + to use `auto_encrypt` with a CA and ACL, but without `verify_server_hostname`, + or only with a ACL enabled, or only with CA and `verify_server_hostname`, or + only with a CA, or finally without a CA and without ACL enabled. In any case, + the communication to the `auto_encrypt` endpoint is always TLS encrypted. + + ~> **Warning:** Enabling `auto_encrypt.tls` conflicts with the [`auto_config`](#auto_config) feature. + Only one option may be specified. + + - `dns_san` (Defaults to `[]`) When this option is being + used, the certificates requested by `auto_encrypt` from the server have these + `dns_san` set as DNS SAN. + + - `ip_san` (Defaults to `[]`) When this option is being used, + the certificates requested by `auto_encrypt` from the server have these `ip_san` + set as IP SAN. + +- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](#_bootstrap). + +- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](#_bootstrap_expect). + +- `bind_addr` Equivalent to the [`-bind` command-line flag](#_bind). + + This parameter can be set to a go-sockaddr template that resolves to a single + address. Special characters such as backslashes `\` or double quotes `"` + within a double quoted string value must be escaped with a backslash `\`. + Some example templates: + + + +```hcl +bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr \"address\" }}" +``` + +```json +{ + "bind_addr": "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr \"address\" }}" +} +``` + + + +- `cache` configuration for client agents. 
The configurable values are the following: + + - `entry_fetch_max_burst` The size of the token bucket used to recharge the rate-limit per + cache entry. The default value is 2 and means that when the cache has not been updated + for a long time, 2 successive queries can be made as long as the rate-limit is not + reached. + + - `entry_fetch_rate` configures the rate-limit at which the cache may refresh a single + entry. On a cluster with many changes per second, watching changes in the cache might put high + pressure on the servers. This ensures the number of requests for a single cache entry + will never go beyond this limit, even when a given service changes every 1/100s. + Since this is a per cache entry limit, a highly unstable service will only rate + limit the watches on that service, not on other services/entries. + The value is strictly positive and expressed in queries per second as a float: + 1 means 1 query per second, and 0.1 means at most 1 request every 10s. + By default there is no limit; the value should be tuned on large + clusters to avoid performing too many RPCs on entries that change frequently. + +- `check_update_interval` ((#check_update_interval)) + This interval controls how often check output from checks in a steady state is + synchronized with the server. By default, this is set to 5 minutes ("5m"). Many + checks which are in a steady state produce slightly different output per run (timestamps, + etc.), which causes constant writes. This configuration allows deferring the sync + of check output for a given interval to reduce write pressure. If a check ever + changes state, the new state and associated output is synchronized immediately. + To disable this behavior, set the value to "0s". + +- `client_addr` Equivalent to the [`-client` command-line flag](#_client). + +- `config_entries` This object allows setting options for centralized config entries. + + The following sub-keys are available: + + - `bootstrap` ((#config_entries_bootstrap)) + This is a list of inlined config entries to insert into the state store when + the Consul server gains leadership. This option is only applicable to server + nodes. Each bootstrap entry will be created only if it does not exist. When reloading, + any new entries that have been added to the configuration will be processed. + See the [configuration entry docs](/docs/agent/config-entries) for more + details about the contents of each entry. + +- `connect` This object allows setting options for the Connect feature. + + The following sub-keys are available: + + - `enabled` ((#connect_enabled)) Controls whether Connect features are + enabled on this agent. Should be enabled on all servers in the cluster + in order for Connect to function properly. Defaults to false. + + - `enable_mesh_gateway_wan_federation` ((#connect_enable_mesh_gateway_wan_federation)) Controls whether cross-datacenter federation traffic between servers is funneled + through mesh gateways. Defaults to false. This was added in Consul 1.8.0. + + - `ca_provider` ((#connect_ca_provider)) Controls which CA provider to + use for Connect's CA. Currently only the `aws-pca`, `consul`, and `vault` providers are supported. + This is only used when initially bootstrapping the cluster. For an existing cluster, + use the [Update CA Configuration Endpoint](/api/connect/ca#update-ca-configuration). + + - `ca_config` ((#connect_ca_config)) An object which allows setting different + config options based on the CA provider chosen. This is only used when initially + bootstrapping the cluster.
For an existing cluster, use the [Update CA Configuration + Endpoint](/api/connect/ca#update-ca-configuration). + + The following providers are supported: + + #### AWS ACM Private CA Provider (`ca_provider = "aws-pca"`) + + - `existing_arn` ((#aws_ca_existing_arn)) The Amazon Resource Name (ARN) of + an existing private CA in your ACM account. If specified, Consul will + attempt to use the existing CA to issue certificates. + + #### Consul CA Provider (`ca_provider = "consul"`) + + - `private_key` ((#consul_ca_private_key)) The PEM contents of the + private key to use for the CA. + + - `root_cert` ((#consul_ca_root_cert)) The PEM contents of the root + certificate to use for the CA. + + #### Vault CA Provider (`ca_provider = "vault"`) + + - `address` ((#vault_ca_address)) The address of the Vault server to + connect to. + + - `token` ((#vault_ca_token)) The Vault token to use. In Consul 1.8.5 and later, if + the token has the [renewable](https://www.vaultproject.io/api-docs/auth/token#renewable) + flag set, Consul will attempt to renew its lease periodically after half the + duration has expired. + + - `root_pki_path` ((#vault_ca_root_pki)) The path to use for the root + CA pki backend in Vault. This can be an existing backend with a CA already + configured, or a blank/unmounted backend in which case Connect will automatically + mount/generate the CA. The Vault token given above must have `sudo` access + to this backend, as well as permission to mount the backend at this path if + it is not already mounted. + + - `intermediate_pki_path` ((#vault_ca_intermediate_pki)) + The path to use for the temporary intermediate CA pki backend in Vault. **Connect + will overwrite any data at this path in order to generate a temporary intermediate + CA**. The Vault token given above must have `write` access to this backend, + as well as permission to mount the backend at this path if it is not already + mounted. + + #### Common CA Config Options + + There are also a number of common configuration options supported by all providers: + + - `csr_max_concurrent` ((#ca_csr_max_concurrent)) Sets a limit on the number + of Certificate Signing Requests that can be processed concurrently. Defaults + to 0 (disabled). This is useful when you want to limit the number of CPU cores + available to the server for certificate signing operations. For example, on an + 8 core server, setting this to 1 will ensure that no more than one CPU core + will be consumed when generating or rotating certificates. Setting this is + recommended **instead** of `csr_max_per_second` when you want to limit the + number of cores consumed since it is simpler to reason about limiting CSR + resources this way without artificially slowing down rotations. Added in 1.4.1. + + - `csr_max_per_second` ((#ca_csr_max_per_second)) Sets a rate limit + on the maximum number of Certificate Signing Requests (CSRs) the servers will + accept. This is used to prevent CA rotation from causing unbounded CPU usage + on servers. It defaults to 50 which is conservative – a 2017 Macbook can process + about 100 per second using only ~40% of one CPU core – but sufficient for deployments + up to ~1500 service instances before the time it takes to rotate is impacted. + For larger deployments we recommend increasing this based on the expected number + of server instances and server resources, or use `csr_max_concurrent` instead + if servers have more than one CPU core. Setting this to zero disables rate limiting. + Added in 1.4.1. 
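As a rough illustration of the CSR throttling options just described, the sketch below caps concurrent signing for the built-in provider. The values are assumptions chosen only to show the shape of the `ca_config` object, not recommendations.

```json
{
  "connect": {
    "enabled": true,
    "ca_provider": "consul",
    "ca_config": {
      "csr_max_concurrent": 1
    }
  }
}
```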
+ + - `leaf_cert_ttl` ((#ca_leaf_cert_ttl)) The upper bound on the lease + duration of a leaf certificate issued for a service. In most cases a new leaf + certificate will be requested by a proxy before this limit is reached. This + is also the effective limit on how long a server outage can last (with no leader) + before network connections will start being rejected. Defaults to `72h`. + This value cannot be lower than 1 hour or higher than 1 year. + + This value is also used when rotating out old root certificates from + the cluster. When a root certificate has been inactive (rotated out) + for more than twice the _current_ `leaf_cert_ttl`, it will be removed + from the trusted list. + + - `root_cert_ttl` ((#ca_root_cert_ttl)) The time to live (TTL) for a root certificate. + Defaults to 10 years as `87600h`. This value, if provided, needs to be higher than the + intermediate certificate TTL. + + This setting applies to all Consul CA providers. + + For the Vault provider, this value is only used if the backend is not initialized at first. + + This value is also applied on the `ca set-config` command. + + - `private_key_type` ((#ca_private_key_type)) The type of key to generate + for this CA. This is only used when the provider is generating a new key. If + `private_key` is set for the Consul provider, or existing root or intermediate + PKI paths given for Vault then this will be ignored. Currently supported options + are `ec` or `rsa`. Default is `ec`. + + It is required that all servers in a datacenter have + the same config for the CA. It is recommended that servers in + different datacenters use the same key type and size, + although the built-in CA and Vault provider will both allow mixed CA + key types. + + Some CA providers (currently Vault) will not allow cross-signing a + new CA certificate with a different key type. This means that if you + migrate from an RSA-keyed Vault CA to an EC-keyed CA from any + provider, you may have to proceed without cross-signing which risks + temporary connection issues for workloads during the new certificate + rollout. We highly recommend testing this outside of production to + understand the impact and suggest sticking to same key type where + possible. + + Note that this only affects _CA_ keys generated by the provider. + Leaf certificate keys are always EC 256 regardless of the CA + configuration. + + - `private_key_bits` ((#ca_private_key_bits)) The length of key to + generate for this CA. This is only used when the provider is generating a new + key. If `private_key` is set for the Consul provider, or existing root or intermediate + PKI paths given for Vault then this will be ignored. + + Currently supported values are: + + - `private_key_type = ec` (default): `224, 256, 384, 521` + corresponding to the NIST P-\* curves of the same name. + - `private_key_type = rsa`: `2048, 4096` + +- `datacenter` Equivalent to the [`-datacenter` command-line flag](#_datacenter). + +- `data_dir` Equivalent to the [`-data-dir` command-line flag](#_data_dir). + +- `disable_anonymous_signature` Disables providing an anonymous + signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check). + +- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](#_disable_host_node_id). + +- `disable_http_unprintable_char_filter` Defaults to false. 
Consul 1.0.3 fixed a potential security vulnerability where malicious users could craft KV keys with unprintable chars that would confuse operators using the CLI or UI into taking wrong actions. Users who had data written in older versions of Consul that did not have this restriction will be unable to delete those values by default in 1.0.3 or later. This setting enables those users to **temporarily** disable the filter such that delete operations can work on those keys again to get back to a healthy state. It is strongly recommended that this filter is not disabled permanently as it exposes the original security vulnerability. + +- `disable_remote_exec` Disables support for remote execution. When set to true, the agent will ignore + any incoming remote exec requests. In versions of Consul prior to 0.8, this defaulted + to false. In Consul 0.8 the default was changed to true, to make remote exec opt-in + instead of opt-out. + +- `disable_update_check` Disables automatic checking for security bulletins and new version releases. This is disabled in Consul Enterprise. + +- `discard_check_output` Discards the output of health checks before storing them. This reduces the number of writes to the Consul raft log in environments where health checks have volatile output like timestamps, process ids, ... + +- `discovery_max_stale` - Enables stale requests for all service discovery HTTP endpoints. This is + equivalent to the [`max_stale`](#max_stale) configuration for DNS requests. If this value is zero (default), all service discovery HTTP endpoints are forwarded to the leader. If this value is greater than zero, any Consul server can handle the service discovery request. If a Consul server is behind the leader by more than `discovery_max_stale`, the query will be re-evaluated on the leader to get more up-to-date results. Consul agents also add a new `X-Consul-Effective-Consistency` response header which indicates if the agent did a stale read. `discover-max-stale` was introduced in Consul 1.0.7 as a way for Consul operators to force stale requests from clients at the agent level, and defaults to zero which matches default consistency behavior in earlier Consul versions. + +- `dns_config` This object allows a number of sub-keys + to be set which can tune how DNS queries are serviced. Check the tutorial on [DNS caching](https://learn.hashicorp.com/tutorials/consul/dns-caching) for more detail. + + The following sub-keys are available: + + - `allow_stale` - Enables a stale query for DNS information. + This allows any Consul server, rather than only the leader, to service the request. + The advantage of this is you get linear read scalability with Consul servers. + In versions of Consul prior to 0.7, this defaulted to false, meaning all requests + are serviced by the leader, providing stronger consistency but less throughput + and higher latency. In Consul 0.7 and later, this defaults to true for better + utilization of available servers. + + - `max_stale` - When [`allow_stale`](#allow_stale) is + specified, this is used to limit how stale results are allowed to be. If a Consul + server is behind the leader by more than `max_stale`, the query will be re-evaluated + on the leader to get more up-to-date results. Prior to Consul 0.7.1 this defaulted + to 5 seconds; in Consul 0.7.1 and later this defaults to 10 years ("87600h") + which effectively allows DNS queries to be answered by any server, no matter + how stale. 
In practice, servers are usually only milliseconds behind the leader, + so this lets Consul continue serving requests in long outage scenarios where + no leader can be elected. + + - `node_ttl` - By default, this is "0s", so all node lookups + are served with a 0 TTL value. DNS caching for node lookups can be enabled by + setting this value. This should be specified with the "s" suffix for second or + "m" for minute. + + - `service_ttl` - This is a sub-object which allows + for setting a TTL on service lookups with a per-service policy. The "\*" wildcard + service can be used when there is no specific policy available for a service. + By default, all services are served with a 0 TTL value. DNS caching for service + lookups can be enabled by setting this value. + + - `enable_truncate` - If set to true, a UDP DNS + query that would return more than 3 records, or more than would fit into a valid + UDP response, will set the truncated flag, indicating to clients that they should + re-query using TCP to get the full set of records. + + - `only_passing` - If set to true, any nodes whose + health checks are warning or critical will be excluded from DNS results. If false, + the default, only nodes whose health checks are failing as critical will be excluded. + For service lookups, the health checks of the node itself, as well as the service-specific + checks are considered. For example, if a node has a health check that is critical + then all services on that node will be excluded because they are also considered + critical. + + - `recursor_strategy` - If set to `sequential`, Consul will query recursors in the + order listed in the [`recursors`](#recursors) option. If set to `random`, + Consul will query an upstream DNS resolvers in a random order. Defaults to + `sequential`. + + - `recursor_timeout` - Timeout used by Consul when + recursively querying an upstream DNS server. See [`recursors`](#recursors) for more details. Default is 2s. This is available in Consul 0.7 and later. + + - `disable_compression` - If set to true, DNS + responses will not be compressed. Compression was added and enabled by default + in Consul 0.7. + + - `udp_answer_limit` - Limit the number of resource + records contained in the answer section of a UDP-based DNS response. This parameter + applies only to UDP DNS queries that are less than 512 bytes. This setting is + deprecated and replaced in Consul 1.0.7 by [`a_record_limit`](#a_record_limit). + + - `a_record_limit` - Limit the number of resource + records contained in the answer section of a A, AAAA or ANY DNS response (both + TCP and UDP). When answering a question, Consul will use the complete list of + matching hosts, shuffle the list randomly, and then limit the number of answers + to `a_record_limit` (default: no limit). This limit does not apply to SRV records. + + In environments where [RFC 3484 Section 6](https://tools.ietf.org/html/rfc3484#section-6) Rule 9 + is implemented and enforced (i.e. DNS answers are always sorted and + therefore never random), clients may need to set this value to `1` to + preserve the expected randomized distribution behavior (note: + [RFC 3484](https://tools.ietf.org/html/rfc3484) has been obsoleted by + [RFC 6724](https://tools.ietf.org/html/rfc6724) and as a result it should + be increasingly uncommon to need to change this value with modern + resolvers). 
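A hedged example pulling several of the DNS settings above into a single `dns_config` block follows. The TTL values and the `web` service name are arbitrary placeholders used only to show how the keys nest.

```json
{
  "dns_config": {
    "allow_stale": true,
    "max_stale": "87600h",
    "node_ttl": "30s",
    "service_ttl": {
      "*": "10s",
      "web": "30s"
    },
    "only_passing": true,
    "a_record_limit": 3
  }
}
```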
+ + - `enable_additional_node_meta_txt` - When set to true, Consul + will add TXT records for Node metadata into the Additional section of the DNS responses for several query types such as SRV queries. When set to false, those records are not emitted. This does not impact the behavior of those same TXT records when they would be added to the Answer section of the response, such as when querying with type TXT or ANY. This defaults to true. + + - `soa` Allows tuning of the settings in the SOA record. Unspecified + values fall back to their defaults; all values are integers expressed + as seconds. + + The following settings are available: + + - `expire` ((#soa_expire)) - Configure SOA Expire duration in seconds, + default value is 86400, ie: 24 hours. + + - `min_ttl` ((#soa_min_ttl)) - Configure SOA DNS minimum TTL. As explained + in [RFC-2308](https://tools.ietf.org/html/rfc2308) this also controls negative + cache TTL in most implementations. Default value is 0, ie: no minimum delay + or negative TTL. + + - `refresh` ((#soa_refresh)) - Configure SOA Refresh duration in seconds, + default value is `3600`, ie: 1 hour. + + - `retry` ((#soa_retry)) - Configures the Retry duration expressed + in seconds, default value is 600, ie: 10 minutes. + + - `use_cache` ((#dns_use_cache)) - When set to true, DNS resolution will + use the agent cache described in [agent caching](/api/features/caching). + This setting affects all service and prepared query DNS requests. Implies [`allow_stale`](#allow_stale). + + - `cache_max_age` ((#dns_cache_max_age)) - When [use_cache](#dns_use_cache) + is enabled, the agent will attempt to re-fetch the result from the servers if + the cached value is older than this duration. See: [agent caching](/api/features/caching). + + **Note** that unlike the `max-age` HTTP header, a value of 0 for this field is + equivalent to "no max age". To get a fresh value from the cache, use a very small value + such as `1ns` instead of 0. + + - `prefer_namespace` ((#dns_prefer_namespace)) **Deprecated in + Consul 1.11. Use the [canonical DNS format](/docs/discovery/dns#namespaced-partitioned-services) instead.** - + When set to true, in a DNS query for a service, the label between the domain + and the `service` label will be treated as a namespace name instead of a datacenter. + When set to false, the default, the behavior will be the same as non-Enterprise + versions and will assume the label is the datacenter. See: [this section](/docs/discovery/dns#namespaced-services) + for more details. + +- `domain` Equivalent to the [`-domain` command-line flag](#_domain). + +- `enable_acl_replication` **Deprecated in Consul 1.11. Use the [`acl.enable_token_replication`](#acl_enable_token_replication) field instead.** + When set on a Consul server, enables ACL replication without having to set + the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication + and then introduce the token using the [agent token API](/api/agent#update-acl-tokens) on each server. + See [`acl_replication_token`](#acl_replication_token) for more details. A sketch of the + non-deprecated replacement configuration is shown after this entry. + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + policies and roles already present in the secondary datacenter will be lost. For + production environments, consider configuring ACL replication in your initial + datacenter bootstrapping process.
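Following up on the deprecation note above, this is a minimal sketch of how a server in a secondary datacenter might enable token replication using the non-deprecated `acl` fields. The datacenter names and the replication token are placeholders.

```json
{
  "datacenter": "dc2",
  "primary_datacenter": "dc1",
  "server": true,
  "acl": {
    "enabled": true,
    "enable_token_replication": true,
    "tokens": {
      "replication": "00000000-0000-0000-0000-0000000000ff"
    }
  }
}
```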
+ +- `enable_agent_tls_for_checks` When set, uses a subset of the agent's TLS configuration (`key_file`, + `cert_file`, `ca_file`, `ca_path`, and `server_name`) to set up the client for HTTP or gRPC health checks. This allows services requiring 2-way TLS to be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false. + +- `enable_central_service_config` When set, the Consul agent will look for any + [centralized service configuration](/docs/agent/config-entries) + that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations. + This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later. + +- `enable_debug` When set, enables some additional debugging features. Currently, this is only used to + access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`. + +- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](#_enable_script_checks). + + ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information. + + ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. Refer to the following article for additional guidance: [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) + for more details. + +- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](#_enable_local_script_checks). + +- `enable_syslog` Equivalent to the [`-syslog` command-line flag](#_syslog). + +- `encrypt` Equivalent to the [`-encrypt` command-line flag](#_encrypt). + +- `encrypt_verify_incoming` - This is an optional + parameter that can be used to disable enforcing encryption for incoming gossip + in order to upshift from unencrypted to encrypted gossip on a running cluster. + See [this section](/docs/agent/encryption#configuring-gossip-encryption-on-an-existing-cluster) + for more information. Defaults to true. + +- `encrypt_verify_outgoing` - This is an optional + parameter that can be used to disable enforcing encryption for outgoing gossip + in order to upshift from unencrypted to encrypted gossip on a running cluster. + See [this section](/docs/agent/encryption#configuring-gossip-encryption-on-an-existing-cluster) + for more information. Defaults to true. + +- `disable_keyring_file` - Equivalent to the + [`-disable-keyring-file` command-line flag](#_disable_keyring_file). + +- `disable_coordinates` - Disables sending of [network coordinates](/docs/architecture/coordinates). + When network coordinates are disabled the `near` query param will not work to sort the nodes, + and the [`consul rtt`](/commands/rtt) command will not be able to provide round trip time between nodes. 
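To illustrate how `encrypt_verify_incoming` and `encrypt_verify_outgoing` fit into the gossip-encryption upshift referenced above, the sketch below shows one possible first rollout phase on an existing cluster. The key is a placeholder of the kind normally produced by `consul keygen`, and both verify flags are relaxed only temporarily.

```json
{
  "encrypt": "pUqJrVyVRj5jsiYEkM/tFQYfWyJIv4s3XkvDwy7Cu5s=",
  "encrypt_verify_incoming": false,
  "encrypt_verify_outgoing": false
}
```

Per the guide linked above, once every agent holds the key, `encrypt_verify_outgoing` and then `encrypt_verify_incoming` are returned to `true`, which are their defaults.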
+ +- `gossip_lan` - **(Advanced)** This object contains a + number of sub-keys which can be set to tune the LAN gossip communications. These + are only provided for users running especially large clusters that need fine tuning + and are prepared to spend significant effort correctly tuning them for their environment + and workload. **Tuning these improperly can cause Consul to fail in unexpected + ways**. The default values are appropriate in almost all deployments. + + - `gossip_nodes` - The number of random nodes to send + gossip messages to per gossip_interval. Increasing this number causes the gossip + messages to propagate across the cluster more quickly at the expense of increased + bandwidth. The default is 3. + + - `gossip_interval` - The interval between sending + messages that need to be gossiped that haven't been able to piggyback on probing + messages. If this is set to zero, non-piggyback gossip is disabled. By lowering + this value (more frequent) gossip messages are propagated across the cluster + more quickly at the expense of increased bandwidth. The default is 200ms. + + - `probe_interval` - The interval between random + node probes. Setting this lower (more frequent) will cause the cluster to detect + failed nodes more quickly at the expense of increased bandwidth usage. The default + is 1s. + + - `probe_timeout` - The timeout to wait for an ack + from a probed node before assuming it is unhealthy. This should be at least the + 99-percentile of RTT (round-trip time) on your network. The default is 500ms + and is a conservative value suitable for almost all realistic deployments. + + - `retransmit_mult` - The multiplier for the number + of retransmissions that are attempted for messages broadcasted over gossip. The + number of retransmits is scaled using this multiplier and the cluster size. The + higher the multiplier, the more likely a failed broadcast is to converge at the + expense of increased bandwidth. The default is 4. + + - `suspicion_mult` - The multiplier for determining + the time an inaccessible node is considered suspect before declaring it dead. + The timeout is scaled with the cluster size and the probe_interval. This allows + the timeout to scale properly with expected propagation delay with a larger cluster + size. The higher the multiplier, the longer an inaccessible node is considered + part of the cluster before declaring it dead, giving that suspect node more time + to refute if it is indeed still alive. The default is 4. + +- `gossip_wan` - **(Advanced)** This object contains a + number of sub-keys which can be set to tune the WAN gossip communications. These + are only provided for users running especially large clusters that need fine tuning + and are prepared to spend significant effort correctly tuning them for their environment + and workload. **Tuning these improperly can cause Consul to fail in unexpected + ways**. The default values are appropriate in almost all deployments. + + - `gossip_nodes` - The number of random nodes to send + gossip messages to per gossip_interval. Increasing this number causes the gossip + messages to propagate across the cluster more quickly at the expense of increased + bandwidth. The default is 4. + + - `gossip_interval` - The interval between sending + messages that need to be gossiped that haven't been able to piggyback on probing + messages. If this is set to zero, non-piggyback gossip is disabled. 
By lowering + this value (more frequent) gossip messages are propagated across the cluster + more quickly at the expense of increased bandwidth. The default is 500ms. + + - `probe_interval` - The interval between random + node probes. Setting this lower (more frequent) will cause the cluster to detect + failed nodes more quickly at the expense of increased bandwidth usage. The default + is 5s. + + - `probe_timeout` - The timeout to wait for an ack + from a probed node before assuming it is unhealthy. This should be at least the + 99-percentile of RTT (round-trip time) on your network. The default is 3s + and is a conservative value suitable for almost all realistic deployments. + + - `retransmit_mult` - The multiplier for the number + of retransmissions that are attempted for messages broadcasted over gossip. The + number of retransmits is scaled using this multiplier and the cluster size. The + higher the multiplier, the more likely a failed broadcast is to converge at the + expense of increased bandwidth. The default is 4. + + - `suspicion_mult` - The multiplier for determining + the time an inaccessible node is considered suspect before declaring it dead. + The timeout is scaled with the cluster size and the probe_interval. This allows + the timeout to scale properly with expected propagation delay with a larger cluster + size. The higher the multiplier, the longer an inaccessible node is considered + part of the cluster before declaring it dead, giving that suspect node more time + to refute if it is indeed still alive. The default is 6. + +- `http_config` This object allows setting options for the HTTP API and UI. + + The following sub-keys are available: + + - `block_endpoints` + This object is a list of HTTP API endpoint prefixes to block on the agent, and + defaults to an empty list, meaning all endpoints are enabled. Any endpoint that + has a common prefix with one of the entries on this list will be blocked and + will return a 403 response code when accessed. For example, to block all of the + V1 ACL endpoints, set this to `["/v1/acl"]`, which will block `/v1/acl/create`, + `/v1/acl/update`, and the other ACL endpoints that begin with `/v1/acl`. This + only works with API endpoints, not `/ui` or `/debug`, those must be disabled + with their respective configuration options. Any CLI commands that use disabled + endpoints will no longer function as well. For more general access control, Consul's + [ACL system](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) + should be used, but this option is useful for removing access to HTTP API endpoints + completely, or on specific agents. This is available in Consul 0.9.0 and later. + + - `response_headers` This object allows adding headers to the HTTP API and UI responses. For example, the following config can be used to enable [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) on the HTTP API endpoints: + + ```json + { + "http_config": { + "response_headers": { + "Access-Control-Allow-Origin": "*" + } + } + } + ``` + + - `allow_write_http_from` This object is a list of networks in CIDR notation (eg "127.0.0.0/8") that are allowed to call the agent write endpoints. It defaults to an empty list, which means all networks are allowed. This is used to make the agent read-only, except for select ip ranges. - To block write calls from anywhere, use `[ "255.255.255.255/32" ]`. 
- To only allow write calls from localhost, use `[ "127.0.0.0/8" ]` - To only allow specific IPs, use `[ "10.0.0.1/32", "10.0.0.2/32" ]` + + - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't use [agent caching](/api/features/caching) to answer the request, even when the URL parameter is provided. + + - `max_header_bytes` This setting controls the maximum number of bytes the Consul HTTP server will read when parsing the request header's keys and values, including the request line. It does not limit the size of the request body. If zero or negative, `http.DefaultMaxHeaderBytes` is used, which equates to 1 Megabyte. + +- `leave_on_terminate` If enabled, when the agent receives a TERM signal, it will send a `Leave` message to the rest of the cluster and gracefully leave. The default behavior for this feature varies based on whether or not the agent is running as a client or a server (prior to Consul 0.7 the default value was unconditionally set to `false`). On agents in client-mode, this defaults to `true` and for agents in server-mode, this defaults to `false`. + +- `license_path` This specifies the path to a file that contains the Consul Enterprise license. Alternatively, the license may be specified in either the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. See the [licensing documentation](/docs/enterprise/license/overview) for more information about Consul Enterprise license management. Added in versions 1.10.0, 1.9.7 and 1.8.13. Prior to version 1.10.0 the value may be set for all agents to facilitate forward compatibility with 1.10 but will only actually be used by client agents. + +- `limits` Available in Consul 0.9.3 and later, this is a nested + object that configures limits that are enforced by the agent. Prior to Consul 1.5.2, + this only applied to agents in client mode, not Consul servers. The following parameters + are available (a combined example appears further below): + + - `http_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single client IP address is allowed to open to the agent's HTTP(S) server. This affects the HTTP(S) servers in both client and server agents. Default value is `200`. + - `https_handshake_timeout` - Configures the limit for how long the HTTPS server in both client and server agents will wait for a client to complete a TLS handshake. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). Default value is `5s`. + - `rpc_handshake_timeout` - Configures the limit for how long servers will wait after a client TCP connection is established before they complete the connection handshake. When TLS is used, the same timeout applies to the TLS handshake separately from the initial protocol negotiation. All Consul clients should perform this immediately on establishing a new connection. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). When `verify_incoming` is true on servers, this limits how long the connection socket and associated goroutines will be held open before the client successfully authenticates. Default value is `5s`. + - `rpc_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single source IP address is allowed to open to a single server.
It affects both client connections and other server connections. In general, Consul clients multiplex many RPC calls over a single TCP connection, so this can typically be kept low. It needs to be more than one though since servers open at least one additional connection for raft RPC, possibly more for WAN federation when using network areas, and snapshot requests from clients run over a separate TCP connection. A reasonably low limit significantly reduces the ability of an unauthenticated attacker to consume unbounded resources by holding open many connections. You may need to increase this if WAN federated servers connect via proxies or NAT gateways or similar, causing many legitimate connections from a single source IP. Default value is `100` which is designed to be extremely conservative to limit issues with certain deployment patterns. Most deployments can probably reduce this safely. 100 connections on modern server hardware should not cause a significant impact on resource usage from an unauthenticated attacker though. + - `rpc_rate` - Configures the RPC rate limiter on Consul _clients_ by setting the maximum request rate that this agent is allowed to make for RPC requests to Consul servers, in requests per second. Defaults to infinite, which disables rate limiting. + - `rpc_max_burst` - The size of the token bucket used to recharge the RPC rate limiter on Consul _clients_. Defaults to 1000 tokens, and each token is good for a single RPC call to a Consul server. See https://en.wikipedia.org/wiki/Token_bucket for more details about how token bucket rate limiters operate. + - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a KV request body to the [`/v1/kv`](/api/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, as it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. + - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, as it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. + +- `log_file` Equivalent to the [`-log-file` command-line flag](#_log_file). + +- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](#_log_rotate_duration). + +- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](#_log_rotate_bytes). + +- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](#_log_rotate_max_files). + +- `log_level` Equivalent to the [`-log-level` command-line flag](#_log_level). + +- `log_json` Equivalent to the [`-log-json` command-line flag](#_log_json). + +- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](#_default_query_time). + +- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](#_max_query_time). + +- `node_id` Equivalent to the [`-node-id` command-line flag](#_node_id).
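+
+  As a combined illustration of the `limits` block described above, the following sketch sets the
+  documented default values explicitly. It is shown only to clarify the shape of the stanza; omitting
+  any of these keys leaves the same defaults in effect:
+
+  ```json
+  {
+    "limits": {
+      "http_max_conns_per_client": 200,
+      "https_handshake_timeout": "5s",
+      "rpc_handshake_timeout": "5s",
+      "rpc_max_conns_per_client": 100,
+      "rpc_max_burst": 1000
+    }
+  }
+  ```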
+ +- `node_name` Equivalent to the [`-node` command-line flag](#_node). + +- `node_meta` Available in Consul 0.7.3 and later, This object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](#_node_meta) for more information. + + ```json + { + "node_meta": { + "instance_type": "t2.medium" + } + } + ``` + +- `partition` - This flag is used to set + the name of the admin partition the agent belongs to. An agent can only join + and communicate with other agents within its admin partition. Review the + [Admin Partitions documentation](/docs/enterprise/admin-partitions) for more + details. By default, this is an empty string, which is the `default` admin + partition. This cannot be set on a server agent. + + ~> **Warning:** The `partition` option cannot be used either the + [`segment`](#segment-2) option or [`-segment`](#_segment) flag. + +- `performance` Available in Consul 0.7 and later, this is a nested object that allows tuning the performance of different subsystems in Consul. See the [Server Performance](/docs/install/performance) documentation for more details. The following parameters are available: + + - `leave_drain_time` - A duration that a server will dwell during a graceful leave in order to allow requests to be retried against other Consul servers. Under normal circumstances, this can prevent clients from experiencing "no leader" errors when performing a rolling update of the Consul servers. This was added in Consul 1.0. Must be a duration value such as 10s. Defaults to 5s. + + - `raft_multiplier` - An integer multiplier used by Consul servers to scale key Raft timing parameters. Omitting this value or setting it to 0 uses default timing described below. Lower values are used to tighten timing and increase sensitivity while higher values relax timings and reduce sensitivity. Tuning this affects the time it takes Consul to detect leader failures and to perform leader elections, at the expense of requiring more network and CPU resources for better performance. + + By default, Consul will use a lower-performance timing that's suitable + for [minimal Consul servers](/docs/install/performance#minimum), currently equivalent + to setting this to a value of 5 (this default may be changed in future versions of Consul, + depending if the target minimum server profile changes). Setting this to a value of 1 will + configure Raft to its highest-performance mode, equivalent to the default timing of Consul + prior to 0.7, and is recommended for [production Consul servers](/docs/install/performance#production). + + See the note on [last contact](/docs/install/performance#production-server-requirements) timing for more + details on tuning this parameter. The maximum allowed value is 10. + + - `rpc_hold_timeout` - A duration that a client + or server will retry internal RPC requests during leader elections. Under normal + circumstances, this can prevent clients from experiencing "no leader" errors. + This was added in Consul 1.0. Must be a duration value such as 10s. Defaults + to 7s. + +- `pid_file` Equivalent to the [`-pid-file` command line flag](#_pid_file). + +- `ports` This is a nested object that allows setting the bind ports for the following keys: + + - `dns` ((#dns_port)) - The DNS server, -1 to disable. Default 8600. + TCP and UDP. + - `http` ((#http_port)) - The HTTP API, -1 to disable. Default 8500. + TCP only. 
+ - `https` ((#https_port)) - The HTTPS API, -1 to disable. Default -1 + (disabled). **We recommend using `8501`** for `https` by convention as some tooling + will work automatically with this. + - `grpc` ((#grpc_port)) - The gRPC API, -1 to disable. Default -1 (disabled). + **We recommend using `8502`** for `grpc` by convention as some tooling will work + automatically with this. This is set to `8502` by default when the agent runs + in `-dev` mode. Currently gRPC is only used to expose Envoy xDS API to Envoy + proxies. + - `serf_lan` ((#serf_lan_port)) - The Serf LAN port. Default 8301. TCP + and UDP. Equivalent to the [`-serf-lan-port` command line flag](#_serf_lan_port). + - `serf_wan` ((#serf_wan_port)) - The Serf WAN port. Default 8302. + Equivalent to the [`-serf-wan-port` command line flag](#_serf_wan_port). Set + to -1 to disable. **Note**: this will disable WAN federation which is not recommended. + Various catalog and WAN related endpoints will return errors or empty results. + TCP and UDP. + - `server` ((#server_rpc_port)) - Server RPC address. Default 8300. TCP + only. + - `sidecar_min_port` ((#sidecar_min_port)) - Inclusive minimum port number + to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). + Default 21000. Set to `0` to disable automatic port assignment. + - `sidecar_max_port` ((#sidecar_max_port)) - Inclusive maximum port number + to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). + Default 21255. Set to `0` to disable automatic port assignment. + - `expose_min_port` ((#expose_min_port)) - Inclusive minimum port number + to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). + Default 21500. Set to `0` to disable automatic port assignment. + - `expose_max_port` ((#expose_max_port)) - Inclusive maximum port number + to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). + Default 21755. Set to `0` to disable automatic port assignment. + +- `primary_datacenter` - This designates the datacenter + which is authoritative for ACL information, intentions and is the root Certificate + Authority for Connect. It must be provided to enable ACLs. All servers and datacenters + must agree on the primary datacenter. Setting it on the servers is all you need + for cluster-level enforcement, but for the APIs to forward properly from the clients, + it must be set on them too. In Consul 0.8 and later, this also enables agent-level + enforcement of ACLs. + +- `primary_gateways` Equivalent to the [`-primary-gateway` + command-line flag](#_primary_gateway). Takes a list of addresses to use as the + mesh gateways for the primary datacenter when authoritative replicated catalog + data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval) + until at least one primary mesh gateway is discovered. This was added in Consul + 1.8.0. + +- `primary_gateways_interval` Time to wait + between [`primary_gateways`](#primary_gateways) discovery attempts. Defaults to + 30s. This was added in Consul 1.8.0. + +- `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line + flag](#_protocol). + +- `raft_boltdb` ((#raft_boltdb)) This is a nested object that allows configuring + options for Raft's BoltDB based log store. 
+ + - `NoFreelistSync` ((#NoFreelistSync)) Setting this to `true` will disable + syncing the BoltDB freelist to disk within the raft.db file. Not syncing + the freelist to disk will reduce disk IO required for write operations + at the expense of potentially increasing start up time due to needing + to scan the db to discover where the free space resides within the file. + + +- `raft_protocol` ((#raft_protocol)) Equivalent to the [`-raft-protocol` + command-line flag](#_raft_protocol). + +- `raft_snapshot_threshold` ((#\_raft_snapshot_threshold)) This controls the + minimum number of raft commit entries between snapshots that are saved to + disk. This is a low-level parameter that should rarely need to be changed. + Very busy clusters experiencing excessive disk IO may increase this value to + reduce disk IO, and minimize the chances of all servers taking snapshots at + the same time. Increasing this trades off disk IO for disk space since the log + will grow much larger and the space in the raft.db file can't be reclaimed + till the next snapshot. Servers may take longer to recover from crashes or + failover if this is increased significantly as more logs will need to be + replayed. In Consul 1.1.0 and later this defaults to 16384, and in prior + versions it was set to 8192. + + Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the + server a `SIGHUP` to allow tuning snapshot activity without a rolling restart + in emergencies. + +- `raft_snapshot_interval` ((#\_raft_snapshot_interval)) This controls how often + servers check if they need to save a snapshot to disk. This is a low-level + parameter that should rarely need to be changed. Very busy clusters + experiencing excessive disk IO may increase this value to reduce disk IO, and + minimize the chances of all servers taking snapshots at the same time. + Increasing this trades off disk IO for disk space since the log will grow much + larger and the space in the raft.db file can't be reclaimed till the next + snapshot. Servers may take longer to recover from crashes or failover if this + is increased significantly as more logs will need to be replayed. In Consul + 1.1.0 and later this defaults to `30s`, and in prior versions it was set to + `5s`. + + Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the + server a `SIGHUP` to allow tuning snapshot activity without a rolling restart + in emergencies. + +- `raft_trailing_logs` - This controls how many log entries are left in the log + store on disk after a snapshot is made. This should only be adjusted when + followers cannot catch up to the leader due to a very large snapshot size + and high write throughput causing log truncation before an snapshot can be + fully installed on a follower. If you need to use this to recover a cluster, + consider reducing write throughput or the amount of data stored on Consul as + it is likely under a load it is not designed to handle. The default value is + 10000 which is suitable for all normal workloads. Added in Consul 1.5.3. + + Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the + server a `SIGHUP` to allow recovery without downtime when followers can't keep + up. + +- `reap` This controls Consul's automatic reaping of child processes, + which is useful if Consul is running as PID 1 in a Docker container. If this isn't + specified, then Consul will automatically reap child processes if it detects it + is running as PID 1. 
If this is set to true or false, then it controls reaping + regardless of Consul's PID (forces reaping on or off, respectively). This option + was removed in Consul 0.7.1. For later versions of Consul, you will need to reap + processes using a wrapper, please see the [Consul Docker image entry point script](https://github.com/hashicorp/docker-consul/blob/master/0.X/docker-entrypoint.sh) + for an example. If you are using Docker 1.13.0 or later, you can use the new `--init` + option of the `docker run` command and docker will enable an init process with + PID 1 that reaps child processes for the container. More info on [Docker docs](https://docs.docker.com/engine/reference/commandline/run/#options). + +- `reconnect_timeout` This controls how long it + takes for a failed node to be completely removed from the cluster. This defaults + to 72 hours and it is recommended that this is set to at least double the maximum + expected recoverable outage time for a node or network partition. WARNING: Setting + this time too low could cause Consul servers to be removed from quorum during an + extended node failure or partition, which could complicate recovery of the cluster. + The value is a time with a unit suffix, which can be "s", "m", "h" for seconds, + minutes, or hours. The value must be >= 8 hours. + +- `reconnect_timeout_wan` This is the WAN equivalent + of the [`reconnect_timeout`](#reconnect_timeout) parameter, which controls + how long it takes for a failed server to be completely removed from the WAN pool. + This also defaults to 72 hours, and must be >= 8 hours. + +- `recursors` This flag provides addresses of upstream DNS + servers that are used to recursively resolve queries if they are not inside the + service domain for Consul. For example, a node can use Consul directly as a DNS + server, and if the record is outside of the "consul." domain, the query will be + resolved upstream. As of Consul 1.0.1 recursors can be provided as IP addresses + or as go-sockaddr templates. IP addresses are resolved in order, and duplicates + are ignored. + +- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](#_rejoin). + +- `retry_join` - Equivalent to the [`-retry-join`](#retry-join) command-line flag. + +- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](#_retry_interval). + +- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. + +- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](#_retry_interval_wan). + +- `rpc` configuration for Consul servers. + + - `enable_streaming` ((#rpc_enable_streaming)) defaults to true. If set to false it will disable + the gRPC subscribe endpoint on a Consul Server. All + servers in all federated datacenters must have this enabled before any client can use + [`use_streaming_backend`](#use_streaming_backend). + +- `segment` - Equivalent to the [`-segment` command-line flag](#_segment). + + ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option. + +- `segments` - (Server agents only) This is a list of nested objects + that specifies user-defined network segments, not including the `` segment, which is + created automatically. Review the [Network Segments documentation](/docs/enterprise/network-segments) + for more details. + + - `name` ((#segment_name)) - The name of the segment. 
Must be a string + between 1 and 64 characters in length. + - `bind` ((#segment_bind)) - The bind address to use for the segment's + gossip layer. Defaults to the [`-bind`](#_bind) value if not provided. + - `port` ((#segment_port)) - The port to use for the segment's gossip + layer (required). + - `advertise` ((#segment_advertise)) - The advertise address to use for + the segment's gossip layer. Defaults to the [`-advertise`](#_advertise) value + if not provided. + - `rpc_listener` ((#segment_rpc_listener)) - If true, a separate RPC + listener will be started on this segment's [`-bind`](#_bind) address on the rpc + port. Only valid if the segment's bind address differs from the [`-bind`](#_bind) + address. Defaults to false. + +- `server` Equivalent to the [`-server` command-line flag](#_server). + +- `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.** + +- `read_replica` - Equivalent to the [`-read-replica` command-line flag](#_read_replica). + +- `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTL's + shorter than the specified limit. It is recommended to keep this limit at or above + the default to encourage clients to send infrequent heartbeats. Defaults to 10s. + +- `skip_leave_on_interrupt` This is similar + to [`leave_on_terminate`](#leave_on_terminate) but only affects interrupt handling. + When Consul receives an interrupt signal (such as hitting Control-C in a terminal), + Consul will gracefully leave the cluster. Setting this to `true` disables that + behavior. The default behavior for this feature varies based on whether or not + the agent is running as a client or a server (prior to Consul 0.7 the default value + was unconditionally set to `false`). On agents in client-mode, this defaults to + `false` and for agents in server-mode, this defaults to `true` (i.e. Ctrl-C on + a server will keep the server in the cluster and therefore quorum, and Ctrl-C on + a client will gracefully leave). + +- `start_join` An array of strings specifying addresses + of nodes to [`-join`](#_join) upon startup. Note that using + `retry_join` could be more appropriate to help mitigate + node startup race conditions when automating a Consul cluster deployment. + +- `start_join_wan` An array of strings specifying addresses + of WAN nodes to [`-join-wan`](#_join_wan) upon startup. + +- `telemetry` This is a nested object that configures where + Consul sends its runtime telemetry, and contains the following keys: + + - `circonus_api_token` ((#telemetry-circonus_api_token)) A valid API + Token used to create/manage check. If provided, metric management is + enabled. + + - `circonus_api_app` ((#telemetry-circonus_api_app)) A valid app name + associated with the API token. By default, this is set to "consul". + + - `circonus_api_url` ((#telemetry-circonus_api_url)) + The base URL to use for contacting the Circonus API. By default, this is set + to "https://api.circonus.com/v2". + + - `circonus_submission_interval` ((#telemetry-circonus_submission_interval)) The interval at which metrics are submitted to Circonus. By default, this is set to "10s" (ten seconds). + + - `circonus_submission_url` ((#telemetry-circonus_submission_url)) + The `check.config.submission_url` field, of a Check API object, from a previously + created HTTPTrap check. + + - `circonus_check_id` ((#telemetry-circonus_check_id)) + The Check ID (not **check bundle**) from a previously created HTTPTrap check. 
+ The numeric portion of the `check._cid` field in the Check API object. + + - `circonus_check_force_metric_activation` ((#telemetry-circonus_check_force_metric_activation)) Force activation of metrics which already exist and are not currently active. + If check management is enabled, the default behavior is to add new metrics as + they are encountered. If the metric already exists in the check, it will **not** + be activated. This setting overrides that behavior. By default, this is set to + false. + + - `circonus_check_instance_id` ((#telemetry-circonus_check_instance_id)) Uniquely identifies the metrics coming from this **instance**. It can be used to + maintain metric continuity with transient or ephemeral instances as they move + around within an infrastructure. By default, this is set to hostname:application + name (e.g. "host123:consul"). + + - `circonus_check_search_tag` ((#telemetry-circonus_check_search_tag)) A special tag which, when coupled with the instance id, helps to narrow down + the search results when neither a Submission URL or Check ID is provided. By + default, this is set to service:application name (e.g. "service:consul"). + + - `circonus_check_display_name` ((#telemetry-circonus_check_display_name)) Specifies a name to give a check when it is created. This name is displayed in + the Circonus UI Checks list. Available in Consul 0.7.2 and later. + + - `circonus_check_tags` ((#telemetry-circonus_check_tags)) + Comma separated list of additional tags to add to a check when it is created. + Available in Consul 0.7.2 and later. + + - `circonus_broker_id` ((#telemetry-circonus_broker_id)) + The ID of a specific Circonus Broker to use when creating a new check. The numeric + portion of `broker._cid` field in a Broker API object. If metric management is + enabled and neither a Submission URL nor Check ID is provided, an attempt will + be made to search for an existing check using Instance ID and Search Tag. If + one is not found, a new HTTPTrap check will be created. By default, this is not + used and a random Enterprise Broker is selected, or the default Circonus Public + Broker. + + - `circonus_broker_select_tag` ((#telemetry-circonus_broker_select_tag)) A special tag which will be used to select a Circonus Broker when a Broker ID + is not provided. The best use of this is to as a hint for which broker should + be used based on **where** this particular instance is running (e.g. a specific + geo location or datacenter, dc:sfo). By default, this is left blank and not used. + + - `disable_compat_1.9` ((#telemetry-disable_compat_1.9)) + This allows users to disable metrics deprecated in 1.9 so they are no longer emitted, saving on performance and storage in large deployments. Defaults to false. + + - `disable_hostname` ((#telemetry-disable_hostname)) + This controls whether or not to prepend runtime telemetry with the machine's + hostname, defaults to false. + + - `dogstatsd_addr` ((#telemetry-dogstatsd_addr)) This provides the address + of a DogStatsD instance in the format `host:port`. DogStatsD is a protocol-compatible + flavor of statsd, with the added ability to decorate metrics with tags and event + information. If provided, Consul will send various telemetry information to that + instance for aggregation. This can be used to capture runtime information. + + - `dogstatsd_tags` ((#telemetry-dogstatsd_tags)) This provides a list + of global tags that will be added to all telemetry packets sent to DogStatsD. 
+ It is a list of strings, where each string looks like "my_tag_name:my_tag_value". + + - `filter_default` ((#telemetry-filter_default)) + This controls whether to allow metrics that have not been specified by the filter. + Defaults to `true`, which will allow all metrics when no filters are provided. + When set to `false` with no filters, no metrics will be sent. + + - `metrics_prefix` ((#telemetry-metrics_prefix)) + The prefix used while writing all telemetry data. By default, this is set to + "consul". This was added in Consul 1.0. For previous versions of Consul, use + the config option `statsite_prefix` in this same structure. This was renamed + in Consul 1.0 since this prefix applied to all telemetry providers, not just + statsite. + + - `prefix_filter` ((#telemetry-prefix_filter)) + This is a list of filter rules to apply for allowing/blocking metrics by + prefix in the following format: + + ```json + ["+consul.raft.apply", "-consul.http", "+consul.http.GET"] + ``` + + A leading "**+**" will enable any metrics with the given prefix, and a leading "**-**" will block them. If there is overlap between two rules, the more specific rule will take precedence. Blocking will take priority if the same prefix is listed multiple times. + + - `prometheus_retention_time` ((#telemetry-prometheus_retention_time)) If the value is greater than `0s` (the default), this enables [Prometheus](https://prometheus.io/) + export of metrics. The duration can be expressed using the duration semantics + and will aggregate all counters for the duration specified (this might have an + impact on Consul's memory usage). A good value for this parameter is at least + 2 times the Prometheus scrape interval, but you might also set a very high + retention time such as a few days (for instance 744h to enable retention for 31 + days). Fetching the metrics with Prometheus can then be performed using the + [`/v1/agent/metrics?format=prometheus`](/api/agent#view-metrics) endpoint. + The format is natively compatible with Prometheus. When running in this mode, + it is recommended to also enable the [`disable_hostname`](#telemetry-disable_hostname) option + to avoid prefixing metrics with the hostname. Consul does not use the default + Prometheus path, so Prometheus must be configured as follows. Note that using + `?format=prometheus` in the path won't work as the `?` will be escaped, so it must be + specified as a parameter. + + ```yaml + metrics_path: '/v1/agent/metrics' + params: + format: ['prometheus'] + ``` + + - `statsd_address` ((#telemetry-statsd_address)) This provides the address + of a statsd instance in the format `host:port`. If provided, Consul will send + various telemetry information to that instance for aggregation. This can be used + to capture runtime information. This sends UDP packets only and can be used with + statsd or statsite. + + - `statsite_address` ((#telemetry-statsite_address)) This provides the + address of a statsite instance in the format `host:port`. If provided, Consul + will stream various telemetry information to that instance for aggregation. This + can be used to capture runtime information. This streams via TCP and can only + be used with statsite. + +- `syslog_facility` When [`enable_syslog`](#enable_syslog) + is provided, this controls to which facility messages are sent. By default, `LOCAL0` + will be used.
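+
+  Tying several of the `telemetry` options above together, a minimal sketch that enables Prometheus
+  export with hostname prefixes disabled and a prefix filter could look like the following (the
+  retention time is illustrative only, applying the "at least twice the scrape interval" guidance to a
+  hypothetical 30s scrape interval):
+
+  ```json
+  {
+    "telemetry": {
+      "prometheus_retention_time": "60s",
+      "disable_hostname": true,
+      "prefix_filter": ["+consul.raft.apply", "-consul.http", "+consul.http.GET"]
+    }
+  }
+  ```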
+ +- `translate_wan_addrs` If set to true, Consul + will prefer a node's configured [WAN address](#_advertise-wan) + when servicing DNS and HTTP requests for a node in a remote datacenter. This allows + the node to be reached within its own datacenter using its local address, and reached + from other datacenters using its WAN address, which is useful in hybrid setups + with mixed networks. This is disabled by default. + + Starting in Consul 0.7 and later, node addresses in responses to HTTP requests will also prefer a + node's configured [WAN address](#_advertise-wan) when querying for a node in a remote + datacenter. An [`X-Consul-Translate-Addresses`](/api#translated-addresses) header + will be present on all responses when translation is enabled to help clients know that the addresses + may be translated. The `TaggedAddresses` field in responses also has a `lan` address for clients that + need knowledge of that address, regardless of translation. + + The following endpoints translate addresses: + + - [`/v1/catalog/nodes`](/api/catalog#list-nodes) + - [`/v1/catalog/node/`](/api/catalog#retrieve-map-of-services-for-a-node) + - [`/v1/catalog/service/`](/api/catalog#list-nodes-for-service) + - [`/v1/health/service/`](/api/health#list-nodes-for-service) + - [`/v1/query//execute`](/api/query#execute-prepared-query) + +- `ui` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.enabled`](#ui_config_enabled) field instead.** + Equivalent to the [`-ui`](#_ui) command-line flag. + +- `ui_config` - This object allows a number of sub-keys to be set which control + the display or features available in the UI. Configuring the UI with this + stanza was added in Consul 1.9.0. + + The following sub-keys are available: + + - `enabled` ((#ui_config_enabled)) - This enables serving of the web UI + from this agent. Boolean value, defaults to false. In `-dev` mode this + defaults to true. Replaces `ui` from before 1.9.0. Equivalent to the + [`-ui`](#_ui) command-line flag. + + - `dir` ((#ui_config_dir)) - This specifies that the web UI should be served + from an external dir rather than the built-in one. This allows for + customization or development. Replaces `ui_dir` from before 1.9.0. + Equivalent to the [`-ui-dir`](#_ui_dir) command-line flag. + + - `content_path` ((#ui_config_content_path)) - This specifies the HTTP path + that the web UI should be served from. Defaults to `/ui/`. Equivalent to the + [`-ui-content-path`](#_ui_content_path) flag. + + - `metrics_provider` ((#ui_config_metrics_provider)) - Specifies a named + metrics provider implementation the UI should use to fetch service metrics. + By default metrics are disabled. Consul 1.9.0 includes a built-in provider + named `prometheus` that can be enabled explicitly here. It also requires the + `metrics_proxy` to be configured below to direct queries to a Prometheus + instance that has Envoy metrics for all services in the datacenter. + + - `metrics_provider_files` ((#ui_config_metrics_provider_files)) - An optional array + of absolute paths to JavaScript files on the agent's disk which will be + served as part of the UI. These files should contain metrics provider + implementations and registration enabling UI metric queries to be customized + or implemented for an alternative time-series backend. + + ~> **Security Note:** These JavaScript files are included in the UI with no + further validation or sandboxing.
By configuring them here, the operator is + fully trusting anyone able to write to them as well as the original authors + not to include malicious code in the UI being served. + + - `metrics_provider_options_json` ((#ui_config_metrics_provider_options_json)) - + This is an optional raw JSON object as a string which is passed to the + provider implementation's `init` method at startup to allow arbitrary + configuration to be passed through. + + - `metrics_proxy` ((#ui_config_metrics_proxy)) - This object configures an + internal agent API endpoint that will proxy GET requests to a metrics + backend to allow querying metrics data in the UI. This simplifies deployment + where the metrics backend is not exposed externally to UI users' browsers. + It may also be used to augment requests with API credentials to allow + serving graphs to UI users without them needing individual access tokens for + the metrics backend. + + ~> **Security Note:** Exposing your metrics backend via Consul in this way + should be carefully considered in production. As Consul doesn't understand + the requests, it can't limit access to only specific resources. For example, + **this might make it possible for a malicious user on the network to query + for arbitrary metrics about any server or workload in your infrastructure, + or overload the metrics infrastructure with queries**. See [Metrics Proxy + Security](/docs/connect/observability/ui-visualization#metrics-proxy-security) + for more details. + + The following sub-keys are available: + + - `base_url` ((#ui_config_metrics_provider_base_url)) - This is required to + enable the proxy. It should be set to the base URL that the Consul agent + should proxy requests for metrics to. For example, a value of + `http://prometheus-server` would target a Prometheus instance with local + DNS name "prometheus-server" on port 80. This may include a path prefix, + which will then not be necessary in provider requests to the backend; the + proxy will prevent any access to paths without that prefix on the + backend. + + - `path_allowlist` ((#ui_config_metrics_provider_path_allowlist)) - This + specifies the paths that may be proxied to when appended to the + `base_url`. It defaults to `["/api/v1/query_range", "/api/v1/query"]` + which are the endpoints required for the built-in Prometheus provider. If + a [custom + provider](/docs/connect/observability/ui-visualization#custom-metrics-providers) + is used that requires the metrics proxy, the correct allowlist must be + specified to enable proxying to necessary endpoints. See [Path + Allowlist](/docs/connect/observability/ui-visualization#path-allowlist) + for more information. + + - `add_headers` ((#ui_config_metrics_proxy_add_headers)) - This is an + optional list of headers to add to requests that are proxied to the + metrics backend. It may be used to inject Authorization tokens within the + agent without exposing those to UI users. + + Each item in the list is an object with the following keys: + + - `name` ((#ui_config_metrics_proxy_add_headers_name)) - Specifies the + HTTP header name to inject into proxied requests. + + - `value` ((#ui_config_metrics_proxy_add_headers_value)) - Specifies the + value to inject into proxied requests. + + - `dashboard_url_templates` ((#ui_config_dashboard_url_templates)) - This map + specifies URL templates that may be used to render links to external + dashboards in various contexts in the UI. It is a map with the name of the + template as a key.
The value is a string URL with optional placeholders. + + Each template may contain placeholders which will be substituted for the + correct values in content when rendered in the UI. The placeholders + available are listed for each template. + + For more information and examples see [UI + Visualization](/docs/connect/observability/ui-visualization#configuring-dashboard-urls) + + The following named templates are defined: + + - `service` ((#ui_config_dashboard_url_templates_service)) - This is the URL + to use when linking to the dashboard for a specific service. It is shown + as part of the [Topology + Visualization](/docs/connect/observability/ui-visualization). + + The placeholders available are: + + - `{{Service.Name}}` - Replaced with the current service's name. + - `{{Service.Namespace}}` - Replaced with the current service's namespace or empty if namespaces are not enabled. + - `{{Service.Partition}}` - Replaced with the current service's admin + partition or empty if admin partitions are not enabled. + - `{{Datacenter}}` - Replaced with the current service's datacenter. + +- `ui_dir` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.dir`](#ui_config_dir) field instead.** + Equivalent to the [`-ui-dir`](#_ui_dir) command-line + flag. This configuration key is not required as of Consul version 0.7.0 and later. + Specifying this configuration key will enable the web UI. There is no need to specify + both ui-dir and ui. Specifying both will result in an error. + +- `unix_sockets` - This allows tuning the ownership and + permissions of the Unix domain socket files created by Consul. Domain sockets are + only used if the HTTP address is configured with the `unix://` prefix. + + It is important to note that this option may have different effects on + different operating systems. Linux generally observes socket file permissions + while many BSD variants ignore permissions on the socket file itself. It is + important to test this feature on your specific distribution. This feature is + currently not functional on Windows hosts. + + The following options are valid within this construct and apply globally to all + sockets created by Consul: + + - `user` - The name or ID of the user who will own the socket file. + - `group` - The group ID ownership of the socket file. This option + currently only supports numeric IDs. + - `mode` - The permission bits to set on the file. + +- `use_streaming_backend` defaults to true. When enabled Consul client agents will use + streaming rpc, instead of the traditional blocking queries, for endpoints which support + streaming. All servers must have [`rpc.enable_streaming`](#rpc_enable_streaming) + enabled before any client can enable `use_streaming_backend`. + +- `watches` - Watches is a list of watch specifications which + allow an external process to be automatically invoked when a particular data view + is updated. See the [watch documentation](/docs/agent/watches) for more detail. + Watches can be modified when the configuration is reloaded. + +## TLS Configuration Reference + +This section documents all of the configuration settings that apply to Agent TLS. Agent +TLS is used by the HTTP API, server RPC, and xDS interfaces. Some of these settings may also be +applied automatically by [auto_config](#auto_config) or [auto_encrypt](#auto_encrypt). + +~> **Security Note:** The Certificate Authority (CA) specified by `ca_file` or `ca_path` +should be a private CA, not a public one. 
We recommend using a dedicated CA + which should not be used with any other systems. Any certificate signed by the + CA will be allowed to communicate with the cluster and a specially crafted certificate + signed by the CA can be used to gain full access to Consul. + +- `ca_file` This provides a file path to a PEM-encoded certificate + authority. The certificate authority is used to check the authenticity of client + and server connections with the appropriate [`verify_incoming`](#verify_incoming) + or [`verify_outgoing`](#verify_outgoing) flags. + +- `ca_path` This provides a path to a directory of PEM-encoded + certificate authority files. These certificate authorities are used to check the + authenticity of client and server connections with the appropriate [`verify_incoming`](#verify_incoming) or [`verify_outgoing`](#verify_outgoing) flags. + +- `cert_file` This provides a file path to a PEM-encoded + certificate. The certificate is provided to clients or servers to verify the agent's + authenticity. It must be provided along with [`key_file`](#key_file). + +- `key_file` This provides the file path to a PEM-encoded + private key. The key is used with the certificate to verify the agent's authenticity. + This must be provided along with [`cert_file`](#cert_file). + +- `server_name` When provided, this overrides the [`node_name`](#_node) + for the TLS certificate. It can be used to ensure that the certificate name matches + the hostname we declare. + +- `tls_min_version` Added in Consul 0.7.4, this specifies + the minimum supported version of TLS. Accepted values are "tls10", "tls11", "tls12", + or "tls13". This defaults to "tls12". WARNING: TLS 1.1 and lower are generally + considered less secure; avoid using these if possible. + +- `tls_cipher_suites` Added in Consul 0.8.2, this specifies the list of + supported ciphersuites as a comma-separated list. Applicable to TLS 1.2 and below only. + The list of all supported ciphersuites is available through + [this search](https://github.com/hashicorp/consul/search?q=cipherMap+%3A%3D+map&unscoped_q=cipherMap+%3A%3D+map). + + ~> **Note:** The ordering of cipher suites will not be guaranteed from Consul 1.11 onwards. See this + [post](https://go.dev/blog/tls-cipher-suites) for details. + +- `tls_prefer_server_cipher_suites` Added in Consul 0.8.2, this + will cause Consul to prefer the server's ciphersuite over the client ciphersuites. + + ~> **Note:** This config will be deprecated in Consul 1.11. See this + [post](https://go.dev/blog/tls-cipher-suites) for details. + +- `verify_incoming` - If set to true, Consul + requires that all incoming connections make use of TLS and that the client + provides a certificate signed by a Certificate Authority from the + [`ca_file`](#ca_file) or [`ca_path`](#ca_path). This applies to both server + RPC and to the HTTPS API. By default, this is false, and Consul will not + enforce the use of TLS or verify a client's authenticity. Turning on + `verify_incoming` on Consul clients protects the HTTPS endpoint by ensuring + that the certificate that is presented by a 3rd party tool to the HTTPS + endpoint was created by the CA that the Consul client was set up with. If the + UI is served, the same checks are performed. + +- `verify_incoming_rpc` - When set to true, Consul + requires that all incoming RPC connections use TLS and that the client + provides a certificate signed by a Certificate Authority from the [`ca_file`](#ca_file) + or [`ca_path`](#ca_path).
By default, this is false, and Consul will not enforce + the use of TLS or verify a client's authenticity. + + ~> **Security Note:** `verify_incoming_rpc` _must_ be set to true to prevent anyone + with access to the RPC port from gaining full access to the Consul cluster. + +- `verify_incoming_https` - If set to true, + Consul requires that all incoming HTTPS connections make use of TLS and that the + client provides a certificate signed by a Certificate Authority from the [`ca_file`](#ca_file) + or [`ca_path`](#ca_path). By default, this is false, and Consul will not enforce + the use of TLS or verify a client's authenticity. To enable the HTTPS API, you + must define an HTTPS port via the [`ports`](#ports) configuration. By default, + HTTPS is disabled. + +- `verify_outgoing` - If set to true, Consul requires + that all outgoing connections from this agent make use of TLS and that the server + provides a certificate that is signed by a Certificate Authority from the [`ca_file`](#ca_file) + or [`ca_path`](#ca_path). By default, this is false, and Consul will not make use + of TLS for outgoing connections. This applies to clients and servers as both will + make outgoing connections. + + ~> **Security Note:** Servers that specify `verify_outgoing = true` will always talk to other servers over TLS, but they still _accept_ + non-TLS connections to allow for a transition of all clients to TLS. + Currently, the only way to enforce that no client can communicate with a + server unencrypted is to also enable `verify_incoming` which requires client + certificates too. + +- `verify_server_hostname` - When set to true, Consul verifies that the TLS certificate + presented by the servers matches the hostname `server..`. + By default this is false, and Consul does not verify the hostname + of the certificate, only that it is signed by a trusted CA. This setting _must_ be enabled + to prevent a compromised client from gaining full read and write access to all + cluster data _including all ACL tokens and Connect CA root keys_. This is new in 0.5.1. + + ~> **Security Note:** From versions 0.5.1 to 1.4.0, due to a bug, setting + this flag alone _does not_ imply `verify_outgoing` and leaves client-to-server + and server-to-server RPCs unencrypted despite the documentation stating otherwise. See + [CVE-2018-19653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-19653) + for more details. For those versions you **must also set `verify_outgoing = true`** to ensure encrypted RPC connections. + +### Example Configuration File, with TLS + +~> **Security Note:** All three verify options should be set to `true` to enable secure mTLS communication, enabling both +encryption and authentication. Failing to set [`verify_incoming`](#verify_incoming) or [`verify_outgoing`](#verify_outgoing) +will result in TLS not being enabled at all, even when specifying a [`ca_file`](#ca_file), [`cert_file`](#cert_file), and [`key_file`](#key_file).
+ +```json +{ + "datacenter": "east-aws", + "data_dir": "/opt/consul", + "log_level": "INFO", + "node_name": "foobar", + "server": true, + "addresses": { + "https": "0.0.0.0" + }, + "ports": { + "https": 8501 + }, + "key_file": "/etc/pki/tls/private/my.key", + "cert_file": "/etc/pki/tls/certs/my.crt", + "ca_file": "/etc/pki/tls/certs/ca-bundle.crt", + "verify_incoming": true, + "verify_outgoing": true, + "verify_server_hostname": true +} +``` + +See, especially, the use of the `ports` setting: + +```json +"ports": { + "https": 8501 +} +``` + +Consul will not enable TLS for the HTTP API unless the `https` port has been +assigned a port number `> 0`. We recommend using `8501` for `https` as this +default will automatically work with some tooling. \ No newline at end of file diff --git a/website/content/docs/agent/config/index.mdx b/website/content/docs/agent/config/index.mdx index 064165879..362cf36f6 100644 --- a/website/content/docs/agent/config/index.mdx +++ b/website/content/docs/agent/config/index.mdx @@ -43,2158 +43,6 @@ You can test the following configuration options by following the [Getting Started](https://learn.hashicorp.com/tutorials/consul/get-started-install?utm_source=consul.io&utm_medium=docs) tutorials to install a local agent. -## Configuration Files ((#configuration_files)) - -In addition to the command-line options, configuration for the Consul agent can be put into -files. This may be easier in certain situations, for example when Consul is -being configured using a configuration management system. - -The configuration files are formatted as HCL, or JSON. JSON formatted configs are easily readable -and editable by both humans and computers. JSON formatted configuration consists -of a single JSON object with multiple configuration keys specified within it. - -Configuration files are used for more than just setting up the agent. -They are also used to provide check and service definitions that -announce the availability of system servers to the rest of the cluster. -These definitions are documented separately under [check configuration](/docs/agent/checks) and -[service configuration](/docs/agent/services) respectively. Service and check -definitions support being updated during a reload. - - - -```hcl -datacenter = "east-aws" -data_dir = "/opt/consul" -log_level = "INFO" -node_name = "foobar" -server = true -watches = [ - { - type = "checks" - handler = "/usr/bin/health-check-handler.sh" - } -] - -telemetry { - statsite_address = "127.0.0.1:2180" -} -``` - -```json -{ - "datacenter": "east-aws", - "data_dir": "/opt/consul", - "log_level": "INFO", - "node_name": "foobar", - "server": true, - "watches": [ - { - "type": "checks", - "handler": "/usr/bin/health-check-handler.sh" - } - ], - "telemetry": { - "statsite_address": "127.0.0.1:2180" - } -} -``` - - - -#### Configuration Key Reference ((#config_key_reference)) - --> **Note:** All the TTL values described below are parsed by Go's `time` package, and have the following -[formatting specification](https://golang.org/pkg/time/#ParseDuration): "A -duration string is a possibly signed sequence of decimal numbers, each with -optional fraction and a unit suffix, such as '300ms', '-1.5h' or '2h45m'. -Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - -- `acl` ((#acl)) - This object allows a number of sub-keys to be set which - controls the ACL system. 
Configuring the ACL system within the ACL stanza was added - in Consul 1.4.0 - - The following sub-keys are available: - - - `enabled` ((#acl_enabled)) - Enables ACLs. - - - `policy_ttl` ((#acl_policy_ttl)) - Used to control Time-To-Live caching - of ACL policies. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL policy may be stale up to the TTL value. - - - `role_ttl` ((#acl_role_ttl)) - Used to control Time-To-Live caching - of ACL roles. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL role may be stale up to the TTL value. - - - `token_ttl` ((#acl_token_ttl)) - Used to control Time-To-Live caching - of ACL tokens. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL token may be stale up to the TTL value. - - - `down_policy` ((#acl_down_policy)) - Either "allow", "deny", "extend-cache" - or "async-cache"; "extend-cache" is the default. In the case that a policy or - token cannot be read from the [`primary_datacenter`](#primary_datacenter) or - leader node, the down policy is applied. In "allow" mode, all actions are permitted, - "deny" restricts all operations, and "extend-cache" allows any cached objects - to be used, ignoring the expiry time of the cached entry. If the request uses an - ACL that is not in the cache, "extend-cache" falls back to the behavior of - `default_policy`. - The value "async-cache" acts the same way as "extend-cache" - but performs updates asynchronously when ACL is present but its TTL is expired, - thus, if latency is bad between the primary and secondary datacenters, latency - of operations is not impacted. - - - `default_policy` ((#acl_default_policy)) - Either "allow" or "deny"; - defaults to "allow" but this will be changed in a future major release. The default - policy controls the behavior of a token when there is no matching rule. In "allow" - mode, ACLs are a denylist: any operation not specifically prohibited is allowed. - In "deny" mode, ACLs are an allowlist: any operation not specifically - allowed is blocked. **Note**: this will not take effect until you've enabled ACLs. - - - `enable_key_list_policy` ((#acl_enable_key_list_policy)) - Boolean value, defaults to false. - When true, the `list` permission will be required on the prefix being recursively read from the KV store. - Regardless of being enabled, the full set of KV entries under the prefix will be filtered - to remove any entries that the request's ACL token does not grant at least read - permissions. This option is only available in Consul 1.0 and newer. - - - `enable_token_replication` ((#acl_enable_token_replication)) - By default - secondary Consul datacenters will perform replication of only ACL policies and - roles. Setting this configuration will will enable ACL token replication and - allow for the creation of both [local tokens](/api-docs/acl/tokens#local) and - [auth methods](/docs/security/acl/auth-methods) in connected secondary datacenters. 
- - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, - global tokens already present in the secondary datacenter will be lost. For - production environments, consider configuring ACL replication in your initial - datacenter bootstrapping process. - - - `enable_token_persistence` ((#acl_enable_token_persistence)) - Either - `true` or `false`. When `true` tokens set using the API will be persisted to - disk and reloaded when an agent restarts. - - - `tokens` ((#acl_tokens)) - This object holds all of the configured - ACL tokens for the agents usage. - - - `initial_management` ((#acl_tokens_initial_management)) - This is available in - Consul 1.11 and later. In prior versions, use [`acl.tokens.master`](#acl_tokens_master). - - Only used for servers in the [`primary_datacenter`](#primary_datacenter). - This token will be created with management-level permissions if it does not exist. - It allows operators to bootstrap the ACL system with a token Secret ID that is - well-known. - - The `initial_management` token is only installed when a server acquires cluster - leadership. If you would like to install or change it, set the new value for - `initial_management` in the configuration for all servers. Once this is done, - restart the current leader to force a leader election. If the `initial_management` - token is not supplied, then the servers do not create an initial management token. - When you provide a value, it should be a UUID. To maintain backwards compatibility - and an upgrade path this restriction is not currently enforced but will be in a - future major Consul release. - - - `master` ((#acl_tokens_master)) **Renamed in Consul 1.11 to - [`acl.tokens.initial_management`](#acl_tokens_initial_management).** - - - `default` ((#acl_tokens_default)) - When provided, the agent will - use this token when making requests to the Consul servers. Clients can override - this token on a per-request basis by providing the "?token" query parameter. - When not provided, the empty token, which maps to the 'anonymous' ACL token, - is used. - - - `agent` ((#acl_tokens_agent)) - Used for clients and servers to perform - internal operations. If this isn't specified, then the - [`default`](#acl_tokens_default) will be used. - - This token must at least have write access to the node name it will - register as in order to set any of the node-level information in the - catalog such as metadata, or the node's tagged addresses. - - - `agent_recovery` ((#acl_tokens_agent_recovery)) - This is available in Consul 1.11 - and later. In prior versions, use [`acl.tokens.agent_master`](#acl_tokens_agent_master). - - Used to access [agent endpoints](/api-docs/agent) that require agent read or write privileges, - or node read privileges, even if Consul servers aren't present to validate any tokens. - This should only be used by operators during outages, regular ACL tokens should normally - be used by applications. - - - `agent_master` ((#acl_tokens_agent_master)) **Renamed in Consul 1.11 to - [`acl.tokens.agent_recovery`](#acl_tokens_agent_recovery).** - - - `replication` ((#acl_tokens_replication)) - The ACL token used to - authorize secondary datacenters with the primary datacenter for replication - operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. 
This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, - policies and roles already present in the secondary datacenter will be lost. For - production environments, consider configuring ACL replication in your initial - datacenter bootstrapping process. - - - `managed_service_provider` ((#acl_tokens_managed_service_provider)) - An - array of ACL tokens used by Consul managed service providers for cluster operations. - - - - ```hcl - managed_service_provider { - accessor_id = "ed22003b-0832-4e48-ac65-31de64e5c2ff" - secret_id = "cb6be010-bba8-4f30-a9ed-d347128dde17" - } - ``` - - ```json - "managed_service_provider": [ - { - "accessor_id": "ed22003b-0832-4e48-ac65-31de64e5c2ff", - "secret_id": "cb6be010-bba8-4f30-a9ed-d347128dde17" - } - ] - ``` - - - -- `acl_datacenter` - **This field is deprecated in Consul 1.4.0. See the [`primary_datacenter`](#primary_datacenter) field instead.** - - This designates the datacenter which is authoritative for ACL information. It must be provided to enable ACLs. All servers and datacenters must agree on the ACL datacenter. Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients, - it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement - of ACLs. Please review the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) for more details. - -- `acl_default_policy` ((#acl_default_policy_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.default_policy`](#acl_default_policy) field instead.** - Either "allow" or "deny"; defaults to "allow". The default policy controls the - behavior of a token when there is no matching rule. In "allow" mode, ACLs are a - denylist: any operation not specifically prohibited is allowed. In "deny" mode, - ACLs are an allowlist: any operation not specifically allowed is blocked. **Note**: - this will not take effect until you've set `primary_datacenter` to enable ACL support. - -- `acl_down_policy` ((#acl_down_policy_legacy)) - **Deprecated in Consul - 1.4.0. See the [`acl.down_policy`](#acl_down_policy) field instead.** Either "allow", - "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the - case that the policy for a token cannot be read from the [`primary_datacenter`](#primary_datacenter) - or leader node, the down policy is applied. In "allow" mode, all actions are permitted, - "deny" restricts all operations, and "extend-cache" allows any cached ACLs to be - used, ignoring their TTL values. If a non-cached ACL is used, "extend-cache" acts - like "deny". The value "async-cache" acts the same way as "extend-cache" but performs - updates asynchronously when ACL is present but its TTL is expired, thus, if latency - is bad between ACL authoritative and other datacenters, latency of operations is - not impacted. - -- `acl_agent_master_token` ((#acl_agent_master_token_legacy)) - **Deprecated - in Consul 1.4.0. 
  See the [`acl.tokens.agent_master`](#acl_tokens_agent_master)
  field instead.** Used to access [agent endpoints](/api-docs/agent) that
  require agent read or write privileges, or node read privileges, even if Consul
  servers aren't present to validate any tokens. This should only be used by operators
  during outages; regular ACL tokens should normally be used by applications. This
  was added in Consul 0.7.2 and is only used when [`acl_enforce_version_8`](#acl_enforce_version_8) is set to true.

- `acl_agent_token` ((#acl_agent_token_legacy)) - **Deprecated in Consul
  1.4.0. See the [`acl.tokens.agent`](#acl_tokens_agent) field instead.** Used for
  clients and servers to perform internal operations. If this isn't specified, then
  the [`acl_token`](#acl_token) will be used. This was added in Consul 0.7.2.

  This token must at least have write access to the node name it will register as in order to set any
  of the node-level information in the catalog such as metadata, or the node's tagged addresses.

- `acl_enforce_version_8` - **Deprecated in
  Consul 1.4.0 and removed in 1.8.0.** Used for clients and servers to determine if enforcement should
  occur for new ACL policies being previewed before Consul 0.8. Added in Consul 0.7.2,
  this defaults to false in versions of Consul prior to 0.8, and defaults to true
  in Consul 0.8 and later. This helps ease the transition to the new ACL features
  by allowing policies to be in place before enforcement begins.

- `acl_master_token` ((#acl_master_token_legacy)) - **Deprecated in Consul
  1.4.0. See the [`acl.tokens.master`](#acl_tokens_master) field instead.**

- `acl_replication_token` ((#acl_replication_token_legacy)) - **Deprecated
  in Consul 1.4.0. See the [`acl.tokens.replication`](#acl_tokens_replication) field
  instead.** Only used for servers outside the [`primary_datacenter`](#primary_datacenter)
  running Consul 0.7 or later. When provided, this will enable [ACL replication](https://learn.hashicorp.com/tutorials/consul/access-control-replication-multiple-datacenters),
  using this token to retrieve and replicate the ACLs
  to the non-authoritative local datacenter. In Consul 0.9.1 and later you can enable
  ACL replication using [`acl.enable_token_replication`](#acl_enable_token_replication) and then
  set the token later using the [agent token API](/api-docs/agent#update-acl-tokens)
  on each server. If the `acl_replication_token` is set in the config, it will automatically
  set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility.

  If there's a partition or other outage affecting the authoritative datacenter, and the
  [`acl_down_policy`](/docs/agent/options#acl_down_policy) is set to "extend-cache", tokens not
  in the cache can be resolved during the outage using the replicated set of ACLs.

- `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See
  the [`acl.tokens.default`](#acl_tokens_default) field instead.** When provided,
  the agent will use this token when making requests to the Consul servers. Clients
  can override this token on a per-request basis by providing the "?token" query
  parameter. When not provided, the empty token, which maps to the 'anonymous' ACL
  policy, is used.

- `acl_ttl` ((#acl_ttl_legacy)) - **Deprecated in Consul 1.4.0. See the
  [`acl.token_ttl`](#acl_token_ttl) field instead.** Used to control Time-To-Live
  caching of ACLs. By default, this is 30 seconds.
This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL policy may be stale up to the TTL value. - -- `addresses` - This is a nested object that allows setting - bind addresses. In Consul 1.0 and later these can be set to a space-separated list - of addresses to bind to, or a [go-sockaddr] template that can potentially resolve to multiple addresses. - - `http`, `https` and `grpc` all support binding to a Unix domain socket. A - socket can be specified in the form `unix:///path/to/socket`. A new domain - socket will be created at the given path. If the specified file path already - exists, Consul will attempt to clear the file and create the domain socket - in its place. The permissions of the socket file are tunable via the - [`unix_sockets` config construct](#unix_sockets). - - When running Consul agent commands against Unix socket interfaces, use the - `-http-addr` argument to specify the path to the socket. You can also place - the desired values in the `CONSUL_HTTP_ADDR` environment variable. - - For TCP addresses, the environment variable value should be an IP address - _with the port_. For example: `10.0.0.1:8500` and not `10.0.0.1`. However, - ports are set separately in the [`ports`](#ports) structure when - defining them in a configuration file. - - The following keys are valid: - - - `dns` - The DNS server. Defaults to `client_addr` - - `http` - The HTTP API. Defaults to `client_addr` - - `https` - The HTTPS API. Defaults to `client_addr` - - `grpc` - The gRPC API. Defaults to `client_addr` - -- `advertise_addr` Equivalent to the [`-advertise` command-line flag](#_advertise). - -- `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](#_advertise-wan). - -- `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_addr_wan_ipv6` This was added together with [`advertise_addr_wan_ipv4`](#advertise_addr_wan_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_reconnect_timeout` This is a per-agent setting of the [`reconnect_timeout`](#reconnect_timeout) parameter. - This agent will advertise to all other nodes in the cluster that after this timeout, the node may be completely - removed from the cluster. This may only be set on client agents and if unset then other nodes will use the main - `reconnect_timeout` setting when determining when this node may be removed from the cluster. - -- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](#_alt_domain) - -- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](#_serf_lan_bind). 
- This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port). - -- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](#_serf_lan_allowed_cidrs). - -- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](#_serf_wan_bind). - -- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](#_serf_wan_allowed_cidrs). - -- `audit` - Added in Consul 1.8, the audit object allow users to enable auditing - and configure a sink and filters for their audit logs. For more information, review the [audit log tutorial](https://learn.hashicorp.com/tutorials/consul/audit-logging). - - - - ```hcl - audit { - enabled = true - sink "My sink" { - type = "file" - format = "json" - path = "data/audit/audit.json" - delivery_guarantee = "best-effort" - rotate_duration = "24h" - rotate_max_files = 15 - rotate_bytes = 25165824 - } - } - ``` - - ```json - { - "audit": { - "enabled": true, - "sink": { - "My sink": { - "type": "file", - "format": "json", - "path": "data/audit/audit.json", - "delivery_guarantee": "best-effort", - "rotate_duration": "24h", - "rotate_max_files": 15, - "rotate_bytes": 25165824 - } - } - } - } - ``` - - - - The following sub-keys are available: - - - `enabled` - Controls whether Consul logs out each time a user - performs an operation. ACLs must be enabled to use this feature. Defaults to `false`. - - - `sink` - This object provides configuration for the destination to which - Consul will log auditing events. Sink is an object containing keys to sink objects, where the key is the name of the sink. - - - `type` - Type specifies what kind of sink this is. - The following keys are valid: - - `file` - Currently only file sinks are available, they take the following keys. - - `format` - Format specifies what format the events will - be emitted with. - The following keys are valid: - - `json` - Currently only json events are offered. - - `path` - The directory and filename to write audit events to. - - `delivery_guarantee` - Specifies - the rules governing how audit events are written. - The following keys are valid: - - `best-effort` - Consul only supports `best-effort` event delivery. - - `mode` - The permissions to set on the audit log files. - - `rotate_duration` - Specifies the - interval by which the system rotates to a new log file. At least one of `rotate_duration` or `rotate_bytes` - must be configured to enable audit logging. - - `rotate_max_files` - Defines the - limit that Consul should follow before it deletes old log files. - - `rotate_bytes` - Specifies how large an - individual log file can grow before Consul rotates to a new file. At least one of `rotate_bytes` or - `rotate_duration` must be configured to enable audit logging. - -- `autopilot` Added in Consul 0.8, this object allows a - number of sub-keys to be set which can configure operator-friendly settings for - Consul servers. When these keys are provided as configuration, they will only be - respected on bootstrapping. If they are not provided, the defaults will be used. - In order to change the value of these options after bootstrapping, you will need - to use the [Consul Operator Autopilot](/commands/operator/autopilot) - command. For more information about Autopilot, review the [Autopilot tutorial](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations). 
- - The following sub-keys are available: - - - `cleanup_dead_servers` - This controls the - automatic removal of dead server nodes periodically and whenever a new server - is added to the cluster. Defaults to `true`. - - - `last_contact_threshold` - Controls the - maximum amount of time a server can go without contact from the leader before - being considered unhealthy. Must be a duration value such as `10s`. Defaults - to `200ms`. - - - `max_trailing_logs` - Controls the maximum number - of log entries that a server can trail the leader by before being considered - unhealthy. Defaults to 250. - - - `min_quorum` - Sets the minimum number of servers necessary - in a cluster. Autopilot will stop pruning dead servers when this minimum is reached. There is no default. - - - `server_stabilization_time` - Controls - the minimum amount of time a server must be stable in the 'healthy' state before - being added to the cluster. Only takes effect if all servers are running Raft - protocol version 3 or higher. Must be a duration value such as `30s`. Defaults - to `10s`. - - - `redundancy_zone_tag` - - This controls the [`-node-meta`](#_node_meta) key to use when Autopilot is separating - servers into zones for redundancy. Only one server in each zone can be a voting - member at one time. If left blank (the default), this feature will be disabled. - - - `disable_upgrade_migration` - - If set to `true`, this setting will disable Autopilot's upgrade migration strategy - in Consul Enterprise of waiting until enough newer-versioned servers have been - added to the cluster before promoting any of them to voters. Defaults to `false`. - - - `upgrade_version_tag` - - The node_meta tag to use for version info when performing upgrade migrations. - If this is not set, the Consul version will be used. - -- `auto_config` This object allows setting options for the `auto_config` feature. - - The following sub-keys are available: - - - `enabled` (Defaults to `false`) This option enables `auto_config` on a client - agent. When starting up but before joining the cluster, the client agent will - make an RPC to the configured server addresses to request configuration settings, - such as its `agent` ACL token, TLS certificates, Gossip encryption key as well - as other configuration settings. These configurations get merged in as defaults - with any user-supplied configuration on the client agent able to override them. - The initial RPC uses a JWT specified with either `intro_token`, - `intro_token_file` or the `CONSUL_INTRO_TOKEN` environment variable to authorize - the request. How the JWT token is verified is controlled by the `auto_config.authorizer` - object available for use on Consul servers. Enabling this option also turns - on Connect because it is vital for `auto_config`, more specifically the CA - and certificates infrastructure. - - ~> **Warning:** Enabling `auto_config` conflicts with the [`auto_encrypt.tls`](#tls) feature. - Only one option may be specified. - - - `intro_token` (Defaults to `""`) This specifies the JWT to use for the initial - `auto_config` RPC to the Consul servers. This can be overridden with the - `CONSUL_INTRO_TOKEN` environment variable - - - `intro_token_file` (Defaults to `""`) This specifies a file containing the JWT - to use for the initial `auto_config` RPC to the Consul servers. 
This token - from this file is only loaded if the `intro_token` configuration is unset as - well as the `CONSUL_INTRO_TOKEN` environment variable - - - `server_addresses` (Defaults to `[]`) This specifies the addresses of servers in - the local datacenter to use for the initial RPC. These addresses support - [Cloud Auto-Joining](#cloud-auto-joining) and can optionally include a port to - use when making the outbound connection. If not port is provided the `server_port` - will be used. - - - `dns_sans` (Defaults to `[]`) This is a list of extra DNS SANs to request in the - client agent's TLS certificate. The `localhost` DNS SAN is always requested. - - - `ip_sans` (Defaults to `[]`) This is a list of extra IP SANs to request in the - client agent's TLS certificate. The `::1` and `127.0.0.1` IP SANs are always requested. - - - `authorization` This object controls how a Consul server will authorize `auto_config` - requests and in particular how to verify the JWT intro token. - - - `enabled` (Defaults to `false`) This option enables `auto_config` authorization - capabilities on the server. - - - `static` This object controls configuring the static authorizer setup in the Consul - configuration file. Almost all sub-keys are identical to those provided by the [JWT - Auth Method](/docs/security/acl/auth-methods/jwt). - - - `jwt_validation_pub_keys` (Defaults to `[]`) A list of PEM-encoded public keys - to use to authenticate signatures locally. - - Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. - - - `oidc_discovery_url` (Defaults to `""`) The OIDC Discovery URL, without any - .well-known component (base path). - - Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. - - - `oidc_discovery_ca_cert` (Defaults to `""`) PEM encoded CA cert for use by the TLS - client used to talk with the OIDC Discovery URL. NOTE: Every line must end - with a newline (`\n`). If not set, system certificates are used. - - - `jwks_url` (Defaults to `""`) The JWKS URL to use to authenticate signatures. - - Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. - - - `jwks_ca_cert` (Defaults to `""`) PEM encoded CA cert for use by the TLS client - used to talk with the JWKS URL. NOTE: Every line must end with a newline - (`\n`). If not set, system certificates are used. - - - `claim_mappings` (Defaults to `(map[string]string)` Mappings of claims (key) that - will be copied to a metadata field (value). Use this if the claim you are capturing - is singular (such as an attribute). - - When mapped, the values can be any of a number, string, or boolean and will - all be stringified when returned. - - - `list_claim_mappings` (Defaults to `(map[string]string)`) Mappings of claims (key) - will be copied to a metadata field (value). Use this if the claim you are capturing - is list-like (such as groups). - - When mapped, the values in each list can be any of a number, string, or - boolean and will all be stringified when returned. - - - `jwt_supported_algs` (Defaults to `["RS256"]`) JWTSupportedAlgs is a list of - supported signing algorithms. - - - `bound_audiences` (Defaults to `[]`) List of `aud` claims that are valid for - login; any match is sufficient. - - - `bound_issuer` (Defaults to `""`) The value against which to match the `iss` - claim in a JWT. - - - `expiration_leeway` (Defaults to `"0s"`) Duration of leeway when - validating expiration of a token to account for clock skew. 
Defaults to 150s - (2.5 minutes) if set to 0s and can be disabled if set to -1ns. - - - `not_before_leeway` (Defaults to `"0s"`) Duration of leeway when - validating not before values of a token to account for clock skew. Defaults - to 150s (2.5 minutes) if set to 0s and can be disabled if set to -1. - - - `clock_skew_leeway` (Defaults to `"0s"`) Duration of leeway when - validating all claims to account for clock skew. Defaults to 60s (1 minute) - if set to 0s and can be disabled if set to -1ns. - - - `claim_assertions` (Defaults to `[]`) List of assertions about the mapped - claims required to authorize the incoming RPC request. The syntax uses - [github.com/hashicorp/go-bexpr](https://github.com/hashicorp/go-bexpr) which is shared with the - [API filtering feature](/api-docs/features/filtering). For example, the following - configurations when combined will ensure that the JWT `sub` matches the node - name requested by the client. - - - - ```hcl - claim_mappings { - sub = "node_name" - } - claim_assertions = [ - "value.node_name == \"${node}\"" - ] - ``` - - ```json - { - "claim_mappings": { - "sub": "node_name" - }, - "claim_assertions": ["value.node_name == \"${node}\""] - } - ``` - - - - The assertions are lightly templated using [HIL syntax](https://github.com/hashicorp/hil) - to interpolate some values from the RPC request. The list of variables that can be interpolated - are: - - - `node` - The node name the client agent is requesting. - - - `segment` - The network segment name the client is requesting. - - - `partition` - The admin partition name the client is requesting. - -- `auto_encrypt` This object allows setting options for the `auto_encrypt` feature. - - The following sub-keys are available: - - - `allow_tls` (Defaults to `false`) This option enables - `auto_encrypt` on the servers and allows them to automatically distribute certificates - from the Connect CA to the clients. If enabled, the server can accept incoming - connections from both the built-in CA and the Connect CA, as well as their certificates. - Note, the server will only present the built-in CA and certificate, which the - client can verify using the CA it received from `auto_encrypt` endpoint. If disabled, - a client configured with `auto_encrypt.tls` will be unable to start. - - - `tls` (Defaults to `false`) Allows the client to request the - Connect CA and certificates from the servers, for encrypting RPC communication. - The client will make the request to any servers listed in the `-join` or `-retry-join` - option. This requires that every server to have `auto_encrypt.allow_tls` enabled. - When both `auto_encrypt` options are used, it allows clients to receive certificates - that are generated on the servers. If the `-server-port` is not the default one, - it has to be provided to the client as well. Usually this is discovered through - LAN gossip, but `auto_encrypt` provision happens before the information can be - distributed through gossip. The most secure `auto_encrypt` setup is when the - client is provided with the built-in CA, `verify_server_hostname` is turned on, - and when an ACL token with `node.write` permissions is setup. It is also possible - to use `auto_encrypt` with a CA and ACL, but without `verify_server_hostname`, - or only with a ACL enabled, or only with CA and `verify_server_hostname`, or - only with a CA, or finally without a CA and without ACL enabled. In any case, - the communication to the `auto_encrypt` endpoint is always TLS encrypted. 
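  The following minimal sketch shows how the two sides of `auto_encrypt` are
  typically paired; the certificate file paths are placeholders for illustration
  only, not required locations.

  ```hcl
  # Server agents: hand out certificates from the Connect CA to clients.
  auto_encrypt {
    allow_tls = true
  }
  verify_incoming        = true
  verify_server_hostname = true
  ca_file   = "/etc/consul.d/certs/consul-agent-ca.pem" # placeholder path
  cert_file = "/etc/consul.d/certs/server.pem"          # placeholder path
  key_file  = "/etc/consul.d/certs/server-key.pem"      # placeholder path
  ```

  A corresponding client agent sketch, assuming the built-in CA has been
  distributed to the client as recommended above, might look like:

  ```hcl
  # Client agents: request TLS material from the servers at startup.
  auto_encrypt {
    tls = true
  }
  verify_outgoing        = true
  verify_server_hostname = true
  ca_file = "/etc/consul.d/certs/consul-agent-ca.pem" # placeholder path
  ```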
  ~> **Warning:** Enabling `auto_encrypt.tls` conflicts with the [`auto_config`](#auto_config) feature.
  Only one option may be specified.

  - `dns_san` (Defaults to `[]`) When this option is being
    used, the certificates requested by `auto_encrypt` from the server have these
    `dns_san` set as DNS SAN.

  - `ip_san` (Defaults to `[]`) When this option is being used,
    the certificates requested by `auto_encrypt` from the server have these `ip_san`
    set as IP SAN.

- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](#_bootstrap).

- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](#_bootstrap_expect).

- `bind_addr` Equivalent to the [`-bind` command-line flag](#_bind).

  This parameter can be set to a go-sockaddr template that resolves to a single
  address. Special characters such as backslashes `\` or double quotes `"`
  within a double quoted string value must be escaped with a backslash `\`.
  Some example templates:

  ```hcl
  bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr \"address\" }}"
  ```

  ```json
  {
    "bind_addr": "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr \"address\" }}"
  }
  ```

- `cache` configuration for client agents. The configurable values are the following:

  - `entry_fetch_max_burst` The size of the token bucket used to recharge the rate-limit per
    cache entry. The default value is 2 and means that when the cache has not been updated
    for a long time, 2 successive queries can be made as long as the rate-limit is not
    reached.

  - `entry_fetch_rate` configures the rate-limit at which the cache may refresh a single
    entry. On a cluster with many changes per second, watching changes in the cache might put high
    pressure on the servers. This ensures the number of requests for a single cache entry
    will never go beyond this limit, even when a given service changes every 1/100s.
    Since this is a per cache entry limit, a highly unstable service will only rate
    limit the watches on that service, but not those on other services/entries.
    The value is strictly positive and expressed in queries per second as a float:
    1 means 1 query per second, and 0.1 means at most 1 request every 10s.
    The default value is "No limit" and should be tuned on large
    clusters to avoid performing too many RPCs on entries that change frequently.

- `check_update_interval` ((#check_update_interval))
  This interval controls how often check output from checks in a steady state is
  synchronized with the server. By default, this is set to 5 minutes ("5m"). Many
  checks which are in a steady state produce slightly different output per run (timestamps,
  etc.), which causes constant writes. This configuration allows deferring the sync
  of check output for a given interval to reduce write pressure. If a check ever
  changes state, the new state and associated output is synchronized immediately.
  To disable this behavior, set the value to "0s".

- `client_addr` Equivalent to the [`-client` command-line flag](#_client).

- `config_entries` This object allows setting options for centralized config entries.

  The following sub-keys are available:

  - `bootstrap` ((#config_entries_bootstrap))
    This is a list of inlined config entries to insert into the state store when
    the Consul server gains leadership. This option is only applicable to server
    nodes. Each bootstrap entry will be created only if it does not exist.
When reloading, - any new entries that have been added to the configuration will be processed. - See the [configuration entry docs](/docs/agent/config-entries) for more - details about the contents of each entry. - -- `connect` This object allows setting options for the Connect feature. - - The following sub-keys are available: - - - `enabled` ((#connect_enabled)) Controls whether Connect features are - enabled on this agent. Should be enabled on all servers in the cluster - in order for Connect to function properly. Defaults to false. - - - `enable_mesh_gateway_wan_federation` ((#connect_enable_mesh_gateway_wan_federation)) Controls whether cross-datacenter federation traffic between servers is funneled - through mesh gateways. Defaults to false. This was added in Consul 1.8.0. - - - `ca_provider` ((#connect_ca_provider)) Controls which CA provider to - use for Connect's CA. Currently only the `aws-pca`, `consul`, and `vault` providers are supported. - This is only used when initially bootstrapping the cluster. For an existing cluster, - use the [Update CA Configuration Endpoint](/api-docs/connect/ca#update-ca-configuration). - - - `ca_config` ((#connect_ca_config)) An object which allows setting different - config options based on the CA provider chosen. This is only used when initially - bootstrapping the cluster. For an existing cluster, use the [Update CA Configuration - Endpoint](/api-docs/connect/ca#update-ca-configuration). - - The following providers are supported: - - #### AWS ACM Private CA Provider (`ca_provider = "aws-pca"`) - - - `existing_arn` ((#aws_ca_existing_arn)) The Amazon Resource Name (ARN) of - an existing private CA in your ACM account. If specified, Consul will - attempt to use the existing CA to issue certificates. - - #### Consul CA Provider (`ca_provider = "consul"`) - - - `private_key` ((#consul_ca_private_key)) The PEM contents of the - private key to use for the CA. - - - `root_cert` ((#consul_ca_root_cert)) The PEM contents of the root - certificate to use for the CA. - - #### Vault CA Provider (`ca_provider = "vault"`) - - - `address` ((#vault_ca_address)) The address of the Vault server to - connect to. - - - `token` ((#vault_ca_token)) The Vault token to use. In Consul 1.8.5 and later, if - the token has the [renewable](https://www.vaultproject.io/api-docs/auth/token#renewable) - flag set, Consul will attempt to renew its lease periodically after half the - duration has expired. - - - `root_pki_path` ((#vault_ca_root_pki)) The path to use for the root - CA pki backend in Vault. This can be an existing backend with a CA already - configured, or a blank/unmounted backend in which case Connect will automatically - mount/generate the CA. The Vault token given above must have `sudo` access - to this backend, as well as permission to mount the backend at this path if - it is not already mounted. - - - `intermediate_pki_path` ((#vault_ca_intermediate_pki)) - The path to use for the temporary intermediate CA pki backend in Vault. **Connect - will overwrite any data at this path in order to generate a temporary intermediate - CA**. The Vault token given above must have `write` access to this backend, - as well as permission to mount the backend at this path if it is not already - mounted. - - - `auth_method` ((#vault_ca_auth_method)) - Vault auth method to use for logging in to Vault. - Please see [Vault Auth Methods](https://www.vaultproject.io/docs/auth) for more information - on how to configure individual auth methods. 
If auth method is provided, Consul will obtain a - new token from Vault when the token can no longer be renewed. - - - `type` The type of Vault auth method. - - - `mount_path` The mount path of the auth method. - If not provided the auth method type will be used as the mount path. - - - `params` The parameters to configure the auth method. - Please see [Vault Auth Methods](https://www.vaultproject.io/docs/auth) for information on how - to configure the auth method you wish to use. If using the Kubernetes auth method, Consul will - read the service account token from the default mount path `/var/run/secrets/kubernetes.io/serviceaccount/token` - if the `jwt` parameter is not provided. - -#### Common CA Config Options - -There are also a number of common configuration options supported by all providers: - - - `csr_max_concurrent` ((#ca_csr_max_concurrent)) Sets a limit on the number - of Certificate Signing Requests that can be processed concurrently. Defaults - to 0 (disabled). This is useful when you want to limit the number of CPU cores - available to the server for certificate signing operations. For example, on an - 8 core server, setting this to 1 will ensure that no more than one CPU core - will be consumed when generating or rotating certificates. Setting this is - recommended **instead** of `csr_max_per_second` when you want to limit the - number of cores consumed since it is simpler to reason about limiting CSR - resources this way without artificially slowing down rotations. Added in 1.4.1. - - - `csr_max_per_second` ((#ca_csr_max_per_second)) Sets a rate limit - on the maximum number of Certificate Signing Requests (CSRs) the servers will - accept. This is used to prevent CA rotation from causing unbounded CPU usage - on servers. It defaults to 50 which is conservative – a 2017 Macbook can process - about 100 per second using only ~40% of one CPU core – but sufficient for deployments - up to ~1500 service instances before the time it takes to rotate is impacted. - For larger deployments we recommend increasing this based on the expected number - of server instances and server resources, or use `csr_max_concurrent` instead - if servers have more than one CPU core. Setting this to zero disables rate limiting. - Added in 1.4.1. - - - `leaf_cert_ttl` ((#ca_leaf_cert_ttl)) Specifies the upper bound on the expiry - of a leaf certificate issued for a service. In most cases a new leaf - certificate will be requested by a proxy before this limit is reached. This - is also the effective limit on how long a server outage can last (with no leader) - before network connections will start being rejected. Defaults to `72h`. - - You can specify a range from one hour (minimum) up to one year (maximum) using - the following units: `h`, `m`, `s`, `ms`, `us` (or `µs`), `ns`, or a combination - of those units, e.g. `1h5m`. - - This value is also used when rotating out old root certificates from - the cluster. When a root certificate has been inactive (rotated out) - for more than twice the _current_ `leaf_cert_ttl`, it will be removed - from the trusted list. - - - `intermediate_cert_ttl` ((#ca_intermediate_cert_ttl)) Specifies the expiry for the - intermediate certificates. Defaults to `8760h` (1 year). Must be at least 3 times `leaf_cert_ttl`. - - - `root_cert_ttl` ((#ca_root_cert_ttl)) Specifies the expiry for a root certificate. - Defaults to 10 years as `87600h`. This value, if provided, needs to be higher than the - intermediate certificate TTL. 
- - This setting applies to all Consul CA providers. - - For the Vault provider, this value is only used if the backend is not initialized at first. - - This value is also applied on the `ca set-config` command. - - - `private_key_type` ((#ca_private_key_type)) The type of key to generate - for this CA. This is only used when the provider is generating a new key. If - `private_key` is set for the Consul provider, or existing root or intermediate - PKI paths given for Vault then this will be ignored. Currently supported options - are `ec` or `rsa`. Default is `ec`. - - It is required that all servers in a datacenter have - the same config for the CA. It is recommended that servers in - different datacenters use the same key type and size, - although the built-in CA and Vault provider will both allow mixed CA - key types. - - Some CA providers (currently Vault) will not allow cross-signing a - new CA certificate with a different key type. This means that if you - migrate from an RSA-keyed Vault CA to an EC-keyed CA from any - provider, you may have to proceed without cross-signing which risks - temporary connection issues for workloads during the new certificate - rollout. We highly recommend testing this outside of production to - understand the impact and suggest sticking to same key type where - possible. - - Note that this only affects _CA_ keys generated by the provider. - Leaf certificate keys are always EC 256 regardless of the CA - configuration. - - - `private_key_bits` ((#ca_private_key_bits)) The length of key to - generate for this CA. This is only used when the provider is generating a new - key. If `private_key` is set for the Consul provider, or existing root or intermediate - PKI paths given for Vault then this will be ignored. - - Currently supported values are: - - - `private_key_type = ec` (default): `224, 256, 384, 521` - corresponding to the NIST P-\* curves of the same name. - - `private_key_type = rsa`: `2048, 4096` - -- `datacenter` Equivalent to the [`-datacenter` command-line flag](#_datacenter). - -- `data_dir` Equivalent to the [`-data-dir` command-line flag](#_data_dir). - -- `disable_anonymous_signature` Disables providing an anonymous - signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check). - -- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](#_disable_host_node_id). - -- `disable_http_unprintable_char_filter` Defaults to false. Consul 1.0.3 fixed a potential security vulnerability where malicious users could craft KV keys with unprintable chars that would confuse operators using the CLI or UI into taking wrong actions. Users who had data written in older versions of Consul that did not have this restriction will be unable to delete those values by default in 1.0.3 or later. This setting enables those users to **temporarily** disable the filter such that delete operations can work on those keys again to get back to a healthy state. It is strongly recommended that this filter is not disabled permanently as it exposes the original security vulnerability. - -- `disable_remote_exec` Disables support for remote execution. When set to true, the agent will ignore - any incoming remote exec requests. In versions of Consul prior to 0.8, this defaulted - to false. In Consul 0.8 the default was changed to true, to make remote exec opt-in - instead of opt-out. - -- `disable_update_check` Disables automatic checking for security bulletins and new version releases. 
This is disabled in Consul Enterprise. - -- `discard_check_output` Discards the output of health checks before storing them. This reduces the number of writes to the Consul raft log in environments where health checks have volatile output like timestamps, process ids, ... - -- `discovery_max_stale` - Enables stale requests for all service discovery HTTP endpoints. This is - equivalent to the [`max_stale`](#max_stale) configuration for DNS requests. If this value is zero (default), all service discovery HTTP endpoints are forwarded to the leader. If this value is greater than zero, any Consul server can handle the service discovery request. If a Consul server is behind the leader by more than `discovery_max_stale`, the query will be re-evaluated on the leader to get more up-to-date results. Consul agents also add a new `X-Consul-Effective-Consistency` response header which indicates if the agent did a stale read. `discover-max-stale` was introduced in Consul 1.0.7 as a way for Consul operators to force stale requests from clients at the agent level, and defaults to zero which matches default consistency behavior in earlier Consul versions. - -- `dns_config` This object allows a number of sub-keys - to be set which can tune how DNS queries are serviced. Check the tutorial on [DNS caching](https://learn.hashicorp.com/tutorials/consul/dns-caching) for more detail. - - The following sub-keys are available: - - - `allow_stale` - Enables a stale query for DNS information. - This allows any Consul server, rather than only the leader, to service the request. - The advantage of this is you get linear read scalability with Consul servers. - In versions of Consul prior to 0.7, this defaulted to false, meaning all requests - are serviced by the leader, providing stronger consistency but less throughput - and higher latency. In Consul 0.7 and later, this defaults to true for better - utilization of available servers. - - - `max_stale` - When [`allow_stale`](#allow_stale) is - specified, this is used to limit how stale results are allowed to be. If a Consul - server is behind the leader by more than `max_stale`, the query will be re-evaluated - on the leader to get more up-to-date results. Prior to Consul 0.7.1 this defaulted - to 5 seconds; in Consul 0.7.1 and later this defaults to 10 years ("87600h") - which effectively allows DNS queries to be answered by any server, no matter - how stale. In practice, servers are usually only milliseconds behind the leader, - so this lets Consul continue serving requests in long outage scenarios where - no leader can be elected. - - - `node_ttl` - By default, this is "0s", so all node lookups - are served with a 0 TTL value. DNS caching for node lookups can be enabled by - setting this value. This should be specified with the "s" suffix for second or - "m" for minute. - - - `service_ttl` - This is a sub-object which allows - for setting a TTL on service lookups with a per-service policy. The "\*" wildcard - service can be used when there is no specific policy available for a service. - By default, all services are served with a 0 TTL value. DNS caching for service - lookups can be enabled by setting this value. - - - `enable_truncate` - If set to true, a UDP DNS - query that would return more than 3 records, or more than would fit into a valid - UDP response, will set the truncated flag, indicating to clients that they should - re-query using TCP to get the full set of records. 
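  As an illustration of the sub-keys covered so far, a `dns_config` stanza along
  these lines would allow stale reads and add modest DNS caching; the service name
  and TTL values are purely illustrative, not recommendations.

  ```hcl
  dns_config {
    allow_stale     = true
    max_stale       = "87600h"
    node_ttl        = "30s"      # illustrative; the default is "0s" (no caching)
    enable_truncate = true

    service_ttl {
      "web" = "10s"              # illustrative per-service policy
      "*"   = "5s"               # wildcard used when no specific policy exists
    }
  }
  ```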
  - `only_passing` - If set to true, any nodes whose
    health checks are warning or critical will be excluded from DNS results. If false
    (the default), only nodes whose health checks are failing as critical will be excluded.
    For service lookups, the health checks of the node itself, as well as the service-specific
    checks, are considered. For example, if a node has a health check that is critical,
    then all services on that node will be excluded because they are also considered
    critical.

  - `recursor_strategy` - If set to `sequential`, Consul will query recursors in the
    order listed in the [`recursors`](#recursors) option. If set to `random`,
    Consul will query upstream DNS resolvers in a random order. Defaults to
    `sequential`.

  - `recursor_timeout` - Timeout used by Consul when
    recursively querying an upstream DNS server. See [`recursors`](#recursors) for more details. Default is 2s. This is available in Consul 0.7 and later.

  - `disable_compression` - If set to true, DNS
    responses will not be compressed. Compression was added and enabled by default
    in Consul 0.7.

  - `udp_answer_limit` - Limit the number of resource
    records contained in the answer section of a UDP-based DNS response. This parameter
    applies only to UDP DNS queries that are less than 512 bytes. This setting is
    deprecated and replaced in Consul 1.0.7 by [`a_record_limit`](#a_record_limit).

  - `a_record_limit` - Limit the number of resource
    records contained in the answer section of an A, AAAA, or ANY DNS response (both
    TCP and UDP). When answering a question, Consul will use the complete list of
    matching hosts, shuffle the list randomly, and then limit the number of answers
    to `a_record_limit` (default: no limit). This limit does not apply to SRV records.

    In environments where [RFC 3484 Section 6](https://tools.ietf.org/html/rfc3484#section-6) Rule 9
    is implemented and enforced (i.e. DNS answers are always sorted and
    therefore never random), clients may need to set this value to `1` to
    preserve the expected randomized distribution behavior (note:
    [RFC 3484](https://tools.ietf.org/html/rfc3484) has been obsoleted by
    [RFC 6724](https://tools.ietf.org/html/rfc6724) and as a result it should
    be increasingly uncommon to need to change this value with modern
    resolvers).

  - `enable_additional_node_meta_txt` - When set to true, Consul
    will add TXT records for Node metadata into the Additional section of the DNS responses for
    several query types such as SRV queries. When set to false, those records are not emitted.
    This does not impact the behavior of those same TXT records when they would be added to the
    Answer section of the response, such as when querying with type TXT or ANY. This defaults to true.

  - `soa` Allows tuning of the settings in the SOA record. Unspecified
    values fall back to their defaults; all values are integers and expressed
    as seconds.

    The following settings are available:

    - `expire` ((#soa_expire)) - Configure SOA Expire duration in seconds,
      default value is 86400, ie: 24 hours.

    - `min_ttl` ((#soa_min_ttl)) - Configure SOA DNS minimum TTL. As explained
      in [RFC-2308](https://tools.ietf.org/html/rfc2308) this also controls negative
      cache TTL in most implementations. Default value is 0, ie: no minimum delay
      or negative TTL.

    - `refresh` ((#soa_refresh)) - Configure SOA Refresh duration in seconds,
      default value is `3600`, ie: 1 hour.
- - - `retry` ((#soa_retry)) - Configures the Retry duration expressed - in seconds, default value is 600, ie: 10 minutes. - - - `use_cache` ((#dns_use_cache)) - When set to true, DNS resolution will - use the agent cache described in [agent caching](/api-docs/features/caching). - This setting affects all service and prepared queries DNS requests. Implies [`allow_stale`](#allow_stale) - - - `cache_max_age` ((#dns_cache_max_age)) - When [use_cache](#dns_use_cache) - is enabled, the agent will attempt to re-fetch the result from the servers if - the cached value is older than this duration. See: [agent caching](/api-docs/features/caching). - - **Note** that unlike the `max-age` HTTP header, a value of 0 for this field is - equivalent to "no max age". To get a fresh value from the cache use a very small value - of `1ns` instead of 0. - - - `prefer_namespace` ((#dns_prefer_namespace)) **Deprecated in - Consul 1.11. Use the [canonical DNS format](/docs/discovery/dns#namespaced-partitioned-services) instead.** - - When set to true, in a DNS query for a service, the label between the domain - and the `service` label will be treated as a namespace name instead of a datacenter. - When set to false, the default, the behavior will be the same as non-Enterprise - versions and will assume the label is the datacenter. See: [this section](/docs/discovery/dns#namespaced-services) - for more details. - -- `domain` Equivalent to the [`-domain` command-line flag](#_domain). - -- `enable_acl_replication` **Deprecated in Consul 1.11. Use the [`acl.enable_token_replication`](#acl_enable_token_replication) field instead.** - When set on a Consul server, enables ACL replication without having to set - the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication - and then introduce the token using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. - See [`acl_replication_token`](#acl_replication_token) for more details. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, - policies and roles already present in the secondary datacenter will be lost. For - production environments, consider configuring ACL replication in your initial - datacenter bootstrapping process. - -- `enable_agent_tls_for_checks` When set, uses a subset of the agent's TLS configuration (`key_file`, - `cert_file`, `ca_file`, `ca_path`, and `server_name`) to set up the client for HTTP or gRPC health checks. This allows services requiring 2-way TLS to be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false. - -- `enable_central_service_config` When set, the Consul agent will look for any - [centralized service configuration](/docs/agent/config-entries) - that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations. - This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later. - -- `enable_debug` When set, enables some additional debugging features. Currently, this is only used to - access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`. - -- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](#_enable_script_checks). 
- - ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information. - - ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. Refer to the following article for additional guidance: [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) - for more details. - -- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](#_enable_local_script_checks). - -- `enable_syslog` Equivalent to the [`-syslog` command-line flag](#_syslog). - -- `encrypt` Equivalent to the [`-encrypt` command-line flag](#_encrypt). - -- `encrypt_verify_incoming` - This is an optional - parameter that can be used to disable enforcing encryption for incoming gossip - in order to upshift from unencrypted to encrypted gossip on a running cluster. - See [this section](/docs/security/encryption#configuring-gossip-encryption-on-an-existing-cluster) - for more information. Defaults to true. - -- `encrypt_verify_outgoing` - This is an optional - parameter that can be used to disable enforcing encryption for outgoing gossip - in order to upshift from unencrypted to encrypted gossip on a running cluster. - See [this section](/docs/security/encryption#configuring-gossip-encryption-on-an-existing-cluster) - for more information. Defaults to true. - -- `disable_keyring_file` - Equivalent to the - [`-disable-keyring-file` command-line flag](#_disable_keyring_file). - -- `disable_coordinates` - Disables sending of [network coordinates](/docs/architecture/coordinates). - When network coordinates are disabled the `near` query param will not work to sort the nodes, - and the [`consul rtt`](/commands/rtt) command will not be able to provide round trip time between nodes. - -- `gossip_lan` - **(Advanced)** This object contains a - number of sub-keys which can be set to tune the LAN gossip communications. These - are only provided for users running especially large clusters that need fine tuning - and are prepared to spend significant effort correctly tuning them for their environment - and workload. **Tuning these improperly can cause Consul to fail in unexpected - ways**. The default values are appropriate in almost all deployments. - - - `gossip_nodes` - The number of random nodes to send - gossip messages to per gossip_interval. Increasing this number causes the gossip - messages to propagate across the cluster more quickly at the expense of increased - bandwidth. The default is 3. - - - `gossip_interval` - The interval between sending - messages that need to be gossiped that haven't been able to piggyback on probing - messages. If this is set to zero, non-piggyback gossip is disabled. By lowering - this value (more frequent) gossip messages are propagated across the cluster - more quickly at the expense of increased bandwidth. The default is 200ms. - - - `probe_interval` - The interval between random - node probes. Setting this lower (more frequent) will cause the cluster to detect - failed nodes more quickly at the expense of increased bandwidth usage. The default - is 1s. 
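  For reference only, a `gossip_lan` block with each key written out at its
  documented LAN default looks like the sketch below; per the warning above, these
  values should normally be left untouched.

  ```hcl
  gossip_lan {
    gossip_nodes    = 3
    gossip_interval = "200ms"
    probe_interval  = "1s"
    probe_timeout   = "500ms"  # this and the remaining defaults are described below
    retransmit_mult = 4
    suspicion_mult  = 4
  }
  ```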
- - - `probe_timeout` - The timeout to wait for an ack - from a probed node before assuming it is unhealthy. This should be at least the - 99-percentile of RTT (round-trip time) on your network. The default is 500ms - and is a conservative value suitable for almost all realistic deployments. - - - `retransmit_mult` - The multiplier for the number - of retransmissions that are attempted for messages broadcasted over gossip. The - number of retransmits is scaled using this multiplier and the cluster size. The - higher the multiplier, the more likely a failed broadcast is to converge at the - expense of increased bandwidth. The default is 4. - - - `suspicion_mult` - The multiplier for determining - the time an inaccessible node is considered suspect before declaring it dead. - The timeout is scaled with the cluster size and the probe_interval. This allows - the timeout to scale properly with expected propagation delay with a larger cluster - size. The higher the multiplier, the longer an inaccessible node is considered - part of the cluster before declaring it dead, giving that suspect node more time - to refute if it is indeed still alive. The default is 4. - -- `gossip_wan` - **(Advanced)** This object contains a - number of sub-keys which can be set to tune the WAN gossip communications. These - are only provided for users running especially large clusters that need fine tuning - and are prepared to spend significant effort correctly tuning them for their environment - and workload. **Tuning these improperly can cause Consul to fail in unexpected - ways**. The default values are appropriate in almost all deployments. - - - `gossip_nodes` - The number of random nodes to send - gossip messages to per gossip_interval. Increasing this number causes the gossip - messages to propagate across the cluster more quickly at the expense of increased - bandwidth. The default is 4. - - - `gossip_interval` - The interval between sending - messages that need to be gossiped that haven't been able to piggyback on probing - messages. If this is set to zero, non-piggyback gossip is disabled. By lowering - this value (more frequent) gossip messages are propagated across the cluster - more quickly at the expense of increased bandwidth. The default is 500ms. - - - `probe_interval` - The interval between random - node probes. Setting this lower (more frequent) will cause the cluster to detect - failed nodes more quickly at the expense of increased bandwidth usage. The default - is 5s. - - - `probe_timeout` - The timeout to wait for an ack - from a probed node before assuming it is unhealthy. This should be at least the - 99-percentile of RTT (round-trip time) on your network. The default is 3s - and is a conservative value suitable for almost all realistic deployments. - - - `retransmit_mult` - The multiplier for the number - of retransmissions that are attempted for messages broadcasted over gossip. The - number of retransmits is scaled using this multiplier and the cluster size. The - higher the multiplier, the more likely a failed broadcast is to converge at the - expense of increased bandwidth. The default is 4. - - - `suspicion_mult` - The multiplier for determining - the time an inaccessible node is considered suspect before declaring it dead. - The timeout is scaled with the cluster size and the probe_interval. This allows - the timeout to scale properly with expected propagation delay with a larger cluster - size. 
The higher the multiplier, the longer an inaccessible node is considered - part of the cluster before declaring it dead, giving that suspect node more time - to refute if it is indeed still alive. The default is 6. - -- `http_config` This object allows setting options for the HTTP API and UI. - - The following sub-keys are available: - - - `block_endpoints` - This object is a list of HTTP API endpoint prefixes to block on the agent, and - defaults to an empty list, meaning all endpoints are enabled. Any endpoint that - has a common prefix with one of the entries on this list will be blocked and - will return a 403 response code when accessed. For example, to block all of the - V1 ACL endpoints, set this to `["/v1/acl"]`, which will block `/v1/acl/create`, - `/v1/acl/update`, and the other ACL endpoints that begin with `/v1/acl`. This - only works with API endpoints, not `/ui` or `/debug`, those must be disabled - with their respective configuration options. Any CLI commands that use disabled - endpoints will no longer function as well. For more general access control, Consul's - [ACL system](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) - should be used, but this option is useful for removing access to HTTP API endpoints - completely, or on specific agents. This is available in Consul 0.9.0 and later. - - - `response_headers` This object allows adding headers to the HTTP API and UI responses. For example, the following config can be used to enable [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) on the HTTP API endpoints: - - - - ```hcl - http_config { - response_headers { - Access-Control-Allow-Origin = "*" - } - } - ``` - - ```json - { - "http_config": { - "response_headers": { - "Access-Control-Allow-Origin": "*" - } - } - } - ``` - - - - - `allow_write_http_from` This object is a list of networks in CIDR notation (eg "127.0.0.0/8") that are allowed to call the agent write endpoints. It defaults to an empty list, which means all networks are allowed. This is used to make the agent read-only, except for select ip ranges. - To block write calls from anywhere, use `[ "255.255.255.255/32" ]`. - To only allow write calls from localhost, use `[ "127.0.0.0/8" ]` - To only allow specific IPs, use `[ "10.0.0.1/32", "10.0.0.2/32" ]` - - - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't be using [agent caching](/api-docs/features/caching) to answer the request. Even when the url parameter is provided. - - - `max_header_bytes` This setting controls the maximum number of bytes the consul http server will read parsing the request header's keys and values, including the request line. It does not limit the size of the request body. If zero, or negative, http.DefaultMaxHeaderBytes is used, which equates to 1 Megabyte. - -- `leave_on_terminate` If enabled, when the agent receives a TERM signal, it will send a `Leave` message to the rest of the cluster and gracefully leave. The default behavior for this feature varies based on whether or not the agent is running as a client or a server (prior to Consul 0.7 the default value was unconditionally set to `false`). On agents in client-mode, this defaults to `true` and for agents in server-mode, this defaults to `false`. - -- `license_path` This specifies the path to a file that contains the Consul Enterprise license. Alternatively the license may also be specified in either the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. 
See the [licensing documentation](/docs/enterprise/license/overview) for more information about Consul Enterprise license management. Added in versions 1.10.0, 1.9.7 and 1.8.13. Prior to version 1.10.0 the value may be set for all agents to facilitate forwards compatibility with 1.10 but will only actually be used by client agents. - -- `limits` Available in Consul 0.9.3 and later, this is a nested - object that configures limits that are enforced by the agent. Prior to Consul 1.5.2, - this only applied to agents in client mode, not Consul servers. The following parameters - are available: - - - `http_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single client IP address is allowed to open to the agent's HTTP(S) server. This affects the HTTP(S) servers in both client and server agents. Default value is `200`. - - `https_handshake_timeout` - Configures the limit for how long the HTTPS server in both client and server agents will wait for a client to complete a TLS handshake. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). Default value is `5s`. - - `rpc_handshake_timeout` - Configures the limit for how long servers will wait after a client TCP connection is established before they complete the connection handshake. When TLS is used, the same timeout applies to the TLS handshake separately from the initial protocol negotiation. All Consul clients should perform this immediately on establishing a new connection. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). When `verify_incoming` is true on servers, this limits how long the connection socket and associated goroutines will be held open before the client successfully authenticates. Default value is `5s`. - - `rpc_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single source IP address is allowed to open to a single server. It affects both client connections and other server connections. In general Consul clients multiplex many RPC calls over a single TCP connection so this can typically be kept low. It needs to be more than one though since servers open at least one additional connection for raft RPC, possibly more for WAN federation when using network areas, and snapshot requests from clients run over a separate TCP conn. A reasonably low limit significantly reduces the ability of an unauthenticated attacker to consume unbounded resources by holding open many connections. You may need to increase this if WAN federated servers connect via proxies or NAT gateways or similar causing many legitimate connections from a single source IP. Default value is `100` which is designed to be extremely conservative to limit issues with certain deployment patterns. Most deployments can probably reduce this safely. 100 connections on modern server hardware should not cause a significant impact on resource usage from an unauthenticated attacker though. - - `rpc_rate` - Configures the RPC rate limiter on Consul _clients_ by setting the maximum request rate that this agent is allowed to make for RPC requests to Consul servers, in requests per second. Defaults to infinite, which disables rate limiting.
- - `rpc_max_burst` - The size of the token bucket used to recharge the RPC rate limiter on Consul _clients_. Defaults to 1000 tokens, and each token is good for a single RPC call to a Consul server. See https://en.wikipedia.org/wiki/Token_bucket for more details about how token bucket rate limiters operate. - - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api-docs/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. - - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api-docs/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. An illustrative `limits` stanza combining these settings is shown further below. - -- `log_file` Equivalent to the [`-log-file` command-line flag](#_log_file). - -- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](#_log_rotate_duration). - -- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](#_log_rotate_bytes). - -- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](#_log_rotate_max_files). - -- `log_level` Equivalent to the [`-log-level` command-line flag](#_log_level). - -- `auto-reload-config` Equivalent to the [`-auto-reload-config` command-line flag](#_auto_reload_config). - -- `log_json` Equivalent to the [`-log-json` command-line flag](#_log_json). - -- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](#_default_query_time). - -- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](#_max_query_time). - -- `node_id` Equivalent to the [`-node-id` command-line flag](#_node_id). - -- `node_name` Equivalent to the [`-node` command-line flag](#_node). - -- `node_meta` Available in Consul 0.7.3 and later, this object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](#_node_meta) for more information. - - - - ```hcl - node_meta { - instance_type = "t2.medium" - } - ``` - - ```json - { - "node_meta": { - "instance_type": "t2.medium" - } - } - ``` - - - -- `partition` - This flag is used to set - the name of the admin partition the agent belongs to. An agent can only join - and communicate with other agents within its admin partition. Review the - [Admin Partitions documentation](/docs/enterprise/admin-partitions) for more - details. By default, this is an empty string, which is the `default` admin - partition. This cannot be set on a server agent. - - ~> **Warning:** The `partition` option cannot be used with either the - [`segment`](#segment-2) option or [`-segment`](#_segment) flag.
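For reference, the `limits` parameters documented above can be combined into a single stanza. The following is an illustrative sketch only: the values shown are placeholders rather than recommendations, and any limit that is not set keeps its default.

```hcl
limits {
  # Concurrent HTTP(S) connections allowed per client IP (default 200).
  http_max_conns_per_client = 200

  # Handshake timeouts that bound how long unauthenticated connections
  # can be held open.
  https_handshake_timeout = "5s"
  rpc_handshake_timeout   = "5s"

  # Concurrent RPC connections allowed per source IP (default 100).
  rpc_max_conns_per_client = 100

  # Client-side RPC rate limiting; omit these two keys to leave
  # rate limiting disabled (the default).
  rpc_rate      = 100
  rpc_max_burst = 1000

  # Maximum request body sizes, in bytes, for the KV and txn endpoints.
  kv_max_value_size = 524288
  txn_max_req_len   = 524288
}
```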
- -- `performance` Available in Consul 0.7 and later, this is a nested object that allows tuning the performance of different subsystems in Consul. See the [Server Performance](/docs/install/performance) documentation for more details. The following parameters are available: - - - `leave_drain_time` - A duration that a server will dwell during a graceful leave in order to allow requests to be retried against other Consul servers. Under normal circumstances, this can prevent clients from experiencing "no leader" errors when performing a rolling update of the Consul servers. This was added in Consul 1.0. Must be a duration value such as 10s. Defaults to 5s. - - - `raft_multiplier` - An integer multiplier used by Consul servers to scale key Raft timing parameters. Omitting this value or setting it to 0 uses default timing described below. Lower values are used to tighten timing and increase sensitivity while higher values relax timings and reduce sensitivity. Tuning this affects the time it takes Consul to detect leader failures and to perform leader elections, at the expense of requiring more network and CPU resources for better performance. - - By default, Consul will use a lower-performance timing that's suitable - for [minimal Consul servers](/docs/install/performance#minimum), currently equivalent - to setting this to a value of 5 (this default may be changed in future versions of Consul, - depending if the target minimum server profile changes). Setting this to a value of 1 will - configure Raft to its highest-performance mode, equivalent to the default timing of Consul - prior to 0.7, and is recommended for [production Consul servers](/docs/install/performance#production). - - See the note on [last contact](/docs/install/performance#production-server-requirements) timing for more - details on tuning this parameter. The maximum allowed value is 10. - - - `rpc_hold_timeout` - A duration that a client - or server will retry internal RPC requests during leader elections. Under normal - circumstances, this can prevent clients from experiencing "no leader" errors. - This was added in Consul 1.0. Must be a duration value such as 10s. Defaults - to 7s. - -- `pid_file` Equivalent to the [`-pid-file` command line flag](#_pid_file). - -- `ports` This is a nested object that allows setting the bind ports for the following keys: - - - `dns` ((#dns_port)) - The DNS server, -1 to disable. Default 8600. - TCP and UDP. - - `http` ((#http_port)) - The HTTP API, -1 to disable. Default 8500. - TCP only. - - `https` ((#https_port)) - The HTTPS API, -1 to disable. Default -1 - (disabled). **We recommend using `8501`** for `https` by convention as some tooling - will work automatically with this. - - `grpc` ((#grpc_port)) - The gRPC API, -1 to disable. Default -1 (disabled). - **We recommend using `8502`** for `grpc` by convention as some tooling will work - automatically with this. This is set to `8502` by default when the agent runs - in `-dev` mode. Currently gRPC is only used to expose Envoy xDS API to Envoy - proxies. - - `serf_lan` ((#serf_lan_port)) - The Serf LAN port. Default 8301. TCP - and UDP. Equivalent to the [`-serf-lan-port` command line flag](#_serf_lan_port). - - `serf_wan` ((#serf_wan_port)) - The Serf WAN port. Default 8302. - Equivalent to the [`-serf-wan-port` command line flag](#_serf_wan_port). Set - to -1 to disable. **Note**: this will disable WAN federation which is not recommended. - Various catalog and WAN related endpoints will return errors or empty results. - TCP and UDP. 
- - `server` ((#server_rpc_port)) - Server RPC address. Default 8300. TCP - only. - - `sidecar_min_port` ((#sidecar_min_port)) - Inclusive minimum port number - to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). - Default 21000. Set to `0` to disable automatic port assignment. - - `sidecar_max_port` ((#sidecar_max_port)) - Inclusive maximum port number - to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). - Default 21255. Set to `0` to disable automatic port assignment. - - `expose_min_port` ((#expose_min_port)) - Inclusive minimum port number - to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). - Default 21500. Set to `0` to disable automatic port assignment. - - `expose_max_port` ((#expose_max_port)) - Inclusive maximum port number - to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). - Default 21755. Set to `0` to disable automatic port assignment. - -- `primary_datacenter` - This designates the datacenter - which is authoritative for ACL information, intentions and is the root Certificate - Authority for Connect. It must be provided to enable ACLs. All servers and datacenters - must agree on the primary datacenter. Setting it on the servers is all you need - for cluster-level enforcement, but for the APIs to forward properly from the clients, - it must be set on them too. In Consul 0.8 and later, this also enables agent-level - enforcement of ACLs. - -- `primary_gateways` Equivalent to the [`-primary-gateway` - command-line flag](#_primary_gateway). Takes a list of addresses to use as the - mesh gateways for the primary datacenter when authoritative replicated catalog - data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval) - until at least one primary mesh gateway is discovered. This was added in Consul - 1.8.0. - -- `primary_gateways_interval` Time to wait - between [`primary_gateways`](#primary_gateways) discovery attempts. Defaults to - 30s. This was added in Consul 1.8.0. - -- `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line - flag](#_protocol). - -- `raft_boltdb` ((#raft_boltdb)) This is a nested object that allows configuring - options for Raft's BoltDB based log store. - - - `NoFreelistSync` ((#NoFreelistSync)) Setting this to `true` will disable - syncing the BoltDB freelist to disk within the raft.db file. Not syncing - the freelist to disk will reduce disk IO required for write operations - at the expense of potentially increasing start up time due to needing - to scan the db to discover where the free space resides within the file. - -- `raft_protocol` ((#raft_protocol)) Equivalent to the [`-raft-protocol` - command-line flag](#_raft_protocol). - -- `raft_snapshot_threshold` ((#\_raft_snapshot_threshold)) This controls the - minimum number of raft commit entries between snapshots that are saved to - disk. This is a low-level parameter that should rarely need to be changed. - Very busy clusters experiencing excessive disk IO may increase this value to - reduce disk IO, and minimize the chances of all servers taking snapshots at - the same time. 
Increasing this trades off disk IO for disk space since the log - will grow much larger and the space in the raft.db file can't be reclaimed - till the next snapshot. Servers may take longer to recover from crashes or - failover if this is increased significantly as more logs will need to be - replayed. In Consul 1.1.0 and later this defaults to 16384, and in prior - versions it was set to 8192. - - Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the - server a `SIGHUP` to allow tuning snapshot activity without a rolling restart - in emergencies. - -- `raft_snapshot_interval` ((#\_raft_snapshot_interval)) This controls how often - servers check if they need to save a snapshot to disk. This is a low-level - parameter that should rarely need to be changed. Very busy clusters - experiencing excessive disk IO may increase this value to reduce disk IO, and - minimize the chances of all servers taking snapshots at the same time. - Increasing this trades off disk IO for disk space since the log will grow much - larger and the space in the raft.db file can't be reclaimed till the next - snapshot. Servers may take longer to recover from crashes or failover if this - is increased significantly as more logs will need to be replayed. In Consul - 1.1.0 and later this defaults to `30s`, and in prior versions it was set to - `5s`. - - Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the - server a `SIGHUP` to allow tuning snapshot activity without a rolling restart - in emergencies. - -- `raft_trailing_logs` - This controls how many log entries are left in the log - store on disk after a snapshot is made. This should only be adjusted when - followers cannot catch up to the leader due to a very large snapshot size - and high write throughput causing log truncation before an snapshot can be - fully installed on a follower. If you need to use this to recover a cluster, - consider reducing write throughput or the amount of data stored on Consul as - it is likely under a load it is not designed to handle. The default value is - 10000 which is suitable for all normal workloads. Added in Consul 1.5.3. - - Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the - server a `SIGHUP` to allow recovery without downtime when followers can't keep - up. - -- `reap` This controls Consul's automatic reaping of child processes, - which is useful if Consul is running as PID 1 in a Docker container. If this isn't - specified, then Consul will automatically reap child processes if it detects it - is running as PID 1. If this is set to true or false, then it controls reaping - regardless of Consul's PID (forces reaping on or off, respectively). This option - was removed in Consul 0.7.1. For later versions of Consul, you will need to reap - processes using a wrapper, please see the [Consul Docker image entry point script](https://github.com/hashicorp/docker-consul/blob/master/0.X/docker-entrypoint.sh) - for an example. If you are using Docker 1.13.0 or later, you can use the new `--init` - option of the `docker run` command and docker will enable an init process with - PID 1 that reaps child processes for the container. More info on [Docker docs](https://docs.docker.com/engine/reference/commandline/run/#options). - -- `reconnect_timeout` This controls how long it - takes for a failed node to be completely removed from the cluster. 
This defaults - to 72 hours and it is recommended that this is set to at least double the maximum - expected recoverable outage time for a node or network partition. WARNING: Setting - this time too low could cause Consul servers to be removed from quorum during an - extended node failure or partition, which could complicate recovery of the cluster. - The value is a time with a unit suffix, which can be "s", "m", "h" for seconds, - minutes, or hours. The value must be >= 8 hours. - -- `reconnect_timeout_wan` This is the WAN equivalent - of the [`reconnect_timeout`](#reconnect_timeout) parameter, which controls - how long it takes for a failed server to be completely removed from the WAN pool. - This also defaults to 72 hours, and must be >= 8 hours. - -- `recursors` This flag provides addresses of upstream DNS - servers that are used to recursively resolve queries if they are not inside the - service domain for Consul. For example, a node can use Consul directly as a DNS - server, and if the record is outside of the "consul." domain, the query will be - resolved upstream. As of Consul 1.0.1 recursors can be provided as IP addresses - or as go-sockaddr templates. IP addresses are resolved in order, and duplicates - are ignored. - -- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](#_rejoin). - -- `retry_join` - Equivalent to the [`-retry-join`](#retry-join) command-line flag. - -- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](#_retry_interval). - -- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. - -- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](#_retry_interval_wan). - -- `rpc` configuration for Consul servers. - - - `enable_streaming` ((#rpc_enable_streaming)) defaults to true. If set to false it will disable - the gRPC subscribe endpoint on a Consul Server. All - servers in all federated datacenters must have this enabled before any client can use - [`use_streaming_backend`](#use_streaming_backend). - -- `segment` - Equivalent to the [`-segment` command-line flag](#_segment). - - ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option. - -- `segments` - (Server agents only) This is a list of nested objects - that specifies user-defined network segments, not including the `` segment, which is - created automatically. Review the [Network Segments documentation](/docs/enterprise/network-segments) - for more details. - - - `name` ((#segment_name)) - The name of the segment. Must be a string - between 1 and 64 characters in length. - - `bind` ((#segment_bind)) - The bind address to use for the segment's - gossip layer. Defaults to the [`-bind`](#_bind) value if not provided. - - `port` ((#segment_port)) - The port to use for the segment's gossip - layer (required). - - `advertise` ((#segment_advertise)) - The advertise address to use for - the segment's gossip layer. Defaults to the [`-advertise`](#_advertise) value - if not provided. - - `rpc_listener` ((#segment_rpc_listener)) - If true, a separate RPC - listener will be started on this segment's [`-bind`](#_bind) address on the rpc - port. Only valid if the segment's bind address differs from the [`-bind`](#_bind) - address. Defaults to false. - -- `server` Equivalent to the [`-server` command-line flag](#_server). 
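As a quick illustration of how several of the options described above fit together in a configuration file, the following sketch combines `retry_join`, `recursors`, and the reconnect timeouts. The addresses and durations here are placeholders chosen for the example, not recommended values.

```hcl
# Illustrative values only.
retry_join     = ["10.0.1.10", "10.0.1.11", "10.0.1.12"]
retry_interval = "30s"

# Upstream DNS servers used for names outside the "consul." domain.
recursors = ["8.8.8.8", "1.1.1.1"]

# How long failed nodes remain before being completely removed
# (must be at least 8 hours).
reconnect_timeout     = "72h"
reconnect_timeout_wan = "72h"

rejoin_after_leave = true
```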
- -- `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.** - -- `read_replica` - Equivalent to the [`-read-replica` command-line flag](#_read_replica). - -- `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTLs - shorter than the specified limit. It is recommended to keep this limit at or above - the default to encourage clients to send infrequent heartbeats. Defaults to 10s. - -- `skip_leave_on_interrupt` This is similar - to [`leave_on_terminate`](#leave_on_terminate) but only affects interrupt handling. - When Consul receives an interrupt signal (such as hitting Control-C in a terminal), - Consul will gracefully leave the cluster. Setting this to `true` disables that - behavior. The default behavior for this feature varies based on whether or not - the agent is running as a client or a server (prior to Consul 0.7 the default value - was unconditionally set to `false`). On agents in client-mode, this defaults to - `false` and for agents in server-mode, this defaults to `true` (i.e. Ctrl-C on - a server will keep the server in the cluster and therefore quorum, and Ctrl-C on - a client will gracefully leave). - -- `start_join` An array of strings specifying addresses - of nodes to [`-join`](#_join) upon startup. Note that using - `retry_join` could be more appropriate to help mitigate - node startup race conditions when automating a Consul cluster deployment. - -- `start_join_wan` An array of strings specifying addresses - of WAN nodes to [`-join-wan`](#_join_wan) upon startup. - -- `telemetry` This is a nested object that configures where - Consul sends its runtime telemetry, and contains the following keys: - - - `circonus_api_token` ((#telemetry-circonus_api_token)) A valid API - Token used to create/manage check. If provided, metric management is - enabled. - - - `circonus_api_app` ((#telemetry-circonus_api_app)) A valid app name - associated with the API token. By default, this is set to "consul". - - - `circonus_api_url` ((#telemetry-circonus_api_url)) - The base URL to use for contacting the Circonus API. By default, this is set - to "https://api.circonus.com/v2". - - - `circonus_submission_interval` ((#telemetry-circonus_submission_interval)) The interval at which metrics are submitted to Circonus. By default, this is set to "10s" (ten seconds). - - - `circonus_submission_url` ((#telemetry-circonus_submission_url)) - The `check.config.submission_url` field, of a Check API object, from a previously - created HTTPTrap check. - - - `circonus_check_id` ((#telemetry-circonus_check_id)) - The Check ID (not **check bundle**) from a previously created HTTPTrap check. - The numeric portion of the `check._cid` field in the Check API object. - - - `circonus_check_force_metric_activation` ((#telemetry-circonus_check_force_metric_activation)) Force activation of metrics which already exist and are not currently active. - If check management is enabled, the default behavior is to add new metrics as - they are encountered. If the metric already exists in the check, it will **not** - be activated. This setting overrides that behavior. By default, this is set to - false. - - - `circonus_check_instance_id` ((#telemetry-circonus_check_instance_id)) Uniquely identifies the metrics coming from this **instance**. It can be used to - maintain metric continuity with transient or ephemeral instances as they move - around within an infrastructure. 
By default, this is set to hostname:application - name (e.g. "host123:consul"). - - - `circonus_check_search_tag` ((#telemetry-circonus_check_search_tag)) A special tag which, when coupled with the instance id, helps to narrow down - the search results when neither a Submission URL or Check ID is provided. By - default, this is set to service:application name (e.g. "service:consul"). - - - `circonus_check_display_name` ((#telemetry-circonus_check_display_name)) Specifies a name to give a check when it is created. This name is displayed in - the Circonus UI Checks list. Available in Consul 0.7.2 and later. - - - `circonus_check_tags` ((#telemetry-circonus_check_tags)) - Comma separated list of additional tags to add to a check when it is created. - Available in Consul 0.7.2 and later. - - - `circonus_broker_id` ((#telemetry-circonus_broker_id)) - The ID of a specific Circonus Broker to use when creating a new check. The numeric - portion of `broker._cid` field in a Broker API object. If metric management is - enabled and neither a Submission URL nor Check ID is provided, an attempt will - be made to search for an existing check using Instance ID and Search Tag. If - one is not found, a new HTTPTrap check will be created. By default, this is not - used and a random Enterprise Broker is selected, or the default Circonus Public - Broker. - - - `circonus_broker_select_tag` ((#telemetry-circonus_broker_select_tag)) A special tag which will be used to select a Circonus Broker when a Broker ID - is not provided. The best use of this is to as a hint for which broker should - be used based on **where** this particular instance is running (e.g. a specific - geo location or datacenter, dc:sfo). By default, this is left blank and not used. - - - `disable_compat_1.9` ((#telemetry-disable_compat_1.9)) - This allows users to disable metrics deprecated in 1.9 so they are no longer emitted, improving performance and reducing storage in large deployments. As of 1.12 this defaults to `true` and will be removed, along with 1.9 style http metrics in 1.13. - - - `disable_hostname` ((#telemetry-disable_hostname)) - This controls whether or not to prepend runtime telemetry with the machine's - hostname, defaults to false. - - - `dogstatsd_addr` ((#telemetry-dogstatsd_addr)) This provides the address - of a DogStatsD instance in the format `host:port`. DogStatsD is a protocol-compatible - flavor of statsd, with the added ability to decorate metrics with tags and event - information. If provided, Consul will send various telemetry information to that - instance for aggregation. This can be used to capture runtime information. - - - `dogstatsd_tags` ((#telemetry-dogstatsd_tags)) This provides a list - of global tags that will be added to all telemetry packets sent to DogStatsD. - It is a list of strings, where each string looks like "my_tag_name:my_tag_value". - - - `filter_default` ((#telemetry-filter_default)) - This controls whether to allow metrics that have not been specified by the filter. - Defaults to `true`, which will allow all metrics when no filters are provided. - When set to `false` with no filters, no metrics will be sent. - - - `metrics_prefix` ((#telemetry-metrics_prefix)) - The prefix used while writing all telemetry data. By default, this is set to - "consul". This was added in Consul 1.0. For previous versions of Consul, use - the config option `statsite_prefix` in this same structure. This was renamed - in Consul 1.0 since this prefix applied to all telemetry providers, not just - statsite. 
- - - `prefix_filter` ((#telemetry-prefix_filter)) - This is a list of filter rules to apply for allowing/blocking metrics by - prefix in the following format: - - - - ```hcl - telemetry { - prefix_filter = ["+consul.raft.apply", "-consul.http", "+consul.http.GET"] - } - ``` - - ```json - { - "telemetry": { - "prefix_filter": [ - "+consul.raft.apply", - "-consul.http", - "+consul.http.GET" - ] - } - } - ``` - - - - A leading "**+**" will enable any metrics with the given prefix, and a leading "**-**" will block them. If there is overlap between two rules, the more specific rule will take precedence. Blocking will take priority if the same prefix is listed multiple times. - - - `prometheus_retention_time` ((#telemetry-prometheus_retention_time)) If the value is greater than `0s` (the default), this enables [Prometheus](https://prometheus.io/) - export of metrics. The duration can be expressed using the duration semantics - and will aggregates all counters for the duration specified (it might have an - impact on Consul's memory usage). A good value for this parameter is at least - 2 times the interval of scrape of Prometheus, but you might also put a very high - retention time such as a few days (for instance 744h to enable retention to 31 - days). Fetching the metrics using prometheus can then be performed using the - [`/v1/agent/metrics?format=prometheus`](/api-docs/agent#view-metrics) endpoint. - The format is compatible natively with prometheus. When running in this mode, - it is recommended to also enable the option [`disable_hostname`](#telemetry-disable_hostname) - to avoid having prefixed metrics with hostname. Consul does not use the default - Prometheus path, so Prometheus must be configured as follows. Note that using - `?format=prometheus` in the path won't work as `?` will be escaped, so it must be - specified as a parameter. - - - - ```yaml - metrics_path: '/v1/agent/metrics' - params: - format: ['prometheus'] - ``` - - - - - `statsd_address` ((#telemetry-statsd_address)) This provides the address - of a statsd instance in the format `host:port`. If provided, Consul will send - various telemetry information to that instance for aggregation. This can be used - to capture runtime information. This sends UDP packets only and can be used with - statsd or statsite. - - - `statsite_address` ((#telemetry-statsite_address)) This provides the - address of a statsite instance in the format `host:port`. If provided, Consul - will stream various telemetry information to that instance for aggregation. This - can be used to capture runtime information. This streams via TCP and can only - be used with statsite. - -- `syslog_facility` When [`enable_syslog`](#enable_syslog) - is provided, this controls to which facility messages are sent. By default, `LOCAL0` - will be used. - -- `translate_wan_addrs` If set to true, Consul - will prefer a node's configured [WAN address](#_advertise-wan) - when servicing DNS and HTTP requests for a node in a remote datacenter. This allows - the node to be reached within its own datacenter using its local address, and reached - from other datacenters using its WAN address, which is useful in hybrid setups - with mixed networks. This is disabled by default. - - Starting in Consul 0.7 and later, node addresses in responses to HTTP requests will also prefer a - node's configured [WAN address](#_advertise-wan) when querying for a node in a remote - datacenter. 
An [`X-Consul-Translate-Addresses`](/api#translated-addresses) header - will be present on all responses when translation is enabled to help clients know that the addresses - may be translated. The `TaggedAddresses` field in responses also have a `lan` address for clients that - need knowledge of that address, regardless of translation. - - The following endpoints translate addresses: - - - [`/v1/catalog/nodes`](/api-docs/catalog#list-nodes) - - [`/v1/catalog/node/`](/api-docs/catalog#retrieve-map-of-services-for-a-node) - - [`/v1/catalog/service/`](/api-docs/catalog#list-nodes-for-service) - - [`/v1/health/service/`](/api-docs/health#list-nodes-for-service) - - [`/v1/query//execute`](/api-docs/query#execute-prepared-query) - -- `ui` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.enabled`](#ui_config_enabled) field instead.** - Equivalent to the [`-ui`](#_ui) command-line flag. - -- `ui_config` - This object allows a number of sub-keys to be set which controls - the display or features available in the UI. Configuring the UI with this - stanza was added in Consul 1.9.0. - - The following sub-keys are available: - - - `enabled` ((#ui_config_enabled)) - This enables the service of the web UI - from this agent. Boolean value, defaults to false. In `-dev` mode this - defaults to true. Replaces `ui` from before 1.9.0. Equivalent to the - [`-ui`](#_ui) command-line flag. - - - `dir` ((#ui_config_dir)) - This specifies that the web UI should be served - from an external dir rather than the build in one. This allows for - customization or development. Replaces `ui_dir` from before 1.9.0. - Equivalent to the [`-ui-dir`](#_ui_dir) command-line flag. - - - `content_path` ((#ui_config_content_path)) - This specifies the HTTP path - that the web UI should be served from. Defaults to `/ui/`. Equivalent to the - [`-ui-content-path`](#_ui_content_path) flag. - - - `metrics_provider` ((#ui_config_metrics_provider)) - Specifies a named - metrics provider implementation the UI should use to fetch service metrics. - By default metrics are disabled. Consul 1.9.0 includes a built-in provider - named `prometheus` that can be enabled explicitly here. It also requires the - `metrics_proxy` to be configured below and direct queries to a Prometheus - instance that has Envoy metrics for all services in the datacenter. - - - `metrics_provider_files` ((#ui_config_metrics_provider_files)) - An optional array - of absolute paths to javascript files on the Agent's disk which will be - served as part of the UI. These files should contain metrics provider - implementations and registration enabling UI metric queries to be customized - or implemented for an alternative time-series backend. - - ~> **Security Note:** These javascript files are included in the UI with no - further validation or sand-boxing. By configuring them here the operator is - fully trusting anyone able to write to them as well as the original authors - not to include malicious code in the UI being served. - - - `metrics_provider_options_json` ((#ui_config_metrics_provider_options_json)) - - This is an optional raw JSON object as a string which is passed to the - provider implementation's `init` method at startup to allow arbitrary - configuration to be passed through. - - - `metrics_proxy` ((#ui_config_metrics_proxy)) - This object configures an - internal agent API endpoint that will proxy GET requests to a metrics - backend to allow querying metrics data in the UI. 
This simplifies deployment - where the metrics backend is not exposed externally to UI users' browsers. - It may also be used to augment requests with API credentials to allow - serving graphs to UI users without them needing individual access tokens for - the metrics backend. - - ~> **Security Note:** Exposing your metrics backend via Consul in this way - should be carefully considered in production. As Consul doesn't understand - the requests, it can't limit access to only specific resources. For example - **this might make it possible for a malicious user on the network to query - for arbitrary metrics about any server or workload in your infrastructure, - or overload the metrics infrastructure with queries**. See [Metrics Proxy - Security](/docs/connect/observability/ui-visualization#metrics-proxy-security) - for more details. - - The following sub-keys are available: - - - `base_url` ((#ui_config_metrics_provider_base_url)) - This is required to - enable the proxy. It should be set to the base URL that the Consul agent - should proxy requests for metrics to. For example a value of - `http://prometheus-server` would target a Prometheus instance with local - DNS name "prometheus-server" on port 80. This may include a path prefix - which will then not be necessary in provider requests to the backend and - the proxy will prevent any access to paths without that prefix on the - backend. - - - `path_allowlist` ((#ui_config_metrics_provider_path_allowlist)) - This - specifies the paths that may be proxied to when appended to the - `base_url`. It defaults to `["/api/v1/query_range", "/api/v1/query"]` - which are the endpoints required for the built-in Prometheus provider. If - a [custom - provider](/docs/connect/observability/ui-visualization#custom-metrics-providers) - is used that requires the metrics proxy, the correct allowlist must be - specified to enable proxying to necessary endpoints. See [Path - Allowlist](/docs/connect/observability/ui-visualization#path-allowlist) - for more information. - - - `add_headers` ((#ui_config_metrics_proxy_add_headers)) - This is an - optional list of headers to add to requests that are proxied to the - metrics backend. It may be used to inject Authorization tokens within the - agent without exposing those to UI users. - - Each item in the list is an object with the following keys: - - - `name` ((#ui_config_metrics_proxy_add_headers_name)) - Specifies the - HTTP header name to inject into proxied requests. - - - `value` ((#ui_config_metrics_proxy_add_headers_value)) - Specifies the - value to inject into proxied requests. - - - `dashboard_url_templates` ((#ui_config_dashboard_url_templates)) - This map - specifies URL templates that may be used to render links to external - dashboards in various contexts in the UI. It is a map with the name of the - template as a key. The value is a string URL with optional placeholders. - - Each template may contain placeholders which will be substituted for the - correct values in content when rendered in the UI. The placeholders - available are listed for each template. - - For more information and examples see [UI - Visualization](/docs/connect/observability/ui-visualization#configuring-dashboard-urls). - - The following named templates are defined: - - - `service` ((#ui_config_dashboard_url_templates_service)) - This is the URL - to use when linking to the dashboard for a specific service. It is shown - as part of the [Topology - Visualization](/docs/connect/observability/ui-visualization).
- - The placeholders available are: - - - `{{Service.Name}}` - Replaced with the current service's name. - - `{{Service.Namespace}}` - Replaced with the current service's namespace or empty if namespaces are not enabled. - - `{{Service.Partition}}` - Replaced with the current service's admin - partition or empty if admin partitions are not enabled. - - `{{Datacenter}}` - Replaced with the current service's datacenter. - -- `ui_dir` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.dir`](#ui_config_dir) field instead.** - Equivalent to the [`-ui-dir`](#_ui_dir) command-line - flag. This configuration key is not required as of Consul version 0.7.0 and later. - Specifying this configuration key will enable the web UI. There is no need to specify - both ui-dir and ui. Specifying both will result in an error. - -- `unix_sockets` - This allows tuning the ownership and - permissions of the Unix domain socket files created by Consul. Domain sockets are - only used if the HTTP address is configured with the `unix://` prefix. - - It is important to note that this option may have different effects on - different operating systems. Linux generally observes socket file permissions - while many BSD variants ignore permissions on the socket file itself. It is - important to test this feature on your specific distribution. This feature is - currently not functional on Windows hosts. - - The following options are valid within this construct and apply globally to all - sockets created by Consul: - - - `user` - The name or ID of the user who will own the socket file. - - `group` - The group ID ownership of the socket file. This option - currently only supports numeric IDs. - - `mode` - The permission bits to set on the file. - -- `use_streaming_backend` defaults to true. When enabled Consul client agents will use - streaming rpc, instead of the traditional blocking queries, for endpoints which support - streaming. All servers must have [`rpc.enable_streaming`](#rpc_enable_streaming) - enabled before any client can enable `use_streaming_backend`. - -- `watches` - Watches is a list of watch specifications which - allow an external process to be automatically invoked when a particular data view - is updated. See the [watch documentation](/docs/dynamic-app-config/watches) for more detail. - Watches can be modified when the configuration is reloaded. - -## TLS Configuration Reference - -This section documents all of the configuration settings that apply to Agent TLS. Agent -TLS is used by the HTTP API, internal RPC, and gRPC/xDS interfaces. Some of these settings -may also be applied automatically by [auto_config](#auto_config) or [auto_encrypt](#auto_encrypt). - -~> **Security Note:** The Certificate Authority (CA) configured on the internal RPC interface -(either explicitly by `tls.internal_rpc` or implicitly by `tls.defaults`) should be a private -CA, not a public one. We recommend using a dedicated CA which should not be used with any other -systems. Any certificate signed by the CA will be allowed to communicate with the cluster and a -specially crafted certificate signed by the CA can be used to gain full access to Consul. - -- `tls` Added in Consul 1.12, for previous versions see - [Deprecated Options](#tls_deprecated_options). - - - `defaults` ((#tls_defaults)) Provides default settings that will be applied - to every interface unless explicitly overridden by `tls.grpc`, `tls.https`, - or `tls.internal_rpc`. 
- - - `ca_file` ((#tls_defaults_ca_file)) This provides a file path to a - PEM-encoded certificate authority. The certificate authority is used to - check the authenticity of client and server connections with the - appropriate [`verify_incoming`](#tls_defaults_verify_incoming) or - [`verify_outgoing`](#tls_defaults_verify_outgoing) flags. - - - `ca_path` ((#tls_defaults_ca_path)) This provides a path to a directory - of PEM-encoded certificate authority files. These certificate authorities - are used to check the authenticity of client and server connections with - the appropriate [`verify_incoming`](#tls_defaults_verify_incoming) or - [`verify_outgoing`](#tls_defaults_verify_outgoing) flags. - - - `cert_file` ((#tls_defaults_cert_file)) This provides a file path to a - PEM-encoded certificate. The certificate is provided to clients or servers - to verify the agent's authenticity. It must be provided along with - [`key_file`](#tls_defaults_key_file). - - - `key_file` ((#tls_defaults_key_file)) This provides a the file path to a - PEM-encoded private key. The key is used with the certificate to verify - the agent's authenticity. This must be provided along with - [`cert_file`](#tls_defaults_cert_file). - - - `tls_min_version` ((#tls_defaults_tls_min_version)) This specifies the - minimum supported version of TLS. The following values are accepted: - * `TLSv1_0` - * `TLSv1_1` - * `TLSv1_2` (default) - * `TLSv1_3` - - **WARNING: TLS 1.1 and lower are generally considered less secure and - should not be used if possible.** - - The following values are also valid, but only when using the - [deprecated top-level `tls_min_version` config](#tls_deprecated_options), - and will be removed in a future release: - - * `tls10` - * `tls11` - * `tls12` - * `tls13` - - A warning message will appear if a deprecated value is specified. - - - `tls_cipher_suites` ((#tls_defaults_tls_cipher_suites)) This specifies - the list of supported ciphersuites as a comma-separated-list. Applicable - to TLS 1.2 and below only. The list of all supported ciphersuites is - available through [this search](https://github.com/hashicorp/consul/search?q=goTLSCipherSuites+%3D+map). - - ~> **Note:** The ordering of cipher suites will not be guaranteed from - Consul 1.11 onwards. See this [post](https://go.dev/blog/tls-cipher-suites) - for details. - - - `verify_incoming` - ((#tls_defaults_verify_incoming)) If set to true, - Consul requires that all incoming connections make use of TLS and that - the client provides a certificate signed by a Certificate Authority from - the [`ca_file`](#tls_defaults_ca_file) or [`ca_path`](#tls_defaults_ca_path). - By default, this is false, and Consul will not enforce the use of TLS or - verify a client's authenticity. - - - `verify_outgoing` - ((#tls_defaults_verify_outgoing)) If set to true, - Consul requires that all outgoing connections from this agent make use - of TLS and that the server provides a certificate that is signed by a - Certificate Authority from the [`ca_file`](#tls_defaults_ca_file) or - [`ca_path`](#tls_defaults_ca_path). By default, this is false, and Consul - will not make use of TLS for outgoing connections. This applies to clients - and servers as both will make outgoing connections. This setting *does not* - apply to the gRPC interface as Consul makes no outgoing connections on this - interface. - - - `grpc` ((#tls_grpc)) Provides settings for the gRPC/xDS interface. To enable - the gRPC interface you must define a port via [`ports.grpc`](#grpc_port). 
- To enable TLS on the gRPC interface you also must define an HTTPS port via - [`ports.https`](#https_port). - - - `ca_file` ((#tls_grpc_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file). - - - `ca_path` ((#tls_grpc_ca_path)) Overrides [`tls.defaults.ca_path`](#tls_defaults_ca_path). - - - `cert_file` ((#tls_grpc_cert_file)) Overrides [`tls.defaults.cert_file`](#tls_defaults_cert_file). - - - `key_file` ((#tls_grpc_key_file)) Overrides [`tls.defaults.key_file`](#tls_defaults_key_file). - - - `tls_min_version` ((#tls_grpc_tls_min_version)) Overrides [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version). - - - `tls_cipher_suites` ((#tls_grpc_tls_cipher_suites)) Overrides [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites). - - - `verify_incoming` - ((#tls_grpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming). - - - `https` ((#tls_https)) Provides settings for the HTTPS interface. To enable - the HTTPS interface you must define a port via [`ports.https`](#https_port). - - - `ca_file` ((#tls_https_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file). - - - `ca_path` ((#tls_https_ca_path)) Overrides [`tls.defaults.ca_path`](#tls_defaults_ca_path). - - - `cert_file` ((#tls_https_cert_file)) Overrides [`tls.defaults.cert_file`](#tls_defaults_cert_file). - - - `key_file` ((#tls_https_key_file)) Overrides [`tls.defaults.key_file`](#tls_defaults_key_file). - - - `tls_min_version` ((#tls_https_tls_min_version)) Overrides [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version). - - - `tls_cipher_suites` ((#tls_https_tls_cipher_suites)) Overrides [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites). - - - `verify_incoming` - ((#tls_https_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming). - - - `verify_outgoing` - ((#tls_https_verify_outgoing)) Overrides [`tls.defaults.verify_outgoing`](#tls_defaults_verify_outgoing). - - - `internal_rpc` ((#tls_internal_rpc)) Provides settings for the internal - "server" RPC interface configured by [`ports.server`](#server_rpc_port). - - - `ca_file` ((#tls_internal_rpc_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file). - - - `ca_path` ((#tls_internal_rpc_ca_path)) Overrides [`tls.defaults.ca_path`](#tls_defaults_ca_path). - - - `cert_file` ((#tls_internal_rpc_cert_file)) Overrides [`tls.defaults.cert_file`](#tls_defaults_cert_file). - - - `key_file` ((#tls_internal_rpc_key_file)) Overrides [`tls.defaults.key_file`](#tls_defaults_key_file). - - - `tls_min_version` ((#tls_internal_rpc_tls_min_version)) Overrides [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version). - - - `tls_cipher_suites` ((#tls_internal_rpc_tls_cipher_suites)) Overrides [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites). - - - `verify_incoming` - ((#tls_internal_rpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming). - - ~> **Security Note:** `verify_incoming` *must* be set to true to prevent - anyone with access to the internal RPC port from gaining full access to - the Consul cluster. - - - `verify_outgoing` ((#tls_internal_rpc_verify_outgoing)) Overrides [`tls.defaults.verify_outgoing`](#tls_defaults_verify_outgoing). - - ~> **Security Note:** Servers that specify `verify_outgoing = true` will - always talk to other servers over TLS, but they still _accept_ non-TLS - connections to allow for a transition of all clients to TLS. 
Currently the - only way to enforce that no client can communicate with a server unencrypted - is to also enable `verify_incoming` which requires client certificates too. - - - `verify_server_hostname` ((#tls_internal_rpc_verify_server_hostname)) When - set to true, Consul verifies the TLS certificate presented by the servers - match the hostname `server..`. By default this is false, - and Consul does not verify the hostname of the certificate, only that it - is signed by a trusted CA. This setting *must* be enabled to prevent a - compromised client from gaining full read and write access to all cluster - data *including all ACL tokens and Connect CA root keys*. - -- `server_name` When provided, this overrides the [`node_name`](#_node) - for the TLS certificate. It can be used to ensure that the certificate name matches - the hostname we declare. - -### Deprecated Options ((#tls_deprecated_options)) - -The following options were deprecated in Consul 1.12, please use the -[`tls`](#tls) stanza instead. - -- `ca_file` See: [`tls.defaults.ca_file`](#tls_defaults_ca_file). - -- `ca_path` See: [`tls.defaults.ca_path`](#tls_defaults_ca_path). - -- `cert_file` See: [`tls.defaults.cert_file`](#tls_defaults_cert_file). - -- `key_file` See: [`tls.defaults.key_file`](#tls_defaults_key_file). - -- `tls_min_version` Added in Consul 0.7.4. - See: [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version). - -- `tls_cipher_suites` Added in Consul 0.8.2. - See: [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites). - -- `tls_prefer_server_cipher_suites` Added in Consul 0.8.2. This setting will - be ignored (see [this post](https://go.dev/blog/tls-cipher-suites) for details). - -- `verify_incoming` See: [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming). - -- `verify_incoming_rpc` See: [`tls.internal_rpc.verify_incoming`](#tls_internal_rpc_verify_incoming). - -- `verify_incoming_https` See: [`tls.https.verify_incoming`](#tls_https_verify_incoming). - -- `verify_outgoing` See: [`tls.defaults.verify_outgoing`](#tls_defaults_verify_outgoing). - -- `verify_server_hostname` See: [`tls.internal_rpc.verify_server_hostname`](#tls_internal_rpc_verify_server_hostname). - -### Example Configuration File, with TLS - -~> **Security Note:** all three verify options should be set as `true` to enable -secure mTLS communication, enabling both encryption and authentication. Failing -to set [`verify_incoming`](#tls_defaults_verify_incoming) or -[`verify_outgoing`](#tls_defaults_verify_outgoing) either in the -interface-specific stanza (e.g. `tls.internal_rpc`, `tls.https`) or in -`tls.defaults` will result in TLS not being enabled at all, even when specifying -a [`ca_file`](#tls_defaults_ca_file), [`cert_file`](#tls_defaults_cert_file), -and [`key_file`](#tls_defaults_key_file). - -See, especially, the use of the `ports` setting highlighted below. 
- - - - - -```hcl -datacenter = "east-aws" -data_dir = "/opt/consul" -log_level = "INFO" -node_name = "foobar" -server = true - -addresses = { - https = "0.0.0.0" -} -ports { - https = 8501 -} - -tls { - defaults { - key_file = "/etc/pki/tls/private/my.key" - cert_file = "/etc/pki/tls/certs/my.crt" - ca_file = "/etc/pki/tls/certs/ca-bundle.crt" - verify_incoming = true - verify_outgoing = true - } - - internal_rpc { - verify_server_hostname = true - } -} -``` - - - - - -```json -{ - "datacenter": "east-aws", - "data_dir": "/opt/consul", - "log_level": "INFO", - "node_name": "foobar", - "server": true, - "addresses": { - "https": "0.0.0.0" - }, - "ports": { - "https": 8501 - }, - "tls": { - "defaults": { - "key_file": "/etc/pki/tls/private/my.key", - "cert_file": "/etc/pki/tls/certs/my.crt", - "ca_file": "/etc/pki/tls/certs/ca-bundle.crt", - "verify_incoming": true, - "verify_outgoing": true - }, - "internal_rpc": { - "verify_server_hostname": true - } - } -} -``` - - - - - -Consul will not enable TLS for the HTTP or gRPC API unless the `https` port has -been assigned a port number `> 0`. We recommend using `8501` for `https` as this -default will automatically work with some tooling. - ## Ports Used Consul requires up to 6 different ports to work properly, some on From 02dc86cad1fce586f5718f84f0c4e4fa227ea4f8 Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 11:30:56 -0800 Subject: [PATCH 120/785] docs: arrange agent configuration file parameters into logical groups --- .../docs/agent/config/agent-config-cli.mdx | 24 +- .../docs/agent/config/agent-config-files.mdx | 1278 +++++++++-------- 2 files changed, 666 insertions(+), 636 deletions(-) diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/agent-config-cli.mdx index ff19b147f..8daeebcc1 100644 --- a/website/content/docs/agent/config/agent-config-cli.mdx +++ b/website/content/docs/agent/config/agent-config-cli.mdx @@ -56,17 +56,6 @@ information. intended for production use as it does not write any data to disk. The gRPC port is also defaulted to `8502` in this mode. -- `-disable-host-node-id` ((#\_disable_host_node_id)) - Setting this to - true will prevent Consul from using information from the host to generate a deterministic - node ID, and will instead generate a random node ID which will be persisted in - the data directory. This is useful when running multiple Consul agents on the same - host for testing. This defaults to false in Consul prior to version 0.8.5 and in - 0.8.5 and later defaults to true, so you must opt-in for host-based IDs. Host-based - IDs are generated using [gopsutil](https://github.com/shirou/gopsutil/tree/master/v3/host), which - is shared with HashiCorp's [Nomad](https://www.nomadproject.io/), so if you opt-in - to host-based IDs then Consul and Nomad will use information on the host to automatically - assign the same ID in both systems. - - `-disable-keyring-file` ((#\_disable_keyring_file)) - If set, the keyring will not be persisted to a file. Any installed keys will be lost on shutdown, and only the given `-encrypt` key will be available on startup. This defaults to false. @@ -166,7 +155,7 @@ information. accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] template that is resolved at runtime. 
-## Bind Options +## Address Bind Options - `-bind` ((#\_bind)) - The address that should be bound to for internal cluster communications. This is an IP address that should be reachable by all other @@ -452,6 +441,17 @@ information. - Metadata values for keys beginning with `rfc1035-` are encoded verbatim in DNS TXT requests, otherwise the metadata kv-pair is encoded according [RFC1464](https://www.ietf.org/rfc/rfc1464.txt). +- `-disable-host-node-id` ((#\_disable_host_node_id)) - Setting this to + true will prevent Consul from using information from the host to generate a deterministic + node ID, and will instead generate a random node ID which will be persisted in + the data directory. This is useful when running multiple Consul agents on the same + host for testing. This defaults to false in Consul prior to version 0.8.5 and in + 0.8.5 and later defaults to true, so you must opt-in for host-based IDs. Host-based + IDs are generated using [gopsutil](https://github.com/shirou/gopsutil/tree/master/v3/host), which + is shared with HashiCorp's [Nomad](https://www.nomadproject.io/), so if you opt-in + to host-based IDs then Consul and Nomad will use information on the host to automatically + assign the same ID in both systems. + ## Serf Options - `-serf-lan-allowed-cidrs` ((#\_serf_lan_allowed_cidrs)) - The Serf LAN allowed CIDRs allow to accept incoming diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx index a454adb0e..f22a1eb69 100644 --- a/website/content/docs/agent/config/agent-config-files.mdx +++ b/website/content/docs/agent/config/agent-config-files.mdx @@ -43,7 +43,7 @@ definitions support being updated during a reload. } ``` -#### Configuration Key Reference +# Configuration Key Reference -> **Note:** All the TTL values described below are parsed by Go's `time` package, and have the following [formatting specification](https://golang.org/pkg/time/#ParseDuration): "A @@ -51,221 +51,7 @@ duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as '300ms', '-1.5h' or '2h45m'. Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." -- `acl` ((#acl)) - This object allows a number of sub-keys to be set which - controls the ACL system. Configuring the ACL system within the ACL stanza was added - in Consul 1.4.0 - - The following sub-keys are available: - - - `enabled` ((#acl_enabled)) - Enables ACLs. - - - `policy_ttl` ((#acl_policy_ttl)) - Used to control Time-To-Live caching - of ACL policies. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL policy may be stale up to the TTL value. - - - `role_ttl` ((#acl_role_ttl)) - Used to control Time-To-Live caching - of ACL roles. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL role may be stale up to the TTL value. - - - `token_ttl` ((#acl_token_ttl)) - Used to control Time-To-Live caching - of ACL tokens. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. 
However, because the caches are not actively invalidated, - ACL token may be stale up to the TTL value. - - - `down_policy` ((#acl_down_policy)) - Either "allow", "deny", "extend-cache" - or "async-cache"; "extend-cache" is the default. In the case that a policy or - token cannot be read from the [`primary_datacenter`](#primary_datacenter) or - leader node, the down policy is applied. In "allow" mode, all actions are permitted, - "deny" restricts all operations, and "extend-cache" allows any cached objects - to be used, ignoring the expiry time of the cached entry. If the request uses an - ACL that is not in the cache, "extend-cache" falls back to the behaviour of - `default_policy`. - The value "async-cache" acts the same way as "extend-cache" - but performs updates asynchronously when ACL is present but its TTL is expired, - thus, if latency is bad between the primary and secondary datacenters, latency - of operations is not impacted. - - - `default_policy` ((#acl_default_policy)) - Either "allow" or "deny"; - defaults to "allow" but this will be changed in a future major release. The default - policy controls the behavior of a token when there is no matching rule. In "allow" - mode, ACLs are a denylist: any operation not specifically prohibited is allowed. - In "deny" mode, ACLs are an allowlist: any operation not specifically - allowed is blocked. **Note**: this will not take effect until you've enabled ACLs. - - - `enable_key_list_policy` ((#acl_enable_key_list_policy)) - Boolean value, defaults to false. - When true, the `list` permission will be required on the prefix being recursively read from the KV store. - Regardless of being enabled, the full set of KV entries under the prefix will be filtered - to remove any entries that the request's ACL token does not grant at least read - permissions. This option is only available in Consul 1.0 and newer. - - - `enable_token_replication` ((#acl_enable_token_replication)) - By default - secondary Consul datacenters will perform replication of only ACL policies and - roles. Setting this configuration will will enable ACL token replication and - allow for the creation of both [local tokens](/api/acl/tokens#local) and - [auth methods](/docs/acl/auth-methods) in connected secondary datacenters. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, - global tokens already present in the secondary datacenter will be lost. For - production environments, consider configuring ACL replication in your initial - datacenter bootstrapping process. - - - `enable_token_persistence` ((#acl_enable_token_persistence)) - Either - `true` or `false`. When `true` tokens set using the API will be persisted to - disk and reloaded when an agent restarts. - - - `tokens` ((#acl_tokens)) - This object holds all of the configured - ACL tokens for the agents usage. - - - `initial_management` ((#acl_tokens_initial_management)) - This is available in - Consul 1.11 and later. In prior versions, use [`acl.tokens.master`](#acl_tokens_master). - - Only used for servers in the [`primary_datacenter`](#primary_datacenter). - This token will be created with management-level permissions if it does not exist. - It allows operators to bootstrap the ACL system with a token Secret ID that is - well-known. - - The `initial_management` token is only installed when a server acquires cluster - leadership. If you would like to install or change it, set the new value for - `initial_management` in the configuration for all servers. 
Once this is done, - restart the current leader to force a leader election. If the `initial_management` - token is not supplied, then the servers do not create an initial management token. - When you provide a value, it should be a UUID. To maintain backwards compatibility - and an upgrade path this restriction is not currently enforced but will be in a - future major Consul release. - - - `master` ((#acl_tokens_master)) **Renamed in Consul 1.11 to - [`acl.tokens.initial_management`](#acl_tokens_initial_management).** - - - `default` ((#acl_tokens_default)) - When provided, the agent will - use this token when making requests to the Consul servers. Clients can override - this token on a per-request basis by providing the "?token" query parameter. - When not provided, the empty token, which maps to the 'anonymous' ACL token, - is used. - - - `agent` ((#acl_tokens_agent)) - Used for clients and servers to perform - internal operations. If this isn't specified, then the - [`default`](#acl_tokens_default) will be used. - - This token must at least have write access to the node name it will - register as in order to set any of the node-level information in the - catalog such as metadata, or the node's tagged addresses. - - - `agent_recovery` ((#acl_tokens_agent_recovery)) - This is available in Consul 1.11 - and later. In prior versions, use [`acl.tokens.agent_master`](#acl_tokens_agent_master). - - Used to access [agent endpoints](/api/agent) that require agent read or write privileges, - or node read privileges, even if Consul servers aren't present to validate any tokens. - This should only be used by operators during outages, regular ACL tokens should normally - be used by applications. - - - `agent_master` ((#acl_tokens_agent_master)) **Renamed in Consul 1.11 to - [`acl.tokens.agent_recovery`](#acl_tokens_agent_recovery).** - - - `replication` ((#acl_tokens_replication)) - The ACL token used to - authorize secondary datacenters with the primary datacenter for replication - operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, - policies and roles already present in the secondary datacenter will be lost. For - production environments, consider configuring ACL replication in your initial - datacenter bootstrapping process. - - - `managed_service_provider` ((#acl_tokens_managed_service_provider)) - An - array of ACL tokens used by Consul managed service providers for cluster operations. - - ```json - "managed_service_provider": [ - { - "accessor_id": "ed22003b-0832-4e48-ac65-31de64e5c2ff", - "secret_id": "cb6be010-bba8-4f30-a9ed-d347128dde17" - } - ] - ``` - -- `acl_datacenter` - **This field is deprecated in Consul 1.4.0. See the [`primary_datacenter`](#primary_datacenter) field instead.** - - This designates the datacenter which is authoritative for ACL information. It must be provided to enable ACLs. All servers and datacenters must agree on the ACL datacenter. 
Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients, - it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement - of ACLs. Please review the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) for more details. - -- `acl_default_policy` ((#acl_default_policy_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.default_policy`](#acl_default_policy) field instead.** - Either "allow" or "deny"; defaults to "allow". The default policy controls the - behavior of a token when there is no matching rule. In "allow" mode, ACLs are a - denylist: any operation not specifically prohibited is allowed. In "deny" mode, - ACLs are an allowlist: any operation not specifically allowed is blocked. **Note**: - this will not take effect until you've set `primary_datacenter` to enable ACL support. - -- `acl_down_policy` ((#acl_down_policy_legacy)) - **Deprecated in Consul - 1.4.0. See the [`acl.down_policy`](#acl_down_policy) field instead.** Either "allow", - "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the - case that the policy for a token cannot be read from the [`primary_datacenter`](#primary_datacenter) - or leader node, the down policy is applied. In "allow" mode, all actions are permitted, - "deny" restricts all operations, and "extend-cache" allows any cached ACLs to be - used, ignoring their TTL values. If a non-cached ACL is used, "extend-cache" acts - like "deny". The value "async-cache" acts the same way as "extend-cache" but performs - updates asynchronously when ACL is present but its TTL is expired, thus, if latency - is bad between ACL authoritative and other datacenters, latency of operations is - not impacted. - -- `acl_agent_master_token` ((#acl_agent_master_token_legacy)) - **Deprecated - in Consul 1.4.0. See the [`acl.tokens.agent_master`](#acl_tokens_agent_master) - field instead.** Used to access [agent endpoints](/api/agent) that - require agent read or write privileges, or node read privileges, even if Consul - servers aren't present to validate any tokens. This should only be used by operators - during outages, regular ACL tokens should normally be used by applications. This - was added in Consul 0.7.2 and is only used when [`acl_enforce_version_8`](#acl_enforce_version_8) is set to true. - -- `acl_agent_token` ((#acl_agent_token_legacy)) - **Deprecated in Consul - 1.4.0. See the [`acl.tokens.agent`](#acl_tokens_agent) field instead.** Used for - clients and servers to perform internal operations. If this isn't specified, then - the [`acl_token`](#acl_token) will be used. This was added in Consul 0.7.2. - - This token must at least have write access to the node name it will register as in order to set any - of the node-level information in the catalog such as metadata, or the node's tagged addresses. - -- `acl_enforce_version_8` - **Deprecated in - Consul 1.4.0 and removed in 1.8.0.** Used for clients and servers to determine if enforcement should - occur for new ACL policies being previewed before Consul 0.8. Added in Consul 0.7.2, - this defaults to false in versions of Consul prior to 0.8, and defaults to true - in Consul 0.8 and later. This helps ease the transition to the new ACL features - by allowing policies to be in place before enforcement begins. - -- `acl_master_token` ((#acl_master_token_legacy)) - **Deprecated in Consul - 1.4.0. 
See the [`acl.tokens.master`](#acl_tokens_master) field instead.** - -- `acl_replication_token` ((#acl_replication_token_legacy)) - **Deprecated - in Consul 1.4.0. See the [`acl.tokens.replication`](#acl_tokens_replication) field - instead.** Only used for servers outside the [`primary_datacenter`](#primary_datacenter) - running Consul 0.7 or later. When provided, this will enable [ACL replication](https://learn.hashicorp.com/tutorials/consul/access-control-replication-multiple-datacenters) - using this ACL replication using this token to retrieve and replicate the ACLs - to the non-authoritative local datacenter. In Consul 0.9.1 and later you can enable - ACL replication using [`acl.enable_token_replication`](#acl_enable_token_replication) and then - set the token later using the [agent token API](/api/agent#update-acl-tokens) - on each server. If the `acl_replication_token` is set in the config, it will automatically - set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility. - - If there's a partition or other outage affecting the authoritative datacenter, and the - [`acl_down_policy`](/docs/agent/options#acl_down_policy) is set to "extend-cache", tokens not - in the cache can be resolved during the outage using the replicated set of ACLs. - -- `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See - the [`acl.tokens.default`](#acl_tokens_default) field instead.** When provided, - the agent will use this token when making requests to the Consul servers. Clients - can override this token on a per-request basis by providing the "?token" query - parameter. When not provided, the empty token, which maps to the 'anonymous' ACL - policy, is used. - -- `acl_ttl` ((#acl_ttl_legacy)) - **Deprecated in Consul 1.4.0. See the - [`acl.token_ttl`](#acl_token_ttl) field instead.**Used to control Time-To-Live - caching of ACLs. By default, this is 30 seconds. This setting has a major performance - impact: reducing it will cause more frequent refreshes while increasing it reduces - the number of refreshes. However, because the caches are not actively invalidated, - ACL policy may be stale up to the TTL value. +## General - `addresses` - This is a nested object that allows setting bind addresses. In Consul 1.0 and later these can be set to a space-separated list @@ -294,34 +80,8 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `https` - The HTTPS API. Defaults to `client_addr` - `grpc` - The gRPC API. Defaults to `client_addr` -- `advertise_addr` Equivalent to the [`-advertise` command-line flag](#_advertise). - -- `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](#_advertise-wan). - -- `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. 
- -- `advertise_addr_wan_ipv6` This was added together with [`advertise_addr_wan_ipv4`](#advertise_addr_wan_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - -- `advertise_reconnect_timeout` This is a per-agent setting of the [`reconnect_timeout`](#reconnect_timeout) parameter. - This agent will advertise to all other nodes in the cluster that after this timeout, the node may be completely - removed from the cluster. This may only be set on client agents and if unset then other nodes will use the main - `reconnect_timeout` setting when determining when this node may be removed from the cluster. - - `alt_domain` Equivalent to the [`-alt-domain` command-line flag](#_alt_domain) -- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](#_serf_lan_bind). - This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port). - -- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](#_serf_lan_allowed_cidrs). - -- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](#_serf_wan_bind). - -- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](#_serf_wan_allowed_cidrs). - - `audit` - Added in Consul 1.8, the audit object allow users to enable auditing and configure a sink and filters for their audit logs. For more information, review the [audit log tutorial](https://learn.hashicorp.com/tutorials/consul/audit-logging). @@ -550,49 +310,6 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `partition` - The admin partition name the client is requesting. -- `auto_encrypt` This object allows setting options for the `auto_encrypt` feature. - - The following sub-keys are available: - - - `allow_tls` (Defaults to `false`) This option enables - `auto_encrypt` on the servers and allows them to automatically distribute certificates - from the Connect CA to the clients. If enabled, the server can accept incoming - connections from both the built-in CA and the Connect CA, as well as their certificates. - Note, the server will only present the built-in CA and certificate, which the - client can verify using the CA it received from `auto_encrypt` endpoint. If disabled, - a client configured with `auto_encrypt.tls` will be unable to start. - - - `tls` (Defaults to `false`) Allows the client to request the - Connect CA and certificates from the servers, for encrypting RPC communication. - The client will make the request to any servers listed in the `-join` or `-retry-join` - option. This requires that every server to have `auto_encrypt.allow_tls` enabled. - When both `auto_encrypt` options are used, it allows clients to receive certificates - that are generated on the servers. If the `-server-port` is not the default one, - it has to be provided to the client as well. Usually this is discovered through - LAN gossip, but `auto_encrypt` provision happens before the information can be - distributed through gossip. The most secure `auto_encrypt` setup is when the - client is provided with the built-in CA, `verify_server_hostname` is turned on, - and when an ACL token with `node.write` permissions is setup. 
It is also possible - to use `auto_encrypt` with a CA and ACL, but without `verify_server_hostname`, - or only with a ACL enabled, or only with CA and `verify_server_hostname`, or - only with a CA, or finally without a CA and without ACL enabled. In any case, - the communication to the `auto_encrypt` endpoint is always TLS encrypted. - - ~> **Warning:** Enabling `auto_encrypt.tls` conflicts with the [`auto_config`](#auto_config) feature. - Only one option may be specified. - - - `dns_san` (Defaults to `[]`) When this option is being - used, the certificates requested by `auto_encrypt` from the server have these - `dns_san` set as DNS SAN. - - - `ip_san` (Defaults to `[]`) When this option is being used, - the certificates requested by `auto_encrypt` from the server have these `ip_san` - set as IP SAN. - -- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](#_bootstrap). - -- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](#_bootstrap_expect). - - `bind_addr` Equivalent to the [`-bind` command-line flag](#_bind). This parameter can be set to a go-sockaddr template that resolves to a single @@ -655,6 +372,594 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr See the [configuration entry docs](/docs/agent/config-entries) for more details about the contents of each entry. +- `datacenter` Equivalent to the [`-datacenter` command-line flag](#_datacenter). + +- `data_dir` Equivalent to the [`-data-dir` command-line flag](#_data_dir). + +- `disable_anonymous_signature` Disables providing an anonymous + signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check). + +- `disable_http_unprintable_char_filter` Defaults to false. Consul 1.0.3 fixed a potential security vulnerability where malicious users could craft KV keys with unprintable chars that would confuse operators using the CLI or UI into taking wrong actions. Users who had data written in older versions of Consul that did not have this restriction will be unable to delete those values by default in 1.0.3 or later. This setting enables those users to **temporarily** disable the filter such that delete operations can work on those keys again to get back to a healthy state. It is strongly recommended that this filter is not disabled permanently as it exposes the original security vulnerability. + +- `disable_remote_exec` Disables support for remote execution. When set to true, the agent will ignore + any incoming remote exec requests. In versions of Consul prior to 0.8, this defaulted + to false. In Consul 0.8 the default was changed to true, to make remote exec opt-in + instead of opt-out. + +- `disable_update_check` Disables automatic checking for security bulletins and new version releases. This is disabled in Consul Enterprise. + +- `discard_check_output` Discards the output of health checks before storing them. This reduces the number of writes to the Consul raft log in environments where health checks have volatile output like timestamps, process ids, ... + +- `discovery_max_stale` - Enables stale requests for all service discovery HTTP endpoints. This is + equivalent to the [`max_stale`](#max_stale) configuration for DNS requests. If this value is zero (default), all service discovery HTTP endpoints are forwarded to the leader. If this value is greater than zero, any Consul server can handle the service discovery request. 
If a Consul server is behind the leader by more than `discovery_max_stale`, the query will be re-evaluated on the leader to get more up-to-date results. Consul agents also add a new `X-Consul-Effective-Consistency` response header which indicates if the agent did a stale read. `discovery_max_stale` was introduced in Consul 1.0.7 as a way for Consul operators to force stale requests from clients at the agent level, and defaults to zero which matches default consistency behavior in earlier Consul versions.
+
+- `enable_agent_tls_for_checks` When set, uses a subset of the agent's TLS configuration (`key_file`,
+  `cert_file`, `ca_file`, `ca_path`, and `server_name`) to set up the client for HTTP or gRPC health checks. This allows services requiring 2-way TLS to be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false.
+
+- `enable_central_service_config` When set, the Consul agent will look for any
+  [centralized service configuration](/docs/agent/config-entries)
+  that matches a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations.
+  This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later.
+
+- `enable_debug` When set, enables some additional debugging features. Currently, this is only used to
+  access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`.
+
+- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](#_enable_script_checks).
+
+  ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information.
+
+  ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. For additional guidance, refer to
+  [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations).
+
+- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](#_enable_local_script_checks).
+
+- `disable_keyring_file` - Equivalent to the
+  [`-disable-keyring-file` command-line flag](#_disable_keyring_file).
+
+- `disable_coordinates` - Disables sending of [network coordinates](/docs/architecture/coordinates).
+  When network coordinates are disabled, the `near` query param will not work to sort the nodes,
+  and the [`consul rtt`](/commands/rtt) command will not be able to provide round trip time between nodes.
+
+- `http_config` This object allows setting options for the HTTP API and UI.
+
+  The following sub-keys are available:
+
+  - `block_endpoints`
+    This object is a list of HTTP API endpoint prefixes to block on the agent, and
+    defaults to an empty list, meaning all endpoints are enabled. Any endpoint that
+    has a common prefix with one of the entries on this list will be blocked and
+    will return a 403 response code when accessed.
For example, to block all of the + V1 ACL endpoints, set this to `["/v1/acl"]`, which will block `/v1/acl/create`, + `/v1/acl/update`, and the other ACL endpoints that begin with `/v1/acl`. This + only works with API endpoints, not `/ui` or `/debug`, those must be disabled + with their respective configuration options. Any CLI commands that use disabled + endpoints will no longer function as well. For more general access control, Consul's + [ACL system](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) + should be used, but this option is useful for removing access to HTTP API endpoints + completely, or on specific agents. This is available in Consul 0.9.0 and later. + + - `response_headers` This object allows adding headers to the HTTP API and UI responses. For example, the following config can be used to enable [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) on the HTTP API endpoints: + + ```json + { + "http_config": { + "response_headers": { + "Access-Control-Allow-Origin": "*" + } + } + } + ``` + + - `allow_write_http_from` This object is a list of networks in CIDR notation (eg "127.0.0.0/8") that are allowed to call the agent write endpoints. It defaults to an empty list, which means all networks are allowed. This is used to make the agent read-only, except for select ip ranges. - To block write calls from anywhere, use `[ "255.255.255.255/32" ]`. - To only allow write calls from localhost, use `[ "127.0.0.0/8" ]` - To only allow specific IPs, use `[ "10.0.0.1/32", "10.0.0.2/32" ]` + + - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't be using [agent caching](/api/features/caching) to answer the request. Even when the url parameter is provided. + + - `max_header_bytes` This setting controls the maximum number of bytes the consul http server will read parsing the request header's keys and values, including the request line. It does not limit the size of the request body. If zero, or negative, http.DefaultMaxHeaderBytes is used, which equates to 1 Megabyte. + +- `leave_on_terminate` If enabled, when the agent receives a TERM signal, it will send a `Leave` message to the rest of the cluster and gracefully leave. The default behavior for this feature varies based on whether or not the agent is running as a client or a server (prior to Consul 0.7 the default value was unconditionally set to `false`). On agents in client-mode, this defaults to `true` and for agents in server-mode, this defaults to `false`. + +- `license_path` This specifies the path to a file that contains the Consul Enterprise license. Alternatively the license may also be specified in either the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. See the [licensing documentation](/docs/enterprise/license/overview) for more information about Consul Enterprise license management. Added in versions 1.10.0, 1.9.7 and 1.8.13. Prior to version 1.10.0 the value may be set for all agents to facilitate forwards compatibility with 1.10 but will only actually be used by client agents. + +- `limits` Available in Consul 0.9.3 and later, this is a nested + object that configures limits that are enforced by the agent. Prior to Consul 1.5.2, + this only applied to agents in client mode, not Consul servers. The following parameters + are available: + + - `http_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single client IP address is allowed to open to the agent's HTTP(S) server. 
This affects the HTTP(S) servers in both client and server agents. Default value is `200`.
+  - `https_handshake_timeout` - Configures the limit for how long the HTTPS server in both client and server agents will wait for a client to complete a TLS handshake. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). Default value is `5s`.
+  - `rpc_handshake_timeout` - Configures the limit for how long servers will wait after a client TCP connection is established before they complete the connection handshake. When TLS is used, the same timeout applies to the TLS handshake separately from the initial protocol negotiation. All Consul clients should perform this immediately on establishing a new connection. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). When `verify_incoming` is true on servers, this limits how long the connection socket and associated goroutines will be held open before the client successfully authenticates. Default value is `5s`.
+  - `rpc_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single source IP address is allowed to open to a single server. It affects both client connections and other server connections. In general Consul clients multiplex many RPC calls over a single TCP connection so this can typically be kept low. It needs to be more than one though since servers open at least one additional connection for raft RPC, possibly more for WAN federation when using network areas, and snapshot requests from clients run over a separate TCP conn. A reasonably low limit significantly reduces the ability of an unauthenticated attacker to consume unbounded resources by holding open many connections. You may need to increase this if WAN federated servers connect via proxies or NAT gateways or similar causing many legitimate connections from a single source IP. Default value is `100` which is designed to be extremely conservative to limit issues with certain deployment patterns. Most deployments can probably reduce this safely. 100 connections on modern server hardware should not cause a significant impact on resource usage from an unauthenticated attacker though.
+  - `rpc_rate` - Configures the RPC rate limiter on Consul _clients_ by setting the maximum request rate that this agent is allowed to make for RPC requests to Consul servers, in requests per second. Defaults to infinite, which disables rate limiting.
+  - `rpc_max_burst` - The size of the token bucket used to recharge the RPC rate limiter on Consul _clients_. Defaults to 1000 tokens, and each token is good for a single RPC call to a Consul server. See https://en.wikipedia.org/wiki/Token_bucket for more details about how token bucket rate limiters operate.
+  - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration.
This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence.
+  - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration.
+
+- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](#_default_query_time).
+
+- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](#_max_query_time).
+
+- `partition` - This flag is used to set
+  the name of the admin partition the agent belongs to. An agent can only join
+  and communicate with other agents within its admin partition. Review the
+  [Admin Partitions documentation](/docs/enterprise/admin-partitions) for more
+  details. By default, this is an empty string, which is the `default` admin
+  partition. This cannot be set on a server agent.
+
+  ~> **Warning:** The `partition` option cannot be used with either the
+  [`segment`](#segment-2) option or [`-segment`](#_segment) flag.
+
+- `performance` Available in Consul 0.7 and later, this is a nested object that allows tuning the performance of different subsystems in Consul. See the [Server Performance](/docs/install/performance) documentation for more details. The following parameters are available:
+
+  - `leave_drain_time` - A duration that a server will dwell during a graceful leave in order to allow requests to be retried against other Consul servers. Under normal circumstances, this can prevent clients from experiencing "no leader" errors when performing a rolling update of the Consul servers. This was added in Consul 1.0. Must be a duration value such as 10s. Defaults to 5s.
+
+  - `raft_multiplier` - An integer multiplier used by Consul servers to scale key Raft timing parameters. Omitting this value or setting it to 0 uses default timing described below. Lower values are used to tighten timing and increase sensitivity while higher values relax timings and reduce sensitivity. Tuning this affects the time it takes Consul to detect leader failures and to perform leader elections, at the expense of requiring more network and CPU resources for better performance.
+
+    By default, Consul will use a lower-performance timing that's suitable
+    for [minimal Consul servers](/docs/install/performance#minimum), currently equivalent
+    to setting this to a value of 5 (this default may be changed in future versions of Consul,
+    depending if the target minimum server profile changes). Setting this to a value of 1 will
+    configure Raft to its highest-performance mode, equivalent to the default timing of Consul
+    prior to 0.7, and is recommended for [production Consul servers](/docs/install/performance#production).
+
+    See the note on [last contact](/docs/install/performance#production-server-requirements) timing for more
+    details on tuning this parameter. The maximum allowed value is 10.
+
+  - `rpc_hold_timeout` - A duration that a client
+    or server will retry internal RPC requests during leader elections. Under normal
+    circumstances, this can prevent clients from experiencing "no leader" errors.
+    This was added in Consul 1.0.
Must be a duration value such as 10s. Defaults + to 7s. + +- `pid_file` Equivalent to the [`-pid-file` command line flag](#_pid_file). + +- `ports` This is a nested object that allows setting the bind ports for the following keys: + + - `dns` ((#dns_port)) - The DNS server, -1 to disable. Default 8600. + TCP and UDP. + - `http` ((#http_port)) - The HTTP API, -1 to disable. Default 8500. + TCP only. + - `https` ((#https_port)) - The HTTPS API, -1 to disable. Default -1 + (disabled). **We recommend using `8501`** for `https` by convention as some tooling + will work automatically with this. + - `grpc` ((#grpc_port)) - The gRPC API, -1 to disable. Default -1 (disabled). + **We recommend using `8502`** for `grpc` by convention as some tooling will work + automatically with this. This is set to `8502` by default when the agent runs + in `-dev` mode. Currently gRPC is only used to expose Envoy xDS API to Envoy + proxies. + - `serf_lan` ((#serf_lan_port)) - The Serf LAN port. Default 8301. TCP + and UDP. Equivalent to the [`-serf-lan-port` command line flag](#_serf_lan_port). + - `serf_wan` ((#serf_wan_port)) - The Serf WAN port. Default 8302. + Equivalent to the [`-serf-wan-port` command line flag](#_serf_wan_port). Set + to -1 to disable. **Note**: this will disable WAN federation which is not recommended. + Various catalog and WAN related endpoints will return errors or empty results. + TCP and UDP. + - `server` ((#server_rpc_port)) - Server RPC address. Default 8300. TCP + only. + - `sidecar_min_port` ((#sidecar_min_port)) - Inclusive minimum port number + to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). + Default 21000. Set to `0` to disable automatic port assignment. + - `sidecar_max_port` ((#sidecar_max_port)) - Inclusive maximum port number + to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). + Default 21255. Set to `0` to disable automatic port assignment. + - `expose_min_port` ((#expose_min_port)) - Inclusive minimum port number + to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). + Default 21500. Set to `0` to disable automatic port assignment. + - `expose_max_port` ((#expose_max_port)) - Inclusive maximum port number + to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). + Default 21755. Set to `0` to disable automatic port assignment. + +- `primary_datacenter` - This designates the datacenter + which is authoritative for ACL information, intentions and is the root Certificate + Authority for Connect. It must be provided to enable ACLs. All servers and datacenters + must agree on the primary datacenter. Setting it on the servers is all you need + for cluster-level enforcement, but for the APIs to forward properly from the clients, + it must be set on them too. In Consul 0.8 and later, this also enables agent-level + enforcement of ACLs. + +- `primary_gateways` Equivalent to the [`-primary-gateway` + command-line flag](#_primary_gateway). Takes a list of addresses to use as the + mesh gateways for the primary datacenter when authoritative replicated catalog + data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval) + until at least one primary mesh gateway is discovered. This was added in Consul + 1.8.0. 
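+
+  As a minimal sketch (the gateway addresses below are placeholders for
+  illustration, not defaults), this might look like:
+
+  ```hcl
+  # Hypothetical mesh gateway addresses in the primary datacenter.
+  primary_gateways = ["10.0.0.10:443", "10.0.0.11:443"]
+  ```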
+ +- `primary_gateways_interval` Time to wait + between [`primary_gateways`](#primary_gateways) discovery attempts. Defaults to + 30s. This was added in Consul 1.8.0. + +- `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line + flag](#_protocol). + +- `reap` This controls Consul's automatic reaping of child processes, + which is useful if Consul is running as PID 1 in a Docker container. If this isn't + specified, then Consul will automatically reap child processes if it detects it + is running as PID 1. If this is set to true or false, then it controls reaping + regardless of Consul's PID (forces reaping on or off, respectively). This option + was removed in Consul 0.7.1. For later versions of Consul, you will need to reap + processes using a wrapper, please see the [Consul Docker image entry point script](https://github.com/hashicorp/docker-consul/blob/master/0.X/docker-entrypoint.sh) + for an example. If you are using Docker 1.13.0 or later, you can use the new `--init` + option of the `docker run` command and docker will enable an init process with + PID 1 that reaps child processes for the container. More info on [Docker docs](https://docs.docker.com/engine/reference/commandline/run/#options). + +- `reconnect_timeout` This controls how long it + takes for a failed node to be completely removed from the cluster. This defaults + to 72 hours and it is recommended that this is set to at least double the maximum + expected recoverable outage time for a node or network partition. WARNING: Setting + this time too low could cause Consul servers to be removed from quorum during an + extended node failure or partition, which could complicate recovery of the cluster. + The value is a time with a unit suffix, which can be "s", "m", "h" for seconds, + minutes, or hours. The value must be >= 8 hours. + +- `reconnect_timeout_wan` This is the WAN equivalent + of the [`reconnect_timeout`](#reconnect_timeout) parameter, which controls + how long it takes for a failed server to be completely removed from the WAN pool. + This also defaults to 72 hours, and must be >= 8 hours. + +- `recursors` This flag provides addresses of upstream DNS + servers that are used to recursively resolve queries if they are not inside the + service domain for Consul. For example, a node can use Consul directly as a DNS + server, and if the record is outside of the "consul." domain, the query will be + resolved upstream. As of Consul 1.0.1 recursors can be provided as IP addresses + or as go-sockaddr templates. IP addresses are resolved in order, and duplicates + are ignored. + +- `rpc` configuration for Consul servers. + + - `enable_streaming` ((#rpc_enable_streaming)) defaults to true. If set to false it will disable + the gRPC subscribe endpoint on a Consul Server. All + servers in all federated datacenters must have this enabled before any client can use + [`use_streaming_backend`](#use_streaming_backend). + +- `segment` - Equivalent to the [`-segment` command-line flag](#_segment). + + ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option. + +- `segments` - (Server agents only) This is a list of nested objects + that specifies user-defined network segments, not including the `` segment, which is + created automatically. Review the [Network Segments documentation](/docs/enterprise/network-segments) + for more details. + + - `name` ((#segment_name)) - The name of the segment. Must be a string + between 1 and 64 characters in length. 
+ - `bind` ((#segment_bind)) - The bind address to use for the segment's + gossip layer. Defaults to the [`-bind`](#_bind) value if not provided. + - `port` ((#segment_port)) - The port to use for the segment's gossip + layer (required). + - `advertise` ((#segment_advertise)) - The advertise address to use for + the segment's gossip layer. Defaults to the [`-advertise`](#_advertise) value + if not provided. + - `rpc_listener` ((#segment_rpc_listener)) - If true, a separate RPC + listener will be started on this segment's [`-bind`](#_bind) address on the rpc + port. Only valid if the segment's bind address differs from the [`-bind`](#_bind) + address. Defaults to false. + +- `server` Equivalent to the [`-server` command-line flag](#_server). + +- `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.** + +- `read_replica` - Equivalent to the [`-read-replica` command-line flag](#_read_replica). + +- `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTL's + shorter than the specified limit. It is recommended to keep this limit at or above + the default to encourage clients to send infrequent heartbeats. Defaults to 10s. + +- `skip_leave_on_interrupt` This is similar + to [`leave_on_terminate`](#leave_on_terminate) but only affects interrupt handling. + When Consul receives an interrupt signal (such as hitting Control-C in a terminal), + Consul will gracefully leave the cluster. Setting this to `true` disables that + behavior. The default behavior for this feature varies based on whether or not + the agent is running as a client or a server (prior to Consul 0.7 the default value + was unconditionally set to `false`). On agents in client-mode, this defaults to + `false` and for agents in server-mode, this defaults to `true` (i.e. Ctrl-C on + a server will keep the server in the cluster and therefore quorum, and Ctrl-C on + a client will gracefully leave). + +- `translate_wan_addrs` If set to true, Consul + will prefer a node's configured [WAN address](#_advertise-wan) + when servicing DNS and HTTP requests for a node in a remote datacenter. This allows + the node to be reached within its own datacenter using its local address, and reached + from other datacenters using its WAN address, which is useful in hybrid setups + with mixed networks. This is disabled by default. + + Starting in Consul 0.7 and later, node addresses in responses to HTTP requests will also prefer a + node's configured [WAN address](#_advertise-wan) when querying for a node in a remote + datacenter. An [`X-Consul-Translate-Addresses`](/api#translated-addresses) header + will be present on all responses when translation is enabled to help clients know that the addresses + may be translated. The `TaggedAddresses` field in responses also have a `lan` address for clients that + need knowledge of that address, regardless of translation. + + The following endpoints translate addresses: + + - [`/v1/catalog/nodes`](/api/catalog#list-nodes) + - [`/v1/catalog/node/`](/api/catalog#retrieve-map-of-services-for-a-node) + - [`/v1/catalog/service/`](/api/catalog#list-nodes-for-service) + - [`/v1/health/service/`](/api/health#list-nodes-for-service) + - [`/v1/query//execute`](/api/query#execute-prepared-query) + +- `unix_sockets` - This allows tuning the ownership and + permissions of the Unix domain socket files created by Consul. Domain sockets are + only used if the HTTP address is configured with the `unix://` prefix. 
+
+  It is important to note that this option may have different effects on
+  different operating systems. Linux generally observes socket file permissions
+  while many BSD variants ignore permissions on the socket file itself. It is
+  important to test this feature on your specific distribution. This feature is
+  currently not functional on Windows hosts.
+
+  The following options are valid within this construct and apply globally to all
+  sockets created by Consul:
+
+  - `user` - The name or ID of the user who will own the socket file.
+  - `group` - The group ID ownership of the socket file. This option
+    currently only supports numeric IDs.
+  - `mode` - The permission bits to set on the file.
+
+- `use_streaming_backend` defaults to true. When enabled Consul client agents will use
+  streaming rpc, instead of the traditional blocking queries, for endpoints which support
+  streaming. All servers must have [`rpc.enable_streaming`](#rpc_enable_streaming)
+  enabled before any client can enable `use_streaming_backend`.
+
+- `watches` - Watches is a list of watch specifications which
+  allow an external process to be automatically invoked when a particular data view
+  is updated. See the [watch documentation](/docs/agent/watches) for more detail.
+  Watches can be modified when the configuration is reloaded.
+
+## ACL Parameters
+
+- `acl` ((#acl)) - This object allows a number of sub-keys to be set which
+  controls the ACL system. Configuring the ACL system within the ACL stanza was added
+  in Consul 1.4.0.
+
+  The following sub-keys are available:
+
+  - `enabled` ((#acl_enabled)) - Enables ACLs.
+
+  - `policy_ttl` ((#acl_policy_ttl)) - Used to control Time-To-Live caching
+    of ACL policies. By default, this is 30 seconds. This setting has a major performance
+    impact: reducing it will cause more frequent refreshes while increasing it reduces
+    the number of refreshes. However, because the caches are not actively invalidated,
+    ACL policy may be stale up to the TTL value.
+
+  - `role_ttl` ((#acl_role_ttl)) - Used to control Time-To-Live caching
+    of ACL roles. By default, this is 30 seconds. This setting has a major performance
+    impact: reducing it will cause more frequent refreshes while increasing it reduces
+    the number of refreshes. However, because the caches are not actively invalidated,
+    ACL role may be stale up to the TTL value.
+
+  - `token_ttl` ((#acl_token_ttl)) - Used to control Time-To-Live caching
+    of ACL tokens. By default, this is 30 seconds. This setting has a major performance
+    impact: reducing it will cause more frequent refreshes while increasing it reduces
+    the number of refreshes. However, because the caches are not actively invalidated,
+    ACL token may be stale up to the TTL value.
+
+  - `down_policy` ((#acl_down_policy)) - Either "allow", "deny", "extend-cache"
+    or "async-cache"; "extend-cache" is the default. In the case that a policy or
+    token cannot be read from the [`primary_datacenter`](#primary_datacenter) or
+    leader node, the down policy is applied. In "allow" mode, all actions are permitted,
+    "deny" restricts all operations, and "extend-cache" allows any cached objects
+    to be used, ignoring the expiry time of the cached entry. If the request uses an
+    ACL that is not in the cache, "extend-cache" falls back to the behaviour of
+    `default_policy`.
+    The value "async-cache" acts the same way as "extend-cache"
+    but performs updates asynchronously when ACL is present but its TTL is expired,
+    thus, if latency is bad between the primary and secondary datacenters, latency
+    of operations is not impacted.
+
+  - `default_policy` ((#acl_default_policy)) - Either "allow" or "deny";
+    defaults to "allow" but this will be changed in a future major release. The default
+    policy controls the behavior of a token when there is no matching rule. In "allow"
+    mode, ACLs are a denylist: any operation not specifically prohibited is allowed.
+    In "deny" mode, ACLs are an allowlist: any operation not specifically
+    allowed is blocked. **Note**: this will not take effect until you've enabled ACLs.
+
+  - `enable_key_list_policy` ((#acl_enable_key_list_policy)) - Boolean value, defaults to false.
+    When true, the `list` permission will be required on the prefix being recursively read from the KV store.
+    Regardless of being enabled, the full set of KV entries under the prefix will be filtered
+    to remove any entries that the request's ACL token does not grant at least read
+    permissions. This option is only available in Consul 1.0 and newer.
+
+  - `enable_token_replication` ((#acl_enable_token_replication)) - By default
+    secondary Consul datacenters will perform replication of only ACL policies and
+    roles. Setting this configuration will enable ACL token replication and
+    allow for the creation of both [local tokens](/api/acl/tokens#local) and
+    [auth methods](/docs/acl/auth-methods) in connected secondary datacenters.
+
+    ~> **Warning:** When enabling ACL token replication on the secondary datacenter,
+    global tokens already present in the secondary datacenter will be lost. For
+    production environments, consider configuring ACL replication in your initial
+    datacenter bootstrapping process.
+
+  - `enable_token_persistence` ((#acl_enable_token_persistence)) - Either
+    `true` or `false`. When `true` tokens set using the API will be persisted to
+    disk and reloaded when an agent restarts.
+
+  - `tokens` ((#acl_tokens)) - This object holds all of the configured
+    ACL tokens for the agent's usage.
+
+    - `initial_management` ((#acl_tokens_initial_management)) - This is available in
+      Consul 1.11 and later. In prior versions, use [`acl.tokens.master`](#acl_tokens_master).
+
+      Only used for servers in the [`primary_datacenter`](#primary_datacenter).
+      This token will be created with management-level permissions if it does not exist.
+      It allows operators to bootstrap the ACL system with a token Secret ID that is
+      well-known.
+
+      The `initial_management` token is only installed when a server acquires cluster
+      leadership. If you would like to install or change it, set the new value for
+      `initial_management` in the configuration for all servers. Once this is done,
+      restart the current leader to force a leader election. If the `initial_management`
+      token is not supplied, then the servers do not create an initial management token.
+      When you provide a value, it should be a UUID. To maintain backwards compatibility
+      and an upgrade path this restriction is not currently enforced but will be in a
+      future major Consul release.
+
+    - `master` ((#acl_tokens_master)) **Renamed in Consul 1.11 to
+      [`acl.tokens.initial_management`](#acl_tokens_initial_management).**
+
+    - `default` ((#acl_tokens_default)) - When provided, the agent will
+      use this token when making requests to the Consul servers.
Clients can override + this token on a per-request basis by providing the "?token" query parameter. + When not provided, the empty token, which maps to the 'anonymous' ACL token, + is used. + + - `agent` ((#acl_tokens_agent)) - Used for clients and servers to perform + internal operations. If this isn't specified, then the + [`default`](#acl_tokens_default) will be used. + + This token must at least have write access to the node name it will + register as in order to set any of the node-level information in the + catalog such as metadata, or the node's tagged addresses. + + - `agent_recovery` ((#acl_tokens_agent_recovery)) - This is available in Consul 1.11 + and later. In prior versions, use [`acl.tokens.agent_master`](#acl_tokens_agent_master). + + Used to access [agent endpoints](/api/agent) that require agent read or write privileges, + or node read privileges, even if Consul servers aren't present to validate any tokens. + This should only be used by operators during outages, regular ACL tokens should normally + be used by applications. + + - `agent_master` ((#acl_tokens_agent_master)) **Renamed in Consul 1.11 to + [`acl.tokens.agent_recovery`](#acl_tokens_agent_recovery).** + + - `replication` ((#acl_tokens_replication)) - The ACL token used to + authorize secondary datacenters with the primary datacenter for replication + operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + policies and roles already present in the secondary datacenter will be lost. For + production environments, consider configuring ACL replication in your initial + datacenter bootstrapping process. + + - `managed_service_provider` ((#acl_tokens_managed_service_provider)) - An + array of ACL tokens used by Consul managed service providers for cluster operations. + + ```json + "managed_service_provider": [ + { + "accessor_id": "ed22003b-0832-4e48-ac65-31de64e5c2ff", + "secret_id": "cb6be010-bba8-4f30-a9ed-d347128dde17" + } + ] + ``` + +- `acl_datacenter` - **This field is deprecated in Consul 1.4.0. See the [`primary_datacenter`](#primary_datacenter) field instead.** + + This designates the datacenter which is authoritative for ACL information. It must be provided to enable ACLs. All servers and datacenters must agree on the ACL datacenter. Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients, + it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement + of ACLs. Please review the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) for more details. + +- `acl_default_policy` ((#acl_default_policy_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.default_policy`](#acl_default_policy) field instead.** + Either "allow" or "deny"; defaults to "allow". The default policy controls the + behavior of a token when there is no matching rule. 
In "allow" mode, ACLs are a + denylist: any operation not specifically prohibited is allowed. In "deny" mode, + ACLs are an allowlist: any operation not specifically allowed is blocked. **Note**: + this will not take effect until you've set `primary_datacenter` to enable ACL support. + +- `acl_down_policy` ((#acl_down_policy_legacy)) - **Deprecated in Consul + 1.4.0. See the [`acl.down_policy`](#acl_down_policy) field instead.** Either "allow", + "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the + case that the policy for a token cannot be read from the [`primary_datacenter`](#primary_datacenter) + or leader node, the down policy is applied. In "allow" mode, all actions are permitted, + "deny" restricts all operations, and "extend-cache" allows any cached ACLs to be + used, ignoring their TTL values. If a non-cached ACL is used, "extend-cache" acts + like "deny". The value "async-cache" acts the same way as "extend-cache" but performs + updates asynchronously when ACL is present but its TTL is expired, thus, if latency + is bad between ACL authoritative and other datacenters, latency of operations is + not impacted. + +- `acl_agent_master_token` ((#acl_agent_master_token_legacy)) - **Deprecated + in Consul 1.4.0. See the [`acl.tokens.agent_master`](#acl_tokens_agent_master) + field instead.** Used to access [agent endpoints](/api/agent) that + require agent read or write privileges, or node read privileges, even if Consul + servers aren't present to validate any tokens. This should only be used by operators + during outages, regular ACL tokens should normally be used by applications. This + was added in Consul 0.7.2 and is only used when [`acl_enforce_version_8`](#acl_enforce_version_8) is set to true. + +- `acl_agent_token` ((#acl_agent_token_legacy)) - **Deprecated in Consul + 1.4.0. See the [`acl.tokens.agent`](#acl_tokens_agent) field instead.** Used for + clients and servers to perform internal operations. If this isn't specified, then + the [`acl_token`](#acl_token) will be used. This was added in Consul 0.7.2. + + This token must at least have write access to the node name it will register as in order to set any + of the node-level information in the catalog such as metadata, or the node's tagged addresses. + +- `acl_enforce_version_8` - **Deprecated in + Consul 1.4.0 and removed in 1.8.0.** Used for clients and servers to determine if enforcement should + occur for new ACL policies being previewed before Consul 0.8. Added in Consul 0.7.2, + this defaults to false in versions of Consul prior to 0.8, and defaults to true + in Consul 0.8 and later. This helps ease the transition to the new ACL features + by allowing policies to be in place before enforcement begins. + +- `acl_master_token` ((#acl_master_token_legacy)) - **Deprecated in Consul + 1.4.0. See the [`acl.tokens.master`](#acl_tokens_master) field instead.** + +- `acl_replication_token` ((#acl_replication_token_legacy)) - **Deprecated + in Consul 1.4.0. See the [`acl.tokens.replication`](#acl_tokens_replication) field + instead.** Only used for servers outside the [`primary_datacenter`](#primary_datacenter) + running Consul 0.7 or later. When provided, this will enable [ACL replication](https://learn.hashicorp.com/tutorials/consul/access-control-replication-multiple-datacenters) + using this ACL replication using this token to retrieve and replicate the ACLs + to the non-authoritative local datacenter. 
In Consul 0.9.1 and later you can enable + ACL replication using [`acl.enable_token_replication`](#acl_enable_token_replication) and then + set the token later using the [agent token API](/api/agent#update-acl-tokens) + on each server. If the `acl_replication_token` is set in the config, it will automatically + set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility. + + If there's a partition or other outage affecting the authoritative datacenter, and the + [`acl_down_policy`](/docs/agent/options#acl_down_policy) is set to "extend-cache", tokens not + in the cache can be resolved during the outage using the replicated set of ACLs. + +- `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See + the [`acl.tokens.default`](#acl_tokens_default) field instead.** When provided, + the agent will use this token when making requests to the Consul servers. Clients + can override this token on a per-request basis by providing the "?token" query + parameter. When not provided, the empty token, which maps to the 'anonymous' ACL + policy, is used. + +- `acl_ttl` ((#acl_ttl_legacy)) - **Deprecated in Consul 1.4.0. See the + [`acl.token_ttl`](#acl_token_ttl) field instead.**Used to control Time-To-Live + caching of ACLs. By default, this is 30 seconds. This setting has a major performance + impact: reducing it will cause more frequent refreshes while increasing it reduces + the number of refreshes. However, because the caches are not actively invalidated, + ACL policy may be stale up to the TTL value. + +- `enable_acl_replication` **Deprecated in Consul 1.11. Use the [`acl.enable_token_replication`](#acl_enable_token_replication) field instead.** + When set on a Consul server, enables ACL replication without having to set + the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication + and then introduce the token using the [agent token API](/api/agent#update-acl-tokens) on each server. + See [`acl_replication_token`](#acl_replication_token) for more details. + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + policies and roles already present in the secondary datacenter will be lost. For + production environments, consider configuring ACL replication in your initial + datacenter bootstrapping process. + +## Advertise Address Parameters + +- `advertise_addr` Equivalent to the [`-advertise` command-line flag](#_advertise). + +- `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](#_advertise-wan). + +- `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_wan_ipv6` This was added together with [`advertise_addr_wan_ipv4`](#advertise_addr_wan_ipv4) to support dual stack IPv4/IPv6 environments. 
Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_reconnect_timeout` This is a per-agent setting of the [`reconnect_timeout`](#reconnect_timeout) parameter. + This agent will advertise to all other nodes in the cluster that after this timeout, the node may be completely + removed from the cluster. This may only be set on client agents and if unset then other nodes will use the main + `reconnect_timeout` setting when determining when this node may be removed from the cluster. + +## Bootstrap Parameters + +- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](#_bootstrap). + +- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](#_bootstrap_expect). + +## Connect Parameters + - `connect` This object allows setting options for the Connect feature. The following sub-keys are available: @@ -799,28 +1104,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr corresponding to the NIST P-\* curves of the same name. - `private_key_type = rsa`: `2048, 4096` -- `datacenter` Equivalent to the [`-datacenter` command-line flag](#_datacenter). - -- `data_dir` Equivalent to the [`-data-dir` command-line flag](#_data_dir). - -- `disable_anonymous_signature` Disables providing an anonymous - signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check). - -- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](#_disable_host_node_id). - -- `disable_http_unprintable_char_filter` Defaults to false. Consul 1.0.3 fixed a potential security vulnerability where malicious users could craft KV keys with unprintable chars that would confuse operators using the CLI or UI into taking wrong actions. Users who had data written in older versions of Consul that did not have this restriction will be unable to delete those values by default in 1.0.3 or later. This setting enables those users to **temporarily** disable the filter such that delete operations can work on those keys again to get back to a healthy state. It is strongly recommended that this filter is not disabled permanently as it exposes the original security vulnerability. - -- `disable_remote_exec` Disables support for remote execution. When set to true, the agent will ignore - any incoming remote exec requests. In versions of Consul prior to 0.8, this defaulted - to false. In Consul 0.8 the default was changed to true, to make remote exec opt-in - instead of opt-out. - -- `disable_update_check` Disables automatic checking for security bulletins and new version releases. This is disabled in Consul Enterprise. - -- `discard_check_output` Discards the output of health checks before storing them. This reduces the number of writes to the Consul raft log in environments where health checks have volatile output like timestamps, process ids, ... - -- `discovery_max_stale` - Enables stale requests for all service discovery HTTP endpoints. This is - equivalent to the [`max_stale`](#max_stale) configuration for DNS requests. If this value is zero (default), all service discovery HTTP endpoints are forwarded to the leader. If this value is greater than zero, any Consul server can handle the service discovery request. If a Consul server is behind the leader by more than `discovery_max_stale`, the query will be re-evaluated on the leader to get more up-to-date results. 
Consul agents also add a new `X-Consul-Effective-Consistency` response header which indicates if the agent did a stale read. `discover-max-stale` was introduced in Consul 1.0.7 as a way for Consul operators to force stale requests from clients at the agent level, and defaults to zero which matches default consistency behavior in earlier Consul versions. +## DNS and Domain Parameters - `dns_config` This object allows a number of sub-keys to be set which can tune how DNS queries are serviced. Check the tutorial on [DNS caching](https://learn.hashicorp.com/tutorials/consul/dns-caching) for more detail. @@ -946,38 +1230,46 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `domain` Equivalent to the [`-domain` command-line flag](#_domain). -- `enable_acl_replication` **Deprecated in Consul 1.11. Use the [`acl.enable_token_replication`](#acl_enable_token_replication) field instead.** - When set on a Consul server, enables ACL replication without having to set - the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication - and then introduce the token using the [agent token API](/api/agent#update-acl-tokens) on each server. - See [`acl_replication_token`](#acl_replication_token) for more details. +## Encryption Parameters - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, - policies and roles already present in the secondary datacenter will be lost. For - production environments, consider configuring ACL replication in your initial - datacenter bootstrapping process. +- `auto_encrypt` This object allows setting options for the `auto_encrypt` feature. -- `enable_agent_tls_for_checks` When set, uses a subset of the agent's TLS configuration (`key_file`, - `cert_file`, `ca_file`, `ca_path`, and `server_name`) to set up the client for HTTP or gRPC health checks. This allows services requiring 2-way TLS to be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false. + The following sub-keys are available: -- `enable_central_service_config` When set, the Consul agent will look for any - [centralized service configuration](/docs/agent/config-entries) - that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations. - This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later. + - `allow_tls` (Defaults to `false`) This option enables + `auto_encrypt` on the servers and allows them to automatically distribute certificates + from the Connect CA to the clients. If enabled, the server can accept incoming + connections from both the built-in CA and the Connect CA, as well as their certificates. + Note, the server will only present the built-in CA and certificate, which the + client can verify using the CA it received from `auto_encrypt` endpoint. If disabled, + a client configured with `auto_encrypt.tls` will be unable to start. -- `enable_debug` When set, enables some additional debugging features. Currently, this is only used to - access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`. 
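+
+    A minimal sketch of the corresponding server-side stanza (in HCL, assuming
+    the server's own TLS certificates and CA are already configured) might look
+    like:
+
+    ```hcl
+    # Server agent: allow clients to obtain their TLS certificates via auto_encrypt.
+    auto_encrypt {
+      allow_tls = true
+    }
+    ```
+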
+ - `tls` (Defaults to `false`) Allows the client to request the + Connect CA and certificates from the servers, for encrypting RPC communication. + The client will make the request to any servers listed in the `-join` or `-retry-join` + option. This requires that every server to have `auto_encrypt.allow_tls` enabled. + When both `auto_encrypt` options are used, it allows clients to receive certificates + that are generated on the servers. If the `-server-port` is not the default one, + it has to be provided to the client as well. Usually this is discovered through + LAN gossip, but `auto_encrypt` provision happens before the information can be + distributed through gossip. The most secure `auto_encrypt` setup is when the + client is provided with the built-in CA, `verify_server_hostname` is turned on, + and when an ACL token with `node.write` permissions is setup. It is also possible + to use `auto_encrypt` with a CA and ACL, but without `verify_server_hostname`, + or only with a ACL enabled, or only with CA and `verify_server_hostname`, or + only with a CA, or finally without a CA and without ACL enabled. In any case, + the communication to the `auto_encrypt` endpoint is always TLS encrypted. -- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](#_enable_script_checks). + ~> **Warning:** Enabling `auto_encrypt.tls` conflicts with the [`auto_config`](#auto_config) feature. + Only one option may be specified. - ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information. + - `dns_san` (Defaults to `[]`) When this option is being + used, the certificates requested by `auto_encrypt` from the server have these + `dns_san` set as DNS SAN. - ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. Refer to the following article for additional guidance: [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) - for more details. - -- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](#_enable_local_script_checks). - -- `enable_syslog` Equivalent to the [`-syslog` command-line flag](#_syslog). + - `ip_san` (Defaults to `[]`) When this option is being used, + the certificates requested by `auto_encrypt` from the server have these `ip_san` + set as IP SAN. - `encrypt` Equivalent to the [`-encrypt` command-line flag](#_encrypt). @@ -993,12 +1285,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr See [this section](/docs/agent/encryption#configuring-gossip-encryption-on-an-existing-cluster) for more information. Defaults to true. -- `disable_keyring_file` - Equivalent to the - [`-disable-keyring-file` command-line flag](#_disable_keyring_file). - -- `disable_coordinates` - Disables sending of [network coordinates](/docs/architecture/coordinates). - When network coordinates are disabled the `near` query param will not work to sort the nodes, - and the [`consul rtt`](/commands/rtt) command will not be able to provide round trip time between nodes. 
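+
+As an illustrative sketch of the gossip encryption settings in this section, a
+minimal agent stanza might look like the following (the key shown is a
+placeholder; generate a real one with `consul keygen`):
+
+```hcl
+# Gossip encryption: all agents in the cluster must share the same key.
+encrypt                 = "pUqJrVyVRj5jsiYEkM/tFQYfWyJIv4s3XkvDwy7Cu5s="
+encrypt_verify_incoming = true
+encrypt_verify_outgoing = true
+```
+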
+## Gossip Parameters - `gossip_lan` - **(Advanced)** This object contains a number of sub-keys which can be set to tune the LAN gossip communications. These @@ -1084,59 +1371,27 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr part of the cluster before declaring it dead, giving that suspect node more time to refute if it is indeed still alive. The default is 6. -- `http_config` This object allows setting options for the HTTP API and UI. +## Join Parameters - The following sub-keys are available: +- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](#_rejoin). - - `block_endpoints` - This object is a list of HTTP API endpoint prefixes to block on the agent, and - defaults to an empty list, meaning all endpoints are enabled. Any endpoint that - has a common prefix with one of the entries on this list will be blocked and - will return a 403 response code when accessed. For example, to block all of the - V1 ACL endpoints, set this to `["/v1/acl"]`, which will block `/v1/acl/create`, - `/v1/acl/update`, and the other ACL endpoints that begin with `/v1/acl`. This - only works with API endpoints, not `/ui` or `/debug`, those must be disabled - with their respective configuration options. Any CLI commands that use disabled - endpoints will no longer function as well. For more general access control, Consul's - [ACL system](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) - should be used, but this option is useful for removing access to HTTP API endpoints - completely, or on specific agents. This is available in Consul 0.9.0 and later. +- `retry_join` - Equivalent to the [`-retry-join`](#retry-join) command-line flag. - - `response_headers` This object allows adding headers to the HTTP API and UI responses. For example, the following config can be used to enable [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) on the HTTP API endpoints: +- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](#_retry_interval). - ```json - { - "http_config": { - "response_headers": { - "Access-Control-Allow-Origin": "*" - } - } - } - ``` +- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. - - `allow_write_http_from` This object is a list of networks in CIDR notation (eg "127.0.0.0/8") that are allowed to call the agent write endpoints. It defaults to an empty list, which means all networks are allowed. This is used to make the agent read-only, except for select ip ranges. - To block write calls from anywhere, use `[ "255.255.255.255/32" ]`. - To only allow write calls from localhost, use `[ "127.0.0.0/8" ]` - To only allow specific IPs, use `[ "10.0.0.1/32", "10.0.0.2/32" ]` +- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](#_retry_interval_wan). - - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't be using [agent caching](/api/features/caching) to answer the request. Even when the url parameter is provided. +- `start_join` An array of strings specifying addresses + of nodes to [`-join`](#_join) upon startup. Note that using + `retry_join` could be more appropriate to help mitigate + node startup race conditions when automating a Consul cluster deployment. 
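+
+  As an illustrative sketch, a `retry_join` list of that kind could look like
+  the following (the addresses are placeholders for your own servers):
+
+  ```hcl
+  # Retried until at least one join succeeds, which tolerates servers that
+  # come up in any order.
+  retry_join = ["10.0.1.10", "10.0.1.11", "10.0.1.12"]
+  ```
+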
- - `max_header_bytes` This setting controls the maximum number of bytes the consul http server will read parsing the request header's keys and values, including the request line. It does not limit the size of the request body. If zero, or negative, http.DefaultMaxHeaderBytes is used, which equates to 1 Megabyte. +- `start_join_wan` An array of strings specifying addresses + of WAN nodes to [`-join-wan`](#_join_wan) upon startup. -- `leave_on_terminate` If enabled, when the agent receives a TERM signal, it will send a `Leave` message to the rest of the cluster and gracefully leave. The default behavior for this feature varies based on whether or not the agent is running as a client or a server (prior to Consul 0.7 the default value was unconditionally set to `false`). On agents in client-mode, this defaults to `true` and for agents in server-mode, this defaults to `false`. - -- `license_path` This specifies the path to a file that contains the Consul Enterprise license. Alternatively the license may also be specified in either the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. See the [licensing documentation](/docs/enterprise/license/overview) for more information about Consul Enterprise license management. Added in versions 1.10.0, 1.9.7 and 1.8.13. Prior to version 1.10.0 the value may be set for all agents to facilitate forwards compatibility with 1.10 but will only actually be used by client agents. - -- `limits` Available in Consul 0.9.3 and later, this is a nested - object that configures limits that are enforced by the agent. Prior to Consul 1.5.2, - this only applied to agents in client mode, not Consul servers. The following parameters - are available: - - - `http_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single client IP address is allowed to open to the agent's HTTP(S) server. This affects the HTTP(S) servers in both client and server agents. Default value is `200`. - - `https_handshake_timeout` - Configures the limit for how long the HTTPS server in both client and server agents will wait for a client to complete a TLS handshake. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being using to authenticate clients (strongly recommended in production). Default value is `5s`. - - `rpc_handshake_timeout` - Configures the limit for how long servers will wait after a client TCP connection is established before they complete the connection handshake. When TLS is used, the same timeout applies to the TLS handshake separately from the initial protocol negotiation. All Consul clients should perform this immediately on establishing a new connection. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being using to authenticate clients (strongly recommended in production). When `verify_incoming` is true on servers, this limits how long the connection socket and associated goroutines will be held open before the client successfully authenticates. Default value is `5s`. - - `rpc_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single source IP address is allowed to open to a single server. It affects both clients connections and other server connections. In general Consul clients multiplex many RPC calls over a single TCP connection so this can typically be kept low. 
It needs to be more than one though since servers open at least one additional connection for raft RPC, possibly more for WAN federation when using network areas, and snapshot requests from clients run over a separate TCP conn. A reasonably low limit significantly reduces the ability of an unauthenticated attacker to consume unbounded resources by holding open many connections. You may need to increase this if WAN federated servers connect via proxies or NAT gateways or similar causing many legitimate connections from a single source IP. Default value is `100` which is designed to be extremely conservative to limit issues with certain deployment patterns. Most deployments can probably reduce this safely. 100 connections on modern server hardware should not cause a significant impact on resource usage from an unauthenticated attacker though. - - `rpc_rate` - Configures the RPC rate limiter on Consul _clients_ by setting the maximum request rate that this agent is allowed to make for RPC requests to Consul servers, in requests per second. Defaults to infinite, which disables rate limiting. - - `rpc_max_burst` - The size of the token bucket used to recharge the RPC rate limiter on Consul _clients_. Defaults to 1000 tokens, and each token is good for a single RPC call to a Consul server. See https://en.wikipedia.org/wiki/Token_bucket for more details about how token bucket rate limiters operate. - - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. - - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. +## Log Parameters - `log_file` Equivalent to the [`-log-file` command-line flag](#_log_file). @@ -1150,9 +1405,13 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `log_json` Equivalent to the [`-log-json` command-line flag](#_log_json). -- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](#_default_query_time). +- `enable_syslog` Equivalent to the [`-syslog` command-line flag](#_syslog). -- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](#_max_query_time). +- `syslog_facility` When [`enable_syslog`](#enable_syslog) + is provided, this controls to which facility messages are sent. By default, `LOCAL0` + will be used. + +## Node Parameters - `node_id` Equivalent to the [`-node-id` command-line flag](#_node_id). @@ -1168,97 +1427,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr } ``` -- `partition` - This flag is used to set - the name of the admin partition the agent belongs to. 
An agent can only join - and communicate with other agents within its admin partition. Review the - [Admin Partitions documentation](/docs/enterprise/admin-partitions) for more - details. By default, this is an empty string, which is the `default` admin - partition. This cannot be set on a server agent. +- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](#_disable_host_node_id). - ~> **Warning:** The `partition` option cannot be used either the - [`segment`](#segment-2) option or [`-segment`](#_segment) flag. - -- `performance` Available in Consul 0.7 and later, this is a nested object that allows tuning the performance of different subsystems in Consul. See the [Server Performance](/docs/install/performance) documentation for more details. The following parameters are available: - - - `leave_drain_time` - A duration that a server will dwell during a graceful leave in order to allow requests to be retried against other Consul servers. Under normal circumstances, this can prevent clients from experiencing "no leader" errors when performing a rolling update of the Consul servers. This was added in Consul 1.0. Must be a duration value such as 10s. Defaults to 5s. - - - `raft_multiplier` - An integer multiplier used by Consul servers to scale key Raft timing parameters. Omitting this value or setting it to 0 uses default timing described below. Lower values are used to tighten timing and increase sensitivity while higher values relax timings and reduce sensitivity. Tuning this affects the time it takes Consul to detect leader failures and to perform leader elections, at the expense of requiring more network and CPU resources for better performance. - - By default, Consul will use a lower-performance timing that's suitable - for [minimal Consul servers](/docs/install/performance#minimum), currently equivalent - to setting this to a value of 5 (this default may be changed in future versions of Consul, - depending if the target minimum server profile changes). Setting this to a value of 1 will - configure Raft to its highest-performance mode, equivalent to the default timing of Consul - prior to 0.7, and is recommended for [production Consul servers](/docs/install/performance#production). - - See the note on [last contact](/docs/install/performance#production-server-requirements) timing for more - details on tuning this parameter. The maximum allowed value is 10. - - - `rpc_hold_timeout` - A duration that a client - or server will retry internal RPC requests during leader elections. Under normal - circumstances, this can prevent clients from experiencing "no leader" errors. - This was added in Consul 1.0. Must be a duration value such as 10s. Defaults - to 7s. - -- `pid_file` Equivalent to the [`-pid-file` command line flag](#_pid_file). - -- `ports` This is a nested object that allows setting the bind ports for the following keys: - - - `dns` ((#dns_port)) - The DNS server, -1 to disable. Default 8600. - TCP and UDP. - - `http` ((#http_port)) - The HTTP API, -1 to disable. Default 8500. - TCP only. - - `https` ((#https_port)) - The HTTPS API, -1 to disable. Default -1 - (disabled). **We recommend using `8501`** for `https` by convention as some tooling - will work automatically with this. - - `grpc` ((#grpc_port)) - The gRPC API, -1 to disable. Default -1 (disabled). - **We recommend using `8502`** for `grpc` by convention as some tooling will work - automatically with this. This is set to `8502` by default when the agent runs - in `-dev` mode. 
Currently gRPC is only used to expose Envoy xDS API to Envoy - proxies. - - `serf_lan` ((#serf_lan_port)) - The Serf LAN port. Default 8301. TCP - and UDP. Equivalent to the [`-serf-lan-port` command line flag](#_serf_lan_port). - - `serf_wan` ((#serf_wan_port)) - The Serf WAN port. Default 8302. - Equivalent to the [`-serf-wan-port` command line flag](#_serf_wan_port). Set - to -1 to disable. **Note**: this will disable WAN federation which is not recommended. - Various catalog and WAN related endpoints will return errors or empty results. - TCP and UDP. - - `server` ((#server_rpc_port)) - Server RPC address. Default 8300. TCP - only. - - `sidecar_min_port` ((#sidecar_min_port)) - Inclusive minimum port number - to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). - Default 21000. Set to `0` to disable automatic port assignment. - - `sidecar_max_port` ((#sidecar_max_port)) - Inclusive maximum port number - to use for automatically assigned [sidecar service registrations](/docs/connect/registration/sidecar-service). - Default 21255. Set to `0` to disable automatic port assignment. - - `expose_min_port` ((#expose_min_port)) - Inclusive minimum port number - to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). - Default 21500. Set to `0` to disable automatic port assignment. - - `expose_max_port` ((#expose_max_port)) - Inclusive maximum port number - to use for automatically assigned [exposed check listeners](/docs/connect/registration/service-registration#expose-paths-configuration-reference). - Default 21755. Set to `0` to disable automatic port assignment. - -- `primary_datacenter` - This designates the datacenter - which is authoritative for ACL information, intentions and is the root Certificate - Authority for Connect. It must be provided to enable ACLs. All servers and datacenters - must agree on the primary datacenter. Setting it on the servers is all you need - for cluster-level enforcement, but for the APIs to forward properly from the clients, - it must be set on them too. In Consul 0.8 and later, this also enables agent-level - enforcement of ACLs. - -- `primary_gateways` Equivalent to the [`-primary-gateway` - command-line flag](#_primary_gateway). Takes a list of addresses to use as the - mesh gateways for the primary datacenter when authoritative replicated catalog - data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval) - until at least one primary mesh gateway is discovered. This was added in Consul - 1.8.0. - -- `primary_gateways_interval` Time to wait - between [`primary_gateways`](#primary_gateways) discovery attempts. Defaults to - 30s. This was added in Consul 1.8.0. - -- `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line - flag](#_protocol). +## Raft Parameters - `raft_boltdb` ((#raft_boltdb)) This is a nested object that allows configuring options for Raft's BoltDB based log store. @@ -1318,107 +1489,18 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr server a `SIGHUP` to allow recovery without downtime when followers can't keep up. -- `reap` This controls Consul's automatic reaping of child processes, - which is useful if Consul is running as PID 1 in a Docker container. If this isn't - specified, then Consul will automatically reap child processes if it detects it - is running as PID 1. 
If this is set to true or false, then it controls reaping - regardless of Consul's PID (forces reaping on or off, respectively). This option - was removed in Consul 0.7.1. For later versions of Consul, you will need to reap - processes using a wrapper, please see the [Consul Docker image entry point script](https://github.com/hashicorp/docker-consul/blob/master/0.X/docker-entrypoint.sh) - for an example. If you are using Docker 1.13.0 or later, you can use the new `--init` - option of the `docker run` command and docker will enable an init process with - PID 1 that reaps child processes for the container. More info on [Docker docs](https://docs.docker.com/engine/reference/commandline/run/#options). +## Serf Parameters -- `reconnect_timeout` This controls how long it - takes for a failed node to be completely removed from the cluster. This defaults - to 72 hours and it is recommended that this is set to at least double the maximum - expected recoverable outage time for a node or network partition. WARNING: Setting - this time too low could cause Consul servers to be removed from quorum during an - extended node failure or partition, which could complicate recovery of the cluster. - The value is a time with a unit suffix, which can be "s", "m", "h" for seconds, - minutes, or hours. The value must be >= 8 hours. +- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](#_serf_lan_bind). + This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port). -- `reconnect_timeout_wan` This is the WAN equivalent - of the [`reconnect_timeout`](#reconnect_timeout) parameter, which controls - how long it takes for a failed server to be completely removed from the WAN pool. - This also defaults to 72 hours, and must be >= 8 hours. +- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](#_serf_lan_allowed_cidrs). -- `recursors` This flag provides addresses of upstream DNS - servers that are used to recursively resolve queries if they are not inside the - service domain for Consul. For example, a node can use Consul directly as a DNS - server, and if the record is outside of the "consul." domain, the query will be - resolved upstream. As of Consul 1.0.1 recursors can be provided as IP addresses - or as go-sockaddr templates. IP addresses are resolved in order, and duplicates - are ignored. +- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](#_serf_wan_bind). -- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](#_rejoin). +- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](#_serf_wan_allowed_cidrs). -- `retry_join` - Equivalent to the [`-retry-join`](#retry-join) command-line flag. - -- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](#_retry_interval). - -- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. - -- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](#_retry_interval_wan). - -- `rpc` configuration for Consul servers. - - - `enable_streaming` ((#rpc_enable_streaming)) defaults to true. If set to false it will disable - the gRPC subscribe endpoint on a Consul Server. 
All - servers in all federated datacenters must have this enabled before any client can use - [`use_streaming_backend`](#use_streaming_backend). - -- `segment` - Equivalent to the [`-segment` command-line flag](#_segment). - - ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option. - -- `segments` - (Server agents only) This is a list of nested objects - that specifies user-defined network segments, not including the `` segment, which is - created automatically. Review the [Network Segments documentation](/docs/enterprise/network-segments) - for more details. - - - `name` ((#segment_name)) - The name of the segment. Must be a string - between 1 and 64 characters in length. - - `bind` ((#segment_bind)) - The bind address to use for the segment's - gossip layer. Defaults to the [`-bind`](#_bind) value if not provided. - - `port` ((#segment_port)) - The port to use for the segment's gossip - layer (required). - - `advertise` ((#segment_advertise)) - The advertise address to use for - the segment's gossip layer. Defaults to the [`-advertise`](#_advertise) value - if not provided. - - `rpc_listener` ((#segment_rpc_listener)) - If true, a separate RPC - listener will be started on this segment's [`-bind`](#_bind) address on the rpc - port. Only valid if the segment's bind address differs from the [`-bind`](#_bind) - address. Defaults to false. - -- `server` Equivalent to the [`-server` command-line flag](#_server). - -- `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.** - -- `read_replica` - Equivalent to the [`-read-replica` command-line flag](#_read_replica). - -- `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTL's - shorter than the specified limit. It is recommended to keep this limit at or above - the default to encourage clients to send infrequent heartbeats. Defaults to 10s. - -- `skip_leave_on_interrupt` This is similar - to [`leave_on_terminate`](#leave_on_terminate) but only affects interrupt handling. - When Consul receives an interrupt signal (such as hitting Control-C in a terminal), - Consul will gracefully leave the cluster. Setting this to `true` disables that - behavior. The default behavior for this feature varies based on whether or not - the agent is running as a client or a server (prior to Consul 0.7 the default value - was unconditionally set to `false`). On agents in client-mode, this defaults to - `false` and for agents in server-mode, this defaults to `true` (i.e. Ctrl-C on - a server will keep the server in the cluster and therefore quorum, and Ctrl-C on - a client will gracefully leave). - -- `start_join` An array of strings specifying addresses - of nodes to [`-join`](#_join) upon startup. Note that using - `retry_join` could be more appropriate to help mitigate - node startup race conditions when automating a Consul cluster deployment. - -- `start_join_wan` An array of strings specifying addresses - of WAN nodes to [`-join-wan`](#_join_wan) upon startup. +## Telemetry Paramters - `telemetry` This is a nested object that configures where Consul sends its runtime telemetry, and contains the following keys: @@ -1552,31 +1634,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr can be used to capture runtime information. This streams via TCP and can only be used with statsite. 
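+
+  As a brief sketch, a `telemetry` stanza combining a few common keys might look
+  like the following (the address and retention value are placeholders):
+
+  ```hcl
+  telemetry {
+    # Keep Prometheus-format metrics available to scrapers for 60 seconds.
+    prometheus_retention_time = "60s"
+    # Forward metrics to a local DogStatsD agent (placeholder address).
+    dogstatsd_addr = "127.0.0.1:8125"
+    # Drop the hostname prefix from metric names.
+    disable_hostname = true
+  }
+  ```
+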
-- `syslog_facility` When [`enable_syslog`](#enable_syslog) - is provided, this controls to which facility messages are sent. By default, `LOCAL0` - will be used. - -- `translate_wan_addrs` If set to true, Consul - will prefer a node's configured [WAN address](#_advertise-wan) - when servicing DNS and HTTP requests for a node in a remote datacenter. This allows - the node to be reached within its own datacenter using its local address, and reached - from other datacenters using its WAN address, which is useful in hybrid setups - with mixed networks. This is disabled by default. - - Starting in Consul 0.7 and later, node addresses in responses to HTTP requests will also prefer a - node's configured [WAN address](#_advertise-wan) when querying for a node in a remote - datacenter. An [`X-Consul-Translate-Addresses`](/api#translated-addresses) header - will be present on all responses when translation is enabled to help clients know that the addresses - may be translated. The `TaggedAddresses` field in responses also have a `lan` address for clients that - need knowledge of that address, regardless of translation. - - The following endpoints translate addresses: - - - [`/v1/catalog/nodes`](/api/catalog#list-nodes) - - [`/v1/catalog/node/`](/api/catalog#retrieve-map-of-services-for-a-node) - - [`/v1/catalog/service/`](/api/catalog#list-nodes-for-service) - - [`/v1/health/service/`](/api/health#list-nodes-for-service) - - [`/v1/query//execute`](/api/query#execute-prepared-query) +## UI Parameters - `ui` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.enabled`](#ui_config_enabled) field instead.** Equivalent to the [`-ui`](#_ui) command-line flag. @@ -1709,34 +1767,6 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr Specifying this configuration key will enable the web UI. There is no need to specify both ui-dir and ui. Specifying both will result in an error. -- `unix_sockets` - This allows tuning the ownership and - permissions of the Unix domain socket files created by Consul. Domain sockets are - only used if the HTTP address is configured with the `unix://` prefix. - - It is important to note that this option may have different effects on - different operating systems. Linux generally observes socket file permissions - while many BSD variants ignore permissions on the socket file itself. It is - important to test this feature on your specific distribution. This feature is - currently not functional on Windows hosts. - - The following options are valid within this construct and apply globally to all - sockets created by Consul: - - - `user` - The name or ID of the user who will own the socket file. - - `group` - The group ID ownership of the socket file. This option - currently only supports numeric IDs. - - `mode` - The permission bits to set on the file. - -- `use_streaming_backend` defaults to true. When enabled Consul client agents will use - streaming rpc, instead of the traditional blocking queries, for endpoints which support - streaming. All servers must have [`rpc.enable_streaming`](#rpc_enable_streaming) - enabled before any client can enable `use_streaming_backend`. - -- `watches` - Watches is a list of watch specifications which - allow an external process to be automatically invoked when a particular data view - is updated. See the [watch documentation](/docs/agent/watches) for more detail. - Watches can be modified when the configuration is reloaded. 
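+
+  A minimal sketch of the replacement setting (shown in HCL) would be:
+
+  ```hcl
+  # Preferred over the deprecated top-level `ui` key in Consul 1.9.0 and later.
+  ui_config {
+    enabled = true
+  }
+  ```
+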
- ## TLS Configuration Reference This section documents all of the configuration settings that apply to Agent TLS. Agent From 9f693afcbac840276b86a9fbdb4c353b636fba53 Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 12:06:21 -0800 Subject: [PATCH 121/785] docs: fix agent config links --- .../docs/agent/config/agent-config-cli.mdx | 14 +-- .../docs/agent/config/agent-config-files.mdx | 94 +++++++++---------- website/content/docs/agent/config/index.mdx | 4 +- 3 files changed, 56 insertions(+), 56 deletions(-) diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/agent-config-cli.mdx index 8daeebcc1..82d660803 100644 --- a/website/content/docs/agent/config/agent-config-cli.mdx +++ b/website/content/docs/agent/config/agent-config-cli.mdx @@ -26,7 +26,7 @@ information. limit of 4k for maximum size of checks, this is a positive value. By limiting this size, it allows to put less pressure on Consul servers when many checks are having a very large output in their checks. In order to completely disable check output - capture, it is possible to use [`discard_check_output`](#discard_check_output). + capture, it is possible to use [`discard_check_output`](/docs/agent/config/agent-config-files#discard_check_output). - `-client` ((#\_client)) - The address to which Consul will bind client interfaces, including the HTTP and DNS servers. By default, this is "127.0.0.1", @@ -122,7 +122,7 @@ information. - `-raft-protocol` ((#\_raft_protocol)) - This controls the internal version of the Raft consensus protocol used for server communications. This must be set - to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/docs/upgrade-specific#raft-protocol-version-compatibility) for more details. + to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](/docs/agent/config/agent-config-files#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/docs/upgrade-specific#raft-protocol-version-compatibility) for more details. - `-segment` ((#\_segment)) - This flag is used to set the name of the network segment the agent belongs to. An agent can only join and @@ -146,13 +146,13 @@ information. - `-advertise-wan` ((#\_advertise-wan)) - The advertise WAN address is used to change the address that we advertise to server nodes joining through the WAN. - This can also be set on client agents when used in combination with the [`translate_wan_addrs`](#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address + This can also be set on client agents when used in combination with the [`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address is advertised. However, in some cases all members of all datacenters cannot be on the same physical or virtual network, especially on hybrid setups mixing cloud and private datacenters. 
This flag enables server nodes gossiping through the public network for the WAN while using private VLANs for gossiping to each other and their client agents, and it allows client agents to be reached at this address when being - accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] template that is resolved at runtime. ## Address Bind Options @@ -160,10 +160,10 @@ information. - `-bind` ((#\_bind)) - The address that should be bound to for internal cluster communications. This is an IP address that should be reachable by all other nodes in the cluster. By default, this is "0.0.0.0", meaning Consul will bind to - all addresses on the local machine and will [advertise](/docs/agent/options#_advertise) + all addresses on the local machine and will [advertise](#_advertise) the private IPv4 address to the rest of the cluster. If there are multiple private IPv4 addresses available, Consul will exit with an error at startup. If you specify - `"[::]"`, Consul will [advertise](/docs/agent/options#_advertise) the public + `"[::]"`, Consul will [advertise](#_advertise) the public IPv6 address. If there are multiple public IPv6 addresses available, Consul will exit with an error at startup. Consul uses both TCP and UDP and the same port for both. If you have any firewalls, be sure to allow both protocols. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] @@ -290,7 +290,7 @@ information. If Consul is running on the non-default Serf LAN port, the port must be specified in the join address, or configured as the agent's default Serf port - using the [`ports.serf_lan`](#serf_lan_port) configuration option or + using the [`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port) configuration option or [`-serf-lan-port`](#_serf_lan_port) command line flag. If using network segments (Enterprise), see [additional documentation on diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx index f22a1eb69..40dae7878 100644 --- a/website/content/docs/agent/config/agent-config-files.mdx +++ b/website/content/docs/agent/config/agent-config-files.mdx @@ -80,7 +80,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `https` - The HTTPS API. Defaults to `client_addr` - `grpc` - The gRPC API. Defaults to `client_addr` -- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](#_alt_domain) +- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](/docs/agent/config/agent-config-cli#_alt_domain) - `audit` - Added in Consul 1.8, the audit object allow users to enable auditing and configure a sink and filters for their audit logs. For more information, review the [audit log tutorial](https://learn.hashicorp.com/tutorials/consul/audit-logging). @@ -207,7 +207,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `server_addresses` (Defaults to `[]`) This specifies the addresses of servers in the local datacenter to use for the initial RPC. 
These addresses support - [Cloud Auto-Joining](#cloud-auto-joining) and can optionally include a port to + [Cloud Auto-Joining](/docs/agent/config/agent-config-cli#cloud-auto-joining) and can optionally include a port to use when making the outbound connection. If not port is provided the `server_port` will be used. @@ -310,7 +310,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `partition` - The admin partition name the client is requesting. -- `bind_addr` Equivalent to the [`-bind` command-line flag](#_bind). +- `bind_addr` Equivalent to the [`-bind` command-line flag](/docs/agent/config/agent-config-cli#_bind). This parameter can be set to a go-sockaddr template that resolves to a single address. Special characters such as backslashes `\` or double quotes `"` @@ -358,7 +358,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr changes state, the new state and associated output is synchronized immediately. To disable this behavior, set the value to "0s". -- `client_addr` Equivalent to the [`-client` command-line flag](#_client). +- `client_addr` Equivalent to the [`-client` command-line flag](/docs/agent/config/agent-config-cli#_client). - `config_entries` This object allows setting options for centralized config entries. @@ -372,9 +372,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr See the [configuration entry docs](/docs/agent/config-entries) for more details about the contents of each entry. -- `datacenter` Equivalent to the [`-datacenter` command-line flag](#_datacenter). +- `datacenter` Equivalent to the [`-datacenter` command-line flag](/docs/agent/config/agent-config-cli#_datacenter). -- `data_dir` Equivalent to the [`-data-dir` command-line flag](#_data_dir). +- `data_dir` Equivalent to the [`-data-dir` command-line flag](/docs/agent/config/agent-config-cli#_data_dir). - `disable_anonymous_signature` Disables providing an anonymous signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check). @@ -404,17 +404,17 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `enable_debug` When set, enables some additional debugging features. Currently, this is only used to access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`. -- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](#_enable_script_checks). +- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/docs/agent/config/agent-config-cli#_enable_script_checks). ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information. ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. Refer to the following article for additional guidance: [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) for more details. -- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](#_enable_local_script_checks). 
+- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](/docs/agent/config/agent-config-cli#_enable_local_script_checks). - `disable_keyring_file` - Equivalent to the - [`-disable-keyring-file` command-line flag](#_disable_keyring_file). + [`-disable-keyring-file` command-line flag](/docs/agent/config/agent-config-cli#_disable_keyring_file). - `disable_coordinates` - Disables sending of [network coordinates](/docs/architecture/coordinates). When network coordinates are disabled the `near` query param will not work to sort the nodes, @@ -474,9 +474,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. -- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](#_default_query_time). +- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](/docs/agent/config/agent-config-cli#_default_query_time). -- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](#_max_query_time). +- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](/docs/agent/config/agent-config-cli#_max_query_time). - `partition` - This flag is used to set the name of the admin partition the agent belongs to. An agent can only join @@ -557,7 +557,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr enforcement of ACLs. - `primary_gateways` Equivalent to the [`-primary-gateway` - command-line flag](#_primary_gateway). Takes a list of addresses to use as the + command-line flag](/docs/agent/config/agent-config-cli#_primary_gateway). Takes a list of addresses to use as the mesh gateways for the primary datacenter when authoritative replicated catalog data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval) until at least one primary mesh gateway is discovered. This was added in Consul @@ -568,7 +568,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr 30s. This was added in Consul 1.8.0. - `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line - flag](#_protocol). + flag](/docs/agent/config/agent-config-cli#_protocol). - `reap` This controls Consul's automatic reaping of child processes, which is useful if Consul is running as PID 1 in a Docker container. 
If this isn't @@ -610,7 +610,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr servers in all federated datacenters must have this enabled before any client can use [`use_streaming_backend`](#use_streaming_backend). -- `segment` - Equivalent to the [`-segment` command-line flag](#_segment). +- `segment` - Equivalent to the [`-segment` command-line flag](/docs/agent/config/agent-config-cli#_segment). ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option. @@ -633,11 +633,11 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr port. Only valid if the segment's bind address differs from the [`-bind`](#_bind) address. Defaults to false. -- `server` Equivalent to the [`-server` command-line flag](#_server). +- `server` Equivalent to the [`-server` command-line flag](/docs/agent/config/agent-config-cli#_server). - `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.** -- `read_replica` - Equivalent to the [`-read-replica` command-line flag](#_read_replica). +- `read_replica` - Equivalent to the [`-read-replica` command-line flag](/docs/agent/config/agent-config-cli#_read_replica). - `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTL's shorter than the specified limit. It is recommended to keep this limit at or above @@ -935,13 +935,13 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Advertise Address Parameters -- `advertise_addr` Equivalent to the [`-advertise` command-line flag](#_advertise). +- `advertise_addr` Equivalent to the [`-advertise` command-line flag](/docs/agent/config/agent-config-cli#_advertise). - `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. -- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](#_advertise-wan). +- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](/docs/agent/config/agent-config-cli#_advertise-wan). - `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. @@ -954,9 +954,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Bootstrap Parameters -- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](#_bootstrap). +- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](/docs/agent/config/agent-config-cli#_bootstrap). -- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](#_bootstrap_expect). +- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](/docs/agent/config/agent-config-cli#_bootstrap_expect). ## Connect Parameters @@ -1228,7 +1228,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr versions and will assume the label is the datacenter. 
See: [this section](/docs/discovery/dns#namespaced-services) for more details. -- `domain` Equivalent to the [`-domain` command-line flag](#_domain). +- `domain` Equivalent to the [`-domain` command-line flag](/docs/agent/config/agent-config-cli#_domain). ## Encryption Parameters @@ -1271,7 +1271,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr the certificates requested by `auto_encrypt` from the server have these `ip_san` set as IP SAN. -- `encrypt` Equivalent to the [`-encrypt` command-line flag](#_encrypt). +- `encrypt` Equivalent to the [`-encrypt` command-line flag](/docs/agent/config/agent-config-cli#_encrypt). - `encrypt_verify_incoming` - This is an optional parameter that can be used to disable enforcing encryption for incoming gossip @@ -1373,15 +1373,15 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Join Parameters -- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](#_rejoin). +- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](/docs/agent/config/agent-config-cli#_rejoin). -- `retry_join` - Equivalent to the [`-retry-join`](#retry-join) command-line flag. +- `retry_join` - Equivalent to the [`-retry-join`](/docs/agent/config/agent-config-cli#retry-join) command-line flag. -- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](#_retry_interval). +- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](/docs/agent/config/agent-config-cli#_retry_interval). -- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. +- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](/docs/agent/config/agent-config-cli#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. -- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](#_retry_interval_wan). +- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](/docs/agent/config/agent-config-cli#_retry_interval_wan). - `start_join` An array of strings specifying addresses of nodes to [`-join`](#_join) upon startup. Note that using @@ -1393,19 +1393,19 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Log Parameters -- `log_file` Equivalent to the [`-log-file` command-line flag](#_log_file). +- `log_file` Equivalent to the [`-log-file` command-line flag](/docs/agent/config/agent-config-cli#_log_file). -- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](#_log_rotate_duration). +- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](/docs/agent/config/agent-config-cli#_log_rotate_duration). -- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](#_log_rotate_bytes). +- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](/docs/agent/config/agent-config-cli#_log_rotate_bytes). -- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](#_log_rotate_max_files). +- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](/docs/agent/config/agent-config-cli#_log_rotate_max_files). -- `log_level` Equivalent to the [`-log-level` command-line flag](#_log_level). 
+- `log_level` Equivalent to the [`-log-level` command-line flag](/docs/agent/config/agent-config-cli#_log_level). -- `log_json` Equivalent to the [`-log-json` command-line flag](#_log_json). +- `log_json` Equivalent to the [`-log-json` command-line flag](/docs/agent/config/agent-config-cli#_log_json). -- `enable_syslog` Equivalent to the [`-syslog` command-line flag](#_syslog). +- `enable_syslog` Equivalent to the [`-syslog` command-line flag](/docs/agent/config/agent-config-cli#_syslog). - `syslog_facility` When [`enable_syslog`](#enable_syslog) is provided, this controls to which facility messages are sent. By default, `LOCAL0` @@ -1413,11 +1413,11 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Node Parameters -- `node_id` Equivalent to the [`-node-id` command-line flag](#_node_id). +- `node_id` Equivalent to the [`-node-id` command-line flag](/docs/agent/config/agent-config-cli#_node_id). -- `node_name` Equivalent to the [`-node` command-line flag](#_node). +- `node_name` Equivalent to the [`-node` command-line flag](/docs/agent/config/agent-config-cli#_node). -- `node_meta` Available in Consul 0.7.3 and later, This object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](#_node_meta) for more information. +- `node_meta` Available in Consul 0.7.3 and later, This object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](/docs/agent/config/agent-config-cli#_node_meta) for more information. ```json { @@ -1427,7 +1427,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr } ``` -- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](#_disable_host_node_id). +- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](/docs/agent/config/agent-config-cli#_disable_host_node_id). ## Raft Parameters @@ -1442,7 +1442,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `raft_protocol` ((#raft_protocol)) Equivalent to the [`-raft-protocol` - command-line flag](#_raft_protocol). + command-line flag](/docs/agent/config/agent-config-cli#_raft_protocol). - `raft_snapshot_threshold` ((#\_raft_snapshot_threshold)) This controls the minimum number of raft commit entries between snapshots that are saved to @@ -1491,14 +1491,14 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Serf Parameters -- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](#_serf_lan_bind). +- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](/docs/agent/config/agent-config-cli#_serf_lan_bind). This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port). -- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](#_serf_lan_allowed_cidrs). +- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](/docs/agent/config/agent-config-cli#_serf_lan_allowed_cidrs). -- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](#_serf_wan_bind). 
+- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](/docs/agent/config/agent-config-cli#_serf_wan_bind). -- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](#_serf_wan_allowed_cidrs). +- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](/docs/agent/config/agent-config-cli#_serf_wan_allowed_cidrs). ## Telemetry Paramters @@ -1637,7 +1637,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## UI Parameters - `ui` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.enabled`](#ui_config_enabled) field instead.** - Equivalent to the [`-ui`](#_ui) command-line flag. + Equivalent to the [`-ui`](/docs/agent/config/agent-config-cli#_ui) command-line flag. - `ui_config` - This object allows a number of sub-keys to be set which controls the display or features available in the UI. Configuring the UI with this @@ -1648,12 +1648,12 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `enabled` ((#ui_config_enabled)) - This enables the service of the web UI from this agent. Boolean value, defaults to false. In `-dev` mode this defaults to true. Replaces `ui` from before 1.9.0. Equivalent to the - [`-ui`](#_ui) command-line flag. + [`-ui`](/docs/agent/config/agent-config-cli#_ui) command-line flag. - `dir` ((#ui_config_dir)) - This specifies that the web UI should be served from an external dir rather than the build in one. This allows for customization or development. Replaces `ui_dir` from before 1.9.0. - Equivalent to the [`-ui-dir`](#_ui_dir) command-line flag. + Equivalent to the [`-ui-dir`](/docs/agent/config/agent-config-cli#_ui_dir) command-line flag. - `content_path` ((#ui_config_content_path)) - This specifies the HTTP path that the web UI should be served from. Defaults to `/ui/`. Equivalent to the @@ -1762,7 +1762,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `{{Datacenter}}` - Replaced with the current service's datacenter. - `ui_dir` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.dir`](#ui_config_dir) field instead.** - Equivalent to the [`-ui-dir`](#_ui_dir) command-line + Equivalent to the [`-ui-dir`](/docs/agent/config/agent-config-cli#_ui_dir) command-line flag. This configuration key is not required as of Consul version 0.7.0 and later. Specifying this configuration key will enable the web UI. There is no need to specify both ui-dir and ui. Specifying both will result in an error. diff --git a/website/content/docs/agent/config/index.mdx b/website/content/docs/agent/config/index.mdx index 362cf36f6..44ced8493 100644 --- a/website/content/docs/agent/config/index.mdx +++ b/website/content/docs/agent/config/index.mdx @@ -16,8 +16,8 @@ descriptions. Configuration precedence is evaluated in the following order: -1. Command line arguments -2. Configuration files +1. [Command line arguments](/docs/agent/config/agent-config-cli) +2. [Configuration files](/docs/agent/config/agent-config-files) When loading configuration, the Consul agent loads the configuration from files and directories in lexical order. 
For example, configuration file From cd73f27c8404fe1391621c19820983effa7bb092 Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 13:13:13 -0800 Subject: [PATCH 122/785] docs: fix external links to agent config pages --- .../network-areas/README.md | 2 +- docs/config/README.md | 6 +- docs/config/checklist-adding-config-fields.md | 4 +- docs/rpc/README.md | 2 +- website/content/api-docs/acl/index.mdx | 10 +- website/content/api-docs/agent/index.mdx | 20 +-- website/content/api-docs/config.mdx | 2 +- .../content/api-docs/connect/intentions.mdx | 2 +- website/content/api-docs/health.mdx | 2 +- website/content/api-docs/index.mdx | 4 +- .../content/api-docs/operator/autopilot.mdx | 2 +- .../content/commands/acl/set-agent-token.mdx | 2 +- website/content/commands/config/index.mdx | 2 +- website/content/commands/connect/envoy.mdx | 2 +- website/content/commands/debug.mdx | 2 +- website/content/commands/index.mdx | 2 +- .../content/commands/operator/autopilot.mdx | 4 +- website/content/commands/reload.mdx | 2 +- website/content/commands/validate.mdx | 2 +- website/content/docs/agent/config-entries.mdx | 6 +- .../docs/agent/config/agent-config-files.mdx | 2 +- website/content/docs/agent/config/index.mdx | 18 +-- website/content/docs/agent/index.mdx | 32 ++--- website/content/docs/agent/telemetry.mdx | 26 ++-- website/content/docs/connect/ca/aws.mdx | 6 +- website/content/docs/connect/ca/consul.mdx | 2 +- website/content/docs/connect/ca/index.mdx | 2 +- website/content/docs/connect/ca/vault.mdx | 4 +- .../config-entries/exported-services.mdx | 2 +- .../docs/connect/config-entries/index.mdx | 2 +- .../connect/config-entries/proxy-defaults.mdx | 4 +- .../config-entries/service-defaults.mdx | 4 +- .../config-entries/service-intentions.mdx | 4 +- .../content/docs/connect/configuration.mdx | 14 +-- .../docs/connect/connect-internals.mdx | 4 +- .../docs/connect/gateways/ingress-gateway.mdx | 4 +- ...service-to-service-traffic-datacenters.mdx | 26 ++-- .../service-to-service-traffic-partitions.mdx | 20 +-- .../wan-federation-via-mesh-gateways.mdx | 5 +- .../connect/gateways/terminating-gateway.mdx | 4 +- .../docs/connect/intentions-legacy.mdx | 2 +- website/content/docs/connect/intentions.mdx | 2 +- .../docs/connect/observability/index.mdx | 6 +- .../observability/ui-visualization.mdx | 14 +-- .../content/docs/connect/proxies/built-in.mdx | 4 +- .../content/docs/connect/proxies/envoy.mdx | 2 +- .../connect/proxies/managed-deprecated.mdx | 6 +- .../registration/service-registration.mdx | 4 +- .../connect/registration/sidecar-service.mdx | 4 +- website/content/docs/discovery/checks.mdx | 10 +- website/content/docs/discovery/dns.mdx | 26 ++-- .../content/docs/dynamic-app-config/kv.mdx | 4 +- .../docs/dynamic-app-config/watches.mdx | 2 +- .../content/docs/enterprise/audit-logging.mdx | 8 +- .../docs/enterprise/license/overview.mdx | 8 +- .../docs/enterprise/network-segments.mdx | 32 ++--- .../content/docs/enterprise/read-scale.mdx | 4 +- website/content/docs/index.mdx | 2 +- .../content/docs/install/bootstrapping.mdx | 14 +-- .../content/docs/install/cloud-auto-join.mdx | 4 +- .../content/docs/install/manual-bootstrap.mdx | 2 +- website/content/docs/install/performance.mdx | 20 +-- website/content/docs/install/ports.mdx | 2 +- .../docs/k8s/connect/connect-ca-provider.mdx | 4 +- website/content/docs/k8s/helm.mdx | 16 +-- .../servers-outside-kubernetes.mdx | 4 +- .../installation/multi-cluster/kubernetes.mdx | 4 +- .../multi-cluster/vms-and-kubernetes.mdx | 2 +- 
website/content/docs/nia/configuration.mdx | 6 +- .../docs/nia/installation/requirements.mdx | 2 +- .../docs/releases/release-notes/v1_9_0.mdx | 2 +- .../content/docs/security/acl/acl-legacy.mdx | 102 +++++++-------- .../content/docs/security/acl/acl-rules.mdx | 26 ++-- .../docs/security/acl/auth-methods/index.mdx | 2 +- website/content/docs/security/encryption.mdx | 22 ++-- .../docs/security/security-models/core.mdx | 54 ++++---- .../docs/troubleshoot/common-errors.mdx | 6 +- website/content/docs/troubleshoot/faq.mdx | 8 +- .../instructions/general-process.mdx | 4 +- .../instructions/upgrade-to-1-6-x.mdx | 8 +- .../docs/upgrading/upgrade-specific.mdx | 119 +++++++++--------- .../partials/http_api_options_client.mdx | 2 +- 82 files changed, 422 insertions(+), 418 deletions(-) diff --git a/docs/cluster-federation/network-areas/README.md b/docs/cluster-federation/network-areas/README.md index 0b62162e4..efe10aa06 100644 --- a/docs/cluster-federation/network-areas/README.md +++ b/docs/cluster-federation/network-areas/README.md @@ -35,7 +35,7 @@ Every Consul Enterprise server maintains a reconciliation routine where every 30 Joining a network area pool involves: 1. Setting memberlist and Serf configuration. - * Prior to Consul `v1.8.11` and `v1.9.5`, network areas were configured with memberlist's [DefaultWANConfig](https://github.com/hashicorp/memberlist/blob/838073fef1a4e1f6cb702a57a8075304098b1c31/config.go#L315). This was then updated to instead use the server's [gossip_wan](https://www.consul.io/docs/agent/options#gossip_wan) configuration, which falls back to the DefaultWANConfig if it was not specified. + * Prior to Consul `v1.8.11` and `v1.9.5`, network areas were configured with memberlist's [DefaultWANConfig](https://github.com/hashicorp/memberlist/blob/838073fef1a4e1f6cb702a57a8075304098b1c31/config.go#L315). This was then updated to instead use the server's [gossip_wan](https://www.consul.io/docs/agent/config/agent-config-files#gossip_wan) configuration, which falls back to the DefaultWANConfig if it was not specified. * As of Consul `v1.8.11`/`v1.9.5` it is not possible to tune gossip communication on a per-area basis. 2. Update the server's gossip network, which keeps track of network areas that the server is a part of. This gossip network is also used to dispatch incoming **gossip** connections to handlers for the appropriate area. diff --git a/docs/config/README.md b/docs/config/README.md index 49f8014cf..fe38011b9 100644 --- a/docs/config/README.md +++ b/docs/config/README.md @@ -10,10 +10,10 @@ specified using command line flags, and some can be loaded with [Auto-Config]. See also the [checklist for adding a new field] to the configuration. [hcl]: https://github.com/hashicorp/hcl/tree/hcl1 -[Agent Configuration]: https://www.consul.io/docs/agent/options +[Agent Configuration]: https://www.consul.io/docs/agent/config [checklist for adding a new field]: ./checklist-adding-config-fields.md [Auto-Config]: #auto-config -[Config Entries]: https://www.consul.io/docs/agent/options#config_entries +[Config Entries]: https://www.consul.io/docs/agent/config/agent-config-files#config_entries [Services]: https://www.consul.io/docs/discovery/services [Checks]: https://www.consul.io/docs/discovery/checks @@ -53,6 +53,6 @@ implemented in a couple packages. 
* the server RPC endpoint is in [agent/consul/auto_config_endpoint.go] * the client that receives and applies the config is implemented in [agent/auto-config] -[auto_config]: https://www.consul.io/docs/agent/options#auto_config +[auto_config]: https://www.consul.io/docs/agent/config/agent-config-files#auto_config [agent/consul/auto_config_endpoint.go]: https://github.com/hashicorp/consul/blob/main/agent/consul/auto_config_endpoint.go [agent/auto-config]: https://github.com/hashicorp/consul/tree/main/agent/auto-config diff --git a/docs/config/checklist-adding-config-fields.md b/docs/config/checklist-adding-config-fields.md index fff3fba36..66807c072 100644 --- a/docs/config/checklist-adding-config-fields.md +++ b/docs/config/checklist-adding-config-fields.md @@ -55,7 +55,7 @@ There are four specific cases covered with increasing complexity: state for client agent's RPC client. - [ ] Add a test to `agent/agent_test.go` similar to others with prefix `TestAgent_reloadConfig*`. - - [ ] Add documentation to `website/content/docs/agent/options.mdx`. + - [ ] Add documentation to `website/content/docs/agent/config/agent-config-files.mdx`. Done! You can now use your new field in a client agent by accessing `s.agent.Config.`. @@ -75,7 +75,7 @@ If the config field also needs a CLI flag, then follow these steps. `TestLoad_IntegrationWithFlags` in `agent/config/runtime_test.go` to ensure setting the flag works. - [ ] Add flag (as well as config file) documentation to - `website/source/docs/agent/options.html.md`. + `website/source/docs/agent/config/agent-config-files.mdx` and `website/source/docs/agent/config/agent-config-cli.mdx`. ## Adding a Simple Config Field for Servers Consul servers have a separate Config struct for reasons. Note that Consul diff --git a/docs/rpc/README.md b/docs/rpc/README.md index 8a5236d4a..7d8a75cad 100644 --- a/docs/rpc/README.md +++ b/docs/rpc/README.md @@ -22,7 +22,7 @@ The "RPC Server" accepts requests to the [server port] and routes the requests b configuration of the Server and the the first byte in the request. The diagram below shows all the possible routing flows. -[server port]: https://www.consul.io/docs/agent/options#server_rpc_port +[server port]: https://www.consul.io/docs/agent/config/agent-config-files#server_rpc_port ![RPC Routing](./routing.svg) diff --git a/website/content/api-docs/acl/index.mdx b/website/content/api-docs/acl/index.mdx index 081025ced..6a39bef52 100644 --- a/website/content/api-docs/acl/index.mdx +++ b/website/content/api-docs/acl/index.mdx @@ -16,7 +16,7 @@ the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-s ## Bootstrap ACLs This endpoint does a special one-time bootstrap of the ACL system, making the first -management token if the [`acl.tokens.initial_management`](/docs/agent/options#acl_tokens_initial_management) +management token if the [`acl.tokens.initial_management`](/docs/agent/config/agent-config-files#acl_tokens_initial_management) configuration entry is not specified in the Consul server configuration and if the cluster has not been bootstrapped previously. This is available in Consul 0.9.1 and later, and requires all Consul servers to be upgraded in order to operate. @@ -143,7 +143,7 @@ $ curl \ - `SourceDatacenter` - The authoritative ACL datacenter that ACLs are being replicated from and will match the - [`primary_datacenter`](/docs/agent/options#primary_datacenter) configuration. + [`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter) configuration. 
- `ReplicationType` - The type of replication that is currently in use. @@ -295,7 +295,7 @@ The table below shows this endpoint's support for -> **Note** - To use the login process to create tokens in any connected secondary datacenter, [ACL -replication](/docs/agent/options#acl_enable_token_replication) must be +replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be enabled. Login requires the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. @@ -425,7 +425,7 @@ The table below shows this endpoint's support for -> **Note** - To use the login process to create tokens in any connected secondary datacenter, [ACL -replication](/docs/agent/options#acl_enable_token_replication) must be +replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be enabled. Login requires the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. @@ -505,7 +505,7 @@ The table below shows this endpoint's support for -> **Note** - To use the login process to create tokens in any connected secondary datacenter, [ACL -replication](/docs/agent/options#acl_enable_token_replication) must be +replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be enabled. Login requires the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. diff --git a/website/content/api-docs/agent/index.mdx b/website/content/api-docs/agent/index.mdx index 8a7b4a693..54360d802 100644 --- a/website/content/api-docs/agent/index.mdx +++ b/website/content/api-docs/agent/index.mdx @@ -360,7 +360,7 @@ This endpoint instructs the agent to reload its configuration. Any errors encountered during this process are returned. Not all configuration options are reloadable. See the -[Reloadable Configuration](/docs/agent/options#reloadable-configuration) +[Reloadable Configuration](/docs/agent/config#reloadable-configuration) section on the agent options page for details on which options are supported. | Method | Path | Produces | @@ -440,7 +440,7 @@ page. In order to enable [Prometheus](https://prometheus.io/) support, you need to use the configuration directive -[`prometheus_retention_time`](/docs/agent/options#telemetry-prometheus_retention_time). +[`prometheus_retention_time`](/docs/agent/config/agent-config-files#telemetry-prometheus_retention_time). Since Consul 1.7.2 this endpoint will also automatically switch output format if the request contains an `Accept` header with a compatible MIME type such as @@ -745,7 +745,7 @@ $ curl \ This endpoint updates the ACL tokens currently in use by the agent. It can be used to introduce ACL tokens to the agent for the first time, or to update tokens that were initially loaded from the agent's configuration. Tokens will be persisted -only if the [`acl.enable_token_persistence`](/docs/agent/options#acl_enable_token_persistence) +only if the [`acl.enable_token_persistence`](/docs/agent/config/agent-config-files#acl_enable_token_persistence) configuration is `true`. When not being persisted, they will need to be reset if the agent is restarted. @@ -757,9 +757,9 @@ is restarted. 
| `PUT` | `/agent/token/replication` | `application/json` | The paths above correspond to the token names as found in the agent configuration: -[`default`](/docs/agent/options#acl_tokens_default), [`agent`](/docs/agent/options#acl_tokens_agent), -[`agent_recovery`](/docs/agent/options#acl_tokens_agent_recovery), and -[`replication`](/docs/agent/options#acl_tokens_replication). +[`default`](/docs/agent/config/agent-config-files#acl_tokens_default), [`agent`](/docs/agent/config/agent-config-files#acl_tokens_agent), +[`agent_recovery`](/docs/agent/config/agent-config-files#acl_tokens_agent_recovery), and +[`replication`](/docs/agent/config/agent-config-files#acl_tokens_replication). -> **Deprecation Note:** The following paths were deprecated in version 1.11 @@ -768,7 +768,7 @@ The paths above correspond to the token names as found in the agent configuratio | `PUT` | `/agent/token/agent_master` | `application/json` | The paths above correspond to the token names as found in the agent configuration: -[`agent_master`](/docs/agent/options#acl_tokens_agent_master). +[`agent_master`](/docs/agent/config/agent-config-files#acl_tokens_agent_master). -> **Deprecation Note:** The following paths were deprecated in version 1.4.3 @@ -780,9 +780,9 @@ The paths above correspond to the token names as found in the agent configuratio | `PUT` | `/agent/token/acl_replication_token` | `application/json` | The paths above correspond to the token names as found in the agent configuration: -[`acl_token`](/docs/agent/options#acl_token_legacy), [`acl_agent_token`](/docs/agent/options#acl_agent_token_legacy), -[`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token_legacy), and -[`acl_replication_token`](/docs/agent/options#acl_replication_token_legacy). +[`acl_token`](/docs/agent/config/agent-config-files#acl_token_legacy), [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token_legacy), +[`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token_legacy), and +[`acl_replication_token`](/docs/agent/config/agent-config-files#acl_replication_token_legacy). The table below shows this endpoint's support for [blocking queries](/api-docs/features/blocking), diff --git a/website/content/api-docs/config.mdx b/website/content/api-docs/config.mdx index 27d665aa6..1ba83ed68 100644 --- a/website/content/api-docs/config.mdx +++ b/website/content/api-docs/config.mdx @@ -10,7 +10,7 @@ description: |- The `/config` endpoints create, update, delete and query central configuration entries registered with Consul. See the -[agent configuration](/docs/agent/options#enable_central_service_config) +[agent configuration](/docs/agent/config/agent-config-files#enable_central_service_config) for more information on how to enable this functionality for centrally configuring services and [configuration entries docs](/docs/agent/config-entries) for a description of the configuration entries content. diff --git a/website/content/api-docs/connect/intentions.mdx b/website/content/api-docs/connect/intentions.mdx index 49b6ac660..ddbeab469 100644 --- a/website/content/api-docs/connect/intentions.mdx +++ b/website/content/api-docs/connect/intentions.mdx @@ -96,7 +96,7 @@ The corresponding CLI command is [`consul intention create -replace`](/commands/ evaluation. As with L4 intentions, traffic that fails to match any of the provided permissions in this intention will be subject to the default intention behavior is defined by the default [ACL - policy](/docs/agent/options#acl_default_policy). 
+ policy](/docs/agent/config/agent-config-files#acl_default_policy). This should be omitted for an L4 intention as it is mutually exclusive with the `Action` field. diff --git a/website/content/api-docs/health.mdx b/website/content/api-docs/health.mdx index 6d8a1c677..4d60aafe9 100644 --- a/website/content/api-docs/health.mdx +++ b/website/content/api-docs/health.mdx @@ -241,7 +241,7 @@ The table below shows this endpoint's support for ascending order based on the estimated round trip time from that node. Passing `?near=_agent` will use the agent's node for the sort. This is specified as part of the URL as a query parameter. **Note** that using `near` will ignore - [`use_streaming_backend`](/docs/agent/options#use_streaming_backend) and always + [`use_streaming_backend`](/docs/agent/config/agent-config-files#use_streaming_backend) and always use blocking queries, because the data required to sort the results is not available to the streaming backend. diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx index 481180c94..40dc5a79b 100644 --- a/website/content/api-docs/index.mdx +++ b/website/content/api-docs/index.mdx @@ -83,7 +83,7 @@ $ curl \ Consul 0.7 added the ability to translate addresses in HTTP response based on the configuration setting for -[`translate_wan_addrs`](/docs/agent/options#translate_wan_addrs). In order +[`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs). In order to allow clients to know if address translation is in effect, the `X-Consul-Translate-Addresses` header will be added if translation is enabled, and will have a value of `true`. If translation is not enabled then this header @@ -94,7 +94,7 @@ will not be present. All API responses for Consul versions after 1.9 will include an HTTP response header `X-Consul-Default-ACL-Policy` set to either "allow" or "deny" which mirrors the current value of the agent's -[`acl.default_policy`](/docs/agent/options#acl_default_policy) option. +[`acl.default_policy`](/docs/agent/config/agent-config-files#acl_default_policy) option. This is also the default [intention](/docs/connect/intentions) enforcement action if no intention matches. diff --git a/website/content/api-docs/operator/autopilot.mdx b/website/content/api-docs/operator/autopilot.mdx index e1d050871..f4a2e25c8 100644 --- a/website/content/api-docs/operator/autopilot.mdx +++ b/website/content/api-docs/operator/autopilot.mdx @@ -69,7 +69,7 @@ $ curl \ ``` For more information about the Autopilot configuration options, see the -[agent configuration section](/docs/agent/options#autopilot). +[agent configuration section](/docs/agent/config/agent-config-files#autopilot). ## Update Configuration diff --git a/website/content/commands/acl/set-agent-token.mdx b/website/content/commands/acl/set-agent-token.mdx index 201e8b6ed..dfe9f4567 100644 --- a/website/content/commands/acl/set-agent-token.mdx +++ b/website/content/commands/acl/set-agent-token.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/agent/token/:type](/api-docs/agent This command updates the ACL tokens currently in use by the agent. It can be used to introduce ACL tokens to the agent for the first time, or to update tokens that were initially loaded from the agent's configuration. 
Tokens are not persisted unless -[`acl.enable_token_persistence`](/docs/agent/options#acl_enable_token_persistence) +[`acl.enable_token_persistence`](/docs/agent/config/agent-config-files#acl_enable_token_persistence) is `true`, so tokens will need to be updated again if that option is `false` and the agent is restarted. diff --git a/website/content/commands/config/index.mdx b/website/content/commands/config/index.mdx index 1d3abe7c4..5b22e5115 100644 --- a/website/content/commands/config/index.mdx +++ b/website/content/commands/config/index.mdx @@ -10,7 +10,7 @@ Command: `consul config` The `config` command is used to interact with Consul's central configuration system. It exposes commands for creating, updating, reading, and deleting different kinds of config entries. See the -[agent configuration](/docs/agent/options#enable_central_service_config) +[agent configuration](/docs/agent/config/agent-config-files#enable_central_service_config) for more information on how to enable this functionality for centrally configuring services and [configuration entries docs](/docs/agent/config-entries) for a description of the configuration entries content. diff --git a/website/content/commands/connect/envoy.mdx b/website/content/commands/connect/envoy.mdx index b2a1988e6..68d81d47a 100644 --- a/website/content/commands/connect/envoy.mdx +++ b/website/content/commands/connect/envoy.mdx @@ -42,7 +42,7 @@ proxy configuration needed. be used instead. The scheme can also be set to HTTPS by setting the environment variable CONSUL_HTTP_SSL=true. This may be a unix domain socket using `unix:///path/to/socket` if the [agent is configured to - listen](/docs/agent/options#addresses) that way. + listen](/docs/agent/config/agent-config-files#addresses) that way. -> **Note:** gRPC uses the same TLS settings as the HTTPS API. If HTTPS is enabled then gRPC will require HTTPS diff --git a/website/content/commands/debug.mdx b/website/content/commands/debug.mdx index 58434cb16..23ff3d0da 100644 --- a/website/content/commands/debug.mdx +++ b/website/content/commands/debug.mdx @@ -78,7 +78,7 @@ information when `debug` is running. By default, it captures all information. | `members` | A list of all the WAN and LAN members in the cluster. | | `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. | | `logs` | `DEBUG` level logs for the target agent, captured for the duration. | -| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU profile is captured for `duration` in a single file, trace is captured for a single `interval`, while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/docs/agent/options#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. | +| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/docs/agent/config/agent-config-files#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. 
| ## Examples diff --git a/website/content/commands/index.mdx b/website/content/commands/index.mdx index e468c76cc..4ff1b0735 100644 --- a/website/content/commands/index.mdx +++ b/website/content/commands/index.mdx @@ -235,7 +235,7 @@ CONSUL_TLS_SERVER_NAME=consulserver.domain Like [`CONSUL_HTTP_ADDR`](#consul_http_addr) but configures the address the local agent is listening for gRPC requests. Currently gRPC is only used for integrating [Envoy proxy](/docs/connect/proxies/envoy) and must be [enabled -explicitly](/docs/agent/options#grpc_port) in agent configuration. +explicitly](/docs/agent/config/agent-config-files#grpc_port) in agent configuration. ``` CONSUL_GRPC_ADDR=127.0.0.1:8502 diff --git a/website/content/commands/operator/autopilot.mdx b/website/content/commands/operator/autopilot.mdx index e78ade6a7..2a0a2c29a 100644 --- a/website/content/commands/operator/autopilot.mdx +++ b/website/content/commands/operator/autopilot.mdx @@ -104,10 +104,10 @@ Usage: `consul operator autopilot set-config [options]` - `-disable-upgrade-migration` - Controls whether Consul will avoid promoting new servers until it can perform a migration. Must be one of `[true|false]`. -- `-redundancy-zone-tag` - Controls the [`-node-meta`](/docs/agent/options#_node_meta) +- `-redundancy-zone-tag` - Controls the [`-node-meta`](/docs/agent/config/agent-config-cli#_node_meta) key name used for separating servers into different redundancy zones. -- `-upgrade-version-tag` - Controls the [`-node-meta`](/docs/agent/options#_node_meta) +- `-upgrade-version-tag` - Controls the [`-node-meta`](/docs/agent/config/agent-config-cli#_node_meta) tag to use for version info when performing upgrade migrations. If left blank, the Consul version will be used. ### Command Output diff --git a/website/content/commands/reload.mdx b/website/content/commands/reload.mdx index 40d234a15..3043a346f 100644 --- a/website/content/commands/reload.mdx +++ b/website/content/commands/reload.mdx @@ -22,7 +22,7 @@ reload will be present in the agent logs and not in the output of this command. **NOTE** Not all configuration options are reloadable. See the -[Reloadable Configuration](/docs/agent/options#reloadable-configuration) +[Reloadable Configuration](/docs/agent/config#reloadable-configuration) section on the agent options page for details on which options are supported. The table below shows this command's [required ACLs](/api#authentication). Configuration of diff --git a/website/content/commands/validate.mdx b/website/content/commands/validate.mdx index cb3d02e07..ade133928 100644 --- a/website/content/commands/validate.mdx +++ b/website/content/commands/validate.mdx @@ -21,7 +21,7 @@ to be loaded by the agent. This command cannot operate on partial configuration fragments since those won't pass the full agent validation. For more information on the format of Consul's configuration files, read the -consul agent [Configuration Files](/docs/agent/options#configuration-files) +consul agent [Configuration Files](/docs/agent/config/agent-config-files) section. ## Usage diff --git a/website/content/docs/agent/config-entries.mdx b/website/content/docs/agent/config-entries.mdx index d477db6b2..48a996e05 100644 --- a/website/content/docs/agent/config-entries.mdx +++ b/website/content/docs/agent/config-entries.mdx @@ -57,8 +57,8 @@ See [Kubernetes Custom Resource Definitions](/docs/k8s/crds). Configuration entries outside of Kubernetes should be managed with the Consul [CLI](/commands/config) or [API](/api-docs/config). 
Additionally, as a convenience for initial cluster bootstrapping, configuration entries can be
-specified in the Consul servers agent's
-[configuration files](/docs/agent/options#config_entries_bootstrap)
+specified in all of the Consul servers'
+[configuration files](/docs/agent/config/agent-config-files#config_entries_bootstrap)

 ### Managing Configuration Entries with the CLI

@@ -162,7 +162,7 @@ api

 ### Bootstrapping From A Configuration File

 Configuration entries can be bootstrapped by adding them [inline to each Consul
-server's configuration file](/docs/agent/options#config_entries). When a
+server's configuration file](/docs/agent/config/agent-config-files#config_entries). When a
 server gains leadership, it will attempt to initialize the configuration
 entries. If a configuration entry does not already exist outside of the servers
 configuration, then it will create it. If a configuration entry does exist, that
diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx
index 40dae7878..08a9f4398 100644
--- a/website/content/docs/agent/config/agent-config-files.mdx
+++ b/website/content/docs/agent/config/agent-config-files.mdx
@@ -905,7 +905,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr
   set [`acl.enable_token_replication`](#acl_enable_token_replication) to true
   for backward compatibility.

   If there's a partition or other outage affecting the authoritative datacenter, and the
-  [`acl_down_policy`](/docs/agent/options#acl_down_policy) is set to "extend-cache", tokens not
+  [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) is set to "extend-cache", tokens not
   in the cache can be resolved during the outage using the replicated set of ACLs.

 - `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See
diff --git a/website/content/docs/agent/config/index.mdx b/website/content/docs/agent/config/index.mdx
index 44ced8493..f9daad3b9 100644
--- a/website/content/docs/agent/config/index.mdx
+++ b/website/content/docs/agent/config/index.mdx
@@ -57,22 +57,22 @@ Reloading configuration does not reload all configuration items. The items
 which are reloaded include:

 - ACL Tokens
-- [Configuration Entry Bootstrap](#config_entries_bootstrap)
+- [Configuration Entry Bootstrap](/docs/agent/config/agent-config-files#config_entries_bootstrap)
 - Checks
-- [Discard Check Output](#discard_check_output)
+- [Discard Check Output](/docs/agent/config/agent-config-files#discard_check_output)
 - HTTP Client Address
 - Log level
-- [Metric Prefix Filter](#telemetry-prefix_filter)
-- [Node Metadata](#node_meta)
+- [Metric Prefix Filter](/docs/agent/config/agent-config-files#telemetry-prefix_filter)
+- [Node Metadata](/docs/agent/config/agent-config-files#node_meta)
 - Some Raft options (since Consul 1.10.0)
-  - [`raft_snapshot_threshold`](#_raft_snapshot_threshold)
-  - [`raft_snapshot_interval`](#_raft_snapshot_interval)
-  - [`raft_trailing_logs`](#_raft_trailing_logs)
+  - [`raft_snapshot_threshold`](/docs/agent/config/agent-config-files#_raft_snapshot_threshold)
+  - [`raft_snapshot_interval`](/docs/agent/config/agent-config-files#_raft_snapshot_interval)
+  - [`raft_trailing_logs`](/docs/agent/config/agent-config-files#_raft_trailing_logs)
   - These can be important in certain outage situations so being able to control
     them without a restart provides a recovery path that doesn't involve
     downtime. They generally shouldn't be changed otherwise.
-- [RPC rate limiting](#limits)
-- [HTTP Maximum Connections per Client](#http_max_conns_per_client)
+- [RPC rate limiting](/docs/agent/config/agent-config-files#limits)
+- [HTTP Maximum Connections per Client](/docs/agent/config/agent-config-files#http_max_conns_per_client)
 - Services
 - TLS Configuration
   - Please be aware that this is currently limited to reload a configuration that is already TLS enabled. You cannot enable or disable TLS only with reloading.
diff --git a/website/content/docs/agent/index.mdx b/website/content/docs/agent/index.mdx
index b937214a8..2d963d13f 100644
--- a/website/content/docs/agent/index.mdx
+++ b/website/content/docs/agent/index.mdx
@@ -102,7 +102,7 @@ The following example starts an agent in dev mode and stores agent state data in
 $ consul agent -data-dir=tmp/consul -dev
 ```

-Agents are highly configurable, which enables you to deploy Consul to any infrastructure. Many of the default options for the `agent` command are suitable for becoming familiar with a local instance of Consul. In practice, however, several additional configuration options must be specified for Consul to function as expected. Refer to [Agent Configuration](/docs/agent/options) topic for a complete list of configuration options.
+Agents are highly configurable, which enables you to deploy Consul to any infrastructure. Many of the default options for the `agent` command are suitable for becoming familiar with a local instance of Consul. In practice, however, several additional configuration options must be specified for Consul to function as expected. Refer to [Agent Configuration](/docs/agent/config) topic for a complete list of configuration options.

 ### Understanding the Agent Startup Output

@@ -127,16 +127,16 @@ $ consul agent -data-dir=/tmp/consul

 - **Node name**: This is a unique name for the agent. By default, this is
   the hostname of the machine, but you may customize it using the
-  [`-node`](/docs/agent/options#_node) flag.
+  [`-node`](/docs/agent/config/agent-config-cli#_node) flag.

 - **Datacenter**: This is the datacenter in which the agent is configured to
-  run. For single-DC configurations, the agent will default to `dc1`, but you can configure which datacenter the agent reports to with the [`-datacenter`](/docs/agent/options#_datacenter) flag.
+  run. For single-DC configurations, the agent will default to `dc1`, but you can configure which datacenter the agent reports to with the [`-datacenter`](/docs/agent/config/agent-config-cli#_datacenter) flag.
   Consul has first-class support for multiple datacenters, but configuring each node to report its datacenter improves agent efficiency.

 - **Server**: This indicates whether the agent is running in server or client mode.
   Running an agent in server mode requires additional overhead. This is because they participate in the consensus quorum, store cluster state, and handle queries. A server may also be
-  in ["bootstrap"](/docs/agent/options#_bootstrap_expect) mode, which enables the server to elect itself as the Raft leader. Multiple servers cannot be in bootstrap mode because it would put the cluster in an inconsistent state.
+  in ["bootstrap"](/docs/agent/config/agent-config-cli#_bootstrap_expect) mode, which enables the server to elect itself as the Raft leader. Multiple servers cannot be in bootstrap mode because it would put the cluster in an inconsistent state.

 - **Client Addr**: This is the address used for client interfaces to the agent.
   This includes the ports for the HTTP and DNS interfaces.
By default, this @@ -179,18 +179,18 @@ The following settings are commonly used in the configuration file (also called | Parameter | Description | Default | | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | -| `node_name` | String value that specifies a name for the agent node.
See [`-node-id`](/docs/agent/options#_node_id) for details. | Hostname of the machine | -| `server` | Boolean value that determines if the agent runs in server mode.
See [`-server`](/docs/agent/options#_server) for details. | `false` | -| `datacenter` | String value that specifies which datacenter the agent runs in.
See [-datacenter](/docs/agent/options#_datacenter) for details. | `dc1` | -| `data_dir` | String value that specifies a directory for storing agent state data.
See [`-data-dir`](/docs/agent/options#_data_dir) for details. | none | -| `log_level` | String value that specifies the level of logging the agent reports.
See [`-log-level`](docs/agent/options#_log_level) for details. | `info` | -| `retry_join` | Array of string values that specify one or more agent addresses to join after startup. The agent will continue trying to join the specified agents until it has successfully joined another member.
See [`-retry-join`](/docs/agent/options#_retry_join) for details. | none | -| `addresses` | Block of nested objects that define addresses bound to the agent for internal cluster communication. | `"http": "0.0.0.0"` See the Agent Configuration page for [default address values](/docs/agent/options#addresses) | -| `ports` | Block of nested objects that define ports bound to agent addresses.
See (link to addresses option) for details. | See the Agent Configuration page for [default port values](/docs/agent/options#ports) | +| `node_name` | String value that specifies a name for the agent node.
See [`-node-id`](/docs/agent/config/agent-config-cli#_node_id) for details. | Hostname of the machine | +| `server` | Boolean value that determines if the agent runs in server mode.
See [`-server`](/docs/agent/config/agent-config-cli#_server) for details. | `false` | +| `datacenter` | String value that specifies which datacenter the agent runs in.
See [-datacenter](/docs/agent/config/agent-config-cli#_datacenter) for details. | `dc1` | +| `data_dir` | String value that specifies a directory for storing agent state data.
See [`-data-dir`](/docs/agent/config/agent-config-cli#_data_dir) for details. | none | +| `log_level` | String value that specifies the level of logging the agent reports.
See [`-log-level`](/docs/agent/config/agent-config-cli#_log_level) for details. | `info` | +| `retry_join` | Array of string values that specify one or more agent addresses to join after startup. The agent will continue trying to join the specified agents until it has successfully joined another member.
See [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) for details. | none | +| `addresses` | Block of nested objects that define addresses bound to the agent for internal cluster communication. | `"http": "0.0.0.0"` See the Agent Configuration page for [default address values](/docs/agent/config/agent-config-files#addresses) | +| `ports` | Block of nested objects that define ports bound to agent addresses.
See (link to addresses option) for details. | See the Agent Configuration page for [default port values](/docs/agent/config/agent-config-files#ports) | ### Server Node in a Service Mesh -The following example configuration is for a server agent named "`consul-server`". The server is [bootstrapped](/docs/agent/options#_bootstrap) and the Consul GUI is enabled. +The following example configuration is for a server agent named "`consul-server`". The server is [bootstrapped](/docs/agent/config/agent-config-cli#_bootstrap) and the Consul GUI is enabled. The reason this server agent is configured for a service mesh is that the `connect` configuration is enabled. Connect is Consul's service mesh component that provides service-to-service connection authorization and encryption using mutual Transport Layer Security (TLS). Applications can use sidecar proxies in a service mesh configuration to establish TLS connections for inbound and outbound connections without being aware of Connect at all. See [Connect](/docs/connect) for details. @@ -389,7 +389,7 @@ log_level = "INFO" bind_addr = "0.0.0.0" # Used for HTTP, HTTPS, DNS, and gRPC addresses. -# loopback is not included in GetPrivateInterfaces because it is not routable. +# loopback is not included in GetPrivateInterfaces because it is not routable. client_addr = "{{ GetPrivateInterfaces | exclude \"type\" \"ipv6\" | join \"address\" \" \" }} {{ GetAllInterfaces | include \"flags\" \"loopback\" | join \"address\" \" \" }}" # advertises gossip and RPC interface to other nodes @@ -448,8 +448,8 @@ may not be important for your use case. For example, for a web server and load balancer setup, both result in the same outcome: the web node is removed from the load balancer pool. -The [`skip_leave_on_interrupt`](/docs/agent/options#skip_leave_on_interrupt) and -[`leave_on_terminate`](/docs/agent/options#leave_on_terminate) configuration +The [`skip_leave_on_interrupt`](/docs/agent/config/agent-config-files#skip_leave_on_interrupt) and +[`leave_on_terminate`](/docs/agent/config/agent-config-files#leave_on_terminate) configuration options allow you to adjust this behavior. diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 7296ed208..dcea27e05 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -29,7 +29,7 @@ This telemetry information can be used for debugging or otherwise getting a better view of what Consul is doing. Review the [Monitoring and Metrics tutorial](https://learn.hashicorp.com/tutorials/consul/monitor-datacenter-health?utm_source=consul.io&utm_medium=docs) to learn how collect and interpret Consul data. -Additionally, if the [`telemetry` configuration options](/docs/agent/options#telemetry) +Additionally, if the [`telemetry` configuration options](/docs/agent/config/agent-config-files#telemetry) are provided, the telemetry information will be streamed to a [statsite](http://github.com/armon/statsite) or [statsd](http://github.com/etsy/statsd) server where it can be aggregated and flushed to Graphite or any other metrics store. 
@@ -140,7 +140,7 @@ you will need to apply a function such as InfluxDB's [`non_negative_difference() | Metric Name | Description | Unit | Type | | :--------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- | :------ | | `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server | requests | counter | -| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/options#limits) configuration. | requests | counter | +| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/agent-config-files#limits) configuration. | requests | counter | | `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter | **Why they're important:** These measurements indicate the current load created from a Consul agent, including when the load becomes high enough to be rate limited. A high RPC count, especially from `consul.client.rpcexceeded` meaning that the requests are being rate-limited, could imply a misconfigured Consul agent. @@ -172,7 +172,7 @@ Under these conditions, a follower after a restart may be unable to catch up on replication and become a voter again since it takes longer to restore from disk or the leader than the leader takes to write a new snapshot and truncate its logs. Servers retain -[`raft_trailing_logs`](/docs/agent/options#_raft_trailing_logs) (default +[`raft_trailing_logs`](/docs/agent/config/agent-config-files#raft_trailing_logs) (default `10240`) log entries even if their snapshot was more recent. On a leader processing 500 commits/second, that is only about 20 seconds worth of logs. Assuming the leader is able to write out a snapshot and truncate the logs in @@ -197,7 +197,7 @@ repeatedly as well as reduce the fault tolerance and serving capacity of the cluster. Since Consul 1.5.3 -[`raft_trailing_logs`](/docs/agent/options#_raft_trailing_logs) has been +[`raft_trailing_logs`](/docs/agent/config/agent-config-files#raft_trailing_logs) has been configurable. Increasing it allows the leader to retain more logs and give followers more time to restore and catch up. The tradeoff is potentially slower appends which eventually might affect write throughput and latency @@ -208,7 +208,7 @@ mean loosing cluster availability and needing to recover the cluster from a loss of quorum. Since Consul 1.10.0 -[`raft_trailing_logs`](/docs/agent/options#_raft_trailing_logs) is now +[`raft_trailing_logs`](/docs/agent/config/agent-config-files#raft_trailing_logs) is now reloadable with `consul reload` or `SIGHUP` allowing operators to increase this without the leader restarting or loosing leadership allowing the cluster to be recovered gracefully. @@ -298,7 +298,7 @@ it took as well as how many logs were contained in the batch. Writing logs in th a subsequent log storage operation can only be started after the previous one completed. The maximum number of log storage operations that can be performed each second is represented with the `consul.raft.boltdb.writeCapacity` metric. 
When log storage operations are becoming slower you may not see an immediate decrease in write capacity -due to increased batch sizes of the each operation. However, the max batch size allowed is 64 logs. Therefore if +due to increased batch sizes of the each operation. However, the max batch size allowed is 64 logs. Therefore if the `logsPerBatch` metric is near 64 and the `storeLogs` metric is seeing increased time to write each batch to disk, then it is likely that increased write latencies and other errors may occur. @@ -332,7 +332,7 @@ This is a full list of metrics emitted by Consul. | `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration fails for an entity (check, node or service) is blocked by an ACL. | requests | counter | | `consul.api.http` | Migrated from consul.http.. this samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names, for these an underscore will be present as a placeholder (eg. path=`v1.kv._`) | ms | timer | | `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter | -| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/options#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter | +| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/agent-config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter | | `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter | | `consul.client.api.catalog_register.` | Increments whenever a Consul agent receives a catalog register request. | requests | counter | | `consul.client.api.success.catalog_register.` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter | @@ -431,7 +431,7 @@ These metrics are used to monitor the health of the Consul servers. | `consul.raft.last_index` | Represents the raft applied index. | index | gauge | | `consul.raft.leader.dispatchLog` | Measures the time it takes for the leader to write log entries to disk. | ms | timer | | `consul.raft.leader.dispatchNumLogs` | Measures the number of logs committed to disk in a batch. | logs | gauge | -| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. 
It can be used as a measure for how stable the Raft timing is and how close the leader is to timing out its lease.The lease timeout is 500 ms times the [`raft_multiplier` configuration](/docs/agent/options#raft_multiplier), so this telemetry value should not be getting close to that configured value, otherwise the Raft timing is marginal and might need to be tuned, or more powerful servers might be needed. See the [Server Performance](/docs/install/performance) guide for more details. | ms | timer | +| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. It can be used as a measure for how stable the Raft timing is and how close the leader is to timing out its lease.The lease timeout is 500 ms times the [`raft_multiplier` configuration](/docs/agent/config/agent-config-files#raft_multiplier), so this telemetry value should not be getting close to that configured value, otherwise the Raft timing is marginal and might need to be tuned, or more powerful servers might be needed. See the [Server Performance](/docs/install/performance) guide for more details. | ms | timer | | `consul.raft.leader.oldestLogAge` | The number of milliseconds since the _oldest_ log in the leader's log store was written. This can be important for replication health where write rate is high and the snapshot is large as followers may be unable to recover from a restart if restoring takes longer than the minimum value for the current leader. Compare this with `consul.raft.fsm.lastRestoreDuration` and `consul.raft.rpc.installSnapshot` to monitor. In normal usage this gauge value will grow linearly over time until a snapshot completes on the leader and the log is truncated. Note: this metric won't be emitted until the leader writes a snapshot. After an upgrade to Consul 1.10.0 it won't be emitted until the oldest log was written after the upgrade. | ms | gauge | | `consul.raft.replication.heartbeat` | Measures the time taken to invoke appendEntries on a peer, so that it doesn’t timeout on a periodic basis. | ms | timer | | `consul.raft.replication.appendEntries` | Measures the time it takes to replicate log entries to followers. This is a general indicator of the load pressure on the Consul servers, as well as the performance of the communication between the servers. | ms | timer | @@ -575,7 +575,7 @@ These metrics give insight into the health of the cluster as a whole. | `consul.memberlist.degraded.timeout` | Counts the number of times an agent was marked as a dead node, whilst not getting enough confirmations from a randomly selected list of agent nodes in an agent's membership. | occurrence / interval | counter | | `consul.memberlist.msg.dead` | Counts the number of times an agent has marked another agent to be a dead node. | messages / interval | counter | | `consul.memberlist.health.score` | Describes a node's perception of its own health based on how well it is meeting the soft real-time requirements of the protocol. This metric ranges from 0 to 8, where 0 indicates "totally healthy". This health score is used to scale the time between outgoing probes, and higher scores translate into longer probing intervals. For more details see section IV of the Lifeguard paper: https://arxiv.org/pdf/1707.00788.pdf | score | gauge | -| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. 
These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/docs/agent/options#ports). | suspect messages received / interval | counter | +| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/docs/agent/config/agent-config-files#ports). | suspect messages received / interval | counter | | `consul.memberlist.tcp.accept` | Counts the number of times an agent has accepted an incoming TCP stream connection. | connections accepted / interval | counter | | `consul.memberlist.udp.sent/received` | Measures the total number of bytes sent/received by an agent through the UDP protocol. | bytes sent or bytes received / interval | counter | | `consul.memberlist.tcp.connect` | Counts the number of times an agent has initiated a push/pull sync with an other agent. | push/pull initiated / interval | counter | @@ -586,14 +586,14 @@ These metrics give insight into the health of the cluster as a whole. | `consul.memberlist.msg_suspect` | The number of suspect messages that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter | | `consul.memberlist.probeNode` | Measures the time taken to perform a single round of failure detection on a select agent. | nodes / Interval | counter | | `consul.memberlist.pushPullNode` | Measures the number of agents that have exchanged state with this agent. | nodes / Interval | counter | -| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/options#ports). | failures / interval | counter | -| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/options#ports). | flaps / interval | counter | +| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/config/agent-config-files#ports). | failures / interval | counter | +| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/config/agent-config-files#ports). | flaps / interval | counter | | `consul.serf.member.join` | Increments when an agent joins the cluster. If an agent flapped or failed this counter also increments when it re-joins. | joins / interval | counter | | `consul.serf.member.left` | Increments when an agent leaves the cluster. | leaves / interval | counter | | `consul.serf.events` | Increments when an agent processes an [event](/commands/event). 
Consul uses events internally so there may be additional events showing in telemetry. There are also a per-event counters emitted as `consul.serf.events.`. | events / interval | counter | | `consul.serf.msgs.sent` | This metric is sample of the number of bytes of messages broadcast to the cluster. In a given time interval, the sum of this metric is the total number of bytes sent and the count is the number of messages sent. | message bytes / interval | counter | -| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers   | gauge | -| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | boolean   | gauge | +| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers | gauge | +| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | boolean | gauge | | `consul.session_ttl.active` | Tracks the active number of sessions being tracked. | sessions | gauge | | `consul.catalog.service.query.` | Increments for each catalog query for the given service. | queries | counter | | `consul.catalog.service.query-tag..` | Increments for each catalog query for the given service with the given tag. | queries | counter | diff --git a/website/content/docs/connect/ca/aws.mdx b/website/content/docs/connect/ca/aws.mdx index 46e215f87..3b4ed80b9 100644 --- a/website/content/docs/connect/ca/aws.mdx +++ b/website/content/docs/connect/ca/aws.mdx @@ -173,11 +173,11 @@ So monthly cost would be calculated as: - 500 ⨉ 13.3 = 6,650 certificates issued in dc3 The number of certificates issued could be reduced by increasing -[`leaf_cert_ttl`](/docs/agent/options#ca_leaf_cert_ttl) in the CA Provider +[`leaf_cert_ttl`](/docs/agent/config/agent-config-files#ca_leaf_cert_ttl) in the CA Provider configuration if the longer lived credentials are an acceptable risk tradeoff against the cost. -[`ca_config`]: /docs/agent/options#connect_ca_config -[`ca_provider`]: /docs/agent/options#connect_ca_provider +[`ca_config`]: /docs/agent/config/agent-config-files#connect_ca_config +[`ca_provider`]: /docs/agent/config/agent-config-files#connect_ca_provider [`/connect/ca/configuration`]: /api-docs/connect/ca#update-ca-configuration diff --git a/website/content/docs/connect/ca/consul.mdx b/website/content/docs/connect/ca/consul.mdx index 1094ffd25..ba7645171 100644 --- a/website/content/docs/connect/ca/consul.mdx +++ b/website/content/docs/connect/ca/consul.mdx @@ -92,7 +92,7 @@ Connect is enabled - the PrivateKey and RootCert fields have not been set, so th been generated (as seen above in the roots list). There are two ways to have the Consul CA use a custom private key and root certificate: -either through the `ca_config` section of the [Agent configuration](/docs/agent/options#connect_ca_config) (which can only be used during the cluster's +either through the `ca_config` section of the [Agent configuration](/docs/agent/config/agent-config-files#connect_ca_config) (which can only be used during the cluster's initial bootstrap) or through the [Update CA Configuration endpoint](/api-docs/connect/ca#update-ca-configuration). 
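A sketch of that bootstrap-time `ca_config` approach is shown below, assuming the built-in Consul provider; the PEM material and the 72-hour `leaf_cert_ttl` are placeholders, and once the cluster is bootstrapped the same settings must instead go through the Update CA Configuration endpoint described above.

```hcl
connect {
  enabled     = true
  ca_provider = "consul"

  ca_config {
    # Placeholder PEM contents; only honored during the cluster's initial bootstrap.
    private_key = "-----BEGIN EC PRIVATE KEY-----\n...\n-----END EC PRIVATE KEY-----\n"
    root_cert   = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"

    # Shorter-lived leaf certificates mean more frequent signing requests.
    leaf_cert_ttl = "72h"
  }
}
```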
Currently Consul requires that root certificates are valid [SPIFFE SVID Signing certificates](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and that the URI encoded diff --git a/website/content/docs/connect/ca/index.mdx b/website/content/docs/connect/ca/index.mdx index dc035b4e4..16d5c0021 100644 --- a/website/content/docs/connect/ca/index.mdx +++ b/website/content/docs/connect/ca/index.mdx @@ -47,7 +47,7 @@ will generate the initial root certificates and setup the internal Consul server state. For the initial bootstrap, the CA provider can be configured through the -[Agent configuration](/docs/agent/options#connect_ca_config). After +[Agent configuration](/docs/agent/config/agent-config-files#connect_ca_config). After initialization, the CA can only be updated through the [Update CA Configuration API endpoint](/api-docs/connect/ca#update-ca-configuration). If a CA is already initialized, any changes to the CA configuration in the diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx index a39635046..376d50bfc 100644 --- a/website/content/docs/connect/ca/vault.mdx +++ b/website/content/docs/connect/ca/vault.mdx @@ -280,6 +280,6 @@ path "/connect_inter/*" { -[`ca_config`]: /docs/agent/options#connect_ca_config -[`ca_provider`]: /docs/agent/options#connect_ca_provider +[`ca_config`]: /docs/agent/config/agent-config-files#connect_ca_config +[`ca_provider`]: /docs/agent/config/agent-config-files#connect_ca_provider [`/connect/ca/configuration`]: /api-docs/connect/ca#update-ca-configuration diff --git a/website/content/docs/connect/config-entries/exported-services.mdx b/website/content/docs/connect/config-entries/exported-services.mdx index 41b3e94dc..790d773c6 100644 --- a/website/content/docs/connect/config-entries/exported-services.mdx +++ b/website/content/docs/connect/config-entries/exported-services.mdx @@ -28,7 +28,7 @@ You can configure the settings defined in the `exported-services` configuration ## Usage 1. Verify that your datacenter meets the conditions specified in the [Requirements](#requirements). -1. Specify the `exported-services` configuration in the agent configuration file (see [`config_entries`](/docs/agent/options#config_entries)) as described in [Configuration](#configuration). +1. Specify the `exported-services` configuration in the agent configuration file (see [`config_entries`](/docs/agent/config/agent-config-files#config_entries)) as described in [Configuration](#configuration). 1. Apply the configuration using one of the following methods: - Kubernetes CRD: Refer to the [Custom Resource Definitions](/docs/k8s/crds) documentation for details. - Issue the `consul config write` command: Refer to the [Consul Config Write](/commands/config/write) documentation for details. diff --git a/website/content/docs/connect/config-entries/index.mdx b/website/content/docs/connect/config-entries/index.mdx index d7d11797c..2ff6142fa 100644 --- a/website/content/docs/connect/config-entries/index.mdx +++ b/website/content/docs/connect/config-entries/index.mdx @@ -49,7 +49,7 @@ See [Agent - Config Entries](/docs/agent/config-entries). ## Using Configuration Entries For Service Defaults Outside of Kubernetes, when the agent is -[configured](/docs/agent/options#enable_central_service_config) to enable +[configured](/docs/agent/config/agent-config-files#enable_central_service_config) to enable central service configurations, it will look for service configuration defaults that match a registering service instance. 
If it finds any, the agent will merge those defaults with the service instance configuration. This allows for things diff --git a/website/content/docs/connect/config-entries/proxy-defaults.mdx b/website/content/docs/connect/config-entries/proxy-defaults.mdx index 699c2d133..2d5d2ebd8 100644 --- a/website/content/docs/connect/config-entries/proxy-defaults.mdx +++ b/website/content/docs/connect/config-entries/proxy-defaults.mdx @@ -390,8 +390,8 @@ spec: type: 'bool: false', description: `If enabled, all HTTP and gRPC checks registered with the agent are exposed through Envoy. Envoy will expose listeners for these checks and will only accept connections originating from localhost or Consul's - [advertise address](/docs/agent/options#advertise). The port for these listeners are dynamically allocated from - [expose_min_port](/docs/agent/options#expose_min_port) to [expose_max_port](/docs/agent/options#expose_max_port). + [advertise address](/docs/agent/config/agent-config-files#advertise). The port for these listeners are dynamically allocated from + [expose_min_port](/docs/agent/config/agent-config-files#expose_min_port) to [expose_max_port](/docs/agent/config/agent-config-files#expose_max_port). This flag is useful when a Consul client cannot reach registered services over localhost.`, }, { diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index 86ce364b6..06f1892a3 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -662,8 +662,8 @@ spec: type: 'bool: false', description: `If enabled, all HTTP and gRPC checks registered with the agent are exposed through Envoy. Envoy will expose listeners for these checks and will only accept connections originating from localhost or Consul's - [advertise address](/docs/agent/options#advertise). The port for these listeners are dynamically allocated from - [expose_min_port](/docs/agent/options#expose_min_port) to [expose_max_port](/docs/agent/options#expose_max_port). + [advertise address](/docs/agent/config/agent-config-files#advertise). The port for these listeners are dynamically allocated from + [expose_min_port](/docs/agent/config/agent-config-files#expose_min_port) to [expose_max_port](/docs/agent/config/agent-config-files#expose_max_port). This flag is useful when a Consul client cannot reach registered services over localhost. One example is when running Consul on Kubernetes, and Consul agents run in their own pods.`, }, diff --git a/website/content/docs/connect/config-entries/service-intentions.mdx b/website/content/docs/connect/config-entries/service-intentions.mdx index 64317d51c..c2e77fb06 100644 --- a/website/content/docs/connect/config-entries/service-intentions.mdx +++ b/website/content/docs/connect/config-entries/service-intentions.mdx @@ -488,7 +488,7 @@ spec: first permission to match in the list is terminal and stops further evaluation. As with L4 intentions, traffic that fails to match any of the provided permissions in this intention will be subject to the default - intention behavior is defined by the default [ACL policy](/docs/agent/options#acl_default_policy).

+ intention behavior is defined by the default [ACL policy](/docs/agent/config/agent-config-files#acl_default_policy).

This should be omitted for an L4 intention as it is mutually exclusive with the \`Action\` field.

Setting \`Permissions\` is not valid if a wildcard is used for the \`Name\` or \`Namespace\` because they can only be @@ -498,7 +498,7 @@ spec: first permission to match in the list is terminal and stops further evaluation. As with L4 intentions, traffic that fails to match any of the provided permissions in this intention will be subject to the default - intention behavior is defined by the default [ACL policy](/docs/agent/options#acl_default_policy).

+ intention behavior is defined by the default [ACL policy](/docs/agent/config/agent-config-files#acl_default_policy).

This should be omitted for an L4 intention as it is mutually exclusive with the \`action\` field.

Setting \`permissions\` is not valid if a wildcard is used for the \`spec.destination.name\` or \`spec.destination.namespace\` diff --git a/website/content/docs/connect/configuration.mdx b/website/content/docs/connect/configuration.mdx index e6ae57508..0018fc4db 100644 --- a/website/content/docs/connect/configuration.mdx +++ b/website/content/docs/connect/configuration.mdx @@ -22,7 +22,7 @@ The first step to use Connect is to enable Connect for your Consul cluster. By default, Connect is disabled. Enabling Connect requires changing the configuration of only your Consul _servers_ (not client agents). To enable Connect, add the following to a new or existing -[server configuration file](/docs/agent/options). In an existing cluster, this configuration change requires a Consul server restart, which you can perform one server at a time to maintain availability. In HCL: +[server configuration file](/docs/agent/config/agent-config-files). In an existing cluster, this configuration change requires a Consul server restart, which you can perform one server at a time to maintain availability. In HCL: ```hcl connect { @@ -43,20 +43,20 @@ connection attempts to fail until Connect is enabled on the server agents. Other optional Connect configurations that you can set in the server configuration file include: -- [certificate authority settings](/docs/agent/options#connect) -- [token replication](/docs/agent/options#acl_tokens_replication) -- [dev mode](/docs/agent/options#_dev) -- [server host name verification](/docs/agent/options#tls_internal_rpc_verify_server_hostname) +- [certificate authority settings](/docs/agent/config/agent-config-files#connect) +- [token replication](/docs/agent/config/agent-config-files#acl_tokens_replication) +- [dev mode](/docs/agent/config/agent-config-cli#_dev) +- [server host name verification](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname) If you would like to use Envoy as your Connect proxy you will need to [enable -gRPC](/docs/agent/options#grpc_port). +gRPC](/docs/agent/config/agent-config-files#grpc_port). Additionally if you plan on using the observability features of Connect, it can be convenient to configure your proxies and services using [configuration entries](/docs/agent/config-entries) which you can interact with using the CLI or API, or by creating configuration entry files. You will want to enable [centralized service -configuration](/docs/agent/options#enable_central_service_config) on +configuration](/docs/agent/config/agent-config-files#enable_central_service_config) on clients, which allows each service's proxy configuration to be managed centrally via API. diff --git a/website/content/docs/connect/connect-internals.mdx b/website/content/docs/connect/connect-internals.mdx index 7e49c9936..63682983e 100644 --- a/website/content/docs/connect/connect-internals.mdx +++ b/website/content/docs/connect/connect-internals.mdx @@ -109,10 +109,10 @@ externally routable IPs at the service level. ## Intention Replication Intention replication happens automatically but requires the -[`primary_datacenter`](/docs/agent/options#primary_datacenter) +[`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter) configuration to be set to specify a datacenter that is authoritative for intentions. 
In production setups with ACLs enabled, the -[replication token](/docs/agent/options#acl_tokens_replication) must also +[replication token](/docs/agent/config/agent-config-files#acl_tokens_replication) must also be set in the secondary datacenter server's configuration. ## Certificate Authority Federation diff --git a/website/content/docs/connect/gateways/ingress-gateway.mdx b/website/content/docs/connect/gateways/ingress-gateway.mdx index 9f181850e..da6155963 100644 --- a/website/content/docs/connect/gateways/ingress-gateway.mdx +++ b/website/content/docs/connect/gateways/ingress-gateway.mdx @@ -40,8 +40,8 @@ the [hosts](/docs/connect/config-entries/ingress-gateway#hosts) field. Ingress gateways also require that your Consul datacenters are configured correctly: - You'll need to use Consul version 1.8.0 or newer. -- Consul [Connect](/docs/agent/options#connect) must be enabled on the datacenter's Consul servers. -- [gRPC](/docs/agent/options#grpc_port) must be enabled on all client agents. +- Consul [Connect](/docs/agent/config/agent-config-files#connect) must be enabled on the datacenter's Consul servers. +- [gRPC](/docs/agent/config/agent-config-files#grpc_port) must be enabled on all client agents. Currently, [Envoy](https://www.envoyproxy.io/) is the only proxy with ingress gateway capabilities in Consul. diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx index ad0405231..ee6266030 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx @@ -10,13 +10,13 @@ description: >- -> **1.6.0+:** This feature is available in Consul versions 1.6.0 and newer. -Mesh gateways enable service mesh traffic to be routed between different Consul datacenters. +Mesh gateways enable service mesh traffic to be routed between different Consul datacenters. Datacenters can reside in different clouds or runtime environments where general interconnectivity between all services -in all datacenters isn't feasible. +in all datacenters isn't feasible. Mesh gateways operate by sniffing and extracting the server name indication (SNI) header from the service mesh session and routing the connection to the appropriate destination based on the server name requested. The gateway does not decrypt the data within the mTLS session. -The following diagram describes the architecture for using mesh gateways for cross-datacenter communication: +The following diagram describes the architecture for using mesh gateways for cross-datacenter communication: ![Mesh Gateway Architecture](/img/mesh-gateways.png) @@ -30,12 +30,12 @@ Ensure that your Consul environment meets the following requirements. * Consul version 1.6.0 or newer. * A local Consul agent is required to manage its configuration. -* Consul [Connect](/docs/agent/options#connect) must be enabled in both datacenters. -* Each [datacenter](/docs/agent/options#datacenter) must have a unique name. +* Consul [Connect](/docs/agent/config/agent-config-files#connect) must be enabled in both datacenters. +* Each [datacenter](/docs/agent/config/agent-config-files#datacenter) must have a unique name. * Each datacenters must be [WAN joined](https://learn.hashicorp.com/tutorials/consul/federarion-gossip-wan). 
-* The [primary datacenter](/docs/agent/options#primary_datacenter) must be set to the same value in both datacenters. This specifies which datacenter is the authority for Connect certificates and is required for services in all datacenters to establish mutual TLS with each other. -* [gRPC](/docs/agent/options#grpc_port) must be enabled. -* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/options#enable_central_service_config). +* The [primary datacenter](/docs/agent/config/agent-config-files#primary_datacenter) must be set to the same value in both datacenters. This specifies which datacenter is the authority for Connect certificates and is required for services in all datacenters to establish mutual TLS with each other. +* [gRPC](/docs/agent/config/agent-config-files#grpc_port) must be enabled. +* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/config/agent-config-files#enable_central_service_config). ### Network @@ -58,23 +58,23 @@ Sidecar proxies that do not send upstream traffic through a gateway are not affe Configure the following settings to register the mesh gateway as a service in Consul. * Specify `mesh-gateway` in the `kind` field to register the gateway with Consul. -* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and datacenter. Refer to the [`upstreams` documentation](/docs/connect/registration/service-registration#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.datacenter` must be configured to enable cross-datacenter traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace. -* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy (i.e., Envoy). For Envoy, refer to the [Gateway Options](/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information. +* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and datacenter. Refer to the [`upstreams` documentation](/docs/connect/registration/service-registration#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.datacenter` must be configured to enable cross-datacenter traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace. +* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy (i.e., Envoy). For Envoy, refer to the [Gateway Options](/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information. * If ACLs are enabled, a token granting `service:write` for the gateway's service name and `service:read` for all services in the datacenter or partition must be added to the gateway's service definition. 
These permissions authorize the token to route communications for other Consul service mesh services, but does not allow decrypting any of their communications. ### Modes -Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway. +Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway. Depending on your network, the proxy's connection to the gateway can operate in one of the following modes (refer to the [mesh-architecture-diagram](#mesh-architecture-diagram)): * `none` - (Default) No gateway is used and a service mesh connect proxy makes its outbound connections directly to the destination services. * `local` - The service mesh connect proxy makes an outbound connection to a gateway running in the - same datacenter. That gateway is responsible for ensuring that the data is forwarded to gateways in the destination datacenter. + same datacenter. That gateway is responsible for ensuring that the data is forwarded to gateways in the destination datacenter. Refer to the flow labeled `local` in the [mesh-architecture-diagram](#mesh-architecture-diagram). -* `remote` - The service mesh proxy makes an outbound connection to a gateway running in the destination datacenter. +* `remote` - The service mesh proxy makes an outbound connection to a gateway running in the destination datacenter. The gateway forwards the data to the final destination service. Refer to the flow labeled `remote` in the [mesh-architecture-diagram](#mesh-architecture-diagram). diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx index cebb531f7..65d133021 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx @@ -12,7 +12,7 @@ description: >- Mesh gateways enable you to route service mesh traffic between different Consul [admin partitions](/docs/enterprise/admin-partitions). Partitions can reside in different clouds or runtime environments where general interconnectivity between all services -in all partitions isn't feasible. +in all partitions isn't feasible. Mesh gateways operate by sniffing and extracting the server name indication (SNI) header from the service mesh session and routing the connection to the appropriate destination based on the server name requested. The gateway does not decrypt the data within the mTLS session. @@ -24,15 +24,15 @@ Ensure that your Consul environment meets the following requirements. * Consul Enterprise version 1.11.0 or newer. * A local Consul agent is required to manage its configuration. -* Consul service mesh must be enabled in all partitions. Refer to the [`connect` documentation](/docs/agent/options#connect) for details. +* Consul service mesh must be enabled in all partitions. Refer to the [`connect` documentation](/docs/agent/config/agent-config-files#connect) for details. * Each partition must have a unique name. Refer to the [admin partitions documentation](/docs/enterprise/admin-partitions) for details. -* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/options#enable_central_service_config). 
+* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/config/agent-config-files#enable_central_service_config). ### Proxy Envoy is the only proxy with mesh gateway capabilities in Consul. -Mesh gateway proxies receive their configuration through Consul, which automatically generates it based on the proxy's registration. +Mesh gateway proxies receive their configuration through Consul, which automatically generates it based on the proxy's registration. Consul can only translate mesh gateway registration information into Envoy configuration. Sidecar proxies that send traffic to an upstream service through a gateway need to know the location of that gateway. They discover the gateway based on their sidecar proxy registrations. Consul can only translate the gateway registration information into Envoy configuration. @@ -44,22 +44,22 @@ Sidecar proxies that do not send upstream traffic through a gateway are not affe Configure the following settings to register the mesh gateway as a service in Consul. * Specify `mesh-gateway` in the `kind` field to register the gateway with Consul. -* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and partition. Refer to the [`upstreams` documentation](/docs/connect/registration/service-registration#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.destination_partition` must be configured to enable cross-partition traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace. -* Configure the `exported-services` configuration entry to enable Consul to export services contained in an admin partition to one or more additional partitions. Refer to the [Exported Services documentation](/docs/connect/config-entries/exported-services) for details. -* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy, i.e., Envoy. For Envoy, refer to the [Gateway Options](/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information. +* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and partition. Refer to the [`upstreams` documentation](/docs/connect/registration/service-registration#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.destination_partition` must be configured to enable cross-partition traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace. +* Configure the `exported-services` configuration entry to enable Consul to export services contained in an admin partition to one or more additional partitions. Refer to the [Exported Services documentation](/docs/connect/config-entries/exported-services) for details. +* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy, i.e., Envoy. For Envoy, refer to the [Gateway Options](/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information. 
* If ACLs are enabled, a token granting `service:write` for the gateway's service name and `service:read` for all services in the datacenter or partition must be added to the gateway's service definition. These permissions authorize the token to route communications for other Consul service mesh services, but does not allow decrypting any of their communications. ### Modes -Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway. +Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway. Depending on your network, the proxy's connection to the gateway can operate in one of the following modes: * `none` - (Default) No gateway is used and a service mesh connect proxy makes its outbound connections directly to the destination services. -* `local` - The service mesh connect proxy makes an outbound connection to a gateway running in the same datacenter. The gateway at the outbound connection is responsible for ensuring that the data is forwarded to gateways in the destination partition. +* `local` - The service mesh connect proxy makes an outbound connection to a gateway running in the same datacenter. The gateway at the outbound connection is responsible for ensuring that the data is forwarded to gateways in the destination partition. -* `remote` - The service mesh connect proxy makes an outbound connection to a gateway running in the destination datacenter. +* `remote` - The service mesh connect proxy makes an outbound connection to a gateway running in the destination datacenter. The gateway forwards the data to the final destination service. ### Connect Proxy Configuration diff --git a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx index aac107b2e..d6dcf2a42 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx @@ -126,7 +126,10 @@ connect { } ``` -The [`start_join_wan`](/docs/agent/options#start_join_wan) or [`retry_join_wan`](/docs/agent/options#retry_join_wan) are only used for the [traditional federation process](/docs/k8s/installation/multi-cluster#traditional-wan-federation). They must be omitted when federating Consul servers via gateways. +The [`start_join_wan`](/docs/agent/config/agent-config-files#start_join_wan) or +[`retry_join_wan`](/docs/agent/config/agent-config-files#retry_join_wan) are +only used for the [traditional federation process](/docs/k8s/installation/multi-cluster#traditional-wan-federation). +They must be omitted when federating Consul servers via gateways. -> The `primary_gateways` configuration can also use `go-discover` syntax just like `retry_join_wan`. diff --git a/website/content/docs/connect/gateways/terminating-gateway.mdx b/website/content/docs/connect/gateways/terminating-gateway.mdx index 81e18d19f..fdd3891c4 100644 --- a/website/content/docs/connect/gateways/terminating-gateway.mdx +++ b/website/content/docs/connect/gateways/terminating-gateway.mdx @@ -59,8 +59,8 @@ Each terminating gateway needs: Terminating gateways also require that your Consul datacenters are configured correctly: - You'll need to use Consul version 1.8.0 or newer. -- Consul [Connect](/docs/agent/options#connect) must be enabled on the datacenter's Consul servers. 
-- [gRPC](/docs/agent/options#grpc_port) must be enabled on all client agents. +- Consul [Connect](/docs/agent/config/agent-config-files#connect) must be enabled on the datacenter's Consul servers. +- [gRPC](/docs/agent/config/agent-config-files#grpc_port) must be enabled on all client agents. Currently, [Envoy](https://www.envoyproxy.io/) is the only proxy with terminating gateway capabilities in Consul. diff --git a/website/content/docs/connect/intentions-legacy.mdx b/website/content/docs/connect/intentions-legacy.mdx index 1939db196..804bd8c65 100644 --- a/website/content/docs/connect/intentions-legacy.mdx +++ b/website/content/docs/connect/intentions-legacy.mdx @@ -25,7 +25,7 @@ is allowed by testing the intentions. If authorize returns false the connection must be terminated. The default intention behavior is defined by the default [ACL -policy](/docs/agent/options#acl_default_policy). If the default ACL policy is +policy](/docs/agent/config/agent-config-files#acl_default_policy). If the default ACL policy is "allow all", then all Connect connections are allowed by default. If the default ACL policy is "deny all", then all Connect connections are denied by default. diff --git a/website/content/docs/connect/intentions.mdx b/website/content/docs/connect/intentions.mdx index e76db2511..71a182823 100644 --- a/website/content/docs/connect/intentions.mdx +++ b/website/content/docs/connect/intentions.mdx @@ -49,7 +49,7 @@ target destination. After verifying the TLS client certificate, the cached intentions should be consulted for each incoming connection/request to determine if it should be accepted or rejected. -The default intention behavior is defined by the [`default_policy`](/docs/agent/options#acl_default_policy) configuration. +The default intention behavior is defined by the [`default_policy`](/docs/agent/config/agent-config-files#acl_default_policy) configuration. If the configuration is set `allow`, then all service mesh Connect connections will be allowed by default. If is set to `deny`, then all connections or requests will be denied by default. diff --git a/website/content/docs/connect/observability/index.mdx b/website/content/docs/connect/observability/index.mdx index 8b5073c5e..616919f94 100644 --- a/website/content/docs/connect/observability/index.mdx +++ b/website/content/docs/connect/observability/index.mdx @@ -18,10 +18,10 @@ to: - Define the upstreams for each of your services. If you are using Envoy as your sidecar proxy, you will need to [enable -gRPC](/docs/agent/options#grpc_port) on your client agents. To define the +gRPC](/docs/agent/config/agent-config-files#grpc_port) on your client agents. To define the metrics destination and service protocol you may want to enable [configuration -entries](/docs/agent/options#config_entries) and [centralized service -configuration](/docs/agent/options#enable_central_service_config). +entries](/docs/agent/config/agent-config-files#config_entries) and [centralized service +configuration](/docs/agent/config/agent-config-files#enable_central_service_config). ### Kubernetes If you are using Kubernetes, the Helm chart can simplify much of the configuration needed to enable observability. See diff --git a/website/content/docs/connect/observability/ui-visualization.mdx b/website/content/docs/connect/observability/ui-visualization.mdx index 0b74a6b12..503163452 100644 --- a/website/content/docs/connect/observability/ui-visualization.mdx +++ b/website/content/docs/connect/observability/ui-visualization.mdx @@ -47,11 +47,11 @@ UI. 
If there are multiple clients with the UI enabled in a datacenter for redundancy these configurations must be added to all of them. We assume that the UI is already enabled by setting -[`ui_config.enabled`](/docs/agent/options#ui_config_enabled) to `true` in the +[`ui_config.enabled`](/docs/agent/config/agent-config-files#ui_config_enabled) to `true` in the agent's configuration file. To use the built-in Prometheus provider -[`ui_config.metrics_provider`](/docs/agent/options#ui_config_metrics_provider) +[`ui_config.metrics_provider`](/docs/agent/config/agent-config-files#ui_config_metrics_provider) must be set to `prometheus`. The UI must query the metrics provider through a proxy endpoint. This simplifies @@ -59,7 +59,7 @@ deployment where Prometheus is not exposed externally to UI user's browsers. To set this up, provide the URL that the _Consul agent_ should use to reach the Prometheus server in -[`ui_config.metrics_proxy.base_url`](/docs/agent/options#ui_config_metrics_proxy_base_url). +[`ui_config.metrics_proxy.base_url`](/docs/agent/config/agent-config-files#ui_config_metrics_proxy_base_url). For example in Kubernetes, the Prometheus helm chart by default installs a service named `prometheus-server` so each Consul agent can reach it on `http://prometheus-server` (using Kubernetes' DNS resolution). @@ -124,7 +124,7 @@ service-specific dashboard in an external tool like [Grafana](https://grafana.com) or a hosted provider. To configure this, you must provide a URL template in the [agent configuration -file](/docs/agent/options#ui_config_dashboard_url_templates) for all agents that +file](/docs/agent/config/agent-config-files#ui_config_dashboard_url_templates) for all agents that have the UI enabled. The template is essentially the URL to the external dashboard, but can have placeholder values which will be replaced with the service name, namespace and datacenter where appropriate to allow deep-linking @@ -659,12 +659,12 @@ ui_config {
More than one JavaScript file may be specified in -[`metrics_provider_files`](/docs/agent/options#ui_config_metrics_provider_files), +[`metrics_provider_files`](/docs/agent/config/agent-config-files#ui_config_metrics_provider_files) and all will be served allowing flexibility if needed to include dependencies. Only one metrics provider can be configured and used at one time. The -[`metrics_provider_options_json`](/docs/agent/options#ui_config_metrics_provider_options_json) +[`metrics_provider_options_json`](/docs/agent/config/agent-config-files#ui_config_metrics_provider_options_json) field is an optional literal JSON object which is passed to the provider's `init` method at startup time. This allows configuring arbitrary parameters for the provider in config rather than hard coding them into the provider itself to @@ -673,7 +673,7 @@ make providers more reusable. The provider may fetch metrics directly from another source although in this case the agent will probably need to serve the correct CORS headers to prevent browsers from blocking these requests. These may be configured with -[`http_config.response_headers`](/docs/agent/options#response_headers). +[`http_config.response_headers`](/docs/agent/config/agent-config-files#response_headers). Alternatively, the provider may choose to use the [built-in metrics proxy](#metrics-proxy) to avoid cross domain issues or to inject additional diff --git a/website/content/docs/connect/proxies/built-in.mdx b/website/content/docs/connect/proxies/built-in.mdx index 7661320d4..5dc321614 100644 --- a/website/content/docs/connect/proxies/built-in.mdx +++ b/website/content/docs/connect/proxies/built-in.mdx @@ -53,8 +53,8 @@ All fields are optional with a reasonable default. - `bind_port` - The port the proxy will bind its _public_ mTLS listener to. If not provided, the agent will assign a random port from its - configured proxy port range specified by [`sidecar_min_port`](/docs/agent/options#sidecar_min_port) - and [`sidecar_max_port`](/docs/agent/options#sidecar_max_port). + configured proxy port range specified by [`sidecar_min_port`](/docs/agent/config/agent-config-files#sidecar_min_port) + and [`sidecar_max_port`](/docs/agent/config/agent-config-files#sidecar_max_port). - `local_service_address`- The `[address]:port` that the proxy should use to connect to the local application instance. By default diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index babc8af27..b8fad5af7 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -184,7 +184,7 @@ the upstream listeners of any downstream service. One example is how users can define a service's protocol in a [`service-defaults` configuration entry](/docs/connect/config-entries/service-defaults). Agents with -[`enable_central_service_config`](/docs/agent/options#enable_central_service_config) +[`enable_central_service_config`](/docs/agent/config/agent-config-files#enable_central_service_config) set to true will automatically discover the protocol when configuring a proxy for a service. The proxy will discover the main protocol of the service it represents and use this to configure its main public listener. 
It will also diff --git a/website/content/docs/connect/proxies/managed-deprecated.mdx b/website/content/docs/connect/proxies/managed-deprecated.mdx index 6b97e006d..3848ecb64 100644 --- a/website/content/docs/connect/proxies/managed-deprecated.mdx +++ b/website/content/docs/connect/proxies/managed-deprecated.mdx @@ -24,7 +24,7 @@ Managed proxies have been deprecated since Consul 1.3 and have been fully remove in Consul 1.6. Anyone using Managed Proxies should aim to change their workflow as soon as possible to avoid issues with a later upgrade. -After transitioning away from all managed proxy usage, the `proxy` subdirectory inside [`data_dir`](/docs/agent/options#_data_dir) (specified in Consul config) can be deleted to remove extraneous configuration files and free up disk space. +After transitioning away from all managed proxy usage, the `proxy` subdirectory inside [`data_dir`](/docs/agent/config/agent-config-cli#_data_dir) (specified in Consul config) can be deleted to remove extraneous configuration files and free up disk space. **new and known issues will not be fixed**. @@ -79,7 +79,7 @@ via agent configuration files. They _cannot_ be registered via the HTTP API. And 2.) Managed proxies are not started at all if Consul is running as root. Both of these default configurations help prevent arbitrary process execution or privilege escalation. This behavior can be configured -[per-agent](/docs/agent/options). +[per-agent](/docs/agent/config). ### Lifecycle @@ -275,6 +275,6 @@ level logs showing service discovery, certificate and authorization information. ~> **Note:** In `-dev` mode there is no `data_dir` unless one is explicitly configured so logging is disabled. You can access logs by providing the -[`-data-dir`](/docs/agent/options#_data_dir) CLI option. If a data dir is +[`-data-dir`](/docs/agent/config/agent-config-cli#_data_dir) CLI option. If a data dir is configured, this will also cause proxy processes to stay running when the agent terminates as described in [Lifecycle](#lifecycle). diff --git a/website/content/docs/connect/registration/service-registration.mdx b/website/content/docs/connect/registration/service-registration.mdx index d579b9867..8e897da33 100644 --- a/website/content/docs/connect/registration/service-registration.mdx +++ b/website/content/docs/connect/registration/service-registration.mdx @@ -437,8 +437,8 @@ registrations](/docs/discovery/services#service-definition-parameter-case). - `checks` `(bool: false)` - If enabled, all HTTP and gRPC checks registered with the agent are exposed through Envoy. Envoy will expose listeners for these checks and will only accept connections originating from localhost or Consul's - [advertise address](/docs/agent/options#advertise). The port for these listeners are dynamically allocated from - [expose_min_port](/docs/agent/options#expose_min_port) to [expose_max_port](/docs/agent/options#expose_max_port). + [advertise address](/docs/agent/config/agent-config-files#advertise). The port for these listeners are dynamically allocated from + [expose_min_port](/docs/agent/config/agent-config-files#expose_min_port) to [expose_max_port](/docs/agent/config/agent-config-files#expose_max_port). This flag is useful when a Consul client cannot reach registered services over localhost. One example is when running Consul on Kubernetes, and Consul agents run in their own pods. - `paths` `array: []` - A list of paths to expose through Envoy. 
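To illustrate the `expose` settings those registration parameters describe, here is a hedged sketch of a service definition; the service name and port are made up, and the listener ports are allocated by the agent from its `expose_min_port` to `expose_max_port` range at registration time.

```hcl
service {
  # Hypothetical service; the expose block is the point of this sketch.
  name = "web"
  port = 8080

  connect {
    sidecar_service {
      proxy {
        expose {
          # Publish this service's HTTP and gRPC checks through Envoy so the
          # agent can reach them without relying on localhost.
          checks = true
        }
      }
    }
  }
}
```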
diff --git a/website/content/docs/connect/registration/sidecar-service.mdx b/website/content/docs/connect/registration/sidecar-service.mdx index c08bd1791..6b2602f90 100644 --- a/website/content/docs/connect/registration/sidecar-service.mdx +++ b/website/content/docs/connect/registration/sidecar-service.mdx @@ -131,8 +131,8 @@ proxy. - `tags` - Defaults to the tags of the parent service. - `meta` - Defaults to the service metadata of the parent service. - `port` - Defaults to being auto-assigned from a configurable - range specified by [`sidecar_min_port`](/docs/agent/options#sidecar_min_port) - and [`sidecar_max_port`](/docs/agent/options#sidecar_max_port). + range specified by [`sidecar_min_port`](/docs/agent/config/agent-config-files#sidecar_min_port) + and [`sidecar_max_port`](/docs/agent/config/agent-config-files#sidecar_max_port). - `kind` - Defaults to `connect-proxy`. This can't be overridden currently. - `check`, `checks` - By default we add a TCP check on the local address and port for the proxy, and a [service alias diff --git a/website/content/docs/discovery/checks.mdx b/website/content/docs/discovery/checks.mdx index 64dc3de11..8be31decf 100644 --- a/website/content/docs/discovery/checks.mdx +++ b/website/content/docs/discovery/checks.mdx @@ -34,10 +34,10 @@ There are several different kinds of checks: In Consul 0.9.0 and later, script checks are not enabled by default. To use them you can either use : - - [`enable_local_script_checks`](/docs/agent/options#_enable_local_script_checks): + - [`enable_local_script_checks`](/docs/agent/config/agent-config-cli#_enable_local_script_checks): enable script checks defined in local config files. Script checks defined via the HTTP API will not be allowed. - - [`enable_script_checks`](/docs/agent/options#_enable_script_checks): enable + - [`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks): enable script checks regardless of how they are defined. ~> **Security Warning:** Enabling script checks in some configurations may @@ -109,7 +109,7 @@ There are several different kinds of checks: has to be performed is configurable which makes it possible to run containers which have different shells on the same host. Check output for Docker is limited to 4KB. Any output larger than this will be truncated. In Consul 0.9.0 and later, the agent - must be configured with [`enable_script_checks`](/docs/agent/options#_enable_script_checks) + must be configured with [`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks) set to `true` in order to enable Docker health checks. - `gRPC + Interval` - These checks are intended for applications that support the standard @@ -467,7 +467,7 @@ This is the only convention that Consul depends on. Any output of the script will be captured and stored in the `output` field. In Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/options#_enable_script_checks) set to `true` +[`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks) set to `true` in order to enable script checks. ## Initial Health Check Status @@ -543,7 +543,7 @@ provided by the node will remain unchanged. 
## Agent Certificates for TLS Checks -The [enable_agent_tls_for_checks](/docs/agent/options#enable_agent_tls_for_checks) +The [enable_agent_tls_for_checks](/docs/agent/config/agent-config-files#enable_agent_tls_for_checks) agent configuration option can be utilized to have HTTP or gRPC health checks to use the agent's credentials when configured for TLS. diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index 5023b7b12..4a671f246 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -21,18 +21,18 @@ are located in the `us-east-1` datacenter, and have no failing health checks. It's that simple! There are a number of configuration options that are important for the DNS interface, -specifically [`client_addr`](/docs/agent/options#client_addr),[`ports.dns`](/docs/agent/options#dns_port), -[`recursors`](/docs/agent/options#recursors),[`domain`](/docs/agent/options#domain), -[`alt_domain`](/docs/agent/options#alt_domain), and [`dns_config`](/docs/agent/options#dns_config). +specifically [`client_addr`](/docs/agent/config/agent-config-files#client_addr),[`ports.dns`](/docs/agent/config/agent-config-files#dns_port), +[`recursors`](/docs/agent/config/agent-config-files#recursors),[`domain`](/docs/agent/config/agent-config-files#domain), +[`alt_domain`](/docs/agent/config/agent-config-files#alt_domain), and [`dns_config`](/docs/agent/config/agent-config-files#dns_config). By default, Consul will listen on 127.0.0.1:8600 for DNS queries in the `consul.` domain, without support for further DNS recursion. Please consult the -[documentation on configuration options](/docs/agent/options), +[documentation on configuration options](/docs/agent/config), specifically the configuration items linked above, for more details. There are a few ways to use the DNS interface. One option is to use a custom DNS resolver library and point it at Consul. Another option is to set Consul as the DNS server for a node and provide a -[`recursors`](/docs/agent/options#recursors) configuration so that non-Consul queries +[`recursors`](/docs/agent/config/agent-config-files#recursors) configuration so that non-Consul queries can also be resolved. The last method is to forward all queries for the "consul." domain to a Consul agent from the existing DNS server. Review the [DNS Forwarding tutorial](https://learn.hashicorp.com/tutorials/consul/dns-forwarding?utm_source=consul.io&utm_medium=docs) for examples. @@ -412,15 +412,15 @@ are not truncated. ## Alternative Domain By default, Consul responds to DNS queries in the `consul` domain, -but you can set a specific domain for responding to DNS queries by configuring the [`domain`](/docs/agent/options#domain) parameter. +but you can set a specific domain for responding to DNS queries by configuring the [`domain`](/docs/agent/config/agent-config-files#domain) parameter. In some instances, Consul may need to respond to queries in more than one domain, such as during a DNS migration or to distinguish between internal and external queries. Consul versions 1.5.2+ can be configured to respond to DNS queries on an alternative domain -through the [`alt_domain`](/docs/agent/options#alt_domain) agent configuration +through the [`alt_domain`](/docs/agent/config/agent-config-files#alt_domain) agent configuration option. 
As of Consul versions 1.11.0+, Consul's DNS response will use the same domain as was used in the query; -in prior versions, the response may use the primary [`domain`](/docs/agent/options#domain) no matter which +in prior versions, the response may use the primary [`domain`](/docs/agent/config/agent-config-files#domain) no matter which domain was used in the query. In the following example, the `alt_domain` parameter is set to `test-domain`: @@ -448,7 +448,7 @@ machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" ``` -> **PTR queries:** Responses to PTR queries (`.in-addr.arpa.`) will always use the -[primary domain](/docs/agent/options#domain) (not the alternative domain), +[primary domain](/docs/agent/config/agent-config-files#domain) (not the alternative domain), as there is no way for the query to specify a domain. ## Caching @@ -463,8 +463,8 @@ for [DNS caching](https://learn.hashicorp.com/tutorials/consul/dns-caching). By default, Consul DNS queries will return a node's local address, even when being queried from a remote datacenter. If you need to use a different address to reach a node from outside its datacenter, you can configure this behavior -using the [`advertise-wan`](/docs/agent/options#_advertise-wan) and -[`translate_wan_addrs`](/docs/agent/options#translate_wan_addrs) configuration +using the [`advertise-wan`](/docs/agent/config/agent-config-cli#_advertise-wan) and +[`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs) configuration options. ## Namespaced/Partitioned Services @@ -480,7 +480,7 @@ services from other namespaces or partitions the following form can be used: This is the canonical name of a Consul Enterprise service. Currently all parts must be present - in a future version (once the -[`prefer_namespace` configuration](/docs/agent/options#dns_prefer_namespace) has been +[`prefer_namespace` configuration](/docs/agent/config/agent-config-files#dns_prefer_namespace) has been deprecated), the namespace, partition and datacenter components will become optional and may be individually omitted to default to the `default` namespace, local partition or local datacenter respectively. @@ -494,7 +494,7 @@ are enabled, you must first create ACL tokens with the necessary policies. Consul agents resolve DNS requests using one of the preconfigured tokens below, listed in order of precedence: -1. The agent's [`default` token](/docs/agent/options#acl_tokens_default). +1. The agent's [`default` token](/docs/agent/config/agent-config-files#acl_tokens_default). 2. The built-in [`anonymous` token](/docs/security/acl/acl-system#builtin-tokens). Because the anonymous token is used when any request is made to Consul without explicitly specifying a token, production deployments should not apply policies diff --git a/website/content/docs/dynamic-app-config/kv.mdx b/website/content/docs/dynamic-app-config/kv.mdx index 501abf027..ad8c57a4a 100644 --- a/website/content/docs/dynamic-app-config/kv.mdx +++ b/website/content/docs/dynamic-app-config/kv.mdx @@ -39,7 +39,7 @@ privileges on one key for developers to update the value related to their application. The datastore itself is located on the Consul servers in the [data -directory](/docs/agent/options#_data_dir). To ensure data is not lost in +directory](/docs/agent/config/agent-config-cli#_data_dir). To ensure data is not lost in the event of a complete outage, use the [`consul snapshot`](/commands/snapshot/restore) feature to backup the data. 
## Using Consul KV @@ -48,7 +48,7 @@ Objects are opaque to Consul, meaning there are no restrictions on the type of object stored in a key/value entry. The main restriction on an object is size - the maximum is 512 KB. Due to the maximum object size and main use cases, you should not need extra storage; the general [sizing -recommendations](/docs/agent/options#kv_max_value_size) +recommendations](/docs/agent/config/agent-config-files#kv_max_value_size) are usually sufficient. Keys, like objects are not restricted by type and can include any character. diff --git a/website/content/docs/dynamic-app-config/watches.mdx b/website/content/docs/dynamic-app-config/watches.mdx index 328a46719..a3fe837d5 100644 --- a/website/content/docs/dynamic-app-config/watches.mdx +++ b/website/content/docs/dynamic-app-config/watches.mdx @@ -20,7 +20,7 @@ Watches are implemented using blocking queries in the [HTTP API](/api). Agents automatically make the proper API calls to watch for changes and inform a handler when the data view has updated. -Watches can be configured as part of the [agent's configuration](/docs/agent/options#watches), +Watches can be configured as part of the [agent's configuration](/docs/agent/config/agent-config-files#watches), causing them to run once the agent is initialized. Reloading the agent configuration allows for adding or removing watches dynamically. diff --git a/website/content/docs/enterprise/audit-logging.mdx b/website/content/docs/enterprise/audit-logging.mdx index 24e1ee26b..155872ae3 100644 --- a/website/content/docs/enterprise/audit-logging.mdx +++ b/website/content/docs/enterprise/audit-logging.mdx @@ -25,14 +25,14 @@ For more experience leveraging Consul's audit logging functionality, explore our HashiCorp Learn tutorial [Capture Consul Events with Audit Logging](https://learn.hashicorp.com/tutorials/consul/audit-logging). For detailed configuration information on configuring the Consul Enterprise's audit -logging, review the Consul [Audit Log](/docs/agent/options#audit) +logging, review the Consul [Audit Log](/docs/agent/config/agent-config-files#audit) documentation. ## Example Configuration Audit logging must be enabled on every agent in order to accurately capture all operations performed through the HTTP API. To enable logging, add -the [`audit`](/docs/agent/options#audit) stanza to the agent's configuration. +the [`audit`](/docs/agent/config/agent-config-files#audit) stanza to the agent's configuration. -> **Note**: Consul only logs operations which are initiated via the HTTP API. The audit log does not record operations that take place over the internal RPC @@ -42,8 +42,8 @@ communication channel used for agent communication. The following example configures a destination called "My Sink". Since rotation is enabled, -audit events will be stored at files named: `/tmp/audit-.json`. The log file will -be rotated either every 24 hours, or when the log file size is greater than 25165824 bytes +audit events will be stored at files named: `/tmp/audit-.json`. The log file will +be rotated either every 24 hours, or when the log file size is greater than 25165824 bytes (24 megabytes). 
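To make the rotation example above concrete, an `audit` stanza along the following lines would write JSON events to a file sink and rotate the log every 24 hours or once it reaches 25165824 bytes. This is a sketch with a placeholder sink name and path, not a drop-in configuration.

```hcl
# Sketch of an agent audit configuration (Consul Enterprise).
# Rotates the JSON log every 24 hours or at 25165824 bytes (24 megabytes).
audit {
  enabled = true

  sink "My sink" {
    type               = "file"
    format             = "json"
    path               = "/tmp/audit.json"
    delivery_guarantee = "best-effort"
    rotate_duration    = "24h"
    rotate_max_files   = 15
    rotate_bytes       = 25165824
  }
}
```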
diff --git a/website/content/docs/enterprise/license/overview.mdx b/website/content/docs/enterprise/license/overview.mdx index 67947e273..ae99a5874 100644 --- a/website/content/docs/enterprise/license/overview.mdx +++ b/website/content/docs/enterprise/license/overview.mdx @@ -36,7 +36,7 @@ When using these binaries no further action is necessary to configure the licens ### Binaries Without Built In Licenses For Consul Enterprise 1.10.0 or greater, binaries that do not include built in licenses a license must be available at the time the agent starts. -For server agents this means that they must either have the [`license_path`](/docs/agent/options#license_path) +For server agents this means that they must either have the [`license_path`](/docs/agent/config/agent-config-files#license_path) configuration set or have a license configured in the servers environment with the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. Both the configuration item and the `CONSUL_LICENSE_PATH` environment variable point to a file containing the license whereas the `CONSUL_LICENSE` environment @@ -55,9 +55,9 @@ to retrieve the license automatically under specific circumstances. When a client agent starts without a license in its configuration or environment, it will try to retrieve the license from the servers via RPCs. That RPC always requires a valid non-anonymous ACL token to authorize the request but the token doesn't need any particular permissions. As the license is required before the client -actually joins the cluster, where to make those RPC requests to is inferred from the [`start_join`](/docs/agent/options#start_join) -or [`retry_join`](/docs/agent/options#retry_join) configurations. If those are both unset or no -[`agent` token](/docs/agent/options#acl_tokens_agent) is set then the client agent will immediately shut itself down. +actually joins the cluster, where to make those RPC requests to is inferred from the [`start_join`](/docs/agent/config/agent-config-files#start_join) +or [`retry_join`](/docs/agent/config/agent-config-files#retry_join) configurations. If those are both unset or no +[`agent` token](/docs/agent/config/agent-config-files#acl_tokens_agent) is set then the client agent will immediately shut itself down. If all preliminary checks pass the client agent will attempt to reach out to any server on its RPC port to request the license. These requests will be retried for up to 5 minutes and if it is unable to retrieve a diff --git a/website/content/docs/enterprise/network-segments.mdx b/website/content/docs/enterprise/network-segments.mdx index f7c011c5c..d36d121ce 100644 --- a/website/content/docs/enterprise/network-segments.mdx +++ b/website/content/docs/enterprise/network-segments.mdx @@ -15,7 +15,7 @@ description: |- Consul requires full connectivity between all agents (servers and clients) in a -[datacenter](/docs/agent/options#_datacenter) within a given +[datacenter](/docs/agent/config/agent-config-cli#_datacenter) within a given LAN gossip pool. By default, all Consul agents will be a part of one shared Serf LAN gossip pool known as the `` network segment, thus requiring full mesh connectivity within the datacenter. @@ -46,7 +46,7 @@ Consul networking models and their capabilities. 
**Cluster:** A set of Consul servers forming a Raft quorum along with a collection of Consul clients, all set to the same -[datacenter](/docs/agent/options#_datacenter), and joined together to form +[datacenter](/docs/agent/config/agent-config-cli#_datacenter), and joined together to form what we will call a "local cluster". Consul clients discover the Consul servers in their local cluster through the gossip mechanism and make RPC requests to them. LAN Gossip (OSS) is an open intra-cluster networking model, and Network @@ -72,7 +72,7 @@ group of agents to only connect with the agents in its segment. Server agents are members of all segments. The datacenter includes a `` segment, as well as additional segments defined in the -[`segments`](/docs/agent/options#segments) server agent configuration option. +[`segments`](/docs/agent/config/agent-config-files#segments) server agent configuration option. Each additional segment is defined by: - a non-empty name @@ -129,19 +129,19 @@ segments = [ -The server [agent configuration](/docs/agent/options) options relevant to network +The server [agent configuration](/docs/agent/config/agent-config-files) options relevant to network segments are: -- [`ports.serf_lan`](/docs/agent/options#serf_lan_port): The Serf LAN port on this server +- [`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port): The Serf LAN port on this server for the `` network segment's gossip pool. -- [`segments`](/docs/agent/options#segments): A list of user-defined network segments +- [`segments`](/docs/agent/config/agent-config-files#segments): A list of user-defined network segments on this server, including their names and Serf LAN ports. ## Client Configuration Each client agent can only be a member of one segment at a time. This will be the `` segment unless otherwise specified in the agent's -[`segment`](/docs/agent/options#_segment) agent configuration option. +[`segment`](/docs/agent/config/agent-config-cli#segment) agent configuration option. ### Join a Client to a Segment ((#join_a_client_to_a_segment)) @@ -154,14 +154,14 @@ configured segment. Clients A and B specify the same segment S. Client B is already joined to the segment S LAN gossip pool. Client A wants to join via Client B. In order to do so, Client A -must connect to Client B's configured [Serf LAN port](/docs/agent/options#serf_lan_port). +must connect to Client B's configured [Serf LAN port](/docs/agent/config/agent-config-files#serf_lan_port). Client A specifies segment S and wants to join the segment S gossip pool via Server 1. In order to do so, Client A must connect to Server 1's configured [Serf LAN port -for segment S](/docs/agent/options#segment_port). +for segment S](/docs/agent/config/agent-config-files#segment_port). @@ -171,12 +171,12 @@ of precedence: 1. **Specify an explicit port in the join address**. This can be done at the CLI when starting the agent (e.g., `consul agent -retry-join "client-b-address:8303"`), or in the agent's - configuration using the [retry-join option](/docs/agent/options#retry_join). This method + configuration using the [retry-join option](/docs/agent/config/agent-config-files#retry_join). This method is not compatible with [cloud auto-join](/docs/install/cloud-auto-join#auto-join-with-network-segments). 2. **Specify an alternate Serf LAN port for the agent**. 
This can be done at the CLI when starting the agent (e.g., `consul agent -retry-join "client-b-address" -serf-lan-port 8303`), or in - the agent's configuration using the [serf_lan](/docs/agent/options#serf_lan_port) option. + the agent's configuration using the [serf_lan](/docs/agent/config/agent-config-files#serf_lan_port) option. When a Serf LAN port is not explicitly specified in the join address, the agent will attempt to join the target host at the Serf LAN port specified in CLI or agent configuration. @@ -221,15 +221,15 @@ ports = { -The client [agent configuration](/docs/agent/options) options relevant to network +The client [agent configuration](/docs/agent/config/agent-config-files) options relevant to network segments are: -- [`segment`](/docs/agent/options#segment-2): The name of the network segment this +- [`segment`](/docs/agent/config/agent-config-files#segment-2): The name of the network segment this client agent belongs to. -- [`ports.serf_lan`](/docs/agent/options#serf_lan_port): +- [`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port): Serf LAN port for the above segment on this client. This is not required to match the configured Serf LAN port for other agents on this segment. -- [`retry_join`](/docs/agent/options#retry_join) or - [`start_join`](/docs/agent/options#start_join): A list of agent addresses to join +- [`retry_join`](/docs/agent/config/agent-config-files#retry_join) or + [`start_join`](/docs/agent/config/agent-config-files#start_join): A list of agent addresses to join when starting. Ensure the correct Serf LAN port for this segment is used when joining the LAN gossip pool using one of the [available configuration methods](#join_a_client_to_a_segment). diff --git a/website/content/docs/enterprise/read-scale.mdx b/website/content/docs/enterprise/read-scale.mdx index 65faf68f9..c7b6006b5 100644 --- a/website/content/docs/enterprise/read-scale.mdx +++ b/website/content/docs/enterprise/read-scale.mdx @@ -19,6 +19,6 @@ to include voting servers and read replicas. Read replicas still receive data fr however, they do not take part in quorum election operations. Expanding your Consul cluster in this way can scale reads without impacting write latency. -For more details, review the [Consul server configuration](/docs/agent/options) -documentation and the [-read-replica](/docs/agent/options#_read_replica) +For more details, review the [Consul server configuration](/docs/agent/config) +documentation and the [-read-replica](/docs/agent/config/agent-config-cli#_read_replica) configuration flag. diff --git a/website/content/docs/index.mdx b/website/content/docs/index.mdx index d9ff3ca89..6388baf51 100644 --- a/website/content/docs/index.mdx +++ b/website/content/docs/index.mdx @@ -16,5 +16,5 @@ and a link to our guides that walk you through common tasks. Note that the guides are located on the HashiCorp Learn site. - Follow [the documentation](/docs/install) to install Consul either with a precompiled binary or from source. -- Read more about the [configuration options](/docs/agent/options) for Consul servers and clients. +- Read more about the [configuration options](/docs/agent/config) for Consul servers and clients. - Get started using Consul with our step-by-step guides at [HashiCorp Learn](https://learn.hashicorp.com/consul). 
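As a rough sketch of the client-side network segment options discussed above (Consul Enterprise), a client agent joining a user-defined segment over a non-default Serf LAN port might look like the following. The segment name, port, and join address are assumptions chosen for illustration; the port must match the segment's port configured on the servers.

```hcl
# Sketch of a client agent configuration for a user-defined segment.
node_name  = "client-a"
datacenter = "dc1"
segment    = "alpha"

ports {
  serf_lan = 8303
}

# Join address must target the segment's Serf LAN port.
retry_join = ["consul-server-1:8303"]
```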
diff --git a/website/content/docs/install/bootstrapping.mdx b/website/content/docs/install/bootstrapping.mdx index 107d2be0a..a191be28a 100644 --- a/website/content/docs/install/bootstrapping.mdx +++ b/website/content/docs/install/bootstrapping.mdx @@ -30,16 +30,16 @@ as data loss is inevitable in a failure scenario. Please refer to the Manual bootstrapping with `-bootstrap` is not recommended in newer versions of Consul (0.5 and newer) as it is more error-prone. Instead you should use automatic bootstrapping -with [`-bootstrap-expect`](/docs/agent/options#_bootstrap_expect). +with [`-bootstrap-expect`](/docs/agent/config/agent-config-cli#_bootstrap_expect). ## Bootstrapping the Servers -The recommended way to bootstrap the servers is to use the [`-bootstrap-expect`](/docs/agent/options#_bootstrap_expect) +The recommended way to bootstrap the servers is to use the [`-bootstrap-expect`](/docs/agent/config/agent-config-cli#_bootstrap_expect) configuration option. This option informs Consul of the expected number of server nodes and automatically bootstraps when that many servers are available. To prevent inconsistencies and split-brain (clusters where multiple servers consider themselves leader) situations, you should either specify the same value for -[`-bootstrap-expect`](/docs/agent/options#_bootstrap_expect) +[`-bootstrap-expect`](/docs/agent/config/agent-config-cli#_bootstrap_expect) or specify no value at all on all the servers. Only servers that specify a value will attempt to bootstrap the cluster. Suppose we are starting a three server cluster. We can start `Node A`, `Node B`, and `Node C` with each @@ -61,11 +61,11 @@ You can trigger leader election by joining the servers together, to create a clu There are multiple options for joining the servers. Choose the method which best suits your environment and specific use case. - Specify a list of servers with - [-join](/docs/agent/options#_join) and - [start_join](/docs/agent/options#start_join) + [-join](/docs/agent/config/agent-config-cli#_join) and + [start_join](/docs/agent/config/agent-config-files#start_join) options. -- Specify a list of servers with [-retry-join](/docs/agent/options#_retry_join) option. -- Use automatic joining by tag for supported cloud environments with the [-retry-join](/docs/agent/options#_retry_join) option. +- Specify a list of servers with [-retry-join](/docs/agent/config/agent-config-cli#_retry_join) option. +- Use automatic joining by tag for supported cloud environments with the [-retry-join](/docs/agent/config/agent-config-cli#_retry_join) option. All three methods can be set in the agent configuration file or the command line flag. diff --git a/website/content/docs/install/cloud-auto-join.mdx b/website/content/docs/install/cloud-auto-join.mdx index 064ca2fa0..78dc2b310 100644 --- a/website/content/docs/install/cloud-auto-join.mdx +++ b/website/content/docs/install/cloud-auto-join.mdx @@ -69,7 +69,7 @@ to use port `8303` as its Serf LAN port prior to attempting to join the cluster. The following example configuration overrides the default Serf LAN port using the -[`ports.serf_lan`](/docs/agent/options#serf_lan_port) configuration option. +[`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port) configuration option. @@ -85,7 +85,7 @@ ports { The following example overrides the default Serf LAN port using the -[`-serf-lan-port`](/docs/agent/options#_serf_lan_port) command line flag. +[`-serf-lan-port`](/docs/agent/config/agent-config-cli#_serf_lan_port) command line flag. 
```shell $ consul agent -serf-lan-port=8303 -retry-join "provider=..." diff --git a/website/content/docs/install/manual-bootstrap.mdx b/website/content/docs/install/manual-bootstrap.mdx index 1bb179786..46f8cab3e 100644 --- a/website/content/docs/install/manual-bootstrap.mdx +++ b/website/content/docs/install/manual-bootstrap.mdx @@ -23,7 +23,7 @@ storing the cluster state. The client nodes are mostly stateless and rely on the server nodes, so they can be started easily. Manual bootstrapping requires that the first server that is deployed in a new -datacenter provide the [`-bootstrap` configuration option](/docs/agent/options#_bootstrap). +datacenter provide the [`-bootstrap` configuration option](/docs/agent/config/agent-config-cli#_bootstrap). This option allows the server to assert leadership of the cluster without agreement from any other server. This is necessary because at this point, there are no other servers running in diff --git a/website/content/docs/install/performance.mdx b/website/content/docs/install/performance.mdx index ca9c0e538..3be2d3801 100644 --- a/website/content/docs/install/performance.mdx +++ b/website/content/docs/install/performance.mdx @@ -18,7 +18,7 @@ reads work from a fully in-memory data store that is optimized for concurrent ac ## Minimum Server Requirements ((#minimum)) -In Consul 0.7, the default server [performance parameters](/docs/agent/options#performance) +In Consul 0.7, the default server [performance parameters](/docs/agent/config/agent-config-files#performance) were tuned to allow Consul to run reliably (but relatively slowly) on a server cluster of three [AWS t2.micro](https://aws.amazon.com/ec2/instance-types/) instances. These thresholds were determined empirically using a leader instance that was under sufficient read, write, @@ -43,7 +43,7 @@ The default performance configuration is equivalent to this: ## Production Server Requirements ((#production)) When running Consul 0.7 and later in production, it is recommended to configure the server -[performance parameters](/docs/agent/options#performance) back to Consul's original +[performance parameters](/docs/agent/config/agent-config-files#performance) back to Consul's original high-performance settings. This will let Consul servers detect a failed leader and complete leader elections much more quickly than the default configuration which extends key Raft timeouts by a factor of 5, so it can be quite slow during these events. @@ -103,14 +103,14 @@ Here are some general recommendations: issues between the servers or insufficient CPU resources. Users in cloud environments often bump their servers up to the next instance class with improved networking and CPU until leader elections stabilize, and in Consul 0.7 or later the [performance - parameters](/docs/agent/options#performance) configuration now gives you tools + parameters](/docs/agent/config/agent-config-files#performance) configuration now gives you tools to trade off performance instead of upsizing servers. You can use the [`consul.raft.leader.lastContact` telemetry](/docs/agent/telemetry#leadership-changes) to observe how the Raft timing is performing and guide the decision to de-tune Raft performance or add more powerful servers. 
- For DNS-heavy workloads, configuring all Consul agents in a cluster with the - [`allow_stale`](/docs/agent/options#allow_stale) configuration option will allow reads to + [`allow_stale`](/docs/agent/config/agent-config-files#allow_stale) configuration option will allow reads to scale across all Consul servers, not just the leader. Consul 0.7 and later enables stale reads for DNS by default. See [Stale Reads](https://learn.hashicorp.com/tutorials/consul/dns-caching#stale-reads) in the [DNS Caching](https://learn.hashicorp.com/tutorials/consul/dns-caching) guide for more details. It's also good to set @@ -121,7 +121,7 @@ Here are some general recommendations: [stale consistency mode](/api-docs/features/consistency#stale) available to allow reads to scale across all the servers and not just be forwarded to the leader. -- In Consul 0.9.3 and later, a new [`limits`](/docs/agent/options#limits) configuration is +- In Consul 0.9.3 and later, a new [`limits`](/docs/agent/config/agent-config-files#limits) configuration is available on Consul clients to limit the RPC request rate they are allowed to make against the Consul servers. After hitting the limit, requests will start to return rate limit errors until time has passed and more requests are allowed. Configuring this across the cluster can help with @@ -156,11 +156,11 @@ For **write-heavy** workloads, the total RAM available for overhead must approxi RAM NEEDED = number of keys * average key size * 2-3x ``` -Since writes must be synced to disk (persistent storage) on a quorum of servers before they are committed, deploying a disk with high write throughput (or an SSD) will enhance performance on the write side. ([Documentation](/docs/agent/options#_data_dir)) +Since writes must be synced to disk (persistent storage) on a quorum of servers before they are committed, deploying a disk with high write throughput (or an SSD) will enhance performance on the write side. ([Documentation](/docs/agent/config/agent-config-cli#_data_dir)) For a **read-heavy** workload, configure all Consul server agents with the `allow_stale` DNS option, or query the API with the `stale` [consistency mode](/api-docs/features/consistency). By default, all queries made to the server are RPC forwarded to and serviced by the leader. By enabling stale reads, any server will respond to any query, thereby reducing overhead on the leader. Typically, the stale response is `100ms` or less from consistent mode but it drastically improves performance and reduces latency under high load. -If the leader server is out of memory or the disk is full, the server eventually stops responding, loses its election and cannot move past its last commit time. However, by configuring `max_stale` and setting it to a large value, Consul will continue to respond to queries during such outage scenarios. ([max_stale documentation](/docs/agent/options#max_stale)). +If the leader server is out of memory or the disk is full, the server eventually stops responding, loses its election and cannot move past its last commit time. However, by configuring `max_stale` and setting it to a large value, Consul will continue to respond to queries during such outage scenarios. ([max_stale documentation](/docs/agent/config/agent-config-files#max_stale)). It should be noted that `stale` is not appropriate for coordination where strong consistency is important (i.e. locking or application leader election). 
For critical cases, the optional `consistent` API query mode is required for true linearizability; the trade off is that this turns a read into a full quorum write so requires more resources and takes longer. @@ -168,7 +168,7 @@ It should be noted that `stale` is not appropriate for coordination where strong Consul’s agents use network sockets for communicating with the other nodes (gossip) and with the server agent. In addition, file descriptors are also opened for watch handlers, health checks, and log files. For a **write heavy** cluster, the `ulimit` size must be increased from the default value (`1024`) to prevent the leader from running out of file descriptors. -To prevent any CPU spikes from a misconfigured client, RPC requests to the server should be [rate limited](/docs/agent/options#limits) +To prevent any CPU spikes from a misconfigured client, RPC requests to the server should be [rate limited](/docs/agent/config/agent-config-files#limits) ~> **NOTE** Rate limiting is configured on the client agent only. @@ -191,8 +191,8 @@ Smearing requests over 30s is sufficient to bring RPC load to a reasonable level in all but the very largest clusters, but the extra CPU load from cryptographic operations could impact the server's normal work. To limit that, Consul since 1.4.1 exposes two ways to limit the impact Certificate signing has on the leader -[`csr_max_per_second`](/docs/agent/options#ca_csr_max_per_second) and -[`csr_max_concurrent`](/docs/agent/options#ca_csr_max_concurrent). +[`csr_max_per_second`](/docs/agent/config/agent-config-files#ca_csr_max_per_second) and +[`csr_max_concurrent`](/docs/agent/config/agent-config-files#ca_csr_max_concurrent). By default we set a limit of 50 per second which is reasonable on modest hardware but may be too low and impact rotation times if more than 1500 service diff --git a/website/content/docs/install/ports.mdx b/website/content/docs/install/ports.mdx index a9763327c..6cbc9eb82 100644 --- a/website/content/docs/install/ports.mdx +++ b/website/content/docs/install/ports.mdx @@ -55,4 +55,4 @@ the Serf WAN port (TCP/UDP) to be listening on both WAN and LAN interfaces. See **Server RPC** This is used by servers to handle incoming requests from other agents. -Note, the default ports can be changed in the [agent configuration](/docs/agent/options#ports). +Note, the default ports can be changed in the [agent configuration](/docs/agent/config/agent-config-files#ports). diff --git a/website/content/docs/k8s/connect/connect-ca-provider.mdx b/website/content/docs/k8s/connect/connect-ca-provider.mdx index 4fb45c82c..5025fcd7b 100644 --- a/website/content/docs/k8s/connect/connect-ca-provider.mdx +++ b/website/content/docs/k8s/connect/connect-ca-provider.mdx @@ -200,5 +200,5 @@ To update any settings under these keys, you must use Consul's [Update CA Config To renew the Vault token, use the [`vault token renew`](https://www.vaultproject.io/docs/commands/token/renew) CLI command or API. -[`ca_config`]: /docs/agent/options#connect_ca_config -[`ca_provider`]: /docs/agent/options#connect_ca_provider +[`ca_config`]: /docs/agent/config/agent-config-files#connect_ca_config +[`ca_provider`]: /docs/agent/config/agent-config-files#connect_ca_provider diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index 0072528c1..f2ca6d800 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -58,7 +58,7 @@ Use these links to navigate to a particular top-level stanza. the prefix will be `-consul`. 
- `domain` ((#v-global-domain)) (`string: consul`) - The domain Consul will answer DNS queries for - (see `-domain` (https://consul.io/docs/agent/options#_domain)) and the domain services synced from + (see `-domain` (https://consul.io/docs/agent/config/agent-config-cli#_domain)) and the domain services synced from Consul into Kubernetes will have, e.g. `service-name.service.consul`. - `adminPartitions` ((#v-global-adminpartitions)) - Enabling `adminPartitions` allows creation of Admin Partitions in Kubernetes clusters. @@ -261,7 +261,7 @@ Use these links to navigate to a particular top-level stanza. ``` - `gossipEncryption` ((#v-global-gossipencryption)) - Configures Consul's gossip encryption key. - (see `-encrypt` (https://consul.io/docs/agent/options#_encrypt)). + (see `-encrypt` (https://consul.io/docs/agent/config/agent-config-cli#_encrypt)). By default, gossip encryption is not enabled. The gossip encryption key may be set automatically or manually. The recommended method is to automatically generate the key. To automatically generate and set a gossip encryption key, set autoGenerate to true. @@ -292,7 +292,7 @@ Use these links to navigate to a particular top-level stanza. - `recursors` ((#v-global-recursors)) (`array: []`) - A list of addresses of upstream DNS servers that are used to recursively resolve DNS queries. These values are given as `-recursor` flags to Consul servers and clients. - See https://www.consul.io/docs/agent/options#_recursor for more details. + See https://www.consul.io/docs/agent/config/agent-config-cli#_recursor for more details. If this is an empty array (the default), then Consul DNS will only resolve queries for the Consul top level domain (by default `.consul`). - `tls` ((#v-global-tls)) - Enables TLS (https://learn.hashicorp.com/tutorials/consul/tls-encryption-secure) @@ -663,7 +663,7 @@ Use these links to navigate to a particular top-level stanza. --set 'server.disruptionBudget.maxUnavailable=0'` flag to the helm chart installation command because of a limitation in the Helm templating language. - - `extraConfig` ((#v-server-extraconfig)) (`string: {}`) - A raw string of extra JSON configuration (https://consul.io/docs/agent/options) for Consul + - `extraConfig` ((#v-server-extraconfig)) (`string: {}`) - A raw string of extra JSON configuration (https://consul.io/docs/agent/config) for Consul servers. This will be saved as-is into a ConfigMap that is read by the Consul server agents. This can be used to add additional configuration that isn't directly exposed by the chart. @@ -864,7 +864,7 @@ Use these links to navigate to a particular top-level stanza. - `image` ((#v-client-image)) (`string: null`) - The name of the Docker image (including any tag) for the containers running Consul client agents. - - `join` ((#v-client-join)) (`array: null`) - A list of valid `-retry-join` values (https://consul.io/docs/agent/options#retry-join). + - `join` ((#v-client-join)) (`array: null`) - A list of valid `-retry-join` values (https://consul.io/docs/agent/config/agent-config-files#retry-join). If this is `null` (default), then the clients will attempt to automatically join the server cluster running within Kubernetes. This means that with `server.enabled` set to true, clients will automatically @@ -885,7 +885,7 @@ Use these links to navigate to a particular top-level stanza. required for Connect. 
- `nodeMeta` ((#v-client-nodemeta)) - nodeMeta specifies an arbitrary metadata key/value pair to associate with the node - (see https://www.consul.io/docs/agent/options.html#_node_meta) + (see https://www.consul.io/docs/agent/config/agent-config-cli#_node_meta) - `pod-name` ((#v-client-nodemeta-pod-name)) (`string: ${HOSTNAME}`) @@ -929,7 +929,7 @@ Use these links to navigate to a particular top-level stanza. - `tlsInit` ((#v-client-containersecuritycontext-tlsinit)) (`map`) - The tls-init initContainer - - `extraConfig` ((#v-client-extraconfig)) (`string: {}`) - A raw string of extra JSON configuration (https://consul.io/docs/agent/options) for Consul + - `extraConfig` ((#v-client-extraconfig)) (`string: {}`) - A raw string of extra JSON configuration (https://consul.io/docs/agent/config) for Consul clients. This will be saved as-is into a ConfigMap that is read by the Consul client agents. This can be used to add additional configuration that isn't directly exposed by the chart. @@ -1238,7 +1238,7 @@ Use these links to navigate to a particular top-level stanza. will inherit from `global.metrics.enabled` value. - `provider` ((#v-ui-metrics-provider)) (`string: prometheus`) - Provider for metrics. See - https://www.consul.io/docs/agent/options#ui_config_metrics_provider + https://www.consul.io/docs/agent/config/agent-config-files#ui_config_metrics_provider This value is only used if `ui.enabled` is set to true. - `baseURL` ((#v-ui-metrics-baseurl)) (`string: http://prometheus-server`) - baseURL is the URL of the prometheus server, usually the service URL. diff --git a/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx b/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx index 7c021e06a..6cc85bb0f 100644 --- a/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx +++ b/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx @@ -22,8 +22,8 @@ you want the clients to be exposed on the Kubernetes internal node IPs (`true`) their pod IPs (`false`). Finally, `client.join` is set to an array of valid -[`-retry-join` values](/docs/agent/options#retry-join). In the -example above, a fake [cloud auto-join](/docs/install/cloud-auto-join) +[`-retry-join` values](/docs/agent/config/agent-config-cli#retry-join). In the +example above, a fake [cloud auto-join](/docs/agent/cloud-auto-join) value is specified. This should be set to resolve to the proper addresses of your existing Consul cluster. diff --git a/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx b/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx index 6c9054903..1c37161e0 100644 --- a/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx +++ b/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx @@ -271,8 +271,8 @@ The automatically generated federation secret contains: - **Consul server config** - This is a JSON snippet that must be used as part of the server config for secondary datacenters. It sets: - - [`primary_datacenter`](/docs/agent/options#primary_datacenter) to the name of the primary datacenter. - - [`primary_gateways`](/docs/agent/options#primary_gateways) to an array of IPs or hostnames + - [`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter) to the name of the primary datacenter. 
+ - [`primary_gateways`](/docs/agent/config/agent-config-files#primary_gateways) to an array of IPs or hostnames for the mesh gateways in the primary datacenter. These are the addresses that Consul servers in secondary clusters will use to communicate with the primary datacenter. diff --git a/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx b/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx index 73e0c079e..f792ae115 100644 --- a/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx +++ b/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx @@ -95,7 +95,7 @@ The following sections detail how to export this data. ==> Saved dc1-client-consul-0-key.pem ``` - Or use the [auto_encrypt](/docs/agent/options#auto_encrypt) feature. + Or use the [auto_encrypt](/docs/agent/config/agent-config-files#auto_encrypt) feature. ### Mesh Gateway Addresses diff --git a/website/content/docs/nia/configuration.mdx b/website/content/docs/nia/configuration.mdx index 71657d8cb..52da3e570 100644 --- a/website/content/docs/nia/configuration.mdx +++ b/website/content/docs/nia/configuration.mdx @@ -61,7 +61,7 @@ tls { The `consul` block is used to configure CTS connection with a Consul agent to perform queries to the Consul Catalog and Consul KV pertaining to task execution. --> **Note:** Use HTTP/2 to improve CTS performance when communicating with the local Consul process. [TLS/HTTPS](/docs/agent/options) must be configured for the local Consul with the [cert_file](/docs/agent/options#cert_file) and [key_file](/docs/agent/options#key_file) parameters set. For the CTS configuration, set `tls.enabled = true` and set the `address` parameter to the HTTPS URL, e.g., `address = example.consul.com:8501`. If using self-signed certificates for Consul, you will also need to set `tls.verify = false` or add the certificate to `ca_cert` or `ca_path`. +-> **Note:** Use HTTP/2 to improve Consul-Terraform-Sync performance when communicating with the local Consul process. [TLS/HTTPS](/docs/agent/config/agent-config-files) must be configured for the local Consul with the [cert_file](/docs/agent/config/agent-config-files#cert_file) and [key_file](/docs/agent/config/agent-config-files#key_file) parameters set. For the Consul-Terraform-Sync configuration, set `tls.enabled = true` and set the `address` parameter to the HTTPS URL, e.g., `address = example.consul.com:8501`. If using self-signed certificates for Consul, you will also need to set `tls.verify = false` or add the certificate to `ca_cert` or `ca_path`. To read more on suggestions for configuring the Consul agent, see [run an agent](/docs/nia/installation/requirements#run-an-agent). @@ -80,7 +80,7 @@ consul { - `enabled` - (bool) - `username` - (string) - `password` - (string) -- `tls` - Configure TLS to use a secure client connection with Consul. Using HTTP/2 can solve issues related to hitting Consul's maximum connection limits, as well as improve efficiency when processing many blocking queries. This option is required for CTS when connecting to a [Consul agent with TLS verification enabled for HTTPS connections](/docs/agent/options#verify_incoming). +- `tls` - Configure TLS to use a secure client connection with Consul. Using HTTP/2 can solve issues related to hitting Consul's maximum connection limits, as well as improve efficiency when processing many blocking queries.
This option is required for Consul-Terraform-Sync when connecting to a [Consul agent with TLS verification enabled for HTTPS connections](/docs/agent/config/agent-config-files#verify_incoming). - `enabled` - (bool) Enable TLS. Providing a value for any of the TLS options will enable this parameter implicitly. - `verify` - (bool: true) Enables TLS peer verification. The default is enabled, which will check the global certificate authority (CA) chain to make sure the certificates returned by Consul are valid. - If Consul is using a self-signed certificate that you have not added to the global CA chain, you can set this certificate with `ca_cert` or `ca_path`. Alternatively, you can disable SSL verification by setting `verify` to false. However, disabling verification is a potential security vulnerability. @@ -98,7 +98,7 @@ consul { - `max_idle_conns` - (int: 0) The maximum number of total idle connections across all hosts. The limit is disabled by default. - `max_idle_conns_per_host` - (int: 100) The maximum number of idle connections per remote host. The majority of connections are established with one host, the Consul agent. - To achieve the shortest latency between a Consul service update to a task execution, configure `max_idle_conns_per_host` equal to or greater than the number of services in automation across all tasks. - - This value should be lower than the configured [`http_max_conns_per_client`](/docs/agent/options#http_max_conns_per_client) for the Consul agent. If `max_idle_conns_per_host` and the number of services in automation is greater than the Consul agent limit, CTS may error due to connection limits (status code 429). You may increase the agent limit with caution. _Note: requests to the Consul agent made by Terraform subprocesses or any other process on the same host as CTS will contribute to the Consul agent connection limit._ + - This value should be lower than the configured [`http_max_conns_per_client`](/docs/agent/config/agent-config-files#http_max_conns_per_client) for the Consul agent. If `max_idle_conns_per_host` and the number of services in automation is greater than the Consul agent limit, Consul-Terraform-Sync may error due to connection limits (status code 429). You may increase the agent limit with caution. _Note: requests to the Consul agent made by Terraform subprocesses or any other process on the same host as Consul-Terraform-Sync will contribute to the Consul agent connection limit._ - `tls_handshake_timeout` - (string: "10s") amount of time to wait to complete the TLS handshake. ## Service diff --git a/website/content/docs/nia/installation/requirements.mdx b/website/content/docs/nia/installation/requirements.mdx index ee30a49f4..27f482699 100644 --- a/website/content/docs/nia/installation/requirements.mdx +++ b/website/content/docs/nia/installation/requirements.mdx @@ -35,7 +35,7 @@ The Consul agent must be running in order to dynamically update network devices. When running a Consul agent with CTS in production, we suggest to keep a few considerations in mind. CTS uses [blocking queries](/api-docs/features/blocking) to monitor task dependencies, like changes to registered services. This results in multiple long running TCP connections between CTS and the agent to poll changes for each dependency. Monitoring a high number of services may quickly hit the default Consul agent connection limits. -There are 2 ways to fix this issue. The first and recommended fix is to use HTTP/2 (requires HTTPS) to communicate between CTS and the Consul agent. 
When using HTTP/2 only a single connection is made and reused for all communications. See the [Consul Configuration section](/docs/nia/configuration#consul) for more. The other option is to configure [`limits.http_max_conns_per_client`](/docs/agent/options#http_max_conns_per_client) for the agent to a reasonable value proportional to the number of services monitored by CTS. +There are 2 ways to fix this issue. The first and recommended fix is to use HTTP/2 (requires HTTPS) to communicate between Consul-Terraform-Sync and the Consul agent. When using HTTP/2 only a single connection is made and reused for all communications. See the [Consul Configuration section](/docs/nia/configuration#consul) for more. The other option is to configure [`limits.http_max_conns_per_client`](/docs/agent/config/agent-config-files#http_max_conns_per_client) for the agent to a reasonable value proportional to the number of services monitored by Consul-Terraform-Sync. ### Register Services diff --git a/website/content/docs/releases/release-notes/v1_9_0.mdx b/website/content/docs/releases/release-notes/v1_9_0.mdx index ee22d4ecc..1c7610aca 100644 --- a/website/content/docs/releases/release-notes/v1_9_0.mdx +++ b/website/content/docs/releases/release-notes/v1_9_0.mdx @@ -21,7 +21,7 @@ page_title: 1.9.0 - **Active Health Checks for Consul on Kubernetes:** Consul service mesh now integrates with Kubernetes Readiness probes. This provides the ability to natively detect health status from Kubernetes via Readiness probe, and is then used for directing service mesh traffic. - **Streaming:** This feature introduces a major architectural enhancement in how update notifications for blocking queries are delivered within the cluster. Streaming results in very significant reduction of CPU and network bandwidth usage on Consul servers in large-scale deployments. Streaming is particularly helpful in scaling blocking queries in Consul clusters that have rapid changes in service state. - - Streaming is now available for the service health HTTP endpoint, and can be enabled through the [`use_streaming_backend`](/docs/agent/options#use_streaming_backend) client configuration option, and [`rpc.enable_streaming`](/docs/agent/options#rpc_enable_streaming) option on the servers. We will continue to enable streaming in more endpoints in subsequent releases. + - Streaming is now available for the service health HTTP endpoint, and can be enabled through the [`use_streaming_backend`](/docs/agent/config/agent-config-files#use_streaming_backend) client configuration option, and [`rpc.enable_streaming`](/docs/agent/config/agent-config-files#rpc_enable_streaming) option on the servers. We will continue to enable streaming in more endpoints in subsequent releases. ## What's Changed diff --git a/website/content/docs/security/acl/acl-legacy.mdx b/website/content/docs/security/acl/acl-legacy.mdx index ea58bef0d..17fcbae22 100644 --- a/website/content/docs/security/acl/acl-legacy.mdx +++ b/website/content/docs/security/acl/acl-legacy.mdx @@ -89,7 +89,7 @@ and [Policies](/api-docs/acl/policies). ~> **Warning**: In this document we use the deprecated configuration parameter `acl_datacenter`. In Consul 1.4 and newer the -parameter has been updated to [`primary_datacenter`](/docs/agent/options#primary_datacenter). +parameter has been updated to [`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter). Consul provides an optional Access Control List (ACL) system which can be used to control access to data and APIs. 
The ACL is @@ -129,7 +129,7 @@ token are automatically applied. The anonymous token is managed using the Tokens are bound to a set of rules that control which Consul resources the token has access to. Policies can be defined in either an allowlist or denylist mode depending on the configuration of -[`acl_default_policy`](/docs/agent/options#acl_default_policy). If the default +[`acl_default_policy`](/docs/agent/config/agent-config-files#acl_default_policy). If the default policy is to "deny" all actions, then token rules can be set to allowlist specific actions. In the inverse, the "allow" all default behavior is a denylist where rules are used to prohibit actions. By default, Consul will allow all actions. @@ -169,7 +169,7 @@ Constructing rules from these policies is covered in detail in the #### ACL Datacenter All nodes (clients and servers) must be configured with a -[`acl_datacenter`](/docs/agent/options#acl_datacenter) which enables ACL +[`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) which enables ACL enforcement but also specifies the authoritative datacenter. Consul relies on [RPC forwarding](/docs/architecture) to support multi-datacenter configurations. However, because requests can be made across datacenter boundaries, @@ -179,14 +179,14 @@ is considered authoritative and stores the canonical set of tokens. When a request is made to an agent in a non-authoritative datacenter, it must be resolved into the appropriate policy. This is done by reading the token from the authoritative server and caching the result for a configurable -[`acl_ttl`](/docs/agent/options#acl_ttl). The implication of caching is that +[`acl_ttl`](/docs/agent/config/agent-config-files#acl_ttl). The implication of caching is that the cache TTL is an upper bound on the staleness of policy that is enforced. It is possible to set a zero TTL, but this has adverse performance impacts, as every request requires refreshing the policy via an RPC call. During an outage of the ACL datacenter, or loss of connectivity, the cache will be used as long as the TTL is valid, or the cache may be extended if the -[`acl_down_policy`](/docs/agent/options#acl_down_policy) is set accordingly. +[`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) is set accordingly. This configuration also allows the ACL system to fail open or closed. [ACL replication](#replication) is also available to allow for the full set of ACL tokens to be replicated for use during an outage. @@ -198,10 +198,10 @@ as to whether they are set on servers, clients, or both. 
| Configuration Option | Servers | Clients | Purpose | | --------------------------------------------------------------------- | ---------- | ---------- | ----------------------------------------------------------------------------------------- | -| [`acl_datacenter`](/docs/agent/options#acl_datacenter) | `REQUIRED` | `REQUIRED` | Master control that enables ACLs by defining the authoritative Consul datacenter for ACLs | -| [`acl_default_policy`](/docs/agent/options#acl_default_policy_legacy) | `OPTIONAL` | `N/A` | Determines allowlist or denylist mode | -| [`acl_down_policy`](/docs/agent/options#acl_down_policy_legacy) | `OPTIONAL` | `OPTIONAL` | Determines what to do when the ACL datacenter is offline | -| [`acl_ttl`](/docs/agent/options#acl_ttl_legacy) | `OPTIONAL` | `OPTIONAL` | Determines time-to-live for cached ACLs | +| [`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) | `REQUIRED` | `REQUIRED` | Master control that enables ACLs by defining the authoritative Consul datacenter for ACLs | +| [`acl_default_policy`](/docs/agent/config/agent-config-files#acl_default_policy_legacy) | `OPTIONAL` | `N/A` | Determines allowlist or denylist mode | +| [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy_legacy) | `OPTIONAL` | `OPTIONAL` | Determines what to do when the ACL datacenter is offline | +| [`acl_ttl`](/docs/agent/config/agent-config-files#acl_ttl_legacy) | `OPTIONAL` | `OPTIONAL` | Determines time-to-live for cached ACLs | There are some additional configuration items related to [ACL replication](#replication) and [Version 8 ACL support](#version_8_acls). These are discussed in those respective sections @@ -210,19 +210,19 @@ below. A number of special tokens can also be configured which allow for bootstrapping the ACL system, or accessing Consul in special situations: -| Special Token | Servers | Clients | Purpose | -| ----------------------------------------------------------------------------- | ---------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api-docs/agent) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | -| [`acl_agent_token`](/docs/agent/options#acl_agent_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | -| [`acl_master_token`](/docs/agent/options#acl_master_token_legacy) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see the [Bootstrapping ACLs](#bootstrapping-acls) section for more details | -| [`acl_token`](/docs/agent/options#acl_token_legacy) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery on agents | +| Special Token | Servers | Clients | Purpose | +| 
----------------------------------------------------------------------------------------------- | ---------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api-docs/agent) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | +| [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | +| [`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token_legacy) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see the [Bootstrapping ACLs](#bootstrapping-acls) section for more details | +| [`acl_token`](/docs/agent/config/agent-config-files#acl_token_legacy) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery on agents | In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the [/v1/agent/token API](/api-docs/agent#update-acl-tokens). #### ACL Agent Master Token -Since the [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token_legacy) is designed to be used when the Consul servers are not available, its policy is managed locally on the agent and does not need to have a token defined on the Consul servers via the ACL API. Once set, it implicitly has the following policy associated with it (the `node` policy was added in Consul 0.9.0): +Since the [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token_legacy) is designed to be used when the Consul servers are not available, its policy is managed locally on the agent and does not need to have a token defined on the Consul servers via the ACL API. Once set, it implicitly has the following policy associated with it (the `node` policy was added in Consul 0.9.0): ```hcl agent "" { @@ -238,7 +238,7 @@ In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via #### ACL Agent Token -The [`acl_agent_token`](/docs/agent/options#acl_agent_token) is a special token that is used for an agent's internal operations. It isn't used directly for any user-initiated operations like the [`acl_token`](/docs/agent/options#acl_token), though if the `acl_agent_token` isn't configured the `acl_token` will be used. The ACL agent token is used for the following operations by the agent: +The [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) is a special token that is used for an agent's internal operations. It isn't used directly for any user-initiated operations like the [`acl_token`](/docs/agent/config/agent-config-files#acl_token), though if the `acl_agent_token` isn't configured the `acl_token` will be used. 
The ACL agent token is used for the following operations by the agent: 1. Updating the agent's node entry using the [Catalog API](/api-docs/catalog), including updating its node metadata, tagged addresses, and network coordinates 2. Performing [anti-entropy](/docs/architecture/anti-entropy) syncing, in particular reading the node metadata and services registered with the catalog @@ -258,7 +258,7 @@ key "_rexec" { } ``` -The `service` policy needs `read` access for any services that can be registered on the agent. If [remote exec is disabled](/docs/agent/options#disable_remote_exec), the default, then the `key` policy can be omitted. +The `service` policy needs `read` access for any services that can be registered on the agent. If [remote exec is disabled](/docs/agent/config/agent-config-files#disable_remote_exec), the default, then the `key` policy can be omitted. In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the [/v1/agent/token API](/api-docs/agent#update-acl-tokens). @@ -294,12 +294,12 @@ The servers will need to be restarted to load the new configuration. Please take to start the servers one at a time, and ensure each server has joined and is operating correctly before starting another. -The [`acl_master_token`](/docs/agent/options#acl_master_token) will be created +The [`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) will be created as a "management" type token automatically. The -[`acl_master_token`](/docs/agent/options#acl_master_token) is only installed when +[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) is only installed when a server acquires cluster leadership. If you would like to install or change the -[`acl_master_token`](/docs/agent/options#acl_master_token), set the new value for -[`acl_master_token`](/docs/agent/options#acl_master_token) in the configuration +[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token), set the new value for +[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) in the configuration for all servers. Once this is done, restart the current leader to force a leader election. In Consul 0.9.1 and later, you can use the [/v1/acl/bootstrap API](/api-docs/acl#bootstrap-acls) @@ -332,7 +332,7 @@ servers related to permission denied errors: ``` These errors are because the agent doesn't yet have a properly configured -[`acl_agent_token`](/docs/agent/options#acl_agent_token) that it can use for its +[`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) that it can use for its own internal operations like updating its node information in the catalog and performing [anti-entropy](/docs/architecture/anti-entropy) syncing. We can create a token using the ACL API, and the ACL master token we set in the previous step: @@ -550,9 +550,9 @@ The next section shows an alternative to the anonymous token. #### Set Agent-Specific Default Tokens (Optional) -An alternative to the anonymous token is the [`acl_token`](/docs/agent/options#acl_token) +An alternative to the anonymous token is the [`acl_token`](/docs/agent/config/agent-config-files#acl_token) configuration item. When a request is made to a particular Consul agent and no token is -supplied, the [`acl_token`](/docs/agent/options#acl_token) will be used for the token, +supplied, the [`acl_token`](/docs/agent/config/agent-config-files#acl_token) will be used for the token, instead of being left empty which would normally invoke the anonymous token. 
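As an illustrative sketch only (the datacenter name, TTL, and token values below are placeholders, not values taken from this guide), a client agent that relies on an agent-specific default token might combine the legacy options like this:

```hcl
# Legacy ACL configuration for a client agent; token values are placeholders.
acl_datacenter  = "dc1"
acl_down_policy = "extend-cache"
acl_ttl         = "30s"

# Used for the agent's own catalog and anti-entropy operations.
acl_agent_token = "da666809-98ca-0e94-a99c-893c4bf5f9eb"

# Used as the default for client requests that do not supply a token.
acl_token = "fe3b8d40-0ee0-8783-6cc2-ab1aa9bb16c1"
```

With a configuration along these lines, requests to this agent that omit a token are resolved against `acl_token` rather than the anonymous token.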
In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the @@ -563,7 +563,7 @@ agent, if desired. For example, this allows more fine grained control of what DN given agent can service, or can give the agent read access to some key-value store prefixes by default. -If using [`acl_token`](/docs/agent/options#acl_token), then it's likely the anonymous +If using [`acl_token`](/docs/agent/config/agent-config-files#acl_token), then it's likely the anonymous token will have a more restrictive policy than shown in the examples here. #### Create Tokens for UI Use (Optional) @@ -727,7 +727,7 @@ starts with "bar". Since [Agent API](/api-docs/agent) utility operations may be required before an agent is joined to a cluster, or during an outage of the Consul servers or ACL datacenter, a special token may be -configured with [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token) to allow +configured with [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token) to allow write access to these operations even if no ACL resolution capability is available. #### Event Rules @@ -753,7 +753,7 @@ starts with "deploy". The [`consul exec`](/commands/exec) command uses events with the "\_rexec" prefix during operation, so to enable this feature in a Consul environment with ACLs enabled, you will need to give agents a token with access to this event prefix, in addition to configuring -[`disable_remote_exec`](/docs/agent/options#disable_remote_exec) to `false`. +[`disable_remote_exec`](/docs/agent/config/agent-config-files#disable_remote_exec) to `false`. #### Key/Value Rules @@ -861,13 +861,13 @@ the example above, the rules allow read-only access to any node name with the em read-write access to any node name that starts with "app", and deny all access to any node name that starts with "admin". -Agents need to be configured with an [`acl_agent_token`](/docs/agent/options#acl_agent_token) +Agents need to be configured with an [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) with at least "write" privileges to their own node name in order to register their information with the catalog, such as node metadata and tagged addresses. If this is configured incorrectly, the agent will print an error to the console when it tries to sync its state with the catalog. Consul's DNS interface is also affected by restrictions on node rules. If the -[`acl_token`](/docs/agent/options#acl_token) used by the agent does not have "read" access to a +[`acl_token`](/docs/agent/config/agent-config-files#acl_token) used by the agent does not have "read" access to a given node, then the DNS interface will return no records when queried for it. When reading from the catalog or retrieving information from the health endpoints, node rules are @@ -880,7 +880,7 @@ periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which may requir ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens to use for registration events: -1. Using the [acl_token](/docs/agent/options#acl_token) configuration +1. Using the [acl_token](/docs/agent/config/agent-config-files#acl_token) configuration directive. This allows a single token to be configured globally and used during all check registration operations. 2. Providing an ACL token with service and check definitions at @@ -891,7 +891,7 @@ to use for registration events: [HTTP API](/api) for operations that require them. 
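For instance, a hypothetical service definition that carries its own registration token (rather than relying on the agent's default) could look like the following sketch; the service name, port, check address, and token are placeholders:

```hcl
# Example service definition supplying its own ACL token at registration time.
service {
  name  = "web"
  port  = 8080
  token = "99cb750f-5633-4d7a-9575-14f6a0ef2e76"

  check {
    name     = "web-http"
    http     = "http://localhost:8080/health"
    interval = "10s"
    timeout  = "1s"
  }
}
```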
In addition to ACLs, in Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/options#_enable_script_checks) set to `true` in order to enable +[`enable_script_checks`](/docs/agent/config/agent-config-files#enable_script_checks) set to `true` in order to enable script checks. #### Operator Rules @@ -1025,7 +1025,7 @@ read-write access to any service name that starts with "app", and deny all acces starts with "admin". Consul's DNS interface is affected by restrictions on service rules. If the -[`acl_token`](/docs/agent/options#acl_token) used by the agent does not have "read" access to a +[`acl_token`](/docs/agent/config/agent-config-files#acl_token) used by the agent does not have "read" access to a given service, then the DNS interface will return no records when queried for it. When reading from the catalog or retrieving information from the health endpoints, service rules are @@ -1037,7 +1037,7 @@ performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which m ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens to use for registration events: -1. Using the [acl_token](/docs/agent/options#acl_token) configuration +1. Using the [acl_token](/docs/agent/config/agent-config-files#acl_token) configuration directive. This allows a single token to be configured globally and used during all service and check registration operations. 2. Providing an ACL token with service and check definitions at registration @@ -1048,12 +1048,12 @@ to use for registration events: API](/api) for operations that require them. **Note:** all tokens passed to an agent are persisted on local disk to allow recovery from restarts. See [`-data-dir` flag - documentation](/docs/agent/options#acl_token) for notes on securing + documentation](/docs/agent/config/agent-config-files#acl_token) for notes on securing access. In addition to ACLs, in Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/options#_enable_script_checks) or -[`enable_local_script_checks`](/docs/agent/options#_enable_local_script_checks) +[`enable_script_checks`](/docs/agent/config/agent-config-files#enable_script_checks) or +[`enable_local_script_checks`](/docs/agent/config/agent-config-files#enable_local_script_checks) set to `true` in order to enable script checks. #### Session Rules @@ -1084,20 +1084,20 @@ name that starts with "admin". #### Outages and ACL Replication ((#replication)) The Consul ACL system is designed with flexible rules to accommodate for an outage -of the [`acl_datacenter`](/docs/agent/options#acl_datacenter) or networking +of the [`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) or networking issues preventing access to it. In this case, it may be impossible for agents in non-authoritative datacenters to resolve tokens. Consul provides -a number of configurable [`acl_down_policy`](/docs/agent/options#acl_down_policy) +a number of configurable [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) choices to tune behavior. It is possible to deny or permit all actions or to ignore cache TTLs and enter a fail-safe mode. The default is to ignore cache TTLs for any previously resolved tokens and to deny any uncached tokens. Consul 0.7 added an ACL Replication capability that can allow non-authoritative datacenter agents to resolve even uncached tokens. 
This is enabled by setting an -[`acl_replication_token`](/docs/agent/options#acl_replication_token) in the +[`acl_replication_token`](/docs/agent/config/agent-config-files#acl_replication_token) in the configuration on the servers in the non-authoritative datacenters. In Consul 0.9.1 and later you can enable ACL replication using -[`enable_acl_replication`](/docs/agent/options#enable_acl_replication) and +[`enable_acl_replication`](/docs/agent/config/agent-config-files#enable_acl_replication) and then set the token later using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. This can also be used to rotate the token without restarting the Consul servers. @@ -1113,7 +1113,7 @@ every 30 seconds. Replicated changes are written at a rate that's throttled to a large set of ACLs. If there's a partition or other outage affecting the authoritative datacenter, -and the [`acl_down_policy`](/docs/agent/options#acl_down_policy) +and the [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) is set to "extend-cache", tokens will be resolved during the outage using the replicated set of ACLs. An [ACL replication status](/api-docs/acl#check-acl-replication) endpoint is available to monitor the health of the replication process. @@ -1123,7 +1123,7 @@ already cached and is expired while similar semantics than "extend-cache". It allows to avoid having issues when connectivity with the authoritative is not completely broken, but very slow. -Locally-resolved ACLs will be cached using the [`acl_ttl`](/docs/agent/options#acl_ttl) +Locally-resolved ACLs will be cached using the [`acl_ttl`](/docs/agent/config/agent-config-files#acl_ttl) setting of the non-authoritative datacenter, so these entries may persist in the cache for up to the TTL, even after the authoritative datacenter comes back online. @@ -1149,7 +1149,7 @@ Consul 0.8 added many more ACL policy types and brought ACL enforcement to Consu agents for the first time. To ease the transition to Consul 0.8 for existing ACL users, there's a configuration option to disable these new features. To disable support for these new ACLs, set the -[`acl_enforce_version_8`](/docs/agent/options#acl_enforce_version_8) configuration +[`acl_enforce_version_8`](/docs/agent/config/agent-config-files#acl_enforce_version_8) configuration option to `false` on Consul clients and servers. Here's a summary of the new features: @@ -1172,31 +1172,31 @@ Here's a summary of the new features: Two new configuration options are used once version 8 ACLs are enabled: -- [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token) is used as +- [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token) is used as a special access token that has `agent` ACL policy `write` privileges on each agent where it is configured, as well as `node` ACL policy `read` privileges for all nodes. This token should only be used by operators during outages when Consul servers aren't available to resolve ACL tokens. Applications should use regular ACL tokens during normal operation. -- [`acl_agent_token`](/docs/agent/options#acl_agent_token) is used internally by +- [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) is used internally by Consul agents to perform operations to the service catalog when registering themselves or sending network coordinates to the servers. 
This token must at least have `node` ACL policy `write` access to the node name it will register as in order to register any node-level information like metadata or tagged addresses. -Since clients now resolve ACLs locally, the [`acl_down_policy`](/docs/agent/options#acl_down_policy) +Since clients now resolve ACLs locally, the [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) now applies to Consul clients as well as Consul servers. This will determine what the client will do in the event that the servers are down. -Consul clients must have [`acl_datacenter`](/docs/agent/options#acl_datacenter) configured +Consul clients must have [`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) configured in order to enable agent-level ACL features. If this is set, the agents will contact the Consul servers to determine if ACLs are enabled at the cluster level. If they detect that ACLs are not enabled, they will check at most every 2 minutes to see if they have become enabled, and will start enforcing ACLs automatically. If an agent has an `acl_datacenter` defined, operators will -need to use the [`acl_agent_master_token`](/docs/agent/options#acl_agent_master_token) to +need to use the [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token) to perform agent-level operations if the Consul servers aren't present (such as for a manual join -to the cluster), unless the [`acl_down_policy`](/docs/agent/options#acl_down_policy) on the +to the cluster), unless the [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) on the agent is set to "allow". Non-server agents do not need to have the -[`acl_master_token`](/docs/agent/options#acl_master_token) configured; it is not +[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) configured; it is not used by agents in any way. diff --git a/website/content/docs/security/acl/acl-rules.mdx b/website/content/docs/security/acl/acl-rules.mdx index 28fe09a70..59c24b31d 100644 --- a/website/content/docs/security/acl/acl-rules.mdx +++ b/website/content/docs/security/acl/acl-rules.mdx @@ -100,7 +100,7 @@ partition_prefix "ex-" { ```json -({ +{ "partition": [ { "example": [ @@ -171,7 +171,7 @@ partition_prefix "ex-" { ] } ] -}) +} ``` @@ -227,7 +227,7 @@ with `bar`. Since [Agent API](/api-docs/agent) utility operations may be required before an agent is joined to a cluster, or during an outage of the Consul servers or ACL datacenter, a special token may be -configured with [`acl.tokens.agent_recovery`](/docs/agent/options#acl_tokens_agent_recovery) to allow +configured with [`acl.tokens.agent_recovery`](/docs/agent/config/agent-config-files#acl_tokens_agent_recovery) to allow write access to these operations even if no ACL resolution capability is available. ## Event Rules @@ -272,7 +272,7 @@ read-only access to any event, and firing of the "deploy" event. The [`consul exec`](/commands/exec) command uses events with the "\_rexec" prefix during operation, so to enable this feature in a Consul environment with ACLs enabled, you will need to give agents a token with access to this event prefix, in addition to configuring -[`disable_remote_exec`](/docs/agent/options#disable_remote_exec) to `false`. +[`disable_remote_exec`](/docs/agent/config/agent-config-files#disable_remote_exec) to `false`. 
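As a sketch of what that might look like under the rule syntax on this page (the exact policy an environment needs may differ), a token used by agents participating in `consul exec` could carry rules such as:

```hcl
# Illustrative rules for consul exec: write access to the "_rexec" event
# prefix and to the "_rexec" key prefix used for command input and output.
event_prefix "_rexec" {
  policy = "write"
}

key_prefix "_rexec" {
  policy = "write"
}
```

Remote exec must also be enabled on the agents themselves by setting `disable_remote_exec = false` in their configuration.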
## Key/Value Rules @@ -640,18 +640,18 @@ node "admin" { Agents must be configured with `write` privileges for their own node name so that the agent can register their node metadata, tagged addresses, and other information in the catalog. If configured incorrectly, the agent will print an error to the console when it tries to sync its state with the catalog. -Configure `write` access in the [`acl.tokens.agent`](/docs/agent/options#acl_tokens_agent) parameter. +Configure `write` access in the [`acl.tokens.agent`](/docs/agent/config/agent-config-files#acl_tokens_agent) parameter. -The [`acl.token.default`](/docs/agent/options#acl_tokens_default) used by the agent should have `read` access to a given node so that the DNS interface can be queried. +The [`acl.token.default`](/docs/agent/config/agent-config-files#acl_tokens_default) used by the agent should have `read` access to a given node so that the DNS interface can be queried. Node rules are used to filter query results when reading from the catalog or retrieving information from the health endpoints. This allows for configurations where a token has access to a given service name, but only on an allowed subset of node names. Consul agents check tokens locally when health checks are registered and when Consul performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs. These actions may required an ACL token to complete. Use the following methods to configure ACL tokens for registration events: -- Configure a global token in the [acl.tokens.default](/docs/agent/options#acl_tokens_default) parameter. +* Configure a global token in the [acl.tokens.default](/docs/agent/config/agent-config-files#acl_tokens_default) parameter. This allows a single token to be used during all check registration operations. -- Provide an ACL token with `service` and `check` definitions at registration time. +* Provide an ACL token with `service` and `check` definitions at registration time. This allows for greater flexibility and enables the use of multiple tokens on the same agent. Refer to the [services](/docs/agent/services) and [checks](/docs/agent/checks) documentation for examples. Tokens may also be passed to the [HTTP API](/api) for operations that require them. @@ -835,7 +835,7 @@ service "admin" { Consul's DNS interface is affected by restrictions on service rules. If the -[`acl.tokens.default`](/docs/agent/options#acl_tokens_default) used by the agent does not have `read` access to a +[`acl.tokens.default`](/docs/agent/config/agent-config-files#acl_tokens_default) used by the agent does not have `read` access to a given service, then the DNS interface will return no records when queried for it. When reading from the catalog or retrieving information from the health endpoints, service rules are @@ -847,7 +847,7 @@ performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which m ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens to use for registration events: -1. Using the [acl.tokens.default](/docs/agent/options#acl_tokens_default) configuration +1. Using the [acl.tokens.default](/docs/agent/config/agent-config-files#acl_tokens_default) configuration directive. This allows a single token to be configured globally and used during all service and check registration operations. 2. Providing an ACL token with service and check definitions at registration @@ -858,12 +858,12 @@ to use for registration events: API](/api) for operations that require them. 
**Note:** all tokens passed to an agent are persisted on local disk to allow recovery from restarts. See [`-data-dir` flag - documentation](/docs/agent/options#acl_token) for notes on securing + documentation](/docs/agent/config/agent-config-files#acl_token) for notes on securing access. In addition to ACLs, in Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/options#_enable_script_checks) or -[`enable_local_script_checks`](/docs/agent/options#_enable_local_script_checks) +[`enable_script_checks`](/docs/agent/config/agent-config-files#enable_script_checks) or +[`enable_local_script_checks`](/docs/agent/config/agent-config-files#enable_local_script_checks) set to `true` in order to enable script checks. Service rules are also used to grant read or write access to intentions. The diff --git a/website/content/docs/security/acl/auth-methods/index.mdx b/website/content/docs/security/acl/auth-methods/index.mdx index c1e29d504..253f038a0 100644 --- a/website/content/docs/security/acl/auth-methods/index.mdx +++ b/website/content/docs/security/acl/auth-methods/index.mdx @@ -60,7 +60,7 @@ using the API or command line before they can be used by applications. endpoints](/api-docs/acl/binding-rules). -> **Note** - To configure auth methods in any connected secondary datacenter, -[ACL token replication](/docs/agent/options#acl_enable_token_replication) +[ACL token replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be enabled. Auth methods require the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. diff --git a/website/content/docs/security/encryption.mdx b/website/content/docs/security/encryption.mdx index 5d33117cb..3f9c3659b 100644 --- a/website/content/docs/security/encryption.mdx +++ b/website/content/docs/security/encryption.mdx @@ -75,17 +75,17 @@ CA then signs keys for each of the agents, as in ~> Certificates need to be created with x509v3 extendedKeyUsage attributes for both clientAuth and serverAuth since Consul uses a single cert/key pair for both server and client communications. TLS can be used to verify the authenticity of the servers or verify the authenticity of clients. -These modes are controlled by the [`verify_outgoing`](/docs/agent/options#tls_internal_rpc_verify_outgoing), -[`verify_server_hostname`](/docs/agent/options#tls_internal_rpc_verify_server_hostname), -and [`verify_incoming`](/docs/agent/options#tls_internal_rpc_verify_incoming) options, respectively. +These modes are controlled by the [`verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing), +[`verify_server_hostname`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname), +and [`verify_incoming`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_incoming) options, respectively. -If [`verify_outgoing`](/docs/agent/options#tls_internal_rpc_verify_outgoing) is set, agents verify the +If [`verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing) is set, agents verify the authenticity of Consul for outgoing connections. Server nodes must present a certificate signed by a common certificate authority present on all agents, set via the agent's -[`ca_file`](/docs/agent/options#tls_internal_rpc_ca_file) and [`ca_path`](/docs/agent/options#tls_internal_rpc_ca_path) -options. 
All server nodes must have an appropriate key pair set using [`cert_file`](/docs/agent/options#tls_internal_rpc_cert_file) and [`key_file`](/docs/agent/options#tls_internal_rpc_key_file). +[`ca_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_ca_file) and [`ca_path`](/docs/agent/config/agent-config-files#tls_internal_rpc_ca_path) +options. All server nodes must have an appropriate key pair set using [`cert_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_cert_file) and [`key_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_key_file). -If [`verify_server_hostname`](/docs/agent/options#tls_internal_rpc_verify_server_hostname) is set, then +If [`verify_server_hostname`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname) is set, then outgoing connections perform hostname verification. All servers must have a certificate valid for `server..` or the client will reject the handshake. This is a new configuration as of 0.5.1, and it is used to prevent a compromised client from being @@ -93,12 +93,12 @@ able to restart in server mode and perform a MITM (Man-In-The-Middle) attack. Ne to true, and generate the proper certificates, but this is defaulted to false to avoid breaking existing deployments. -If [`verify_incoming`](/docs/agent/options#tls_internal_rpc_verify_incoming) is set, the servers verify the +If [`verify_incoming`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_incoming) is set, the servers verify the authenticity of all incoming connections. All clients must have a valid key pair set using -[`cert_file`](/docs/agent/options#tls_internal_rpc_cert_file) and -[`key_file`](/docs/agent/options#tls_internal_rpc_key_file). Servers will +[`cert_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_cert_file) and +[`key_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_key_file). Servers will also disallow any non-TLS connections. To force clients to use TLS, -[`verify_outgoing`](/docs/agent/options#tls_internal_rpc_verify_outgoing) must also be set. +[`verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing) must also be set. TLS is used to secure the RPC calls between agents, but gossip between nodes is done over UDP and is secured using a symmetric key. See above for enabling gossip encryption. diff --git a/website/content/docs/security/security-models/core.mdx b/website/content/docs/security/security-models/core.mdx index f408a57f6..59b9a0cdc 100644 --- a/website/content/docs/security/security-models/core.mdx +++ b/website/content/docs/security/security-models/core.mdx @@ -72,32 +72,32 @@ environment and adapt these configurations accordingly. - **mTLS** - Mutual authentication of both the TLS server and client x509 certificates prevents internal abuse through unauthorized access to Consul agents within the cluster. - - [`tls.defaults.verify_incoming`](/docs/agent/options#tls_defaults_verify_incoming) - By default this is false, and + - [`tls.defaults.verify_incoming`](/docs/agent/config/agent-config-files#tls_defaults_verify_incoming) - By default this is false, and should almost always be set to true to require TLS verification for incoming client connections. This applies to the internal RPC, HTTPS and gRPC APIs. 
- - [`tls.https.verify_incoming`](/docs/agent/options#tls_https_verify_incoming) - By default this is false, and should + - [`tls.https.verify_incoming`](/docs/agent/config/agent-config-files#tls_https_verify_incoming) - By default this is false, and should be set to true to require clients to provide a valid TLS certificate when the Consul HTTPS API is enabled. TLS for the API may be not be necessary if it is exclusively served over a loopback interface such as `localhost`. - - [`tls.internal_rpc.verify_incoming`](/docs/agent/options#tls_internal_rpc_verify_incoming) - By default this is false, + - [`tls.internal_rpc.verify_incoming`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_incoming) - By default this is false, and should almost always be set to true to require clients to provide a valid TLS certificate for Consul agent RPCs. - [`tls.grpc.verify_incoming`](/docs/agent/options#tls_grpc_verify_incoming) - By default this is false, and should be set to true to require clients to provide a valid TLS certificate when the Consul gRPC API is enabled. TLS for the API may be not be necessary if it is exclusively served over a loopback interface such as `localhost`. - - [`tls.internal_rpc.verify_outgoing`](/docs/agent/options#tls_internal_rpc_verify_outgoing) - By default this is false, + - [`tls.internal_rpc.verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing) - By default this is false, and should be set to true to require TLS for outgoing connections from server or client agents. Servers that specify `verify_outgoing = true` will always talk to other servers over TLS, but they still accept non-TLS connections to allow for a transition of all clients to TLS. Currently the only way to enforce that no client can communicate with a server unencrypted is to also enable `verify_incoming` which requires client certificates too. - - [`enable_agent_tls_for_checks`](/docs/agent/options#enable_agent_tls_for_checks) - By default this is false, and + - [`enable_agent_tls_for_checks`](/docs/agent/config/agent-config-files#enable_agent_tls_for_checks) - By default this is false, and should almost always be set to true to require mTLS to set up the client for HTTP or gRPC health checks. This was added in Consul 1.0.1. - - [`tls.internal_rpc.verify_server_hostname`](/docs/agent/options#tls_internal_rpc_verify_server_hostname) - By default + - [`tls.internal_rpc.verify_server_hostname`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname) - By default this is false, and should be set to true to require that the TLS certificate presented by the servers matches `server..` hostname for outgoing TLS connections. The default configuration does not verify the hostname of the certificate, only that it is signed by a trusted CA. This setting is critical to prevent a @@ -108,14 +108,14 @@ environment and adapt these configurations accordingly. [CVE-2018-19653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-19653) for more details. This is fixed in 1.4.1. - - [`auto_encrypt`](/docs/agent/options#auto_encrypt) - Enables automated TLS certificate distribution for client - agent RPC communication using the Connect CA. Using this configuration a [`ca_file`](/docs/agent/options#tls_defaults_ca_file) + - [`auto_encrypt`](/docs/agent/config/agent-config-files#auto_encrypt) - Enables automated TLS certificate distribution for client + agent RPC communication using the Connect CA. 
Using this configuration a [`ca_file`](/docs/agent/config/agent-config-files#tls_defaults_ca_file) and ACL token would still need to be distributed to client agents. - - [`allow_tls`](/docs/agent/options#allow_tls) - By default this is false, and should be set to true on server + - [`allow_tls`](/docs/agent/config/agent-config-files#allow_tls) - By default this is false, and should be set to true on server agents to allow certificates to be automatically generated and distributed from the Connect CA to client agents. - - [`tls`](/docs/agent/options#tls) - By default this is false, and should be set to true on client agents to + - [`tls`](/docs/agent/config/agent-config-files#tls) - By default this is false, and should be set to true on client agents to automatically request a client TLS certificate from the server's Connect CA. **Example Server Agent TLS Configuration** @@ -161,7 +161,7 @@ environment and adapt these configurations accordingly. } ``` - -> The client agent TLS configuration from above sets [`verify_incoming`](/docs/agent/options#tls_defaults_verify_incoming) + -> The client agent TLS configuration from above sets [`verify_incoming`](/docs/agent/config/agent-config-files#tls_defaults_verify_incoming) to false which assumes all incoming traffic is restricted to `localhost`. The primary benefit for this configuration would be to avoid provisioning client TLS certificates (in addition to ACL tokens) for all tools or applications using the local Consul agent. In this case ACLs should be enabled to provide authorization and only ACL tokens would @@ -169,7 +169,7 @@ environment and adapt these configurations accordingly. - **ACLs** - The access control list (ACL) system provides a security mechanism for Consul administrators to grant capabilities tied to an individual human, or machine operator identity. To ultimately secure the ACL system, - administrators should configure the [`default_policy`](/docs/agent/options#acl_default_policy) to "deny". + administrators should configure the [`default_policy`](/docs/agent/config/agent-config-files#acl_default_policy) to "deny". The [system](/docs/security/acl/acl-system) is comprised of five major components: @@ -196,10 +196,10 @@ environment and adapt these configurations accordingly. Two optional gossip encryption options enable Consul servers without gossip encryption to safely upgrade. After upgrading, the verification options should be enabled, or removed to set them to their default state: - - [`encrypt_verify_incoming`](/docs/agent/options#encrypt_verify_incoming) - By default this is true to enforce + - [`encrypt_verify_incoming`](/docs/agent/config/agent-config-files#encrypt_verify_incoming) - By default this is true to enforce encryption on _incoming_ gossip communications. - - [`encrypt_verify_outgoing`](/docs/agent/options#encrypt_verify_outgoing) - By default this is true to enforce + - [`encrypt_verify_outgoing`](/docs/agent/config/agent-config-files#encrypt_verify_outgoing) - By default this is true to enforce encryption on _outgoing_ gossip communications. - **Namespaces** - Read and write operations should be scoped to logical namespaces to @@ -240,16 +240,16 @@ environment and adapt these configurations accordingly. - **Linux Security Modules** - Use of security modules that can be directly integrated into operating systems such as AppArmor, SElinux, and Seccomp on Consul agent hosts. 
-- **Customize TLS Settings** - TLS settings such as the [available cipher suites](/docs/agent/options#tls_defaults_tls_cipher_suites), +- **Customize TLS Settings** - TLS settings such as the [available cipher suites](/docs/agent/config/agent-config-files#tls_defaults_tls_cipher_suites), should be tuned to fit the needs of your environment. - - [`tls_min_version`](/docs/agent/options#tls_defaults_tls_min_version) - Used to specify the minimum TLS version to use. + - [`tls_min_version`](/docs/agent/config/agent-config-files#tls_defaults_tls_min_version) - Used to specify the minimum TLS version to use. - - [`tls_cipher_suites`](/docs/agent/options#tls_defaults_tls_cipher_suites) - Used to specify which TLS cipher suites are allowed. + - [`tls_cipher_suites`](/docs/agent/config/agent-config-files#tls_defaults_tls_cipher_suites) - Used to specify which TLS cipher suites are allowed. - **Customize HTTP Response Headers** - Additional security headers, such as [`X-XSS-Protection`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection), can be - [configured](/docs/agent/options#response_headers) for HTTP API responses. + [configured](/docs/agent/config/agent-config-files#response_headers) for HTTP API responses. ```hcl http_config { @@ -262,28 +262,28 @@ environment and adapt these configurations accordingly. - **Customize Default Limits** - Consul has a number of builtin features with default connection limits that should be tuned to fit your environment. - - [`http_max_conns_per_client`](/docs/agent/options#http_max_conns_per_client) - Used to limit concurrent access from + - [`http_max_conns_per_client`](/docs/agent/config/agent-config-files#http_max_conns_per_client) - Used to limit concurrent access from a single client to the HTTP(S) endpoint on Consul agents. - - [`https_handshake_timeout`](/docs/agent/options#https_handshake_timeout) - Used to timeout TLS connection for the + - [`https_handshake_timeout`](/docs/agent/config/agent-config-files#https_handshake_timeout) - Used to timeout TLS connection for the HTTP(S) endpoint for Consul agents. - - [`rpc_handshake_timeout`](/docs/agent/options#rpc_handshake_timeout) - Used to timeout TLS connections for the RPC + - [`rpc_handshake_timeout`](/docs/agent/config/agent-config-files#rpc_handshake_timeout) - Used to timeout TLS connections for the RPC endpoint for Consul agents. - - [`rpc_max_conns_per_client`](/docs/agent/options#rpc_max_conns_per_client) - Used to limit concurrent access from a + - [`rpc_max_conns_per_client`](/docs/agent/config/agent-config-files#rpc_max_conns_per_client) - Used to limit concurrent access from a single client to the RPC endpoint on Consul agents. - - [`rpc_rate`](/docs/agent/options#rpc_rate) - Disabled by default, this is used to limit (requests/second) for client + - [`rpc_rate`](/docs/agent/config/agent-config-files#rpc_rate) - Disabled by default, this is used to limit (requests/second) for client agents making RPC calls to server agents. - - [`rpc_max_burst`](/docs/agent/options#rpc_max_burst) - Used as the token bucket size for client agents making RPC + - [`rpc_max_burst`](/docs/agent/config/agent-config-files#rpc_max_burst) - Used as the token bucket size for client agents making RPC calls to server agents. - - [`kv_max_value_size`](/docs/agent/options#kv_max_value_size) - Used to configure the max number of bytes in a + - [`kv_max_value_size`](/docs/agent/config/agent-config-files#kv_max_value_size) - Used to configure the max number of bytes in a key-value API request. 
- - [`txn_max_req_len`](/docs/agent/options#txn_max_req_len) - Used to configure the max number of bytes in a + - [`txn_max_req_len`](/docs/agent/config/agent-config-files#txn_max_req_len) - Used to configure the max number of bytes in a transaction API request. - **Secure UI Access** - Access to Consul’s builtin UI can be secured in various ways: @@ -303,7 +303,7 @@ environment and adapt these configurations accordingly. [Securing Consul with Access Control Lists (ACLs)](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production), which includes a section on [creating ACL tokens that provide a desired level UI access](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production#consul-ui-token). - - **Restrict HTTP Writes** - Using the [`allow_write_http_from`](/docs/agent/options#allow_write_http_from) + - **Restrict HTTP Writes** - Using the [`allow_write_http_from`](/docs/agent/config/agent-config-files#allow_write_http_from) configuration option to restrict write access for agent endpoints to hosts on the specified list of CIDRs. **Example Agent Configuration** diff --git a/website/content/docs/troubleshoot/common-errors.mdx b/website/content/docs/troubleshoot/common-errors.mdx index 450bdeaab..ca659196f 100644 --- a/website/content/docs/troubleshoot/common-errors.mdx +++ b/website/content/docs/troubleshoot/common-errors.mdx @@ -198,14 +198,14 @@ We recommend raising an issue with the CNI you're using to add support for `host and switching back to `hostPort` eventually. [troubleshooting]: https://learn.hashicorp.com/consul/day-2-operations/advanced-operations/troubleshooting -[node_name]: /docs/agent/options#node_name -[retry_join]: /docs/agent/options#retry-join +[node_name]: /docs/agent/config/agent-config-files#node_name +[retry_join]: /docs/agent/config/agent-config-cli#retry-join [license]: /commands/license [releases]: https://releases.hashicorp.com/consul/ [files]: https://easyengine.io/tutorials/linux/increase-open-files-limit [certificates]: https://learn.hashicorp.com/consul/advanced/day-1-operations/certificates [systemd]: https://learn.hashicorp.com/consul/advanced/day-1-operations/deployment-guide#configure-systemd [monitoring]: https://learn.hashicorp.com/consul/advanced/day-1-operations/monitoring -[bind]: /docs/agent/options#_bind +[bind]: /docs/agent/config/agent-config-cli#_bind [jq]: https://stedolan.github.io/jq/ [go-sockaddr]: https://godoc.org/github.com/hashicorp/go-sockaddr/template diff --git a/website/content/docs/troubleshoot/faq.mdx b/website/content/docs/troubleshoot/faq.mdx index 9734c3880..7e85ecd23 100644 --- a/website/content/docs/troubleshoot/faq.mdx +++ b/website/content/docs/troubleshoot/faq.mdx @@ -62,8 +62,8 @@ messages. This anonymous ID can be disabled. In fact, using the Checkpoint service is optional and can be disabled. -See [`disable_anonymous_signature`](/docs/agent/options#disable_anonymous_signature) -and [`disable_update_check`](/docs/agent/options#disable_update_check). +See [`disable_anonymous_signature`](/docs/agent/config/agent-config-files#disable_anonymous_signature) +and [`disable_update_check`](/docs/agent/config/agent-config-files#disable_update_check). ### Q: Does Consul rely on UDP Broadcast or Multicast? @@ -116,7 +116,7 @@ as well as race conditions between data updates and watch registrations. ### Q: What network ports does Consul use? 
-The [Ports Used](/docs/agent/options#ports) section of the Configuration +The [Ports Used](/docs/agent/config/agent-config-files#ports) section of the Configuration documentation lists all ports that Consul uses. ### Q: Does Consul require certain user process resource limits? @@ -143,7 +143,7 @@ of any excessive resource utilization before arbitrarily increasing the limits. The default recommended limit on a key's value size is 512KB. This is strictly enforced and an HTTP 413 status will be returned to any client that attempts to store more than that limit in a value. The limit can be increased by using the -[`kv_max_value_size`](/docs/agent/options#kv_max_value_size) configuration option. +[`kv_max_value_size`](/docs/agent/config/agent-config-files#kv_max_value_size) configuration option. It should be noted that the Consul key/value store is not designed to be used as a general purpose database. See diff --git a/website/content/docs/upgrading/instructions/general-process.mdx b/website/content/docs/upgrading/instructions/general-process.mdx index 51d8a3006..eeed1069a 100644 --- a/website/content/docs/upgrading/instructions/general-process.mdx +++ b/website/content/docs/upgrading/instructions/general-process.mdx @@ -74,7 +74,7 @@ this snapshot somewhere safe. More documentation on snapshot usage is available - [consul.io/commands/snapshot](/commands/snapshot) - -**2.** Temporarily modify your Consul configuration so that its [log_level](/docs/agent/options#_log_level) +**2.** Temporarily modify your Consul configuration so that its [log_level](/docs/agent/config/agent-config-cli#_log_level) is set to `debug`. After doing this, issue the following command on your servers to reload the configuration: @@ -183,7 +183,7 @@ then the following options for further assistance are available: When contacting Hashicorp Support, please include the following information in your ticket: - Consul version you were upgrading FROM and TO. -- [Debug level logs](/docs/agent/options#_log_level) from all servers in the cluster +- [Debug level logs](/docs/agent/config/agent-config-cli#_log_level) from all servers in the cluster that you are having trouble with. These should include logs from prior to the upgrade attempt up through the current time. If your logs were not set at debug level prior to the upgrade, please include those logs as well. Also, update your config to use debug logs, diff --git a/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx b/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx index 5b1d19fde..e1aa4a6b9 100644 --- a/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx +++ b/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx @@ -20,7 +20,7 @@ Here is some documentation that may prove useful for reference during this upgra - [ACL System in Legacy Mode](/docs/security/acl/acl-legacy) - You can find information about legacy configuration options and differences between modes here. -- [Configuration](/docs/agent/options) - You can find more details +- [Configuration](/docs/agent/config) - You can find more details around legacy ACL and new ACL configuration options here. Legacy ACL config options will be listed as deprecates as of 1.4.0. @@ -51,7 +51,7 @@ Looking through these changes prior to upgrading is highly recommended. Two very notable items are: - 1.6.2 introduced more strict JSON decoding. Invalid JSON that was previously ignored might result in errors now (e.g., `Connect: null` in service definitions). 
See [[GH#6680](https://github.com/hashicorp/consul/pull/6680)]. -- 1.6.3 introduced the [http_max_conns_per_client](/docs/agent/options#http_max_conns_per_client) limit. This defaults to 200. Prior to this, connections per client were unbounded. [[GH#7159](https://github.com/hashicorp/consul/issues/7159)] +- 1.6.3 introduced the [http_max_conns_per_client](/docs/agent/config/agent-config-files#http_max_conns_per_client) limit. This defaults to 200. Prior to this, connections per client were unbounded. [[GH#7159](https://github.com/hashicorp/consul/issues/7159)] ## Procedure @@ -202,8 +202,8 @@ update those now to avoid issues when moving to newer versions. These are the changes you will need to make: -- `acl_datacenter` is now named `primary_datacenter` (review our [docs](/docs/agent/options#primary_datacenter) for more info) -- `acl_default_policy`, `acl_down_policy`, `acl_ttl`, `acl_*_token` and `enable_acl_replication` options are now specified like this (review our [docs](/docs/agent/options#acl) for more info): +- `acl_datacenter` is now named `primary_datacenter` (review our [docs](/docs/agent/config/agent-config-files#primary_datacenter) for more info) +- `acl_default_policy`, `acl_down_policy`, `acl_ttl`, `acl_*_token` and `enable_acl_replication` options are now specified like this (review our [docs](/docs/agent/config/agent-config-files#acl) for more info): ```hcl acl { enabled = true/false diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index b3fb7477f..551979958 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -54,7 +54,7 @@ Due to this rename the following endpoint is also deprecated: These config keys are now deprecated: - `audit.sink[].name` - - [`dns_config.dns_prefer_namespace`](/docs/agent/options#dns_prefer_namespace) + - [`dns_config.dns_prefer_namespace`](/docs/agent/config/agent-config-files#dns_prefer_namespace) ### Deprecated CLI Subcommands @@ -119,8 +119,8 @@ have a license loaded from a configuration file or from their environment the sa agents must have the license specified. Both agents can still perform automatic retrieval of their license but with a few extra stipulations. First, license auto-retrieval now requires that ACLs are on and that the client or snapshot agent is configured with a valid ACL token. Secondly, client -agents require that either the [`start_join`](/docs/agent/options#start_join) or -[`retry_join`](/docs/agent/options#retry_join) configurations are set and that they resolve to server +agents require that either the [`start_join`](/docs/agent/config/agent-config-files#start_join) or +[`retry_join`](/docs/agent/config/agent-config-files#retry_join) configurations are set and that they resolve to server agents. If those stipulations are not met, attempting to start the client or snapshot agent will result in it immediately shutting down. @@ -214,7 +214,7 @@ to Consul 1.9.0. ### Changes to Configuration Defaults -The [`enable_central_service_config`](/docs/agent/options#enable_central_service_config) +The [`enable_central_service_config`](/docs/agent/config/agent-config-files#enable_central_service_config) configuration now defaults to `true`. 
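Operators who relied on the old default and do not want centralized service configuration can opt out by setting the option explicitly in the agent configuration, for example:

```hcl
# Explicitly restore the pre-1.9 behavior by disabling centralized
# service configuration.
enable_central_service_config = false
```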
### Changes to Intentions @@ -283,7 +283,7 @@ behavior: #### Removal of Deprecated Features -The [`acl_enforce_version_8`](/docs/agent/options#acl_enforce_version_8) +The [`acl_enforce_version_8`](/docs/agent/config/agent-config-files#acl_enforce_version_8) configuration has been removed (with version 8 ACL support by being on by default). @@ -326,7 +326,7 @@ to more precisely capture the view of _active_ blocking queries. ### Vault: default `http_max_conns_per_client` too low to run Vault properly -Consul 1.7.0 introduced [limiting of connections per client](/docs/agent/options#http_max_conns_per_client). The default value +Consul 1.7.0 introduced [limiting of connections per client](/docs/agent/config/agent-config-files#http_max_conns_per_client). The default value was 100, but Vault could use up to 128, which caused problems. If you want to use Vault with Consul 1.7.0, you should change the value to 200. Starting with Consul 1.7.1 this is the new default. @@ -334,7 +334,7 @@ Starting with Consul 1.7.1 this is the new default. ### Vault: default `http_max_conns_per_client` too low to run Vault properly -Consul 1.6.3 introduced [limiting of connections per client](/docs/agent/options#http_max_conns_per_client). The default value +Consul 1.6.3 introduced [limiting of connections per client](/docs/agent/config/agent-config-files#http_max_conns_per_client). The default value was 100, but Vault could use up to 128, which caused problems. If you want to use Vault with Consul 1.6.3 through 1.7.0, you should change the value to 200. Starting with Consul 1.7.1 this is the new default. @@ -373,7 +373,7 @@ datacenter". All configuration is backwards compatible and shouldn't need to change prior to upgrade although it's strongly recommended to migrate ACL configuration to the new syntax soon after upgrade. This includes moving to `primary_datacenter` rather than `acl_datacenter` and `acl_*` to the new [ACL -block](/docs/agent/options#acl). +block](/docs/agent/config/agent-config-files#acl). Datacenters can be upgraded in any order although secondaries will remain in [Legacy ACL mode](#legacy-acl-mode) until the primary datacenter is fully @@ -500,11 +500,11 @@ The following previously deprecated fields and config options have been removed: Consul 1.0.1 (and earlier versions of Consul) checked for raft snapshots every 5 seconds, and created new snapshots for every 8192 writes. These defaults cause constant disk IO in large busy clusters. Consul 1.1.0 increases these to larger values, -and makes them tunable via the [raft_snapshot_interval](/docs/agent/options#_raft_snapshot_interval) and -[raft_snapshot_threshold](/docs/agent/options#_raft_snapshot_threshold) parameters. We recommend +and makes them tunable via the [raft_snapshot_interval](/docs/agent/config/agent-config-files#_raft_snapshot_interval) and +[raft_snapshot_threshold](/docs/agent/config/agent-config-files#_raft_snapshot_threshold) parameters. We recommend keeping the new defaults. However, operators can go back to the old defaults by changing their -config if they prefer more frequent snapshots. See the documentation for [raft_snapshot_interval](/docs/agent/options#_raft_snapshot_interval) -and [raft_snapshot_threshold](/docs/agent/options#_raft_snapshot_threshold) to understand the trade-offs +config if they prefer more frequent snapshots. 
See the documentation for [raft_snapshot_interval](/docs/agent/config/agent-config-files#_raft_snapshot_interval) +and [raft_snapshot_threshold](/docs/agent/config/agent-config-files#_raft_snapshot_threshold) to understand the trade-offs when tuning these. ## Consul 1.0.7 @@ -532,7 +532,7 @@ before proceeding. #### Carefully Check and Remove Stale Servers During Rolling Upgrades Consul 1.0 (and earlier versions of Consul when running with [Raft protocol -3](/docs/agent/options#_raft_protocol) had an issue where performing +3](/docs/agent/config/agent-config-files#_raft_protocol) had an issue where performing rolling updates of Consul servers could result in an outage from old servers remaining in the cluster. [Autopilot](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations) @@ -553,7 +553,7 @@ Please be sure to read over all the details here before upgrading. #### Raft Protocol Now Defaults to 3 -The [`-raft-protocol`](/docs/agent/options#_raft_protocol) default has +The [`-raft-protocol`](/docs/agent/config/agent-config-cli#_raft_protocol) default has been changed from 2 to 3, enabling all [Autopilot](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations) features by default. @@ -582,7 +582,7 @@ servers, and then slowly stand down each of the older servers in a similar fashion. When using Raft protocol version 3, servers are identified by their -[`-node-id`](/docs/agent/options#_node_id) instead of their IP address +[`-node-id`](/docs/agent/config/agent-config-cli#_node_id) instead of their IP address when Consul makes changes to its internal Raft quorum configuration. This means that once a cluster has been upgraded with servers all running Raft protocol version 3, it will no longer allow servers running any older Raft protocol @@ -597,7 +597,7 @@ to map the server to its node ID in the Raft quorum configuration. As part of supporting the [HCL](https://github.com/hashicorp/hcl#syntax) format for Consul's config files, an `.hcl` or `.json` extension is required for all config files loaded by Consul, even when using the -[`-config-file`](/docs/agent/options#_config_file) argument to specify a +[`-config-file`](/docs/agent/config/agent-config-cli#_config_file) argument to specify a file directly. #### Service Definition Parameter Case changed @@ -614,40 +614,41 @@ upgrading. 
Here's the complete list of removed options and their equivalents: | Removed Option | Equivalent | | ------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `-dc` | [`-datacenter`](/docs/agent/options#_datacenter) | -| `-retry-join-azure-tag-name` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-azure-tag-value` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-ec2-region` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-ec2-tag-key` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-ec2-tag-value` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-gce-credentials-file` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-gce-project-name` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-gce-tag-name` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `-retry-join-gce-zone-pattern` | [`-retry-join`](/docs/agent/options#_retry_join) | +| `-dc` | [`-datacenter`](/docs/agent/config/agent-config-cli#_datacenter) | +| `-retry-join-azure-tag-name` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-azure-tag-value` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-ec2-region` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-ec2-tag-key` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-ec2-tag-value` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-gce-credentials-file` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-gce-project-name` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-gce-tag-name` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-retry-join-gce-zone-pattern` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | | `addresses.rpc` | None, the RPC server for CLI commands is no longer supported. | -| `advertise_addrs` | [`ports`](/docs/agent/options#ports) with [`advertise_addr`](/docs/agent/options#advertise_addr) and/or [`advertise_addr_wan`](/docs/agent/options#advertise_addr_wan) | -| `dogstatsd_addr` | [`telemetry.dogstatsd_addr`](/docs/agent/options#telemetry-dogstatsd_addr) | -| `dogstatsd_tags` | [`telemetry.dogstatsd_tags`](/docs/agent/options#telemetry-dogstatsd_tags) | -| `http_api_response_headers` | [`http_config.response_headers`](/docs/agent/options#response_headers) | +| `advertise_addrs` | [`ports`](/docs/agent/config/agent-config-files#ports) with [`advertise_addr`](/docs/agent/config/agent-config-files#advertise_addr) and/or [`advertise_addr_wan`](/docs/agent/config/agent-config-files#advertise_addr_wan) | +| `dogstatsd_addr` | [`telemetry.dogstatsd_addr`](/docs/agent/config/agent-config-files#telemetry-dogstatsd_addr) | +| `dogstatsd_tags` | [`telemetry.dogstatsd_tags`](/docs/agent/config/agent-config-files#telemetry-dogstatsd_tags) | +| `http_api_response_headers` | [`http_config.response_headers`](/docs/agent/config/agent-config-files#response_headers) | | `ports.rpc` | None, the RPC server for CLI commands is no longer supported. 
| -| `recursor` | [`recursors`](https://github.com/hashicorp/consul/blob/main/website/pages/docs/agent/options.mdx#recursors) | -| `retry_join_azure` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `retry_join_ec2` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `retry_join_gce` | [`-retry-join`](/docs/agent/options#_retry_join) | -| `statsd_addr` | [`telemetry.statsd_address`](https://github.com/hashicorp/consul/blob/main/website/pages/docs/agent/options.mdx#telemetry-statsd_address) | -| `statsite_addr` | [`telemetry.statsite_address`](https://github.com/hashicorp/consul/blob/main/website/pages/docs/agent/options.mdx#telemetry-statsite_address) | -| `statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/options#telemetry-metrics_prefix) | -| `telemetry.statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/options#telemetry-metrics_prefix) | -| (service definitions) `serviceid` | [`service_id`](/docs/discovery/services) | -| (service definitions) `dockercontainerid` | [`docker_container_id`](/docs/discovery/services) | -| (service definitions) `tlsskipverify` | [`tls_skip_verify`](/docs/discovery/services) | -| (service definitions) `deregistercriticalserviceafter` | [`deregister_critical_service_after`](/docs/discovery/services) | + +| `recursor` | [`recursors`](/docs/agent/config/agent-config-files#recursors) | +| `retry_join_azure` | [`retry-join`](/docs/agent/config/agent-config-files#retry_join) | +| `retry_join_ec2` | [`retry-join`](/docs/agent/config/agent-config-files#retry_join) | +| `retry_join_gce` | [`retry-join`](/docs/agent/config/agent-config-files#retry_join) | +| `statsd_addr` | [`telemetry.statsd_address`](/docs/agent/config/agent-config-files#telemetry-statsd_address) | +| `statsite_addr` | [`telemetry.statsite_address`](/docs/agent/config/agent-config-files#telemetry-statsite_address) | +| `statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/config/agent-config-files#telemetry-metrics_prefix) | +| `telemetry.statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/config/agent-config-files#telemetry-metrics_prefix) | +| (service definitions) `serviceid` | [`id`](/api-docs/agent/service#id) | +| (service definitions) `dockercontainerid` | [`docker_container_id`](/api-docs/agent/check#dockercontainerid) | +| (service definitions) `tlsskipverify` | [`tls_skip_verify`](/api-docs/agent/check#tlsskipverify) | +| (service definitions) `deregistercriticalserviceafter` | [`deregister_critical_service_after`](/api-docs/agent/check#deregistercriticalserviceafter) | #### `statsite_prefix` Renamed to `metrics_prefix` Since the `statsite_prefix` configuration option applied to all telemetry providers, `statsite_prefix` was renamed to -[`metrics_prefix`](/docs/agent/options#telemetry-metrics_prefix). +[`metrics_prefix`](/docs/agent/config/agent-config-files#telemetry-metrics_prefix). Configuration files will need to be updated when upgrading to this version of Consul. @@ -659,8 +660,8 @@ wrongly stated that you could configure both host and port. #### Escaping Behavior Changed for go-discover Configs -The format for [`-retry-join`](/docs/agent/options#retry-join) and -[`-retry-join-wan`](/docs/agent/options#retry-join-wan) values that use +The format for [`-retry-join`](/docs/agent/config/agent-config-cli#retry-join) and +[`-retry-join-wan`](/docs/agent/config/agent-config-cli#retry-join-wan) values that use [go-discover](https://github.com/hashicorp/go-discover) cloud auto joining has changed. 
Values in `key=val` sequences must no longer be URL encoded and can be provided as literals as long as they do not contain spaces, backslashes `\` or @@ -778,7 +779,7 @@ invalid health checks would get skipped. #### Script Checks Are Now Opt-In -A new [`enable_script_checks`](/docs/agent/options#_enable_script_checks) +A new [`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks) configuration option was added, and defaults to `false`, meaning that in order to allow an agent to run health checks that execute scripts, this will need to be configured and set to `true`. This provides a safer out-of-the-box @@ -800,10 +801,10 @@ for more information. Consul releases will no longer include a `web_ui.zip` file with the compiled web assets. These have been built in to the Consul binary since the 0.7.x -series and can be enabled with the [`-ui`](/docs/agent/options#_ui) +series and can be enabled with the [`-ui`](/docs/agent/config/agent-config-cli#_ui) configuration option. These built-in web assets have always been identical to the contents of the `web_ui.zip` file for each release. The -[`-ui-dir`](/docs/agent/options#_ui_dir) option is still available for +[`-ui-dir`](/docs/agent/config/agent-config-cli#_ui_dir) option is still available for hosting customized versions of the web assets, but the vast majority of Consul users can just use the built in web assets. @@ -835,12 +836,12 @@ to the following commands: #### Version 8 ACLs Are Now Opt-Out -The [`acl_enforce_version_8`](/docs/agent/options#acl_enforce_version_8) +The [`acl_enforce_version_8`](/docs/agent/config/agent-config-files#acl_enforce_version_8) configuration now defaults to `true` to enable full version 8 ACL support by default. If you are upgrading an existing cluster with ACLs enabled, you will need to set this to `false` during the upgrade on **both Consul agents and Consul servers**. Version 8 ACLs were also changed so that -[`acl_datacenter`](/docs/agent/options#acl_datacenter) must be set on +[`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) must be set on agents in order to enable the agent-side enforcement of ACLs. This makes for a smoother experience in clusters where ACLs aren't enabled at all, but where the agents would have to wait to contact a Consul server before learning that. @@ -848,14 +849,14 @@ agents would have to wait to contact a Consul server before learning that. #### Remote Exec Is Now Opt-In The default for -[`disable_remote_exec`](/docs/agent/options#disable_remote_exec) was +[`disable_remote_exec`](/docs/agent/config/agent-config-files#disable_remote_exec) was changed to "true", so now operators need to opt-in to having agents support running commands remotely via [`consul exec`](/commands/exec). #### Raft Protocol Version Compatibility When upgrading to Consul 0.8.0 from a version lower than 0.7.0, users will need -to set the [`-raft-protocol`](/docs/agent/options#_raft_protocol) option +to set the [`-raft-protocol`](/docs/agent/config/agent-config-cli#_raft_protocol) option to 1 in order to maintain backwards compatibility with the old servers during the upgrade. After the servers have been migrated to version 0.8.0, `-raft-protocol` can be moved up to 2 and the servers restarted to match the @@ -890,7 +891,7 @@ process to reap child processes. 
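For illustration, a minimal sketch of the staged Raft protocol change described above, written as the `raft_protocol` key in an HCL agent configuration file (treating that key as the config-file equivalent of `-raft-protocol` is an assumption of this sketch; the values simply mirror the steps in the text):

```hcl
# Sketch only: while servers older than 0.7.0 are still in the cluster,
# pin the newly upgraded servers to the old protocol.
raft_protocol = 1

# Once every server runs 0.8.0, raise the value and restart the servers:
# raft_protocol = 2
```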
#### DNS Resiliency Defaults -The default for [`max_stale`](/docs/agent/options#max_stale) has been +The default for [`max_stale`](/docs/agent/config/agent-config-files#max_stale) has been increased from 5 seconds to a near-indefinite threshold (10 years) to allow DNS queries to continue to be served in the event of a long outage with no leader. A new telemetry counter was added at `consul.dns.stale_queries` to track when @@ -904,7 +905,7 @@ to be aware of during an upgrade are categorized below. #### Performance Timing Defaults and Tuning Consul 0.7 now defaults the DNS configuration to allow for stale queries by -defaulting [`allow_stale`](/docs/agent/options#allow_stale) to true for +defaulting [`allow_stale`](/docs/agent/config/agent-config-files#allow_stale) to true for better utilization of available servers. If you want to retain the previous behavior, set the following configuration: @@ -917,7 +918,7 @@ behavior, set the following configuration: ``` Consul also 0.7 introduced support for tuning Raft performance using a new -[performance configuration block](/docs/agent/options#performance). Also, +[performance configuration block](/docs/agent/config/agent-config-files#performance). Also, the default Raft timing is set to a lower-performance mode suitable for [minimal Consul servers](/docs/install/performance#minimum). @@ -937,8 +938,8 @@ See the [Server Performance](/docs/install/performance) guide for more details. #### Leave-Related Configuration Defaults -The default behavior of [`leave_on_terminate`](/docs/agent/options#leave_on_terminate) -and [`skip_leave_on_interrupt`](/docs/agent/options#skip_leave_on_interrupt) +The default behavior of [`leave_on_terminate`](/docs/agent/config/agent-config-files#leave_on_terminate) +and [`skip_leave_on_interrupt`](/docs/agent/config/agent-config-files#skip_leave_on_interrupt) are now dependent on whether or not the agent is acting as a server or client: - For servers, `leave_on_terminate` defaults to "false" and `skip_leave_on_interrupt` @@ -977,7 +978,7 @@ using this feature. #### WAN Address Translation in HTTP Endpoints Consul version 0.7 added support for translating WAN addresses in certain -[HTTP endpoints](/docs/agent/options#translate_wan_addrs). The servers +[HTTP endpoints](/docs/agent/config/agent-config-files#translate_wan_addrs). The servers and the agents need to be running version 0.7 or later in order to use this feature. @@ -1059,7 +1060,7 @@ which require it: } When the DNS interface is queried, the agent's -[`acl_token`](/docs/agent/options#acl_token) is used, so be sure +[`acl_token`](/docs/agent/config/agent-config-files#acl_token) is used, so be sure that token has sufficient privileges to return the DNS records you expect to retrieve from it. diff --git a/website/content/partials/http_api_options_client.mdx b/website/content/partials/http_api_options_client.mdx index 890253a61..516579bba 100644 --- a/website/content/partials/http_api_options_client.mdx +++ b/website/content/partials/http_api_options_client.mdx @@ -20,7 +20,7 @@ used instead. The scheme can also be set to HTTPS by setting the environment variable `CONSUL_HTTP_SSL=true`. This may be a unix domain socket using `unix:///path/to/socket` if the [agent is configured to - listen](/docs/agent/options#addresses) that way. + listen](/docs/agent/config/agent-config-files#addresses) that way. - `-tls-server-name=` - The server name to use as the SNI host when connecting via TLS. 
This can also be specified via the `CONSUL_TLS_SERVER_NAME` From a00492e62232d2f95e4ee846971d2042af962abf Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 17:16:24 -0800 Subject: [PATCH 123/785] chore: rebase updates --- website/content/docs/agent/config/agent-config-cli.mdx | 2 +- website/content/docs/agent/config/agent-config-files.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/agent-config-cli.mdx index 82d660803..df4a096f6 100644 --- a/website/content/docs/agent/config/agent-config-cli.mdx +++ b/website/content/docs/agent/config/agent-config-cli.mdx @@ -7,7 +7,7 @@ description: >- # Command-line Options ((#commandline_options)) --> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](#config_key_reference) for equivalent HCL Keys. +-> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](/docs/agent/config/agent-config-files#config_key_reference) for equivalent HCL Keys. The options below are all specified on the command-line. diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx index 08a9f4398..78b918453 100644 --- a/website/content/docs/agent/config/agent-config-files.mdx +++ b/website/content/docs/agent/config/agent-config-files.mdx @@ -43,7 +43,7 @@ definitions support being updated during a reload. } ``` -# Configuration Key Reference +# Configuration Key Reference ((#config_key_reference)) -> **Note:** All the TTL values described below are parsed by Go's `time` package, and have the following [formatting specification](https://golang.org/pkg/time/#ParseDuration): "A From bea810cf6206609b4426b1a8da97221878e13796 Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 17:26:47 -0800 Subject: [PATCH 124/785] docs: pr feedback --- website/content/docs/agent/config/agent-config-cli.mdx | 6 +++++- website/content/docs/agent/config/agent-config-files.mdx | 6 ++++-- website/redirects.js | 4 ++++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/agent-config-cli.mdx index df4a096f6..0272dda5d 100644 --- a/website/content/docs/agent/config/agent-config-cli.mdx +++ b/website/content/docs/agent/config/agent-config-cli.mdx @@ -9,7 +9,11 @@ description: >- -> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](/docs/agent/config/agent-config-files#config_key_reference) for equivalent HCL Keys. -The options below are all specified on the command-line. +This topic describes the available command-line options for the Consul agent. + +## Usage + +See [Agent Overview](/docs/agent#starting-the-consul-agent) for examples of how to use flags with the `consul agent` CLI. ## Environment Variables diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/agent-config-files.mdx index 78b918453..9b21fbd14 100644 --- a/website/content/docs/agent/config/agent-config-files.mdx +++ b/website/content/docs/agent/config/agent-config-files.mdx @@ -7,8 +7,10 @@ description: >- # Configuration Files ((#configuration_files)) -In addition to the command-line options, configuration can be put into -files. 
This may be easier in certain situations, for example when Consul is +You can create one or more files to configure the Consul agent on startup. We recommend +grouping similar configurations into separate files, such as ACL parameters, to make it +easier to manage configuration changes. Using external files may be easier than +configuring agents on the command-line when Consul is being configured using a configuration management system. The configuration files are JSON formatted, making them easily readable diff --git a/website/redirects.js b/website/redirects.js index ea95d1803..777fa471d 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1262,6 +1262,10 @@ module.exports = [ { source: '/api/:path*', destination: '/api-docs/:path*', + }, + { + source: '/docs/agent/options', + destination: '/docs/agent/config', permanent: true, }, ] From b9ec2222db0060c10be367bea7f0744395a4919b Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 17:30:50 -0800 Subject: [PATCH 125/785] docs: simplify agent docs slugs --- .../network-areas/README.md | 2 +- docs/config/README.md | 4 +- docs/config/checklist-adding-config-fields.md | 4 +- docs/rpc/README.md | 2 +- website/content/api-docs/acl/index.mdx | 10 +- website/content/api-docs/agent/index.mdx | 18 +-- website/content/api-docs/config.mdx | 2 +- .../content/api-docs/connect/intentions.mdx | 2 +- website/content/api-docs/health.mdx | 2 +- website/content/api-docs/index.mdx | 4 +- .../content/api-docs/operator/autopilot.mdx | 2 +- .../content/commands/acl/set-agent-token.mdx | 2 +- website/content/commands/config/index.mdx | 2 +- website/content/commands/connect/envoy.mdx | 2 +- website/content/commands/debug.mdx | 2 +- website/content/commands/index.mdx | 2 +- .../content/commands/operator/autopilot.mdx | 4 +- website/content/commands/validate.mdx | 2 +- website/content/docs/agent/config-entries.mdx | 4 +- .../{agent-config-cli.mdx => cli-flags.mdx} | 12 +- ...gent-config-files.mdx => config-files.mdx} | 96 +++++++-------- website/content/docs/agent/config/index.mdx | 22 ++-- website/content/docs/agent/index.mdx | 28 ++--- website/content/docs/agent/telemetry.mdx | 20 ++-- website/content/docs/connect/ca/aws.mdx | 6 +- website/content/docs/connect/ca/consul.mdx | 2 +- website/content/docs/connect/ca/index.mdx | 2 +- website/content/docs/connect/ca/vault.mdx | 4 +- .../config-entries/exported-services.mdx | 2 +- .../docs/connect/config-entries/index.mdx | 2 +- .../connect/config-entries/proxy-defaults.mdx | 4 +- .../config-entries/service-defaults.mdx | 4 +- .../config-entries/service-intentions.mdx | 4 +- .../content/docs/connect/configuration.mdx | 14 +-- .../docs/connect/connect-internals.mdx | 4 +- .../docs/connect/gateways/ingress-gateway.mdx | 4 +- ...service-to-service-traffic-datacenters.mdx | 10 +- .../service-to-service-traffic-partitions.mdx | 4 +- .../wan-federation-via-mesh-gateways.mdx | 4 +- .../connect/gateways/terminating-gateway.mdx | 4 +- .../docs/connect/intentions-legacy.mdx | 2 +- website/content/docs/connect/intentions.mdx | 2 +- .../docs/connect/observability/index.mdx | 6 +- .../observability/ui-visualization.mdx | 14 +-- .../content/docs/connect/proxies/built-in.mdx | 4 +- .../content/docs/connect/proxies/envoy.mdx | 2 +- .../connect/proxies/managed-deprecated.mdx | 4 +- .../registration/service-registration.mdx | 4 +- .../connect/registration/sidecar-service.mdx | 4 +- website/content/docs/discovery/checks.mdx | 10 +- website/content/docs/discovery/dns.mdx | 24 ++-- 
.../content/docs/dynamic-app-config/kv.mdx | 4 +- .../docs/dynamic-app-config/watches.mdx | 2 +- .../content/docs/enterprise/audit-logging.mdx | 4 +- .../docs/enterprise/license/overview.mdx | 8 +- .../docs/enterprise/network-segments.mdx | 32 ++--- .../content/docs/enterprise/read-scale.mdx | 2 +- .../content/docs/install/bootstrapping.mdx | 14 +-- .../content/docs/install/cloud-auto-join.mdx | 4 +- .../content/docs/install/manual-bootstrap.mdx | 2 +- website/content/docs/install/performance.mdx | 20 ++-- website/content/docs/install/ports.mdx | 2 +- .../docs/k8s/connect/connect-ca-provider.mdx | 4 +- website/content/docs/k8s/helm.mdx | 12 +- .../servers-outside-kubernetes.mdx | 2 +- .../installation/multi-cluster/kubernetes.mdx | 4 +- .../multi-cluster/vms-and-kubernetes.mdx | 2 +- website/content/docs/nia/configuration.mdx | 6 +- .../docs/nia/installation/requirements.mdx | 2 +- .../docs/releases/release-notes/v1_9_0.mdx | 2 +- .../content/docs/security/acl/acl-legacy.mdx | 102 ++++++++-------- .../content/docs/security/acl/acl-rules.mdx | 20 ++-- .../docs/security/acl/auth-methods/index.mdx | 2 +- website/content/docs/security/encryption.mdx | 22 ++-- .../docs/security/security-models/core.mdx | 54 ++++----- .../docs/troubleshoot/common-errors.mdx | 6 +- website/content/docs/troubleshoot/faq.mdx | 8 +- .../instructions/general-process.mdx | 4 +- .../instructions/upgrade-to-1-6-x.mdx | 6 +- .../docs/upgrading/upgrade-specific.mdx | 110 +++++++++--------- .../partials/http_api_options_client.mdx | 2 +- website/data/docs-nav-data.json | 4 +- 82 files changed, 434 insertions(+), 434 deletions(-) rename website/content/docs/agent/config/{agent-config-cli.mdx => cli-flags.mdx} (96%) rename website/content/docs/agent/config/{agent-config-files.mdx => config-files.mdx} (97%) diff --git a/docs/cluster-federation/network-areas/README.md b/docs/cluster-federation/network-areas/README.md index efe10aa06..08a2014d5 100644 --- a/docs/cluster-federation/network-areas/README.md +++ b/docs/cluster-federation/network-areas/README.md @@ -35,7 +35,7 @@ Every Consul Enterprise server maintains a reconciliation routine where every 30 Joining a network area pool involves: 1. Setting memberlist and Serf configuration. - * Prior to Consul `v1.8.11` and `v1.9.5`, network areas were configured with memberlist's [DefaultWANConfig](https://github.com/hashicorp/memberlist/blob/838073fef1a4e1f6cb702a57a8075304098b1c31/config.go#L315). This was then updated to instead use the server's [gossip_wan](https://www.consul.io/docs/agent/config/agent-config-files#gossip_wan) configuration, which falls back to the DefaultWANConfig if it was not specified. + * Prior to Consul `v1.8.11` and `v1.9.5`, network areas were configured with memberlist's [DefaultWANConfig](https://github.com/hashicorp/memberlist/blob/838073fef1a4e1f6cb702a57a8075304098b1c31/config.go#L315). This was then updated to instead use the server's [gossip_wan](https://www.consul.io/docs/agent/config/config-files#gossip_wan) configuration, which falls back to the DefaultWANConfig if it was not specified. * As of Consul `v1.8.11`/`v1.9.5` it is not possible to tune gossip communication on a per-area basis. 2. Update the server's gossip network, which keeps track of network areas that the server is a part of. This gossip network is also used to dispatch incoming **gossip** connections to handlers for the appropriate area. 
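As a hedged illustration of the `gossip_wan` server setting referenced above, the sketch below shows the shape of that configuration block in HCL; the specific values are examples only, not recommendations:

```hcl
# Server-side WAN gossip tuning (illustrative values). On v1.8.11/v1.9.5
# and later, network areas fall back to this gossip_wan tuning as
# described above.
gossip_wan {
  gossip_nodes    = 4
  gossip_interval = "500ms"
  probe_interval  = "5s"
  probe_timeout   = "3s"
  retransmit_mult = 4
  suspicion_mult  = 6
}
```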
diff --git a/docs/config/README.md b/docs/config/README.md index fe38011b9..98cd35ee8 100644 --- a/docs/config/README.md +++ b/docs/config/README.md @@ -13,7 +13,7 @@ See also the [checklist for adding a new field] to the configuration. [Agent Configuration]: https://www.consul.io/docs/agent/config [checklist for adding a new field]: ./checklist-adding-config-fields.md [Auto-Config]: #auto-config -[Config Entries]: https://www.consul.io/docs/agent/config/agent-config-files#config_entries +[Config Entries]: https://www.consul.io/docs/agent/config/config-files#config_entries [Services]: https://www.consul.io/docs/discovery/services [Checks]: https://www.consul.io/docs/discovery/checks @@ -53,6 +53,6 @@ implemented in a couple packages. * the server RPC endpoint is in [agent/consul/auto_config_endpoint.go] * the client that receives and applies the config is implemented in [agent/auto-config] -[auto_config]: https://www.consul.io/docs/agent/config/agent-config-files#auto_config +[auto_config]: https://www.consul.io/docs/agent/config/config-files#auto_config [agent/consul/auto_config_endpoint.go]: https://github.com/hashicorp/consul/blob/main/agent/consul/auto_config_endpoint.go [agent/auto-config]: https://github.com/hashicorp/consul/tree/main/agent/auto-config diff --git a/docs/config/checklist-adding-config-fields.md b/docs/config/checklist-adding-config-fields.md index 66807c072..e17139411 100644 --- a/docs/config/checklist-adding-config-fields.md +++ b/docs/config/checklist-adding-config-fields.md @@ -55,7 +55,7 @@ There are four specific cases covered with increasing complexity: state for client agent's RPC client. - [ ] Add a test to `agent/agent_test.go` similar to others with prefix `TestAgent_reloadConfig*`. - - [ ] Add documentation to `website/content/docs/agent/config/agent-config-files.mdx`. + - [ ] Add documentation to `website/content/docs/agent/config/config-files.mdx`. Done! You can now use your new field in a client agent by accessing `s.agent.Config.`. @@ -75,7 +75,7 @@ If the config field also needs a CLI flag, then follow these steps. `TestLoad_IntegrationWithFlags` in `agent/config/runtime_test.go` to ensure setting the flag works. - [ ] Add flag (as well as config file) documentation to - `website/source/docs/agent/config/agent-config-files.mdx` and `website/source/docs/agent/config/agent-config-cli.mdx`. + `website/source/docs/agent/config/config-files.mdx` and `website/source/docs/agent/config/cli-flags.mdx`. ## Adding a Simple Config Field for Servers Consul servers have a separate Config struct for reasons. Note that Consul diff --git a/docs/rpc/README.md b/docs/rpc/README.md index 7d8a75cad..4e9adbacf 100644 --- a/docs/rpc/README.md +++ b/docs/rpc/README.md @@ -22,7 +22,7 @@ The "RPC Server" accepts requests to the [server port] and routes the requests b configuration of the Server and the the first byte in the request. The diagram below shows all the possible routing flows. 
-[server port]: https://www.consul.io/docs/agent/config/agent-config-files#server_rpc_port +[server port]: https://www.consul.io/docs/agent/config/config-files#server_rpc_port ![RPC Routing](./routing.svg) diff --git a/website/content/api-docs/acl/index.mdx b/website/content/api-docs/acl/index.mdx index 6a39bef52..a1c40b06f 100644 --- a/website/content/api-docs/acl/index.mdx +++ b/website/content/api-docs/acl/index.mdx @@ -16,7 +16,7 @@ the [ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-s ## Bootstrap ACLs This endpoint does a special one-time bootstrap of the ACL system, making the first -management token if the [`acl.tokens.initial_management`](/docs/agent/config/agent-config-files#acl_tokens_initial_management) +management token if the [`acl.tokens.initial_management`](/docs/agent/config/config-files#acl_tokens_initial_management) configuration entry is not specified in the Consul server configuration and if the cluster has not been bootstrapped previously. This is available in Consul 0.9.1 and later, and requires all Consul servers to be upgraded in order to operate. @@ -143,7 +143,7 @@ $ curl \ - `SourceDatacenter` - The authoritative ACL datacenter that ACLs are being replicated from and will match the - [`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter) configuration. + [`primary_datacenter`](/docs/agent/config/config-files#primary_datacenter) configuration. - `ReplicationType` - The type of replication that is currently in use. @@ -295,7 +295,7 @@ The table below shows this endpoint's support for -> **Note** - To use the login process to create tokens in any connected secondary datacenter, [ACL -replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be +replication](/docs/agent/config/config-files#acl_enable_token_replication) must be enabled. Login requires the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. @@ -425,7 +425,7 @@ The table below shows this endpoint's support for -> **Note** - To use the login process to create tokens in any connected secondary datacenter, [ACL -replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be +replication](/docs/agent/config/config-files#acl_enable_token_replication) must be enabled. Login requires the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. @@ -505,7 +505,7 @@ The table below shows this endpoint's support for -> **Note** - To use the login process to create tokens in any connected secondary datacenter, [ACL -replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) must be +replication](/docs/agent/config/config-files#acl_enable_token_replication) must be enabled. Login requires the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. diff --git a/website/content/api-docs/agent/index.mdx b/website/content/api-docs/agent/index.mdx index 54360d802..e5f91b468 100644 --- a/website/content/api-docs/agent/index.mdx +++ b/website/content/api-docs/agent/index.mdx @@ -440,7 +440,7 @@ page. In order to enable [Prometheus](https://prometheus.io/) support, you need to use the configuration directive -[`prometheus_retention_time`](/docs/agent/config/agent-config-files#telemetry-prometheus_retention_time). 
+[`prometheus_retention_time`](/docs/agent/config/config-files#telemetry-prometheus_retention_time). Since Consul 1.7.2 this endpoint will also automatically switch output format if the request contains an `Accept` header with a compatible MIME type such as @@ -745,7 +745,7 @@ $ curl \ This endpoint updates the ACL tokens currently in use by the agent. It can be used to introduce ACL tokens to the agent for the first time, or to update tokens that were initially loaded from the agent's configuration. Tokens will be persisted -only if the [`acl.enable_token_persistence`](/docs/agent/config/agent-config-files#acl_enable_token_persistence) +only if the [`acl.enable_token_persistence`](/docs/agent/config/config-files#acl_enable_token_persistence) configuration is `true`. When not being persisted, they will need to be reset if the agent is restarted. @@ -757,9 +757,9 @@ is restarted. | `PUT` | `/agent/token/replication` | `application/json` | The paths above correspond to the token names as found in the agent configuration: -[`default`](/docs/agent/config/agent-config-files#acl_tokens_default), [`agent`](/docs/agent/config/agent-config-files#acl_tokens_agent), -[`agent_recovery`](/docs/agent/config/agent-config-files#acl_tokens_agent_recovery), and -[`replication`](/docs/agent/config/agent-config-files#acl_tokens_replication). +[`default`](/docs/agent/config/config-files#acl_tokens_default), [`agent`](/docs/agent/config/config-files#acl_tokens_agent), +[`agent_recovery`](/docs/agent/config/config-files#acl_tokens_agent_recovery), and +[`replication`](/docs/agent/config/config-files#acl_tokens_replication). -> **Deprecation Note:** The following paths were deprecated in version 1.11 @@ -768,7 +768,7 @@ The paths above correspond to the token names as found in the agent configuratio | `PUT` | `/agent/token/agent_master` | `application/json` | The paths above correspond to the token names as found in the agent configuration: -[`agent_master`](/docs/agent/config/agent-config-files#acl_tokens_agent_master). +[`agent_master`](/docs/agent/config/config-files#acl_tokens_agent_master). -> **Deprecation Note:** The following paths were deprecated in version 1.4.3 @@ -780,9 +780,9 @@ The paths above correspond to the token names as found in the agent configuratio | `PUT` | `/agent/token/acl_replication_token` | `application/json` | The paths above correspond to the token names as found in the agent configuration: -[`acl_token`](/docs/agent/config/agent-config-files#acl_token_legacy), [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token_legacy), -[`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token_legacy), and -[`acl_replication_token`](/docs/agent/config/agent-config-files#acl_replication_token_legacy). +[`acl_token`](/docs/agent/config/config-files#acl_token_legacy), [`acl_agent_token`](/docs/agent/config/config-files#acl_agent_token_legacy), +[`acl_agent_master_token`](/docs/agent/config/config-files#acl_agent_master_token_legacy), and +[`acl_replication_token`](/docs/agent/config/config-files#acl_replication_token_legacy). 
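For illustration, a hedged sketch of seeding these tokens from an agent configuration file using the non-deprecated `acl.tokens` names; every token value below is a placeholder:

```hcl
acl {
  enabled = true
  # Keep tokens that are later set via the /agent/token endpoints across restarts.
  enable_token_persistence = true
  tokens {
    default        = "<token used when a request supplies no token>"      # placeholder
    agent          = "<token used for the agent's own operations>"        # placeholder
    agent_recovery = "<token accepted by this agent for recovery access>" # placeholder
    replication    = "<token used for ACL replication>"                   # placeholder
  }
}
```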
The table below shows this endpoint's support for [blocking queries](/api-docs/features/blocking), diff --git a/website/content/api-docs/config.mdx b/website/content/api-docs/config.mdx index 1ba83ed68..9a28880c6 100644 --- a/website/content/api-docs/config.mdx +++ b/website/content/api-docs/config.mdx @@ -10,7 +10,7 @@ description: |- The `/config` endpoints create, update, delete and query central configuration entries registered with Consul. See the -[agent configuration](/docs/agent/config/agent-config-files#enable_central_service_config) +[agent configuration](/docs/agent/config/config-files#enable_central_service_config) for more information on how to enable this functionality for centrally configuring services and [configuration entries docs](/docs/agent/config-entries) for a description of the configuration entries content. diff --git a/website/content/api-docs/connect/intentions.mdx b/website/content/api-docs/connect/intentions.mdx index ddbeab469..328c989b8 100644 --- a/website/content/api-docs/connect/intentions.mdx +++ b/website/content/api-docs/connect/intentions.mdx @@ -96,7 +96,7 @@ The corresponding CLI command is [`consul intention create -replace`](/commands/ evaluation. As with L4 intentions, traffic that fails to match any of the provided permissions in this intention will be subject to the default intention behavior is defined by the default [ACL - policy](/docs/agent/config/agent-config-files#acl_default_policy). + policy](/docs/agent/config/config-files#acl_default_policy). This should be omitted for an L4 intention as it is mutually exclusive with the `Action` field. diff --git a/website/content/api-docs/health.mdx b/website/content/api-docs/health.mdx index 4d60aafe9..1c68f60b0 100644 --- a/website/content/api-docs/health.mdx +++ b/website/content/api-docs/health.mdx @@ -241,7 +241,7 @@ The table below shows this endpoint's support for ascending order based on the estimated round trip time from that node. Passing `?near=_agent` will use the agent's node for the sort. This is specified as part of the URL as a query parameter. **Note** that using `near` will ignore - [`use_streaming_backend`](/docs/agent/config/agent-config-files#use_streaming_backend) and always + [`use_streaming_backend`](/docs/agent/config/config-files#use_streaming_backend) and always use blocking queries, because the data required to sort the results is not available to the streaming backend. diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx index 40dc5a79b..23b3456eb 100644 --- a/website/content/api-docs/index.mdx +++ b/website/content/api-docs/index.mdx @@ -83,7 +83,7 @@ $ curl \ Consul 0.7 added the ability to translate addresses in HTTP response based on the configuration setting for -[`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs). In order +[`translate_wan_addrs`](/docs/agent/config/config-files#translate_wan_addrs). In order to allow clients to know if address translation is in effect, the `X-Consul-Translate-Addresses` header will be added if translation is enabled, and will have a value of `true`. If translation is not enabled then this header @@ -94,7 +94,7 @@ will not be present. All API responses for Consul versions after 1.9 will include an HTTP response header `X-Consul-Default-ACL-Policy` set to either "allow" or "deny" which mirrors the current value of the agent's -[`acl.default_policy`](/docs/agent/config/agent-config-files#acl_default_policy) option. 
+[`acl.default_policy`](/docs/agent/config/config-files#acl_default_policy) option. This is also the default [intention](/docs/connect/intentions) enforcement action if no intention matches. diff --git a/website/content/api-docs/operator/autopilot.mdx b/website/content/api-docs/operator/autopilot.mdx index f4a2e25c8..ef88056e5 100644 --- a/website/content/api-docs/operator/autopilot.mdx +++ b/website/content/api-docs/operator/autopilot.mdx @@ -69,7 +69,7 @@ $ curl \ ``` For more information about the Autopilot configuration options, see the -[agent configuration section](/docs/agent/config/agent-config-files#autopilot). +[agent configuration section](/docs/agent/config/config-files#autopilot). ## Update Configuration diff --git a/website/content/commands/acl/set-agent-token.mdx b/website/content/commands/acl/set-agent-token.mdx index dfe9f4567..138da2600 100644 --- a/website/content/commands/acl/set-agent-token.mdx +++ b/website/content/commands/acl/set-agent-token.mdx @@ -12,7 +12,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/agent/token/:type](/api-docs/agent This command updates the ACL tokens currently in use by the agent. It can be used to introduce ACL tokens to the agent for the first time, or to update tokens that were initially loaded from the agent's configuration. Tokens are not persisted unless -[`acl.enable_token_persistence`](/docs/agent/config/agent-config-files#acl_enable_token_persistence) +[`acl.enable_token_persistence`](/docs/agent/config/config-files#acl_enable_token_persistence) is `true`, so tokens will need to be updated again if that option is `false` and the agent is restarted. diff --git a/website/content/commands/config/index.mdx b/website/content/commands/config/index.mdx index 5b22e5115..891e1ffce 100644 --- a/website/content/commands/config/index.mdx +++ b/website/content/commands/config/index.mdx @@ -10,7 +10,7 @@ Command: `consul config` The `config` command is used to interact with Consul's central configuration system. It exposes commands for creating, updating, reading, and deleting different kinds of config entries. See the -[agent configuration](/docs/agent/config/agent-config-files#enable_central_service_config) +[agent configuration](/docs/agent/config/config-files#enable_central_service_config) for more information on how to enable this functionality for centrally configuring services and [configuration entries docs](/docs/agent/config-entries) for a description of the configuration entries content. diff --git a/website/content/commands/connect/envoy.mdx b/website/content/commands/connect/envoy.mdx index 68d81d47a..d453a7cbf 100644 --- a/website/content/commands/connect/envoy.mdx +++ b/website/content/commands/connect/envoy.mdx @@ -42,7 +42,7 @@ proxy configuration needed. be used instead. The scheme can also be set to HTTPS by setting the environment variable CONSUL_HTTP_SSL=true. This may be a unix domain socket using `unix:///path/to/socket` if the [agent is configured to - listen](/docs/agent/config/agent-config-files#addresses) that way. + listen](/docs/agent/config/config-files#addresses) that way. -> **Note:** gRPC uses the same TLS settings as the HTTPS API. If HTTPS is enabled then gRPC will require HTTPS diff --git a/website/content/commands/debug.mdx b/website/content/commands/debug.mdx index 23ff3d0da..0e7b16e82 100644 --- a/website/content/commands/debug.mdx +++ b/website/content/commands/debug.mdx @@ -78,7 +78,7 @@ information when `debug` is running. By default, it captures all information. 
| `members` | A list of all the WAN and LAN members in the cluster. | | `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. | | `logs` | `DEBUG` level logs for the target agent, captured for the duration. | -| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/docs/agent/config/agent-config-files#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. | +| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. | ## Examples diff --git a/website/content/commands/index.mdx b/website/content/commands/index.mdx index 4ff1b0735..47c4085b6 100644 --- a/website/content/commands/index.mdx +++ b/website/content/commands/index.mdx @@ -235,7 +235,7 @@ CONSUL_TLS_SERVER_NAME=consulserver.domain Like [`CONSUL_HTTP_ADDR`](#consul_http_addr) but configures the address the local agent is listening for gRPC requests. Currently gRPC is only used for integrating [Envoy proxy](/docs/connect/proxies/envoy) and must be [enabled -explicitly](/docs/agent/config/agent-config-files#grpc_port) in agent configuration. +explicitly](/docs/agent/config/config-files#grpc_port) in agent configuration. ``` CONSUL_GRPC_ADDR=127.0.0.1:8502 diff --git a/website/content/commands/operator/autopilot.mdx b/website/content/commands/operator/autopilot.mdx index 2a0a2c29a..f0b16a5de 100644 --- a/website/content/commands/operator/autopilot.mdx +++ b/website/content/commands/operator/autopilot.mdx @@ -104,10 +104,10 @@ Usage: `consul operator autopilot set-config [options]` - `-disable-upgrade-migration` - Controls whether Consul will avoid promoting new servers until it can perform a migration. Must be one of `[true|false]`. -- `-redundancy-zone-tag` - Controls the [`-node-meta`](/docs/agent/config/agent-config-cli#_node_meta) +- `-redundancy-zone-tag` - Controls the [`-node-meta`](/docs/agent/config/cli-flags#_node_meta) key name used for separating servers into different redundancy zones. -- `-upgrade-version-tag` - Controls the [`-node-meta`](/docs/agent/config/agent-config-cli#_node_meta) +- `-upgrade-version-tag` - Controls the [`-node-meta`](/docs/agent/config/cli-flags#_node_meta) tag to use for version info when performing upgrade migrations. If left blank, the Consul version will be used. ### Command Output diff --git a/website/content/commands/validate.mdx b/website/content/commands/validate.mdx index ade133928..abdb0657f 100644 --- a/website/content/commands/validate.mdx +++ b/website/content/commands/validate.mdx @@ -21,7 +21,7 @@ to be loaded by the agent. This command cannot operate on partial configuration fragments since those won't pass the full agent validation. For more information on the format of Consul's configuration files, read the -consul agent [Configuration Files](/docs/agent/config/agent-config-files) +consul agent [Configuration Files](/docs/agent/config/config-files) section. 
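For illustration, a sketch of the kind of HCL file this validation reads; the values are placeholders, and a real file may need additional settings before it passes as a complete configuration:

```hcl
# server.hcl - illustrative, minimal server configuration (placeholder values)
datacenter       = "dc1"
data_dir         = "/opt/consul"
bind_addr        = "10.0.0.10"
server           = true
bootstrap_expect = 3
```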
## Usage diff --git a/website/content/docs/agent/config-entries.mdx b/website/content/docs/agent/config-entries.mdx index 48a996e05..09098e96c 100644 --- a/website/content/docs/agent/config-entries.mdx +++ b/website/content/docs/agent/config-entries.mdx @@ -58,7 +58,7 @@ Configuration entries outside of Kubernetes should be managed with the Consul [CLI](/commands/config) or [API](/api-docs/config). Additionally, as a convenience for initial cluster bootstrapping, configuration entries can be specified in all of the Consul servers's -[configuration files](/docs/agent/config/agent-config-files#config_entries_bootstrap) +[configuration files](/docs/agent/config/config-files#config_entries_bootstrap) ### Managing Configuration Entries with the CLI @@ -162,7 +162,7 @@ api ### Bootstrapping From A Configuration File Configuration entries can be bootstrapped by adding them [inline to each Consul -server's configuration file](/docs/agent/config/agent-config-files#config_entries). When a +server's configuration file](/docs/agent/config/config-files#config_entries). When a server gains leadership, it will attempt to initialize the configuration entries. If a configuration entry does not already exist outside of the servers configuration, then it will create it. If a configuration entry does exist, that diff --git a/website/content/docs/agent/config/agent-config-cli.mdx b/website/content/docs/agent/config/cli-flags.mdx similarity index 96% rename from website/content/docs/agent/config/agent-config-cli.mdx rename to website/content/docs/agent/config/cli-flags.mdx index 0272dda5d..3b97a76b0 100644 --- a/website/content/docs/agent/config/agent-config-cli.mdx +++ b/website/content/docs/agent/config/cli-flags.mdx @@ -7,7 +7,7 @@ description: >- # Command-line Options ((#commandline_options)) --> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](/docs/agent/config/agent-config-files#config_key_reference) for equivalent HCL Keys. +-> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](/docs/agent/config/config-files#config_key_reference) for equivalent HCL Keys. This topic describes the available command-line options for the Consul agent. @@ -30,7 +30,7 @@ information. limit of 4k for maximum size of checks, this is a positive value. By limiting this size, it allows to put less pressure on Consul servers when many checks are having a very large output in their checks. In order to completely disable check output - capture, it is possible to use [`discard_check_output`](/docs/agent/config/agent-config-files#discard_check_output). + capture, it is possible to use [`discard_check_output`](/docs/agent/config/config-files#discard_check_output). - `-client` ((#\_client)) - The address to which Consul will bind client interfaces, including the HTTP and DNS servers. By default, this is "127.0.0.1", @@ -126,7 +126,7 @@ information. - `-raft-protocol` ((#\_raft_protocol)) - This controls the internal version of the Raft consensus protocol used for server communications. This must be set - to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](/docs/agent/config/agent-config-files#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/docs/upgrade-specific#raft-protocol-version-compatibility) for more details. 
+ to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](/docs/agent/config/config-files#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/docs/upgrade-specific#raft-protocol-version-compatibility) for more details. - `-segment` ((#\_segment)) - This flag is used to set the name of the network segment the agent belongs to. An agent can only join and @@ -150,13 +150,13 @@ information. - `-advertise-wan` ((#\_advertise-wan)) - The advertise WAN address is used to change the address that we advertise to server nodes joining through the WAN. - This can also be set on client agents when used in combination with the [`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address + This can also be set on client agents when used in combination with the [`translate_wan_addrs`](/docs/agent/config/config-files#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address is advertised. However, in some cases all members of all datacenters cannot be on the same physical or virtual network, especially on hybrid setups mixing cloud and private datacenters. This flag enables server nodes gossiping through the public network for the WAN while using private VLANs for gossiping to each other and their client agents, and it allows client agents to be reached at this address when being - accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] + accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](/docs/agent/config/config-files#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr] template that is resolved at runtime. ## Address Bind Options @@ -294,7 +294,7 @@ information. If Consul is running on the non-default Serf LAN port, the port must be specified in the join address, or configured as the agent's default Serf port - using the [`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port) configuration option or + using the [`ports.serf_lan`](/docs/agent/config/config-files#serf_lan_port) configuration option or [`-serf-lan-port`](#_serf_lan_port) command line flag. If using network segments (Enterprise), see [additional documentation on diff --git a/website/content/docs/agent/config/agent-config-files.mdx b/website/content/docs/agent/config/config-files.mdx similarity index 97% rename from website/content/docs/agent/config/agent-config-files.mdx rename to website/content/docs/agent/config/config-files.mdx index 9b21fbd14..88cdc1780 100644 --- a/website/content/docs/agent/config/agent-config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -82,7 +82,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `https` - The HTTPS API. Defaults to `client_addr` - `grpc` - The gRPC API. 
Defaults to `client_addr` -- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](/docs/agent/config/agent-config-cli#_alt_domain) +- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](/docs/agent/config/cli-flags#_alt_domain) - `audit` - Added in Consul 1.8, the audit object allow users to enable auditing and configure a sink and filters for their audit logs. For more information, review the [audit log tutorial](https://learn.hashicorp.com/tutorials/consul/audit-logging). @@ -209,7 +209,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `server_addresses` (Defaults to `[]`) This specifies the addresses of servers in the local datacenter to use for the initial RPC. These addresses support - [Cloud Auto-Joining](/docs/agent/config/agent-config-cli#cloud-auto-joining) and can optionally include a port to + [Cloud Auto-Joining](/docs/agent/config/cli-flags#cloud-auto-joining) and can optionally include a port to use when making the outbound connection. If not port is provided the `server_port` will be used. @@ -312,7 +312,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `partition` - The admin partition name the client is requesting. -- `bind_addr` Equivalent to the [`-bind` command-line flag](/docs/agent/config/agent-config-cli#_bind). +- `bind_addr` Equivalent to the [`-bind` command-line flag](/docs/agent/config/cli-flags#_bind). This parameter can be set to a go-sockaddr template that resolves to a single address. Special characters such as backslashes `\` or double quotes `"` @@ -360,7 +360,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr changes state, the new state and associated output is synchronized immediately. To disable this behavior, set the value to "0s". -- `client_addr` Equivalent to the [`-client` command-line flag](/docs/agent/config/agent-config-cli#_client). +- `client_addr` Equivalent to the [`-client` command-line flag](/docs/agent/config/cli-flags#_client). - `config_entries` This object allows setting options for centralized config entries. @@ -374,9 +374,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr See the [configuration entry docs](/docs/agent/config-entries) for more details about the contents of each entry. -- `datacenter` Equivalent to the [`-datacenter` command-line flag](/docs/agent/config/agent-config-cli#_datacenter). +- `datacenter` Equivalent to the [`-datacenter` command-line flag](/docs/agent/config/cli-flags#_datacenter). -- `data_dir` Equivalent to the [`-data-dir` command-line flag](/docs/agent/config/agent-config-cli#_data_dir). +- `data_dir` Equivalent to the [`-data-dir` command-line flag](/docs/agent/config/cli-flags#_data_dir). - `disable_anonymous_signature` Disables providing an anonymous signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check). @@ -406,17 +406,17 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `enable_debug` When set, enables some additional debugging features. Currently, this is only used to access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`. -- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/docs/agent/config/agent-config-cli#_enable_script_checks). 
+- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/docs/agent/config/cli-flags#_enable_script_checks). ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information. ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. Refer to the following article for additional guidance: [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) for more details. -- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](/docs/agent/config/agent-config-cli#_enable_local_script_checks). +- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](/docs/agent/config/cli-flags#_enable_local_script_checks). - `disable_keyring_file` - Equivalent to the - [`-disable-keyring-file` command-line flag](/docs/agent/config/agent-config-cli#_disable_keyring_file). + [`-disable-keyring-file` command-line flag](/docs/agent/config/cli-flags#_disable_keyring_file). - `disable_coordinates` - Disables sending of [network coordinates](/docs/architecture/coordinates). When network coordinates are disabled the `near` query param will not work to sort the nodes, @@ -476,9 +476,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/api/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence. - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/api/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. -- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](/docs/agent/config/agent-config-cli#_default_query_time). +- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](/docs/agent/config/cli-flags#_default_query_time). -- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](/docs/agent/config/agent-config-cli#_max_query_time). +- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](/docs/agent/config/cli-flags#_max_query_time). - `partition` - This flag is used to set the name of the admin partition the agent belongs to. 
An agent can only join @@ -559,7 +559,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr enforcement of ACLs. - `primary_gateways` Equivalent to the [`-primary-gateway` - command-line flag](/docs/agent/config/agent-config-cli#_primary_gateway). Takes a list of addresses to use as the + command-line flag](/docs/agent/config/cli-flags#_primary_gateway). Takes a list of addresses to use as the mesh gateways for the primary datacenter when authoritative replicated catalog data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval) until at least one primary mesh gateway is discovered. This was added in Consul @@ -570,7 +570,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr 30s. This was added in Consul 1.8.0. - `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line - flag](/docs/agent/config/agent-config-cli#_protocol). + flag](/docs/agent/config/cli-flags#_protocol). - `reap` This controls Consul's automatic reaping of child processes, which is useful if Consul is running as PID 1 in a Docker container. If this isn't @@ -612,7 +612,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr servers in all federated datacenters must have this enabled before any client can use [`use_streaming_backend`](#use_streaming_backend). -- `segment` - Equivalent to the [`-segment` command-line flag](/docs/agent/config/agent-config-cli#_segment). +- `segment` - Equivalent to the [`-segment` command-line flag](/docs/agent/config/cli-flags#_segment). ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option. @@ -635,11 +635,11 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr port. Only valid if the segment's bind address differs from the [`-bind`](#_bind) address. Defaults to false. -- `server` Equivalent to the [`-server` command-line flag](/docs/agent/config/agent-config-cli#_server). +- `server` Equivalent to the [`-server` command-line flag](/docs/agent/config/cli-flags#_server). - `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.** -- `read_replica` - Equivalent to the [`-read-replica` command-line flag](/docs/agent/config/agent-config-cli#_read_replica). +- `read_replica` - Equivalent to the [`-read-replica` command-line flag](/docs/agent/config/cli-flags#_read_replica). - `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTL's shorter than the specified limit. It is recommended to keep this limit at or above @@ -907,7 +907,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility. If there's a partition or other outage affecting the authoritative datacenter, and the - [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) is set to "extend-cache", tokens not + [`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy) is set to "extend-cache", tokens not in the cache can be resolved during the outage using the replicated set of ACLs. - `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. 
See @@ -937,13 +937,13 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Advertise Address Parameters -- `advertise_addr` Equivalent to the [`-advertise` command-line flag](/docs/agent/config/agent-config-cli#_advertise). +- `advertise_addr` Equivalent to the [`-advertise` command-line flag](/docs/agent/config/cli-flags#_advertise). - `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. - `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. -- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](/docs/agent/config/agent-config-cli#_advertise-wan). +- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](/docs/agent/config/cli-flags#_advertise-wan). - `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. @@ -956,9 +956,9 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Bootstrap Parameters -- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](/docs/agent/config/agent-config-cli#_bootstrap). +- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](/docs/agent/config/cli-flags#_bootstrap). -- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](/docs/agent/config/agent-config-cli#_bootstrap_expect). +- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](/docs/agent/config/cli-flags#_bootstrap_expect). ## Connect Parameters @@ -1230,7 +1230,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr versions and will assume the label is the datacenter. See: [this section](/docs/discovery/dns#namespaced-services) for more details. -- `domain` Equivalent to the [`-domain` command-line flag](/docs/agent/config/agent-config-cli#_domain). +- `domain` Equivalent to the [`-domain` command-line flag](/docs/agent/config/cli-flags#_domain). ## Encryption Parameters @@ -1273,7 +1273,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr the certificates requested by `auto_encrypt` from the server have these `ip_san` set as IP SAN. -- `encrypt` Equivalent to the [`-encrypt` command-line flag](/docs/agent/config/agent-config-cli#_encrypt). +- `encrypt` Equivalent to the [`-encrypt` command-line flag](/docs/agent/config/cli-flags#_encrypt). - `encrypt_verify_incoming` - This is an optional parameter that can be used to disable enforcing encryption for incoming gossip @@ -1375,15 +1375,15 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Join Parameters -- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](/docs/agent/config/agent-config-cli#_rejoin). +- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](/docs/agent/config/cli-flags#_rejoin). -- `retry_join` - Equivalent to the [`-retry-join`](/docs/agent/config/agent-config-cli#retry-join) command-line flag. 
+- `retry_join` - Equivalent to the [`-retry-join`](/docs/agent/config/cli-flags#retry-join) command-line flag. -- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](/docs/agent/config/agent-config-cli#_retry_interval). +- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](/docs/agent/config/cli-flags#_retry_interval). -- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](/docs/agent/config/agent-config-cli#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. +- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](/docs/agent/config/cli-flags#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works. -- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](/docs/agent/config/agent-config-cli#_retry_interval_wan). +- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](/docs/agent/config/cli-flags#_retry_interval_wan). - `start_join` An array of strings specifying addresses of nodes to [`-join`](#_join) upon startup. Note that using @@ -1395,19 +1395,19 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Log Parameters -- `log_file` Equivalent to the [`-log-file` command-line flag](/docs/agent/config/agent-config-cli#_log_file). +- `log_file` Equivalent to the [`-log-file` command-line flag](/docs/agent/config/cli-flags#_log_file). -- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](/docs/agent/config/agent-config-cli#_log_rotate_duration). +- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](/docs/agent/config/cli-flags#_log_rotate_duration). -- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](/docs/agent/config/agent-config-cli#_log_rotate_bytes). +- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](/docs/agent/config/cli-flags#_log_rotate_bytes). -- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](/docs/agent/config/agent-config-cli#_log_rotate_max_files). +- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](/docs/agent/config/cli-flags#_log_rotate_max_files). -- `log_level` Equivalent to the [`-log-level` command-line flag](/docs/agent/config/agent-config-cli#_log_level). +- `log_level` Equivalent to the [`-log-level` command-line flag](/docs/agent/config/cli-flags#_log_level). -- `log_json` Equivalent to the [`-log-json` command-line flag](/docs/agent/config/agent-config-cli#_log_json). +- `log_json` Equivalent to the [`-log-json` command-line flag](/docs/agent/config/cli-flags#_log_json). -- `enable_syslog` Equivalent to the [`-syslog` command-line flag](/docs/agent/config/agent-config-cli#_syslog). +- `enable_syslog` Equivalent to the [`-syslog` command-line flag](/docs/agent/config/cli-flags#_syslog). - `syslog_facility` When [`enable_syslog`](#enable_syslog) is provided, this controls to which facility messages are sent. By default, `LOCAL0` @@ -1415,11 +1415,11 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Node Parameters -- `node_id` Equivalent to the [`-node-id` command-line flag](/docs/agent/config/agent-config-cli#_node_id). 
+- `node_id` Equivalent to the [`-node-id` command-line flag](/docs/agent/config/cli-flags#_node_id). -- `node_name` Equivalent to the [`-node` command-line flag](/docs/agent/config/agent-config-cli#_node). +- `node_name` Equivalent to the [`-node` command-line flag](/docs/agent/config/cli-flags#_node). -- `node_meta` Available in Consul 0.7.3 and later, This object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](/docs/agent/config/agent-config-cli#_node_meta) for more information. +- `node_meta` Available in Consul 0.7.3 and later, This object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](/docs/agent/config/cli-flags#_node_meta) for more information. ```json { @@ -1429,7 +1429,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr } ``` -- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](/docs/agent/config/agent-config-cli#_disable_host_node_id). +- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](/docs/agent/config/cli-flags#_disable_host_node_id). ## Raft Parameters @@ -1444,7 +1444,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `raft_protocol` ((#raft_protocol)) Equivalent to the [`-raft-protocol` - command-line flag](/docs/agent/config/agent-config-cli#_raft_protocol). + command-line flag](/docs/agent/config/cli-flags#_raft_protocol). - `raft_snapshot_threshold` ((#\_raft_snapshot_threshold)) This controls the minimum number of raft commit entries between snapshots that are saved to @@ -1493,14 +1493,14 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## Serf Parameters -- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](/docs/agent/config/agent-config-cli#_serf_lan_bind). +- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](/docs/agent/config/cli-flags#_serf_lan_bind). This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port). -- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](/docs/agent/config/agent-config-cli#_serf_lan_allowed_cidrs). +- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](/docs/agent/config/cli-flags#_serf_lan_allowed_cidrs). -- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](/docs/agent/config/agent-config-cli#_serf_wan_bind). +- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](/docs/agent/config/cli-flags#_serf_wan_bind). -- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](/docs/agent/config/agent-config-cli#_serf_wan_allowed_cidrs). +- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](/docs/agent/config/cli-flags#_serf_wan_allowed_cidrs). ## Telemetry Paramters @@ -1639,7 +1639,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr ## UI Parameters - `ui` - **This field is deprecated in Consul 1.9.0. 
See the [`ui_config.enabled`](#ui_config_enabled) field instead.** - Equivalent to the [`-ui`](/docs/agent/config/agent-config-cli#_ui) command-line flag. + Equivalent to the [`-ui`](/docs/agent/config/cli-flags#_ui) command-line flag. - `ui_config` - This object allows a number of sub-keys to be set which controls the display or features available in the UI. Configuring the UI with this @@ -1650,12 +1650,12 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `enabled` ((#ui_config_enabled)) - This enables the service of the web UI from this agent. Boolean value, defaults to false. In `-dev` mode this defaults to true. Replaces `ui` from before 1.9.0. Equivalent to the - [`-ui`](/docs/agent/config/agent-config-cli#_ui) command-line flag. + [`-ui`](/docs/agent/config/cli-flags#_ui) command-line flag. - `dir` ((#ui_config_dir)) - This specifies that the web UI should be served from an external dir rather than the build in one. This allows for customization or development. Replaces `ui_dir` from before 1.9.0. - Equivalent to the [`-ui-dir`](/docs/agent/config/agent-config-cli#_ui_dir) command-line flag. + Equivalent to the [`-ui-dir`](/docs/agent/config/cli-flags#_ui_dir) command-line flag. - `content_path` ((#ui_config_content_path)) - This specifies the HTTP path that the web UI should be served from. Defaults to `/ui/`. Equivalent to the @@ -1764,7 +1764,7 @@ bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr - `{{Datacenter}}` - Replaced with the current service's datacenter. - `ui_dir` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.dir`](#ui_config_dir) field instead.** - Equivalent to the [`-ui-dir`](/docs/agent/config/agent-config-cli#_ui_dir) command-line + Equivalent to the [`-ui-dir`](/docs/agent/config/cli-flags#_ui_dir) command-line flag. This configuration key is not required as of Consul version 0.7.0 and later. Specifying this configuration key will enable the web UI. There is no need to specify both ui-dir and ui. Specifying both will result in an error. diff --git a/website/content/docs/agent/config/index.mdx b/website/content/docs/agent/config/index.mdx index f9daad3b9..0284b708b 100644 --- a/website/content/docs/agent/config/index.mdx +++ b/website/content/docs/agent/config/index.mdx @@ -16,8 +16,8 @@ descriptions. Configuration precedence is evaluated in the following order: -1. [Command line arguments](/docs/agent/config/agent-config-cli) -2. [Configuration files](/docs/agent/config/agent-config-files) +1. [Command line arguments](/docs/agent/config/cli-flags) +2. [Configuration files](/docs/agent/config/config-files) When loading configuration, the Consul agent loads the configuration from files and directories in lexical order. For example, configuration file @@ -57,22 +57,22 @@ Reloading configuration does not reload all configuration items. 
The items which are reloaded include: - ACL Tokens -- [Configuration Entry Bootstrap](/docs/agent/config/agent-config-files#config_entries_bootstrap) +- [Configuration Entry Bootstrap](/docs/agent/config/config-files#config_entries_bootstrap) - Checks -- [Discard Check Output](/docs/agent/config/agent-config-files#discard_check_output) +- [Discard Check Output](/docs/agent/config/config-files#discard_check_output) - HTTP Client Address - Log level -- [Metric Prefix Filter](/docs/agent/config/agent-config-files#telemetry-prefix_filter) -- [Node Metadata](/docs/agent/config/agent-config-files#node_meta) +- [Metric Prefix Filter](/docs/agent/config/config-files#telemetry-prefix_filter) +- [Node Metadata](/docs/agent/config/config-files#node_meta) - Some Raft options (since Consul 1.10.0) - - [`raft_snapshot_threshold`](/docs/agent/config/agent-config-files#_raft_snapshot_threshold) - - [`raft_snapshot_interval`](/docs/agent/config/agent-config-files#_raft_snapshot_interval) - - [`raft_trailing_logs`](/docs/agent/config/agent-config-files#_raft_trailing_logs) + - [`raft_snapshot_threshold`](/docs/agent/config/config-files#_raft_snapshot_threshold) + - [`raft_snapshot_interval`](/docs/agent/config/config-files#_raft_snapshot_interval) + - [`raft_trailing_logs`](/docs/agent/config/config-files#_raft_trailing_logs) - These can be important in certain outage situations so being able to control them without a restart provides a recovery path that doesn't involve downtime. They generally shouldn't be changed otherwise. -- [RPC rate limiting](/docs/agent/config/agent-config-files#limits) -- [HTTP Maximum Connections per Client](/docs/agent/config/agent-config-files#http_max_conns_per_client) +- [RPC rate limiting](/docs/agent/config/config-files#limits) +- [HTTP Maximum Connections per Client](/docs/agent/config/config-files#http_max_conns_per_client) - Services - TLS Configuration - Please be aware that this is currently limited to reload a configuration that is already TLS enabled. You cannot enable or disable TLS only with reloading. diff --git a/website/content/docs/agent/index.mdx b/website/content/docs/agent/index.mdx index 2d963d13f..0a4ba38aa 100644 --- a/website/content/docs/agent/index.mdx +++ b/website/content/docs/agent/index.mdx @@ -127,16 +127,16 @@ $ consul agent -data-dir=/tmp/consul - **Node name**: This is a unique name for the agent. By default, this is the hostname of the machine, but you may customize it using the - [`-node`](/docs/agent/config/agent-config-cli#_node) flag. + [`-node`](/docs/agent/config/cli-flags#_node) flag. - **Datacenter**: This is the datacenter in which the agent is configured to - run. For single-DC configurations, the agent will default to `dc1`, but you can configure which datacenter the agent reports to with the [`-datacenter`](/docs/agent/config/agent-config-cli#_datacenter) flag. + run. For single-DC configurations, the agent will default to `dc1`, but you can configure which datacenter the agent reports to with the [`-datacenter`](/docs/agent/config/cli-flags#_datacenter) flag. Consul has first-class support for multiple datacenters, but configuring each node to report its datacenter improves agent efficiency. - **Server**: This indicates whether the agent is running in server or client mode. Running an agent in server mode requires additional overhead. This is because they participate in the consensus quorum, store cluster state, and handle queries. 
A server may also be - in ["bootstrap"](/docs/agent/config/agent-config-cli#_bootstrap_expect) mode, which enables the server to elect itselft as the Raft leader. Multiple servers cannot be in bootstrap mode because it would put the cluster in an inconsistent state. + in ["bootstrap"](/docs/agent/config/cli-flags#_bootstrap_expect) mode, which enables the server to elect itself as the Raft leader. Multiple servers cannot be in bootstrap mode because it would put the cluster in an inconsistent state. - **Client Addr**: This is the address used for client interfaces to the agent. This includes the ports for the HTTP and DNS interfaces. By default, this @@ -179,18 +179,18 @@ The following settings are commonly used in the configuration file (also called | Parameter | Description | Default | | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | -| `node_name` | String value that specifies a name for the agent node.
See [`-node-id`](/docs/agent/config/agent-config-cli#_node_id) for details. | Hostname of the machine | -| `server` | Boolean value that determines if the agent runs in server mode.
See [`-server`](/docs/agent/config/agent-config-cli#_server) for details. | `false` | -| `datacenter` | String value that specifies which datacenter the agent runs in.
See [-datacenter](/docs/agent/config/agent-config-cli#_datacenter) for details. | `dc1` | -| `data_dir` | String value that specifies a directory for storing agent state data.
See [`-data-dir`](/docs/agent/config/agent-config-cli#_data_dir) for details. | none | -| `log_level` | String value that specifies the level of logging the agent reports.
See [`-log-level`](/docs/agent/config/agent-config-cli#_log_level) for details. | `info` | -| `retry_join` | Array of string values that specify one or more agent addresses to join after startup. The agent will continue trying to join the specified agents until it has successfully joined another member.
See [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) for details. | none | -| `addresses` | Block of nested objects that define addresses bound to the agent for internal cluster communication. | `"http": "0.0.0.0"` See the Agent Configuration page for [default address values](/docs/agent/config/agent-config-files#addresses) | -| `ports` | Block of nested objects that define ports bound to agent addresses.
See (link to addresses option) for details. | See the Agent Configuration page for [default port values](/docs/agent/config/agent-config-files#ports) | +| `node_name` | String value that specifies a name for the agent node.
See [`-node`](/docs/agent/config/cli-flags#_node) for details. | Hostname of the machine |
+| `server` | Boolean value that determines if the agent runs in server mode.
See [`-server`](/docs/agent/config/cli-flags#_server) for details. | `false` | +| `datacenter` | String value that specifies which datacenter the agent runs in.
See [`-datacenter`](/docs/agent/config/cli-flags#_datacenter) for details. | `dc1` |
+| `data_dir` | String value that specifies a directory for storing agent state data.
See [`-data-dir`](/docs/agent/config/cli-flags#_data_dir) for details. | none | +| `log_level` | String value that specifies the level of logging the agent reports.
See [`-log-level`](/docs/agent/config/cli-flags#_log_level) for details. | `info` | +| `retry_join` | Array of string values that specify one or more agent addresses to join after startup. The agent will continue trying to join the specified agents until it has successfully joined another member.
See [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) for details. | none | +| `addresses` | Block of nested objects that define addresses bound to the agent for internal cluster communication. | `"http": "0.0.0.0"` See the Agent Configuration page for [default address values](/docs/agent/config/config-files#addresses) | +| `ports` | Block of nested objects that define ports bound to agent addresses.
See (link to addresses option) for details. | See the Agent Configuration page for [default port values](/docs/agent/config/config-files#ports) | ### Server Node in a Service Mesh -The following example configuration is for a server agent named "`consul-server`". The server is [bootstrapped](/docs/agent/config/agent-config-cli#_bootstrap) and the Consul GUI is enabled. +The following example configuration is for a server agent named "`consul-server`". The server is [bootstrapped](/docs/agent/config/cli-flags#_bootstrap) and the Consul GUI is enabled. The reason this server agent is configured for a service mesh is that the `connect` configuration is enabled. Connect is Consul's service mesh component that provides service-to-service connection authorization and encryption using mutual Transport Layer Security (TLS). Applications can use sidecar proxies in a service mesh configuration to establish TLS connections for inbound and outbound connections without being aware of Connect at all. See [Connect](/docs/connect) for details. @@ -448,8 +448,8 @@ may not be important for your use case. For example, for a web server and load balancer setup, both result in the same outcome: the web node is removed from the load balancer pool. -The [`skip_leave_on_interrupt`](/docs/agent/config/agent-config-files#skip_leave_on_interrupt) and -[`leave_on_terminate`](/docs/agent/config/agent-config-files#leave_on_terminate) configuration +The [`skip_leave_on_interrupt`](/docs/agent/config/config-files#skip_leave_on_interrupt) and +[`leave_on_terminate`](/docs/agent/config/config-files#leave_on_terminate) configuration options allow you to adjust this behavior. diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index dcea27e05..53eac2d0f 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -29,7 +29,7 @@ This telemetry information can be used for debugging or otherwise getting a better view of what Consul is doing. Review the [Monitoring and Metrics tutorial](https://learn.hashicorp.com/tutorials/consul/monitor-datacenter-health?utm_source=consul.io&utm_medium=docs) to learn how collect and interpret Consul data. -Additionally, if the [`telemetry` configuration options](/docs/agent/config/agent-config-files#telemetry) +Additionally, if the [`telemetry` configuration options](/docs/agent/config/config-files#telemetry) are provided, the telemetry information will be streamed to a [statsite](http://github.com/armon/statsite) or [statsd](http://github.com/etsy/statsd) server where it can be aggregated and flushed to Graphite or any other metrics store. @@ -140,7 +140,7 @@ you will need to apply a function such as InfluxDB's [`non_negative_difference() | Metric Name | Description | Unit | Type | | :--------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- | :------ | | `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server | requests | counter | -| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/agent-config-files#limits) configuration. 
| requests | counter | +| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/config-files#limits) configuration. | requests | counter | | `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter | **Why they're important:** These measurements indicate the current load created from a Consul agent, including when the load becomes high enough to be rate limited. A high RPC count, especially from `consul.client.rpcexceeded` meaning that the requests are being rate-limited, could imply a misconfigured Consul agent. @@ -172,7 +172,7 @@ Under these conditions, a follower after a restart may be unable to catch up on replication and become a voter again since it takes longer to restore from disk or the leader than the leader takes to write a new snapshot and truncate its logs. Servers retain -[`raft_trailing_logs`](/docs/agent/config/agent-config-files#raft_trailing_logs) (default +[`raft_trailing_logs`](/docs/agent/config/config-files#raft_trailing_logs) (default `10240`) log entries even if their snapshot was more recent. On a leader processing 500 commits/second, that is only about 20 seconds worth of logs. Assuming the leader is able to write out a snapshot and truncate the logs in @@ -197,7 +197,7 @@ repeatedly as well as reduce the fault tolerance and serving capacity of the cluster. Since Consul 1.5.3 -[`raft_trailing_logs`](/docs/agent/config/agent-config-files#raft_trailing_logs) has been +[`raft_trailing_logs`](/docs/agent/config/config-files#raft_trailing_logs) has been configurable. Increasing it allows the leader to retain more logs and give followers more time to restore and catch up. The tradeoff is potentially slower appends which eventually might affect write throughput and latency @@ -208,7 +208,7 @@ mean loosing cluster availability and needing to recover the cluster from a loss of quorum. Since Consul 1.10.0 -[`raft_trailing_logs`](/docs/agent/config/agent-config-files#raft_trailing_logs) is now +[`raft_trailing_logs`](/docs/agent/config/config-files#raft_trailing_logs) is now reloadable with `consul reload` or `SIGHUP` allowing operators to increase this without the leader restarting or loosing leadership allowing the cluster to be recovered gracefully. @@ -332,7 +332,7 @@ This is a full list of metrics emitted by Consul. | `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration fails for an entity (check, node or service) is blocked by an ACL. | requests | counter | | `consul.api.http` | Migrated from consul.http.. this samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names, for these an underscore will be present as a placeholder (eg. path=`v1.kv._`) | ms | timer | | `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter | -| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/agent-config-files#limits) configuration. 
This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter |
+| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and gets rate limited by that agent's [`limits`](/docs/agent/config/config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter |
| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter |
| `consul.client.api.catalog_register.` | Increments whenever a Consul agent receives a catalog register request. | requests | counter |
| `consul.client.api.success.catalog_register.` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter |
@@ -431,7 +431,7 @@ These metrics are used to monitor the health of the Consul servers.
| `consul.raft.last_index` | Represents the raft applied index. | index | gauge |
| `consul.raft.leader.dispatchLog` | Measures the time it takes for the leader to write log entries to disk. | ms | timer |
| `consul.raft.leader.dispatchNumLogs` | Measures the number of logs committed to disk in a batch. | logs | gauge |
-| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. It can be used as a measure for how stable the Raft timing is and how close the leader is to timing out its lease.The lease timeout is 500 ms times the [`raft_multiplier` configuration](/docs/agent/config/agent-config-files#raft_multiplier), so this telemetry value should not be getting close to that configured value, otherwise the Raft timing is marginal and might need to be tuned, or more powerful servers might be needed. See the [Server Performance](/docs/install/performance) guide for more details. | ms | timer |
+| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. It can be used as a measure for how stable the Raft timing is and how close the leader is to timing out its lease. The lease timeout is 500 ms times the [`raft_multiplier` configuration](/docs/agent/config/config-files#raft_multiplier), so this telemetry value should not be getting close to that configured value, otherwise the Raft timing is marginal and might need to be tuned, or more powerful servers might be needed. See the [Server Performance](/docs/install/performance) guide for more details. | ms | timer |
| `consul.raft.leader.oldestLogAge` | The number of milliseconds since the _oldest_ log in the leader's log store was written. This can be important for replication health where write rate is high and the snapshot is large as followers may be unable to recover from a restart if restoring takes longer than the minimum value for the current leader. Compare this with `consul.raft.fsm.lastRestoreDuration` and `consul.raft.rpc.installSnapshot` to monitor. In normal usage this gauge value will grow linearly over time until a snapshot completes on the leader and the log is truncated. 
Note: this metric won't be emitted until the leader writes a snapshot. After an upgrade to Consul 1.10.0 it won't be emitted until the oldest log was written after the upgrade. | ms | gauge | | `consul.raft.replication.heartbeat` | Measures the time taken to invoke appendEntries on a peer, so that it doesn’t timeout on a periodic basis. | ms | timer | | `consul.raft.replication.appendEntries` | Measures the time it takes to replicate log entries to followers. This is a general indicator of the load pressure on the Consul servers, as well as the performance of the communication between the servers. | ms | timer | @@ -575,7 +575,7 @@ These metrics give insight into the health of the cluster as a whole. | `consul.memberlist.degraded.timeout` | Counts the number of times an agent was marked as a dead node, whilst not getting enough confirmations from a randomly selected list of agent nodes in an agent's membership. | occurrence / interval | counter | | `consul.memberlist.msg.dead` | Counts the number of times an agent has marked another agent to be a dead node. | messages / interval | counter | | `consul.memberlist.health.score` | Describes a node's perception of its own health based on how well it is meeting the soft real-time requirements of the protocol. This metric ranges from 0 to 8, where 0 indicates "totally healthy". This health score is used to scale the time between outgoing probes, and higher scores translate into longer probing intervals. For more details see section IV of the Lifeguard paper: https://arxiv.org/pdf/1707.00788.pdf | score | gauge | -| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/docs/agent/config/agent-config-files#ports). | suspect messages received / interval | counter | +| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/docs/agent/config/config-files#ports). | suspect messages received / interval | counter | | `consul.memberlist.tcp.accept` | Counts the number of times an agent has accepted an incoming TCP stream connection. | connections accepted / interval | counter | | `consul.memberlist.udp.sent/received` | Measures the total number of bytes sent/received by an agent through the UDP protocol. | bytes sent or bytes received / interval | counter | | `consul.memberlist.tcp.connect` | Counts the number of times an agent has initiated a push/pull sync with an other agent. | push/pull initiated / interval | counter | @@ -586,8 +586,8 @@ These metrics give insight into the health of the cluster as a whole. | `consul.memberlist.msg_suspect` | The number of suspect messages that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter | | `consul.memberlist.probeNode` | Measures the time taken to perform a single round of failure detection on a select agent. | nodes / Interval | counter | | `consul.memberlist.pushPullNode` | Measures the number of agents that have exchanged state with this agent. 
| nodes / Interval | counter | -| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/config/agent-config-files#ports). | failures / interval | counter | -| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/config/agent-config-files#ports). | flaps / interval | counter | +| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/config/config-files#ports). | failures / interval | counter | +| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/docs/agent/config/config-files#ports). | flaps / interval | counter | | `consul.serf.member.join` | Increments when an agent joins the cluster. If an agent flapped or failed this counter also increments when it re-joins. | joins / interval | counter | | `consul.serf.member.left` | Increments when an agent leaves the cluster. | leaves / interval | counter | | `consul.serf.events` | Increments when an agent processes an [event](/commands/event). Consul uses events internally so there may be additional events showing in telemetry. There are also a per-event counters emitted as `consul.serf.events.`. | events / interval | counter | diff --git a/website/content/docs/connect/ca/aws.mdx b/website/content/docs/connect/ca/aws.mdx index 3b4ed80b9..1c608c2c6 100644 --- a/website/content/docs/connect/ca/aws.mdx +++ b/website/content/docs/connect/ca/aws.mdx @@ -173,11 +173,11 @@ So monthly cost would be calculated as: - 500 ⨉ 13.3 = 6,650 certificates issued in dc3 The number of certificates issued could be reduced by increasing -[`leaf_cert_ttl`](/docs/agent/config/agent-config-files#ca_leaf_cert_ttl) in the CA Provider +[`leaf_cert_ttl`](/docs/agent/config/config-files#ca_leaf_cert_ttl) in the CA Provider configuration if the longer lived credentials are an acceptable risk tradeoff against the cost. -[`ca_config`]: /docs/agent/config/agent-config-files#connect_ca_config -[`ca_provider`]: /docs/agent/config/agent-config-files#connect_ca_provider +[`ca_config`]: /docs/agent/config/config-files#connect_ca_config +[`ca_provider`]: /docs/agent/config/config-files#connect_ca_provider [`/connect/ca/configuration`]: /api-docs/connect/ca#update-ca-configuration diff --git a/website/content/docs/connect/ca/consul.mdx b/website/content/docs/connect/ca/consul.mdx index ba7645171..3f65a85e2 100644 --- a/website/content/docs/connect/ca/consul.mdx +++ b/website/content/docs/connect/ca/consul.mdx @@ -92,7 +92,7 @@ Connect is enabled - the PrivateKey and RootCert fields have not been set, so th been generated (as seen above in the roots list). 
There are two ways to have the Consul CA use a custom private key and root certificate: -either through the `ca_config` section of the [Agent configuration](/docs/agent/config/agent-config-files#connect_ca_config) (which can only be used during the cluster's +either through the `ca_config` section of the [Agent configuration](/docs/agent/config/config-files#connect_ca_config) (which can only be used during the cluster's initial bootstrap) or through the [Update CA Configuration endpoint](/api-docs/connect/ca#update-ca-configuration). Currently Consul requires that root certificates are valid [SPIFFE SVID Signing certificates](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and that the URI encoded diff --git a/website/content/docs/connect/ca/index.mdx b/website/content/docs/connect/ca/index.mdx index 16d5c0021..45b9b3149 100644 --- a/website/content/docs/connect/ca/index.mdx +++ b/website/content/docs/connect/ca/index.mdx @@ -47,7 +47,7 @@ will generate the initial root certificates and setup the internal Consul server state. For the initial bootstrap, the CA provider can be configured through the -[Agent configuration](/docs/agent/config/agent-config-files#connect_ca_config). After +[Agent configuration](/docs/agent/config/config-files#connect_ca_config). After initialization, the CA can only be updated through the [Update CA Configuration API endpoint](/api-docs/connect/ca#update-ca-configuration). If a CA is already initialized, any changes to the CA configuration in the diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx index 376d50bfc..be0953364 100644 --- a/website/content/docs/connect/ca/vault.mdx +++ b/website/content/docs/connect/ca/vault.mdx @@ -280,6 +280,6 @@ path "/connect_inter/*" {
-[`ca_config`]: /docs/agent/config/agent-config-files#connect_ca_config -[`ca_provider`]: /docs/agent/config/agent-config-files#connect_ca_provider +[`ca_config`]: /docs/agent/config/config-files#connect_ca_config +[`ca_provider`]: /docs/agent/config/config-files#connect_ca_provider [`/connect/ca/configuration`]: /api-docs/connect/ca#update-ca-configuration diff --git a/website/content/docs/connect/config-entries/exported-services.mdx b/website/content/docs/connect/config-entries/exported-services.mdx index 790d773c6..bf41a7cc6 100644 --- a/website/content/docs/connect/config-entries/exported-services.mdx +++ b/website/content/docs/connect/config-entries/exported-services.mdx @@ -28,7 +28,7 @@ You can configure the settings defined in the `exported-services` configuration ## Usage 1. Verify that your datacenter meets the conditions specified in the [Requirements](#requirements). -1. Specify the `exported-services` configuration in the agent configuration file (see [`config_entries`](/docs/agent/config/agent-config-files#config_entries)) as described in [Configuration](#configuration). +1. Specify the `exported-services` configuration in the agent configuration file (see [`config_entries`](/docs/agent/config/config-files#config_entries)) as described in [Configuration](#configuration). 1. Apply the configuration using one of the following methods: - Kubernetes CRD: Refer to the [Custom Resource Definitions](/docs/k8s/crds) documentation for details. - Issue the `consul config write` command: Refer to the [Consul Config Write](/commands/config/write) documentation for details. diff --git a/website/content/docs/connect/config-entries/index.mdx b/website/content/docs/connect/config-entries/index.mdx index 2ff6142fa..43b2a3d99 100644 --- a/website/content/docs/connect/config-entries/index.mdx +++ b/website/content/docs/connect/config-entries/index.mdx @@ -49,7 +49,7 @@ See [Agent - Config Entries](/docs/agent/config-entries). ## Using Configuration Entries For Service Defaults Outside of Kubernetes, when the agent is -[configured](/docs/agent/config/agent-config-files#enable_central_service_config) to enable +[configured](/docs/agent/config/config-files#enable_central_service_config) to enable central service configurations, it will look for service configuration defaults that match a registering service instance. If it finds any, the agent will merge those defaults with the service instance configuration. This allows for things diff --git a/website/content/docs/connect/config-entries/proxy-defaults.mdx b/website/content/docs/connect/config-entries/proxy-defaults.mdx index 2d5d2ebd8..570549477 100644 --- a/website/content/docs/connect/config-entries/proxy-defaults.mdx +++ b/website/content/docs/connect/config-entries/proxy-defaults.mdx @@ -390,8 +390,8 @@ spec: type: 'bool: false', description: `If enabled, all HTTP and gRPC checks registered with the agent are exposed through Envoy. Envoy will expose listeners for these checks and will only accept connections originating from localhost or Consul's - [advertise address](/docs/agent/config/agent-config-files#advertise). The port for these listeners are dynamically allocated from - [expose_min_port](/docs/agent/config/agent-config-files#expose_min_port) to [expose_max_port](/docs/agent/config/agent-config-files#expose_max_port). + [advertise address](/docs/agent/config/config-files#advertise). 
The port for these listeners are dynamically allocated from + [expose_min_port](/docs/agent/config/config-files#expose_min_port) to [expose_max_port](/docs/agent/config/config-files#expose_max_port). This flag is useful when a Consul client cannot reach registered services over localhost.`, }, { diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index 06f1892a3..c69d13169 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -662,8 +662,8 @@ spec: type: 'bool: false', description: `If enabled, all HTTP and gRPC checks registered with the agent are exposed through Envoy. Envoy will expose listeners for these checks and will only accept connections originating from localhost or Consul's - [advertise address](/docs/agent/config/agent-config-files#advertise). The port for these listeners are dynamically allocated from - [expose_min_port](/docs/agent/config/agent-config-files#expose_min_port) to [expose_max_port](/docs/agent/config/agent-config-files#expose_max_port). + [advertise address](/docs/agent/config/config-files#advertise). The port for these listeners are dynamically allocated from + [expose_min_port](/docs/agent/config/config-files#expose_min_port) to [expose_max_port](/docs/agent/config/config-files#expose_max_port). This flag is useful when a Consul client cannot reach registered services over localhost. One example is when running Consul on Kubernetes, and Consul agents run in their own pods.`, }, diff --git a/website/content/docs/connect/config-entries/service-intentions.mdx b/website/content/docs/connect/config-entries/service-intentions.mdx index c2e77fb06..b73eb61fb 100644 --- a/website/content/docs/connect/config-entries/service-intentions.mdx +++ b/website/content/docs/connect/config-entries/service-intentions.mdx @@ -488,7 +488,7 @@ spec: first permission to match in the list is terminal and stops further evaluation. As with L4 intentions, traffic that fails to match any of the provided permissions in this intention will be subject to the default - intention behavior is defined by the default [ACL policy](/docs/agent/config/agent-config-files#acl_default_policy).

+ intention behavior is defined by the default [ACL policy](/docs/agent/config/config-files#acl_default_policy).

This should be omitted for an L4 intention as it is mutually exclusive with the \`Action\` field.

Setting \`Permissions\` is not valid if a wildcard is used for the \`Name\` or \`Namespace\` because they can only be @@ -498,7 +498,7 @@ spec: first permission to match in the list is terminal and stops further evaluation. As with L4 intentions, traffic that fails to match any of the provided permissions in this intention will be subject to the default - intention behavior is defined by the default [ACL policy](/docs/agent/config/agent-config-files#acl_default_policy).

+ intention behavior is defined by the default [ACL policy](/docs/agent/config/config-files#acl_default_policy).

This should be omitted for an L4 intention as it is mutually exclusive with the \`action\` field.

Setting \`permissions\` is not valid if a wildcard is used for the \`spec.destination.name\` or \`spec.destination.namespace\` diff --git a/website/content/docs/connect/configuration.mdx b/website/content/docs/connect/configuration.mdx index 0018fc4db..e6ebdbf85 100644 --- a/website/content/docs/connect/configuration.mdx +++ b/website/content/docs/connect/configuration.mdx @@ -22,7 +22,7 @@ The first step to use Connect is to enable Connect for your Consul cluster. By default, Connect is disabled. Enabling Connect requires changing the configuration of only your Consul _servers_ (not client agents). To enable Connect, add the following to a new or existing -[server configuration file](/docs/agent/config/agent-config-files). In an existing cluster, this configuration change requires a Consul server restart, which you can perform one server at a time to maintain availability. In HCL: +[server configuration file](/docs/agent/config/config-files). In an existing cluster, this configuration change requires a Consul server restart, which you can perform one server at a time to maintain availability. In HCL: ```hcl connect { @@ -43,20 +43,20 @@ connection attempts to fail until Connect is enabled on the server agents. Other optional Connect configurations that you can set in the server configuration file include: -- [certificate authority settings](/docs/agent/config/agent-config-files#connect) -- [token replication](/docs/agent/config/agent-config-files#acl_tokens_replication) -- [dev mode](/docs/agent/config/agent-config-cli#_dev) -- [server host name verification](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname) +- [certificate authority settings](/docs/agent/config/config-files#connect) +- [token replication](/docs/agent/config/config-files#acl_tokens_replication) +- [dev mode](/docs/agent/config/cli-flags#_dev) +- [server host name verification](/docs/agent/config/config-files#tls_internal_rpc_verify_server_hostname) If you would like to use Envoy as your Connect proxy you will need to [enable -gRPC](/docs/agent/config/agent-config-files#grpc_port). +gRPC](/docs/agent/config/config-files#grpc_port). Additionally if you plan on using the observability features of Connect, it can be convenient to configure your proxies and services using [configuration entries](/docs/agent/config-entries) which you can interact with using the CLI or API, or by creating configuration entry files. You will want to enable [centralized service -configuration](/docs/agent/config/agent-config-files#enable_central_service_config) on +configuration](/docs/agent/config/config-files#enable_central_service_config) on clients, which allows each service's proxy configuration to be managed centrally via API. diff --git a/website/content/docs/connect/connect-internals.mdx b/website/content/docs/connect/connect-internals.mdx index 63682983e..01232ab1d 100644 --- a/website/content/docs/connect/connect-internals.mdx +++ b/website/content/docs/connect/connect-internals.mdx @@ -109,10 +109,10 @@ externally routable IPs at the service level. ## Intention Replication Intention replication happens automatically but requires the -[`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter) +[`primary_datacenter`](/docs/agent/config/config-files#primary_datacenter) configuration to be set to specify a datacenter that is authoritative for intentions. 
In production setups with ACLs enabled, the -[replication token](/docs/agent/config/agent-config-files#acl_tokens_replication) must also +[replication token](/docs/agent/config/config-files#acl_tokens_replication) must also be set in the secondary datacenter server's configuration. ## Certificate Authority Federation diff --git a/website/content/docs/connect/gateways/ingress-gateway.mdx b/website/content/docs/connect/gateways/ingress-gateway.mdx index da6155963..62ceffb7c 100644 --- a/website/content/docs/connect/gateways/ingress-gateway.mdx +++ b/website/content/docs/connect/gateways/ingress-gateway.mdx @@ -40,8 +40,8 @@ the [hosts](/docs/connect/config-entries/ingress-gateway#hosts) field. Ingress gateways also require that your Consul datacenters are configured correctly: - You'll need to use Consul version 1.8.0 or newer. -- Consul [Connect](/docs/agent/config/agent-config-files#connect) must be enabled on the datacenter's Consul servers. -- [gRPC](/docs/agent/config/agent-config-files#grpc_port) must be enabled on all client agents. +- Consul [Connect](/docs/agent/config/config-files#connect) must be enabled on the datacenter's Consul servers. +- [gRPC](/docs/agent/config/config-files#grpc_port) must be enabled on all client agents. Currently, [Envoy](https://www.envoyproxy.io/) is the only proxy with ingress gateway capabilities in Consul. diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx index ee6266030..f016d949f 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters.mdx @@ -30,12 +30,12 @@ Ensure that your Consul environment meets the following requirements. * Consul version 1.6.0 or newer. * A local Consul agent is required to manage its configuration. -* Consul [Connect](/docs/agent/config/agent-config-files#connect) must be enabled in both datacenters. -* Each [datacenter](/docs/agent/config/agent-config-files#datacenter) must have a unique name. +* Consul [Connect](/docs/agent/config/config-files#connect) must be enabled in both datacenters. +* Each [datacenter](/docs/agent/config/config-files#datacenter) must have a unique name. * Each datacenters must be [WAN joined](https://learn.hashicorp.com/tutorials/consul/federarion-gossip-wan). -* The [primary datacenter](/docs/agent/config/agent-config-files#primary_datacenter) must be set to the same value in both datacenters. This specifies which datacenter is the authority for Connect certificates and is required for services in all datacenters to establish mutual TLS with each other. -* [gRPC](/docs/agent/config/agent-config-files#grpc_port) must be enabled. -* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/config/agent-config-files#enable_central_service_config). +* The [primary datacenter](/docs/agent/config/config-files#primary_datacenter) must be set to the same value in both datacenters. This specifies which datacenter is the authority for Connect certificates and is required for services in all datacenters to establish mutual TLS with each other. +* [gRPC](/docs/agent/config/config-files#grpc_port) must be enabled. 
+* If you want to [enable gateways globally](/docs/connect/mesh-gateway#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/config/config-files#enable_central_service_config). ### Network diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx index 65d133021..dfd4780c6 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx @@ -24,9 +24,9 @@ Ensure that your Consul environment meets the following requirements. * Consul Enterprise version 1.11.0 or newer. * A local Consul agent is required to manage its configuration. -* Consul service mesh must be enabled in all partitions. Refer to the [`connect` documentation](/docs/agent/config/agent-config-files#connect) for details. +* Consul service mesh must be enabled in all partitions. Refer to the [`connect` documentation](/docs/agent/config/config-files#connect) for details. * Each partition must have a unique name. Refer to the [admin partitions documentation](/docs/enterprise/admin-partitions) for details. -* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/config/agent-config-files#enable_central_service_config). +* If you want to [enable gateways globally](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/docs/agent/config/config-files#enable_central_service_config). ### Proxy diff --git a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx index d6dcf2a42..bf7a3c177 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx @@ -126,8 +126,8 @@ connect { } ``` -The [`start_join_wan`](/docs/agent/config/agent-config-files#start_join_wan) or -[`retry_join_wan`](/docs/agent/config/agent-config-files#retry_join_wan) are +The [`start_join_wan`](/docs/agent/config/config-files#start_join_wan) or +[`retry_join_wan`](/docs/agent/config/config-files#retry_join_wan) are only used for the [traditional federation process](/docs/k8s/installation/multi-cluster#traditional-wan-federation). They must be omitted when federating Consul servers via gateways. diff --git a/website/content/docs/connect/gateways/terminating-gateway.mdx b/website/content/docs/connect/gateways/terminating-gateway.mdx index fdd3891c4..2c8218c38 100644 --- a/website/content/docs/connect/gateways/terminating-gateway.mdx +++ b/website/content/docs/connect/gateways/terminating-gateway.mdx @@ -59,8 +59,8 @@ Each terminating gateway needs: Terminating gateways also require that your Consul datacenters are configured correctly: - You'll need to use Consul version 1.8.0 or newer. -- Consul [Connect](/docs/agent/config/agent-config-files#connect) must be enabled on the datacenter's Consul servers. -- [gRPC](/docs/agent/config/agent-config-files#grpc_port) must be enabled on all client agents. 
+- Consul [Connect](/docs/agent/config/config-files#connect) must be enabled on the datacenter's Consul servers. +- [gRPC](/docs/agent/config/config-files#grpc_port) must be enabled on all client agents. Currently, [Envoy](https://www.envoyproxy.io/) is the only proxy with terminating gateway capabilities in Consul. diff --git a/website/content/docs/connect/intentions-legacy.mdx b/website/content/docs/connect/intentions-legacy.mdx index 804bd8c65..8f6994d35 100644 --- a/website/content/docs/connect/intentions-legacy.mdx +++ b/website/content/docs/connect/intentions-legacy.mdx @@ -25,7 +25,7 @@ is allowed by testing the intentions. If authorize returns false the connection must be terminated. The default intention behavior is defined by the default [ACL -policy](/docs/agent/config/agent-config-files#acl_default_policy). If the default ACL policy is +policy](/docs/agent/config/config-files#acl_default_policy). If the default ACL policy is "allow all", then all Connect connections are allowed by default. If the default ACL policy is "deny all", then all Connect connections are denied by default. diff --git a/website/content/docs/connect/intentions.mdx b/website/content/docs/connect/intentions.mdx index 71a182823..15a5bcc36 100644 --- a/website/content/docs/connect/intentions.mdx +++ b/website/content/docs/connect/intentions.mdx @@ -49,7 +49,7 @@ target destination. After verifying the TLS client certificate, the cached intentions should be consulted for each incoming connection/request to determine if it should be accepted or rejected. -The default intention behavior is defined by the [`default_policy`](/docs/agent/config/agent-config-files#acl_default_policy) configuration. +The default intention behavior is defined by the [`default_policy`](/docs/agent/config/config-files#acl_default_policy) configuration. If the configuration is set `allow`, then all service mesh Connect connections will be allowed by default. If is set to `deny`, then all connections or requests will be denied by default. diff --git a/website/content/docs/connect/observability/index.mdx b/website/content/docs/connect/observability/index.mdx index 616919f94..eadc3e498 100644 --- a/website/content/docs/connect/observability/index.mdx +++ b/website/content/docs/connect/observability/index.mdx @@ -18,10 +18,10 @@ to: - Define the upstreams for each of your services. If you are using Envoy as your sidecar proxy, you will need to [enable -gRPC](/docs/agent/config/agent-config-files#grpc_port) on your client agents. To define the +gRPC](/docs/agent/config/config-files#grpc_port) on your client agents. To define the metrics destination and service protocol you may want to enable [configuration -entries](/docs/agent/config/agent-config-files#config_entries) and [centralized service -configuration](/docs/agent/config/agent-config-files#enable_central_service_config). +entries](/docs/agent/config/config-files#config_entries) and [centralized service +configuration](/docs/agent/config/config-files#enable_central_service_config). ### Kubernetes If you are using Kubernetes, the Helm chart can simplify much of the configuration needed to enable observability. See diff --git a/website/content/docs/connect/observability/ui-visualization.mdx b/website/content/docs/connect/observability/ui-visualization.mdx index 503163452..edbc96def 100644 --- a/website/content/docs/connect/observability/ui-visualization.mdx +++ b/website/content/docs/connect/observability/ui-visualization.mdx @@ -47,11 +47,11 @@ UI. 
If there are multiple clients with the UI enabled in a datacenter for redundancy these configurations must be added to all of them. We assume that the UI is already enabled by setting -[`ui_config.enabled`](/docs/agent/config/agent-config-files#ui_config_enabled) to `true` in the +[`ui_config.enabled`](/docs/agent/config/config-files#ui_config_enabled) to `true` in the agent's configuration file. To use the built-in Prometheus provider -[`ui_config.metrics_provider`](/docs/agent/config/agent-config-files#ui_config_metrics_provider) +[`ui_config.metrics_provider`](/docs/agent/config/config-files#ui_config_metrics_provider) must be set to `prometheus`. The UI must query the metrics provider through a proxy endpoint. This simplifies @@ -59,7 +59,7 @@ deployment where Prometheus is not exposed externally to UI user's browsers. To set this up, provide the URL that the _Consul agent_ should use to reach the Prometheus server in -[`ui_config.metrics_proxy.base_url`](/docs/agent/config/agent-config-files#ui_config_metrics_proxy_base_url). +[`ui_config.metrics_proxy.base_url`](/docs/agent/config/config-files#ui_config_metrics_proxy_base_url). For example in Kubernetes, the Prometheus helm chart by default installs a service named `prometheus-server` so each Consul agent can reach it on `http://prometheus-server` (using Kubernetes' DNS resolution). @@ -124,7 +124,7 @@ service-specific dashboard in an external tool like [Grafana](https://grafana.com) or a hosted provider. To configure this, you must provide a URL template in the [agent configuration -file](/docs/agent/config/agent-config-files#ui_config_dashboard_url_templates) for all agents that +file](/docs/agent/config/config-files#ui_config_dashboard_url_templates) for all agents that have the UI enabled. The template is essentially the URL to the external dashboard, but can have placeholder values which will be replaced with the service name, namespace and datacenter where appropriate to allow deep-linking @@ -659,12 +659,12 @@ ui_config { More than one JavaScript file may be specified in -[`metrics_provider_files`](/docs/agent/config/agent-config-files#ui_config_metrics_provider_files) +[`metrics_provider_files`](/docs/agent/config/config-files#ui_config_metrics_provider_files) and all will be served allowing flexibility if needed to include dependencies. Only one metrics provider can be configured and used at one time. The -[`metrics_provider_options_json`](/docs/agent/config/agent-config-files#ui_config_metrics_provider_options_json) +[`metrics_provider_options_json`](/docs/agent/config/config-files#ui_config_metrics_provider_options_json) field is an optional literal JSON object which is passed to the provider's `init` method at startup time. This allows configuring arbitrary parameters for the provider in config rather than hard coding them into the provider itself to @@ -673,7 +673,7 @@ make providers more reusable. The provider may fetch metrics directly from another source although in this case the agent will probably need to serve the correct CORS headers to prevent browsers from blocking these requests. These may be configured with -[`http_config.response_headers`](/docs/agent/config/agent-config-files#response_headers). +[`http_config.response_headers`](/docs/agent/config/config-files#response_headers). 
Alternatively, the provider may choose to use the [built-in metrics proxy](#metrics-proxy) to avoid cross domain issues or to inject additional diff --git a/website/content/docs/connect/proxies/built-in.mdx b/website/content/docs/connect/proxies/built-in.mdx index 5dc321614..358d80464 100644 --- a/website/content/docs/connect/proxies/built-in.mdx +++ b/website/content/docs/connect/proxies/built-in.mdx @@ -53,8 +53,8 @@ All fields are optional with a reasonable default. - `bind_port` - The port the proxy will bind its _public_ mTLS listener to. If not provided, the agent will assign a random port from its - configured proxy port range specified by [`sidecar_min_port`](/docs/agent/config/agent-config-files#sidecar_min_port) - and [`sidecar_max_port`](/docs/agent/config/agent-config-files#sidecar_max_port). + configured proxy port range specified by [`sidecar_min_port`](/docs/agent/config/config-files#sidecar_min_port) + and [`sidecar_max_port`](/docs/agent/config/config-files#sidecar_max_port). - `local_service_address`- The `[address]:port` that the proxy should use to connect to the local application instance. By default diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index b8fad5af7..516618877 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -184,7 +184,7 @@ the upstream listeners of any downstream service. One example is how users can define a service's protocol in a [`service-defaults` configuration entry](/docs/connect/config-entries/service-defaults). Agents with -[`enable_central_service_config`](/docs/agent/config/agent-config-files#enable_central_service_config) +[`enable_central_service_config`](/docs/agent/config/config-files#enable_central_service_config) set to true will automatically discover the protocol when configuring a proxy for a service. The proxy will discover the main protocol of the service it represents and use this to configure its main public listener. It will also diff --git a/website/content/docs/connect/proxies/managed-deprecated.mdx b/website/content/docs/connect/proxies/managed-deprecated.mdx index 3848ecb64..f4d709d27 100644 --- a/website/content/docs/connect/proxies/managed-deprecated.mdx +++ b/website/content/docs/connect/proxies/managed-deprecated.mdx @@ -24,7 +24,7 @@ Managed proxies have been deprecated since Consul 1.3 and have been fully remove in Consul 1.6. Anyone using Managed Proxies should aim to change their workflow as soon as possible to avoid issues with a later upgrade. -After transitioning away from all managed proxy usage, the `proxy` subdirectory inside [`data_dir`](/docs/agent/config/agent-config-cli#_data_dir) (specified in Consul config) can be deleted to remove extraneous configuration files and free up disk space. +After transitioning away from all managed proxy usage, the `proxy` subdirectory inside [`data_dir`](/docs/agent/config/cli-flags#_data_dir) (specified in Consul config) can be deleted to remove extraneous configuration files and free up disk space. **new and known issues will not be fixed**. @@ -275,6 +275,6 @@ level logs showing service discovery, certificate and authorization information. ~> **Note:** In `-dev` mode there is no `data_dir` unless one is explicitly configured so logging is disabled. You can access logs by providing the -[`-data-dir`](/docs/agent/config/agent-config-cli#_data_dir) CLI option. If a data dir is +[`-data-dir`](/docs/agent/config/cli-flags#_data_dir) CLI option. 
If a data dir is configured, this will also cause proxy processes to stay running when the agent terminates as described in [Lifecycle](#lifecycle). diff --git a/website/content/docs/connect/registration/service-registration.mdx b/website/content/docs/connect/registration/service-registration.mdx index 8e897da33..d32ddc283 100644 --- a/website/content/docs/connect/registration/service-registration.mdx +++ b/website/content/docs/connect/registration/service-registration.mdx @@ -437,8 +437,8 @@ registrations](/docs/discovery/services#service-definition-parameter-case). - `checks` `(bool: false)` - If enabled, all HTTP and gRPC checks registered with the agent are exposed through Envoy. Envoy will expose listeners for these checks and will only accept connections originating from localhost or Consul's - [advertise address](/docs/agent/config/agent-config-files#advertise). The port for these listeners are dynamically allocated from - [expose_min_port](/docs/agent/config/agent-config-files#expose_min_port) to [expose_max_port](/docs/agent/config/agent-config-files#expose_max_port). + [advertise address](/docs/agent/config/config-files#advertise). The port for these listeners are dynamically allocated from + [expose_min_port](/docs/agent/config/config-files#expose_min_port) to [expose_max_port](/docs/agent/config/config-files#expose_max_port). This flag is useful when a Consul client cannot reach registered services over localhost. One example is when running Consul on Kubernetes, and Consul agents run in their own pods. - `paths` `array: []` - A list of paths to expose through Envoy. diff --git a/website/content/docs/connect/registration/sidecar-service.mdx b/website/content/docs/connect/registration/sidecar-service.mdx index 6b2602f90..4ae04fdfc 100644 --- a/website/content/docs/connect/registration/sidecar-service.mdx +++ b/website/content/docs/connect/registration/sidecar-service.mdx @@ -131,8 +131,8 @@ proxy. - `tags` - Defaults to the tags of the parent service. - `meta` - Defaults to the service metadata of the parent service. - `port` - Defaults to being auto-assigned from a configurable - range specified by [`sidecar_min_port`](/docs/agent/config/agent-config-files#sidecar_min_port) - and [`sidecar_max_port`](/docs/agent/config/agent-config-files#sidecar_max_port). + range specified by [`sidecar_min_port`](/docs/agent/config/config-files#sidecar_min_port) + and [`sidecar_max_port`](/docs/agent/config/config-files#sidecar_max_port). - `kind` - Defaults to `connect-proxy`. This can't be overridden currently. - `check`, `checks` - By default we add a TCP check on the local address and port for the proxy, and a [service alias diff --git a/website/content/docs/discovery/checks.mdx b/website/content/docs/discovery/checks.mdx index 8be31decf..401f447c6 100644 --- a/website/content/docs/discovery/checks.mdx +++ b/website/content/docs/discovery/checks.mdx @@ -34,10 +34,10 @@ There are several different kinds of checks: In Consul 0.9.0 and later, script checks are not enabled by default. To use them you can either use : - - [`enable_local_script_checks`](/docs/agent/config/agent-config-cli#_enable_local_script_checks): + - [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks): enable script checks defined in local config files. Script checks defined via the HTTP API will not be allowed. 
- - [`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks): enable + - [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks): enable script checks regardless of how they are defined. ~> **Security Warning:** Enabling script checks in some configurations may @@ -109,7 +109,7 @@ There are several different kinds of checks: has to be performed is configurable which makes it possible to run containers which have different shells on the same host. Check output for Docker is limited to 4KB. Any output larger than this will be truncated. In Consul 0.9.0 and later, the agent - must be configured with [`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks) + must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) set to `true` in order to enable Docker health checks. - `gRPC + Interval` - These checks are intended for applications that support the standard @@ -467,7 +467,7 @@ This is the only convention that Consul depends on. Any output of the script will be captured and stored in the `output` field. In Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks) set to `true` +[`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) set to `true` in order to enable script checks. ## Initial Health Check Status @@ -543,7 +543,7 @@ provided by the node will remain unchanged. ## Agent Certificates for TLS Checks -The [enable_agent_tls_for_checks](/docs/agent/config/agent-config-files#enable_agent_tls_for_checks) +The [enable_agent_tls_for_checks](/docs/agent/config/config-files#enable_agent_tls_for_checks) agent configuration option can be utilized to have HTTP or gRPC health checks to use the agent's credentials when configured for TLS. diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index 4a671f246..5e24f34da 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -21,9 +21,9 @@ are located in the `us-east-1` datacenter, and have no failing health checks. It's that simple! There are a number of configuration options that are important for the DNS interface, -specifically [`client_addr`](/docs/agent/config/agent-config-files#client_addr),[`ports.dns`](/docs/agent/config/agent-config-files#dns_port), -[`recursors`](/docs/agent/config/agent-config-files#recursors),[`domain`](/docs/agent/config/agent-config-files#domain), -[`alt_domain`](/docs/agent/config/agent-config-files#alt_domain), and [`dns_config`](/docs/agent/config/agent-config-files#dns_config). +specifically [`client_addr`](/docs/agent/config/config-files#client_addr),[`ports.dns`](/docs/agent/config/config-files#dns_port), +[`recursors`](/docs/agent/config/config-files#recursors),[`domain`](/docs/agent/config/config-files#domain), +[`alt_domain`](/docs/agent/config/config-files#alt_domain), and [`dns_config`](/docs/agent/config/config-files#dns_config). By default, Consul will listen on 127.0.0.1:8600 for DNS queries in the `consul.` domain, without support for further DNS recursion. Please consult the [documentation on configuration options](/docs/agent/config), @@ -32,7 +32,7 @@ specifically the configuration items linked above, for more details. There are a few ways to use the DNS interface. One option is to use a custom DNS resolver library and point it at Consul. 
Another option is to set Consul as the DNS server for a node and provide a -[`recursors`](/docs/agent/config/agent-config-files#recursors) configuration so that non-Consul queries +[`recursors`](/docs/agent/config/config-files#recursors) configuration so that non-Consul queries can also be resolved. The last method is to forward all queries for the "consul." domain to a Consul agent from the existing DNS server. Review the [DNS Forwarding tutorial](https://learn.hashicorp.com/tutorials/consul/dns-forwarding?utm_source=consul.io&utm_medium=docs) for examples. @@ -412,15 +412,15 @@ are not truncated. ## Alternative Domain By default, Consul responds to DNS queries in the `consul` domain, -but you can set a specific domain for responding to DNS queries by configuring the [`domain`](/docs/agent/config/agent-config-files#domain) parameter. +but you can set a specific domain for responding to DNS queries by configuring the [`domain`](/docs/agent/config/config-files#domain) parameter. In some instances, Consul may need to respond to queries in more than one domain, such as during a DNS migration or to distinguish between internal and external queries. Consul versions 1.5.2+ can be configured to respond to DNS queries on an alternative domain -through the [`alt_domain`](/docs/agent/config/agent-config-files#alt_domain) agent configuration +through the [`alt_domain`](/docs/agent/config/config-files#alt_domain) agent configuration option. As of Consul versions 1.11.0+, Consul's DNS response will use the same domain as was used in the query; -in prior versions, the response may use the primary [`domain`](/docs/agent/config/agent-config-files#domain) no matter which +in prior versions, the response may use the primary [`domain`](/docs/agent/config/config-files#domain) no matter which domain was used in the query. In the following example, the `alt_domain` parameter is set to `test-domain`: @@ -448,7 +448,7 @@ machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" ``` -> **PTR queries:** Responses to PTR queries (`.in-addr.arpa.`) will always use the -[primary domain](/docs/agent/config/agent-config-files#domain) (not the alternative domain), +[primary domain](/docs/agent/config/config-files#domain) (not the alternative domain), as there is no way for the query to specify a domain. ## Caching @@ -463,8 +463,8 @@ for [DNS caching](https://learn.hashicorp.com/tutorials/consul/dns-caching). By default, Consul DNS queries will return a node's local address, even when being queried from a remote datacenter. If you need to use a different address to reach a node from outside its datacenter, you can configure this behavior -using the [`advertise-wan`](/docs/agent/config/agent-config-cli#_advertise-wan) and -[`translate_wan_addrs`](/docs/agent/config/agent-config-files#translate_wan_addrs) configuration +using the [`advertise-wan`](/docs/agent/config/cli-flags#_advertise-wan) and +[`translate_wan_addrs`](/docs/agent/config/config-files#translate_wan_addrs) configuration options. ## Namespaced/Partitioned Services @@ -480,7 +480,7 @@ services from other namespaces or partitions the following form can be used: This is the canonical name of a Consul Enterprise service. 
Currently all parts must be present - in a future version (once the -[`prefer_namespace` configuration](/docs/agent/config/agent-config-files#dns_prefer_namespace) has been +[`prefer_namespace` configuration](/docs/agent/config/config-files#dns_prefer_namespace) has been deprecated), the namespace, partition and datacenter components will become optional and may be individually omitted to default to the `default` namespace, local partition or local datacenter respectively. @@ -494,7 +494,7 @@ are enabled, you must first create ACL tokens with the necessary policies. Consul agents resolve DNS requests using one of the preconfigured tokens below, listed in order of precedence: -1. The agent's [`default` token](/docs/agent/config/agent-config-files#acl_tokens_default). +1. The agent's [`default` token](/docs/agent/config/config-files#acl_tokens_default). 2. The built-in [`anonymous` token](/docs/security/acl/acl-system#builtin-tokens). Because the anonymous token is used when any request is made to Consul without explicitly specifying a token, production deployments should not apply policies diff --git a/website/content/docs/dynamic-app-config/kv.mdx b/website/content/docs/dynamic-app-config/kv.mdx index ad8c57a4a..5d4576066 100644 --- a/website/content/docs/dynamic-app-config/kv.mdx +++ b/website/content/docs/dynamic-app-config/kv.mdx @@ -39,7 +39,7 @@ privileges on one key for developers to update the value related to their application. The datastore itself is located on the Consul servers in the [data -directory](/docs/agent/config/agent-config-cli#_data_dir). To ensure data is not lost in +directory](/docs/agent/config/cli-flags#_data_dir). To ensure data is not lost in the event of a complete outage, use the [`consul snapshot`](/commands/snapshot/restore) feature to backup the data. ## Using Consul KV @@ -48,7 +48,7 @@ Objects are opaque to Consul, meaning there are no restrictions on the type of object stored in a key/value entry. The main restriction on an object is size - the maximum is 512 KB. Due to the maximum object size and main use cases, you should not need extra storage; the general [sizing -recommendations](/docs/agent/config/agent-config-files#kv_max_value_size) +recommendations](/docs/agent/config/config-files#kv_max_value_size) are usually sufficient. Keys, like objects are not restricted by type and can include any character. diff --git a/website/content/docs/dynamic-app-config/watches.mdx b/website/content/docs/dynamic-app-config/watches.mdx index a3fe837d5..e9d7ba726 100644 --- a/website/content/docs/dynamic-app-config/watches.mdx +++ b/website/content/docs/dynamic-app-config/watches.mdx @@ -20,7 +20,7 @@ Watches are implemented using blocking queries in the [HTTP API](/api). Agents automatically make the proper API calls to watch for changes and inform a handler when the data view has updated. -Watches can be configured as part of the [agent's configuration](/docs/agent/config/agent-config-files#watches), +Watches can be configured as part of the [agent's configuration](/docs/agent/config/config-files#watches), causing them to run once the agent is initialized. Reloading the agent configuration allows for adding or removing watches dynamically. 
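Since the watches hunk above points readers at the agent configuration, a minimal sketch of such a stanza may help; the key prefix and handler script are illustrative assumptions, not part of this change:

```hcl
# Hypothetical agent configuration fragment: a key-prefix watch that runs a
# handler script whenever any key under "app/config/" changes. The prefix and
# the script path are placeholders for illustration only.
watches = [
  {
    type         = "keyprefix"
    prefix       = "app/config/"
    handler_type = "script"
    args         = ["/usr/local/bin/reload-app.sh"]
  }
]
```

As the surrounding text notes, reloading the agent configuration is enough to add or remove watches like this one dynamically.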
diff --git a/website/content/docs/enterprise/audit-logging.mdx b/website/content/docs/enterprise/audit-logging.mdx index 155872ae3..27382043e 100644 --- a/website/content/docs/enterprise/audit-logging.mdx +++ b/website/content/docs/enterprise/audit-logging.mdx @@ -25,14 +25,14 @@ For more experience leveraging Consul's audit logging functionality, explore our HashiCorp Learn tutorial [Capture Consul Events with Audit Logging](https://learn.hashicorp.com/tutorials/consul/audit-logging). For detailed configuration information on configuring the Consul Enterprise's audit -logging, review the Consul [Audit Log](/docs/agent/config/agent-config-files#audit) +logging, review the Consul [Audit Log](/docs/agent/config/config-files#audit) documentation. ## Example Configuration Audit logging must be enabled on every agent in order to accurately capture all operations performed through the HTTP API. To enable logging, add -the [`audit`](/docs/agent/config/agent-config-files#audit) stanza to the agent's configuration. +the [`audit`](/docs/agent/config/config-files#audit) stanza to the agent's configuration. -> **Note**: Consul only logs operations which are initiated via the HTTP API. The audit log does not record operations that take place over the internal RPC diff --git a/website/content/docs/enterprise/license/overview.mdx b/website/content/docs/enterprise/license/overview.mdx index ae99a5874..377137248 100644 --- a/website/content/docs/enterprise/license/overview.mdx +++ b/website/content/docs/enterprise/license/overview.mdx @@ -36,7 +36,7 @@ When using these binaries no further action is necessary to configure the licens ### Binaries Without Built In Licenses For Consul Enterprise 1.10.0 or greater, binaries that do not include built in licenses a license must be available at the time the agent starts. -For server agents this means that they must either have the [`license_path`](/docs/agent/config/agent-config-files#license_path) +For server agents this means that they must either have the [`license_path`](/docs/agent/config/config-files#license_path) configuration set or have a license configured in the servers environment with the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. Both the configuration item and the `CONSUL_LICENSE_PATH` environment variable point to a file containing the license whereas the `CONSUL_LICENSE` environment @@ -55,9 +55,9 @@ to retrieve the license automatically under specific circumstances. When a client agent starts without a license in its configuration or environment, it will try to retrieve the license from the servers via RPCs. That RPC always requires a valid non-anonymous ACL token to authorize the request but the token doesn't need any particular permissions. As the license is required before the client -actually joins the cluster, where to make those RPC requests to is inferred from the [`start_join`](/docs/agent/config/agent-config-files#start_join) -or [`retry_join`](/docs/agent/config/agent-config-files#retry_join) configurations. If those are both unset or no -[`agent` token](/docs/agent/config/agent-config-files#acl_tokens_agent) is set then the client agent will immediately shut itself down. +actually joins the cluster, where to make those RPC requests to is inferred from the [`start_join`](/docs/agent/config/config-files#start_join) +or [`retry_join`](/docs/agent/config/config-files#retry_join) configurations. 
If those are both unset or no +[`agent` token](/docs/agent/config/config-files#acl_tokens_agent) is set then the client agent will immediately shut itself down. If all preliminary checks pass the client agent will attempt to reach out to any server on its RPC port to request the license. These requests will be retried for up to 5 minutes and if it is unable to retrieve a diff --git a/website/content/docs/enterprise/network-segments.mdx b/website/content/docs/enterprise/network-segments.mdx index d36d121ce..d4a1ffeda 100644 --- a/website/content/docs/enterprise/network-segments.mdx +++ b/website/content/docs/enterprise/network-segments.mdx @@ -15,7 +15,7 @@ description: |- Consul requires full connectivity between all agents (servers and clients) in a -[datacenter](/docs/agent/config/agent-config-cli#_datacenter) within a given +[datacenter](/docs/agent/config/cli-flags#_datacenter) within a given LAN gossip pool. By default, all Consul agents will be a part of one shared Serf LAN gossip pool known as the `` network segment, thus requiring full mesh connectivity within the datacenter. @@ -46,7 +46,7 @@ Consul networking models and their capabilities. **Cluster:** A set of Consul servers forming a Raft quorum along with a collection of Consul clients, all set to the same -[datacenter](/docs/agent/config/agent-config-cli#_datacenter), and joined together to form +[datacenter](/docs/agent/config/cli-flags#_datacenter), and joined together to form what we will call a "local cluster". Consul clients discover the Consul servers in their local cluster through the gossip mechanism and make RPC requests to them. LAN Gossip (OSS) is an open intra-cluster networking model, and Network @@ -72,7 +72,7 @@ group of agents to only connect with the agents in its segment. Server agents are members of all segments. The datacenter includes a `` segment, as well as additional segments defined in the -[`segments`](/docs/agent/config/agent-config-files#segments) server agent configuration option. +[`segments`](/docs/agent/config/config-files#segments) server agent configuration option. Each additional segment is defined by: - a non-empty name @@ -129,19 +129,19 @@ segments = [ -The server [agent configuration](/docs/agent/config/agent-config-files) options relevant to network +The server [agent configuration](/docs/agent/config/config-files) options relevant to network segments are: -- [`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port): The Serf LAN port on this server +- [`ports.serf_lan`](/docs/agent/config/config-files#serf_lan_port): The Serf LAN port on this server for the `` network segment's gossip pool. -- [`segments`](/docs/agent/config/agent-config-files#segments): A list of user-defined network segments +- [`segments`](/docs/agent/config/config-files#segments): A list of user-defined network segments on this server, including their names and Serf LAN ports. ## Client Configuration Each client agent can only be a member of one segment at a time. This will be the `` segment unless otherwise specified in the agent's -[`segment`](/docs/agent/config/agent-config-cli#segment) agent configuration option. +[`segment`](/docs/agent/config/cli-flags#_segment) agent configuration option. ### Join a Client to a Segment ((#join_a_client_to_a_segment)) @@ -154,14 +154,14 @@ configured segment. Clients A and B specify the same segment S. Client B is already joined to the segment S LAN gossip pool. Client A wants to join via Client B. 
In order to do so, Client A -must connect to Client B's configured [Serf LAN port](/docs/agent/config/agent-config-files#serf_lan_port). +must connect to Client B's configured [Serf LAN port](/docs/agent/config/config-files#serf_lan_port).
Client A specifies segment S and wants to join the segment S gossip pool via Server 1. In order to do so, Client A must connect to Server 1's configured [Serf LAN port -for segment S](/docs/agent/config/agent-config-files#segment_port). +for segment S](/docs/agent/config/config-files#segment_port). @@ -171,12 +171,12 @@ of precedence: 1. **Specify an explicit port in the join address**. This can be done at the CLI when starting the agent (e.g., `consul agent -retry-join "client-b-address:8303"`), or in the agent's - configuration using the [retry-join option](/docs/agent/config/agent-config-files#retry_join). This method + configuration using the [retry-join option](/docs/agent/config/config-files#retry_join). This method is not compatible with [cloud auto-join](/docs/install/cloud-auto-join#auto-join-with-network-segments). 2. **Specify an alternate Serf LAN port for the agent**. This can be done at the CLI when starting the agent (e.g., `consul agent -retry-join "client-b-address" -serf-lan-port 8303`), or in - the agent's configuration using the [serf_lan](/docs/agent/config/agent-config-files#serf_lan_port) option. + the agent's configuration using the [serf_lan](/docs/agent/config/config-files#serf_lan_port) option. When a Serf LAN port is not explicitly specified in the join address, the agent will attempt to join the target host at the Serf LAN port specified in CLI or agent configuration. @@ -221,15 +221,15 @@ ports = { -The client [agent configuration](/docs/agent/config/agent-config-files) options relevant to network +The client [agent configuration](/docs/agent/config/config-files) options relevant to network segments are: -- [`segment`](/docs/agent/config/agent-config-files#segment-2): The name of the network segment this +- [`segment`](/docs/agent/config/config-files#segment-2): The name of the network segment this client agent belongs to. -- [`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port): +- [`ports.serf_lan`](/docs/agent/config/config-files#serf_lan_port): Serf LAN port for the above segment on this client. This is not required to match the configured Serf LAN port for other agents on this segment. -- [`retry_join`](/docs/agent/config/agent-config-files#retry_join) or - [`start_join`](/docs/agent/config/agent-config-files#start_join): A list of agent addresses to join +- [`retry_join`](/docs/agent/config/config-files#retry_join) or + [`start_join`](/docs/agent/config/config-files#start_join): A list of agent addresses to join when starting. Ensure the correct Serf LAN port for this segment is used when joining the LAN gossip pool using one of the [available configuration methods](#join_a_client_to_a_segment). diff --git a/website/content/docs/enterprise/read-scale.mdx b/website/content/docs/enterprise/read-scale.mdx index c7b6006b5..3665db34e 100644 --- a/website/content/docs/enterprise/read-scale.mdx +++ b/website/content/docs/enterprise/read-scale.mdx @@ -20,5 +20,5 @@ however, they do not take part in quorum election operations. Expanding your Con reads without impacting write latency. For more details, review the [Consul server configuration](/docs/agent/config) -documentation and the [-read-replica](/docs/agent/config/agent-config-cli#_read_replica) +documentation and the [-read-replica](/docs/agent/config/cli-flags#_read_replica) configuration flag. 
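For readers following the network-segment client options retargeted above (`segment`, `ports.serf_lan`, `retry_join`), a hedged sketch of a client joining a user-defined segment might look like the following; the segment name, Serf LAN port, and join address are placeholders:

```hcl
# Hypothetical client agent fragment joining a user-defined network segment.
# Segment name, port, and join address are example values only.
segment = "alpha"

ports {
  serf_lan = 8303
}

# Join address includes the segment's Serf LAN port explicitly, one of the
# configuration methods described above.
retry_join = ["consul-server-1:8303"]
```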
diff --git a/website/content/docs/install/bootstrapping.mdx b/website/content/docs/install/bootstrapping.mdx index a191be28a..849a3fc3c 100644 --- a/website/content/docs/install/bootstrapping.mdx +++ b/website/content/docs/install/bootstrapping.mdx @@ -30,16 +30,16 @@ as data loss is inevitable in a failure scenario. Please refer to the Manual bootstrapping with `-bootstrap` is not recommended in newer versions of Consul (0.5 and newer) as it is more error-prone. Instead you should use automatic bootstrapping -with [`-bootstrap-expect`](/docs/agent/config/agent-config-cli#_bootstrap_expect). +with [`-bootstrap-expect`](/docs/agent/config/cli-flags#_bootstrap_expect). ## Bootstrapping the Servers -The recommended way to bootstrap the servers is to use the [`-bootstrap-expect`](/docs/agent/config/agent-config-cli#_bootstrap_expect) +The recommended way to bootstrap the servers is to use the [`-bootstrap-expect`](/docs/agent/config/cli-flags#_bootstrap_expect) configuration option. This option informs Consul of the expected number of server nodes and automatically bootstraps when that many servers are available. To prevent inconsistencies and split-brain (clusters where multiple servers consider themselves leader) situations, you should either specify the same value for -[`-bootstrap-expect`](/docs/agent/config/agent-config-cli#_bootstrap_expect) +[`-bootstrap-expect`](/docs/agent/config/cli-flags#_bootstrap_expect) or specify no value at all on all the servers. Only servers that specify a value will attempt to bootstrap the cluster. Suppose we are starting a three server cluster. We can start `Node A`, `Node B`, and `Node C` with each @@ -61,11 +61,11 @@ You can trigger leader election by joining the servers together, to create a clu There are multiple options for joining the servers. Choose the method which best suits your environment and specific use case. - Specify a list of servers with - [-join](/docs/agent/config/agent-config-cli#_join) and - [start_join](/docs/agent/config/agent-config-files#start_join) + [-join](/docs/agent/config/cli-flags#_join) and + [start_join](/docs/agent/config/config-files#start_join) options. -- Specify a list of servers with [-retry-join](/docs/agent/config/agent-config-cli#_retry_join) option. -- Use automatic joining by tag for supported cloud environments with the [-retry-join](/docs/agent/config/agent-config-cli#_retry_join) option. +- Specify a list of servers with [-retry-join](/docs/agent/config/cli-flags#_retry_join) option. +- Use automatic joining by tag for supported cloud environments with the [-retry-join](/docs/agent/config/cli-flags#_retry_join) option. All three methods can be set in the agent configuration file or the command line flag. diff --git a/website/content/docs/install/cloud-auto-join.mdx b/website/content/docs/install/cloud-auto-join.mdx index 78dc2b310..3d6072f9b 100644 --- a/website/content/docs/install/cloud-auto-join.mdx +++ b/website/content/docs/install/cloud-auto-join.mdx @@ -69,7 +69,7 @@ to use port `8303` as its Serf LAN port prior to attempting to join the cluster. The following example configuration overrides the default Serf LAN port using the -[`ports.serf_lan`](/docs/agent/config/agent-config-files#serf_lan_port) configuration option. +[`ports.serf_lan`](/docs/agent/config/config-files#serf_lan_port) configuration option. @@ -85,7 +85,7 @@ ports { The following example overrides the default Serf LAN port using the -[`-serf-lan-port`](/docs/agent/config/agent-config-cli#_serf_lan_port) command line flag. 
+[`-serf-lan-port`](/docs/agent/config/cli-flags#_serf_lan_port) command line flag. ```shell $ consul agent -serf-lan-port=8303 -retry-join "provider=..." diff --git a/website/content/docs/install/manual-bootstrap.mdx b/website/content/docs/install/manual-bootstrap.mdx index 46f8cab3e..bb72b9aec 100644 --- a/website/content/docs/install/manual-bootstrap.mdx +++ b/website/content/docs/install/manual-bootstrap.mdx @@ -23,7 +23,7 @@ storing the cluster state. The client nodes are mostly stateless and rely on the server nodes, so they can be started easily. Manual bootstrapping requires that the first server that is deployed in a new -datacenter provide the [`-bootstrap` configuration option](/docs/agent/config/agent-config-cli#_bootstrap). +datacenter provide the [`-bootstrap` configuration option](/docs/agent/config/cli-flags#_bootstrap). This option allows the server to assert leadership of the cluster without agreement from any other server. This is necessary because at this point, there are no other servers running in diff --git a/website/content/docs/install/performance.mdx b/website/content/docs/install/performance.mdx index 3be2d3801..e5824963c 100644 --- a/website/content/docs/install/performance.mdx +++ b/website/content/docs/install/performance.mdx @@ -18,7 +18,7 @@ reads work from a fully in-memory data store that is optimized for concurrent ac ## Minimum Server Requirements ((#minimum)) -In Consul 0.7, the default server [performance parameters](/docs/agent/config/agent-config-files#performance) +In Consul 0.7, the default server [performance parameters](/docs/agent/config/config-files#performance) were tuned to allow Consul to run reliably (but relatively slowly) on a server cluster of three [AWS t2.micro](https://aws.amazon.com/ec2/instance-types/) instances. These thresholds were determined empirically using a leader instance that was under sufficient read, write, @@ -43,7 +43,7 @@ The default performance configuration is equivalent to this: ## Production Server Requirements ((#production)) When running Consul 0.7 and later in production, it is recommended to configure the server -[performance parameters](/docs/agent/config/agent-config-files#performance) back to Consul's original +[performance parameters](/docs/agent/config/config-files#performance) back to Consul's original high-performance settings. This will let Consul servers detect a failed leader and complete leader elections much more quickly than the default configuration which extends key Raft timeouts by a factor of 5, so it can be quite slow during these events. @@ -103,14 +103,14 @@ Here are some general recommendations: issues between the servers or insufficient CPU resources. Users in cloud environments often bump their servers up to the next instance class with improved networking and CPU until leader elections stabilize, and in Consul 0.7 or later the [performance - parameters](/docs/agent/config/agent-config-files#performance) configuration now gives you tools + parameters](/docs/agent/config/config-files#performance) configuration now gives you tools to trade off performance instead of upsizing servers. You can use the [`consul.raft.leader.lastContact` telemetry](/docs/agent/telemetry#leadership-changes) to observe how the Raft timing is performing and guide the decision to de-tune Raft performance or add more powerful servers. 
- For DNS-heavy workloads, configuring all Consul agents in a cluster with the - [`allow_stale`](/docs/agent/config/agent-config-files#allow_stale) configuration option will allow reads to + [`allow_stale`](/docs/agent/config/config-files#allow_stale) configuration option will allow reads to scale across all Consul servers, not just the leader. Consul 0.7 and later enables stale reads for DNS by default. See [Stale Reads](https://learn.hashicorp.com/tutorials/consul/dns-caching#stale-reads) in the [DNS Caching](https://learn.hashicorp.com/tutorials/consul/dns-caching) guide for more details. It's also good to set @@ -121,7 +121,7 @@ Here are some general recommendations: [stale consistency mode](/api-docs/features/consistency#stale) available to allow reads to scale across all the servers and not just be forwarded to the leader. -- In Consul 0.9.3 and later, a new [`limits`](/docs/agent/config/agent-config-files#limits) configuration is +- In Consul 0.9.3 and later, a new [`limits`](/docs/agent/config/config-files#limits) configuration is available on Consul clients to limit the RPC request rate they are allowed to make against the Consul servers. After hitting the limit, requests will start to return rate limit errors until time has passed and more requests are allowed. Configuring this across the cluster can help with @@ -156,11 +156,11 @@ For **write-heavy** workloads, the total RAM available for overhead must approxi RAM NEEDED = number of keys * average key size * 2-3x ``` -Since writes must be synced to disk (persistent storage) on a quorum of servers before they are committed, deploying a disk with high write throughput (or an SSD) will enhance performance on the write side. ([Documentation](/docs/agent/config/agent-config-cli#_data_dir)) +Since writes must be synced to disk (persistent storage) on a quorum of servers before they are committed, deploying a disk with high write throughput (or an SSD) will enhance performance on the write side. ([Documentation](/docs/agent/config/cli-flags#_data_dir)) For a **read-heavy** workload, configure all Consul server agents with the `allow_stale` DNS option, or query the API with the `stale` [consistency mode](/api-docs/features/consistency). By default, all queries made to the server are RPC forwarded to and serviced by the leader. By enabling stale reads, any server will respond to any query, thereby reducing overhead on the leader. Typically, the stale response is `100ms` or less from consistent mode but it drastically improves performance and reduces latency under high load. -If the leader server is out of memory or the disk is full, the server eventually stops responding, loses its election and cannot move past its last commit time. However, by configuring `max_stale` and setting it to a large value, Consul will continue to respond to queries during such outage scenarios. ([max_stale documentation](/docs/agent/config/agent-config-files#max_stale)). +If the leader server is out of memory or the disk is full, the server eventually stops responding, loses its election and cannot move past its last commit time. However, by configuring `max_stale` and setting it to a large value, Consul will continue to respond to queries during such outage scenarios. ([max_stale documentation](/docs/agent/config/config-files#max_stale)). It should be noted that `stale` is not appropriate for coordination where strong consistency is important (i.e. locking or application leader election). 
For critical cases, the optional `consistent` API query mode is required for true linearizability; the trade off is that this turns a read into a full quorum write so requires more resources and takes longer. @@ -168,7 +168,7 @@ It should be noted that `stale` is not appropriate for coordination where strong Consul’s agents use network sockets for communicating with the other nodes (gossip) and with the server agent. In addition, file descriptors are also opened for watch handlers, health checks, and log files. For a **write heavy** cluster, the `ulimit` size must be increased from the default value (`1024`) to prevent the leader from running out of file descriptors. -To prevent any CPU spikes from a misconfigured client, RPC requests to the server should be [rate limited](/docs/agent/config/agent-config-files#limits) +To prevent any CPU spikes from a misconfigured client, RPC requests to the server should be [rate limited](/docs/agent/config/config-files#limits) ~> **NOTE** Rate limiting is configured on the client agent only. @@ -191,8 +191,8 @@ Smearing requests over 30s is sufficient to bring RPC load to a reasonable level in all but the very largest clusters, but the extra CPU load from cryptographic operations could impact the server's normal work. To limit that, Consul since 1.4.1 exposes two ways to limit the impact Certificate signing has on the leader -[`csr_max_per_second`](/docs/agent/config/agent-config-files#ca_csr_max_per_second) and -[`csr_max_concurrent`](/docs/agent/config/agent-config-files#ca_csr_max_concurrent). +[`csr_max_per_second`](/docs/agent/config/config-files#ca_csr_max_per_second) and +[`csr_max_concurrent`](/docs/agent/config/config-files#ca_csr_max_concurrent). By default we set a limit of 50 per second which is reasonable on modest hardware but may be too low and impact rotation times if more than 1500 service diff --git a/website/content/docs/install/ports.mdx b/website/content/docs/install/ports.mdx index 6cbc9eb82..0923935ba 100644 --- a/website/content/docs/install/ports.mdx +++ b/website/content/docs/install/ports.mdx @@ -55,4 +55,4 @@ the Serf WAN port (TCP/UDP) to be listening on both WAN and LAN interfaces. See **Server RPC** This is used by servers to handle incoming requests from other agents. -Note, the default ports can be changed in the [agent configuration](/docs/agent/config/agent-config-files#ports). +Note, the default ports can be changed in the [agent configuration](/docs/agent/config/config-files#ports). diff --git a/website/content/docs/k8s/connect/connect-ca-provider.mdx b/website/content/docs/k8s/connect/connect-ca-provider.mdx index 5025fcd7b..0fe9d664a 100644 --- a/website/content/docs/k8s/connect/connect-ca-provider.mdx +++ b/website/content/docs/k8s/connect/connect-ca-provider.mdx @@ -200,5 +200,5 @@ To update any settings under these keys, you must use Consul's [Update CA Config To renew the Vault token, use the [`vault token renew`](https://www.vaultproject.io/docs/commands/token/renew) CLI command or API. 
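As rough orientation for the `ca_provider` and `ca_config` settings these links now point at, a sketch of a server configuration using the Vault provider follows; the Vault address, token, and PKI mount paths are assumed values, not taken from this patch:

```hcl
# Hypothetical server configuration selecting Vault as the Connect CA provider.
# Address, token, and PKI paths are placeholders.
connect {
  enabled     = true
  ca_provider = "vault"
  ca_config {
    address               = "https://vault.example.com:8200"
    token                 = "<vault-token-with-pki-permissions>"
    root_pki_path         = "connect_root"
    intermediate_pki_path = "connect_intermediate"
  }
}
```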
-[`ca_config`]: /docs/agent/config/agent-config-files#connect_ca_config -[`ca_provider`]: /docs/agent/config/agent-config-files#connect_ca_provider +[`ca_config`]: /docs/agent/config/config-files#connect_ca_config +[`ca_provider`]: /docs/agent/config/config-files#connect_ca_provider diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index f2ca6d800..f20934631 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -58,7 +58,7 @@ Use these links to navigate to a particular top-level stanza. the prefix will be `-consul`. - `domain` ((#v-global-domain)) (`string: consul`) - The domain Consul will answer DNS queries for - (see `-domain` (https://consul.io/docs/agent/config/agent-config-cli#_domain)) and the domain services synced from + (see `-domain` (https://consul.io/docs/agent/config/cli-flags#_domain)) and the domain services synced from Consul into Kubernetes will have, e.g. `service-name.service.consul`. - `adminPartitions` ((#v-global-adminpartitions)) - Enabling `adminPartitions` allows creation of Admin Partitions in Kubernetes clusters. @@ -261,7 +261,7 @@ Use these links to navigate to a particular top-level stanza. ``` - `gossipEncryption` ((#v-global-gossipencryption)) - Configures Consul's gossip encryption key. - (see `-encrypt` (https://consul.io/docs/agent/config/agent-config-cli#_encrypt)). + (see `-encrypt` (https://consul.io/docs/agent/config/cli-flags#_encrypt)). By default, gossip encryption is not enabled. The gossip encryption key may be set automatically or manually. The recommended method is to automatically generate the key. To automatically generate and set a gossip encryption key, set autoGenerate to true. @@ -292,7 +292,7 @@ Use these links to navigate to a particular top-level stanza. - `recursors` ((#v-global-recursors)) (`array: []`) - A list of addresses of upstream DNS servers that are used to recursively resolve DNS queries. These values are given as `-recursor` flags to Consul servers and clients. - See https://www.consul.io/docs/agent/config/agent-config-cli#_recursor for more details. + See https://www.consul.io/docs/agent/config/cli-flags#_recursor for more details. If this is an empty array (the default), then Consul DNS will only resolve queries for the Consul top level domain (by default `.consul`). - `tls` ((#v-global-tls)) - Enables TLS (https://learn.hashicorp.com/tutorials/consul/tls-encryption-secure) @@ -864,7 +864,7 @@ Use these links to navigate to a particular top-level stanza. - `image` ((#v-client-image)) (`string: null`) - The name of the Docker image (including any tag) for the containers running Consul client agents. - - `join` ((#v-client-join)) (`array: null`) - A list of valid `-retry-join` values (https://consul.io/docs/agent/config/agent-config-files#retry-join). + - `join` ((#v-client-join)) (`array: null`) - A list of valid `-retry-join` values (https://consul.io/docs/agent/config/config-files#retry-join). If this is `null` (default), then the clients will attempt to automatically join the server cluster running within Kubernetes. This means that with `server.enabled` set to true, clients will automatically @@ -885,7 +885,7 @@ Use these links to navigate to a particular top-level stanza. required for Connect. 
- `nodeMeta` ((#v-client-nodemeta)) - nodeMeta specifies an arbitrary metadata key/value pair to associate with the node - (see https://www.consul.io/docs/agent/config/agent-config-cli#_node_meta) + (see https://www.consul.io/docs/agent/config/cli-flags#_node_meta) - `pod-name` ((#v-client-nodemeta-pod-name)) (`string: ${HOSTNAME}`) @@ -1238,7 +1238,7 @@ Use these links to navigate to a particular top-level stanza. will inherit from `global.metrics.enabled` value. - `provider` ((#v-ui-metrics-provider)) (`string: prometheus`) - Provider for metrics. See - https://www.consul.io/docs/agent/config/agent-config-files#ui_config_metrics_provider + https://www.consul.io/docs/agent/config/config-files#ui_config_metrics_provider This value is only used if `ui.enabled` is set to true. - `baseURL` ((#v-ui-metrics-baseurl)) (`string: http://prometheus-server`) - baseURL is the URL of the prometheus server, usually the service URL. diff --git a/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx b/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx index 6cc85bb0f..82a7c7ea0 100644 --- a/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx +++ b/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx @@ -22,7 +22,7 @@ you want the clients to be exposed on the Kubernetes internal node IPs (`true`) their pod IPs (`false`). Finally, `client.join` is set to an array of valid -[`-retry-join` values](/docs/agent/config/agent-config-cli#retry-join). In the +[`-retry-join` values](/docs/agent/config/cli-flags#retry-join). In the example above, a fake [cloud auto-join](/docs/agent/cloud-auto-join) value is specified. This should be set to resolve to the proper addresses of your existing Consul cluster. diff --git a/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx b/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx index 1c37161e0..aa68ee50e 100644 --- a/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx +++ b/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx @@ -271,8 +271,8 @@ The automatically generated federation secret contains: - **Consul server config** - This is a JSON snippet that must be used as part of the server config for secondary datacenters. It sets: - - [`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter) to the name of the primary datacenter. - - [`primary_gateways`](/docs/agent/config/agent-config-files#primary_gateways) to an array of IPs or hostnames + - [`primary_datacenter`](/docs/agent/config/config-files#primary_datacenter) to the name of the primary datacenter. + - [`primary_gateways`](/docs/agent/config/config-files#primary_gateways) to an array of IPs or hostnames for the mesh gateways in the primary datacenter. These are the addresses that Consul servers in secondary clusters will use to communicate with the primary datacenter. diff --git a/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx b/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx index f792ae115..e031de71b 100644 --- a/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx +++ b/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx @@ -95,7 +95,7 @@ The following sections detail how to export this data. 
 ==> Saved dc1-client-consul-0-key.pem
 ```
 
-Or use the [auto_encrypt](/docs/agent/config/agent-config-files#auto_encrypt) feature.
+Or use the [auto_encrypt](/docs/agent/config/config-files#auto_encrypt) feature.
 
 ### Mesh Gateway Addresses
 
diff --git a/website/content/docs/nia/configuration.mdx b/website/content/docs/nia/configuration.mdx
index 52da3e570..54e42ac03 100644
--- a/website/content/docs/nia/configuration.mdx
+++ b/website/content/docs/nia/configuration.mdx
@@ -61,7 +61,7 @@ tls {
 
 The `consul` block is used to configure CTS connection with a Consul agent to perform queries to the Consul Catalog and Consul KV pertaining to task execution.
 
--> **Note:** Use HTTP/2 to improve Consul-Terraform-Sync performance when communicating with the local Consul process. [TLS/HTTPS](/docs/agent/config/agent-config-files) must be configured for the local Consul with the [cert_file](/docs/agent/config/agent-config-filess#cert_file) and [key_file](/docs/agent/config/agent-config-files#key_file) parameters set. For the Consul-Terraform-Sync configuration, set `tls.enabled = true` and set the `address` parameter to the HTTPS URL, e.g., `address = example.consul.com:8501`. If using self-signed certificates for Consul, you will also need to set `tls.verify = false` or add the certificate to `ca_cert` or `ca_path`.
+-> **Note:** Use HTTP/2 to improve Consul-Terraform-Sync performance when communicating with the local Consul process. [TLS/HTTPS](/docs/agent/config/config-files) must be configured for the local Consul with the [cert_file](/docs/agent/config/config-files#cert_file) and [key_file](/docs/agent/config/config-files#key_file) parameters set. For the Consul-Terraform-Sync configuration, set `tls.enabled = true` and set the `address` parameter to the HTTPS URL, e.g., `address = example.consul.com:8501`. If using self-signed certificates for Consul, you will also need to set `tls.verify = false` or add the certificate to `ca_cert` or `ca_path`.
 
 To read more on suggestions for configuring the Consul agent, see
 [run an agent](/docs/nia/installation/requirements#run-an-agent).
@@ -80,7 +80,7 @@ consul {
 - `enabled` - (bool)
 - `username` - (string)
 - `password` - (string)
-- `tls` - Configure TLS to use a secure client connection with Consul. Using HTTP/2 can solve issues related to hitting Consul's maximum connection limits, as well as improve efficiency when processing many blocking queries. This option is required for Consul-Terraform-Sync when connecting to a [Consul agent with TLS verification enabled for HTTPS connections](/docs/agent/config/agent-config-files#verify_incoming).
+- `tls` - Configure TLS to use a secure client connection with Consul. Using HTTP/2 can solve issues related to hitting Consul's maximum connection limits, as well as improve efficiency when processing many blocking queries. This option is required for Consul-Terraform-Sync when connecting to a [Consul agent with TLS verification enabled for HTTPS connections](/docs/agent/config/config-files#verify_incoming).
   - `enabled` - (bool) Enable TLS. Providing a value for any of the TLS options will enable this parameter implicitly.
   - `verify` - (bool: true) Enables TLS peer verification. The default is enabled, which will check the global certificate authority (CA) chain to make sure the certificates returned by Consul are valid.
     - If Consul is using a self-signed certificate that you have not added to the global CA chain, you can set this certificate with `ca_cert` or `ca_path`.
Alternatively, you can disable SSL verification by setting `verify` to false. However, disabling verification is a potential security vulnerability. @@ -98,7 +98,7 @@ consul { - `max_idle_conns` - (int: 0) The maximum number of total idle connections across all hosts. The limit is disabled by default. - `max_idle_conns_per_host` - (int: 100) The maximum number of idle connections per remote host. The majority of connections are established with one host, the Consul agent. - To achieve the shortest latency between a Consul service update to a task execution, configure `max_idle_conns_per_host` equal to or greater than the number of services in automation across all tasks. - - This value should be lower than the configured [`http_max_conns_per_client`](/docs/agent/config/agent-config-files#http_max_conns_per_client) for the Consul agent. If `max_idle_conns_per_host` and the number of services in automation is greater than the Consul agent limit, Consul-Terraform-Sync may error due to connection limits (status code 429). You may increase the agent limit with caution. _Note: requests to the Consul agent made by Terraform subprocesses or any other process on the same host as Consul-Terraform-Sync will contribute to the Consul agent connection limit._ + - This value should be lower than the configured [`http_max_conns_per_client`](/docs/agent/config/config-files#http_max_conns_per_client) for the Consul agent. If `max_idle_conns_per_host` and the number of services in automation is greater than the Consul agent limit, Consul-Terraform-Sync may error due to connection limits (status code 429). You may increase the agent limit with caution. _Note: requests to the Consul agent made by Terraform subprocesses or any other process on the same host as Consul-Terraform-Sync will contribute to the Consul agent connection limit._ - `tls_handshake_timeout` - (string: "10s") amount of time to wait to complete the TLS handshake. ## Service diff --git a/website/content/docs/nia/installation/requirements.mdx b/website/content/docs/nia/installation/requirements.mdx index 27f482699..17f13ff5e 100644 --- a/website/content/docs/nia/installation/requirements.mdx +++ b/website/content/docs/nia/installation/requirements.mdx @@ -35,7 +35,7 @@ The Consul agent must be running in order to dynamically update network devices. When running a Consul agent with CTS in production, we suggest to keep a few considerations in mind. CTS uses [blocking queries](/api-docs/features/blocking) to monitor task dependencies, like changes to registered services. This results in multiple long running TCP connections between CTS and the agent to poll changes for each dependency. Monitoring a high number of services may quickly hit the default Consul agent connection limits. -There are 2 ways to fix this issue. The first and recommended fix is to use HTTP/2 (requires HTTPS) to communicate between Consul-Terraform-Sync and the Consul agent. When using HTTP/2 only a single connection is made and reused for all communications. See the [Consul Configuration section](/docs/nia/configuration#consul) for more. The other option is to configure [`limits.http_max_conns_per_client`](/docs/agent/config/agent-config-files#http_max_conns_per_client) for the agent to a reasonable value proportional to the number of services monitored by Consul-Terraform-Sync. +There are 2 ways to fix this issue. The first and recommended fix is to use HTTP/2 (requires HTTPS) to communicate between Consul-Terraform-Sync and the Consul agent. 
When using HTTP/2 only a single connection is made and reused for all communications. See the [Consul Configuration section](/docs/nia/configuration#consul) for more. The other option is to configure [`limits.http_max_conns_per_client`](/docs/agent/config/config-files#http_max_conns_per_client) for the agent to a reasonable value proportional to the number of services monitored by Consul-Terraform-Sync. ### Register Services diff --git a/website/content/docs/releases/release-notes/v1_9_0.mdx b/website/content/docs/releases/release-notes/v1_9_0.mdx index 1c7610aca..6d9c4b9aa 100644 --- a/website/content/docs/releases/release-notes/v1_9_0.mdx +++ b/website/content/docs/releases/release-notes/v1_9_0.mdx @@ -21,7 +21,7 @@ page_title: 1.9.0 - **Active Health Checks for Consul on Kubernetes:** Consul service mesh now integrates with Kubernetes Readiness probes. This provides the ability to natively detect health status from Kubernetes via Readiness probe, and is then used for directing service mesh traffic. - **Streaming:** This feature introduces a major architectural enhancement in how update notifications for blocking queries are delivered within the cluster. Streaming results in very significant reduction of CPU and network bandwidth usage on Consul servers in large-scale deployments. Streaming is particularly helpful in scaling blocking queries in Consul clusters that have rapid changes in service state. - - Streaming is now available for the service health HTTP endpoint, and can be enabled through the [`use_streaming_backend`](/docs/agent/config/agent-config-files#use_streaming_backend) client configuration option, and [`rpc.enable_streaming`](/docs/agent/config/agent-config-files#rpc_enable_streaming) option on the servers. We will continue to enable streaming in more endpoints in subsequent releases. + - Streaming is now available for the service health HTTP endpoint, and can be enabled through the [`use_streaming_backend`](/docs/agent/config/config-files#use_streaming_backend) client configuration option, and [`rpc.enable_streaming`](/docs/agent/config/config-files#rpc_enable_streaming) option on the servers. We will continue to enable streaming in more endpoints in subsequent releases. ## What's Changed diff --git a/website/content/docs/security/acl/acl-legacy.mdx b/website/content/docs/security/acl/acl-legacy.mdx index 17fcbae22..542d2bbef 100644 --- a/website/content/docs/security/acl/acl-legacy.mdx +++ b/website/content/docs/security/acl/acl-legacy.mdx @@ -89,7 +89,7 @@ and [Policies](/api-docs/acl/policies). ~> **Warning**: In this document we use the deprecated configuration parameter `acl_datacenter`. In Consul 1.4 and newer the -parameter has been updated to [`primary_datacenter`](/docs/agent/config/agent-config-files#primary_datacenter). +parameter has been updated to [`primary_datacenter`](/docs/agent/config/config-files#primary_datacenter). Consul provides an optional Access Control List (ACL) system which can be used to control access to data and APIs. The ACL is @@ -129,7 +129,7 @@ token are automatically applied. The anonymous token is managed using the Tokens are bound to a set of rules that control which Consul resources the token has access to. Policies can be defined in either an allowlist or denylist mode depending on the configuration of -[`acl_default_policy`](/docs/agent/config/agent-config-files#acl_default_policy). If the default +[`acl_default_policy`](/docs/agent/config/config-files#acl_default_policy). 
If the default policy is to "deny" all actions, then token rules can be set to allowlist specific actions. In the inverse, the "allow" all default behavior is a denylist where rules are used to prohibit actions. By default, Consul will allow all actions. @@ -169,7 +169,7 @@ Constructing rules from these policies is covered in detail in the #### ACL Datacenter All nodes (clients and servers) must be configured with a -[`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) which enables ACL +[`acl_datacenter`](/docs/agent/config/config-files#acl_datacenter) which enables ACL enforcement but also specifies the authoritative datacenter. Consul relies on [RPC forwarding](/docs/architecture) to support multi-datacenter configurations. However, because requests can be made across datacenter boundaries, @@ -179,14 +179,14 @@ is considered authoritative and stores the canonical set of tokens. When a request is made to an agent in a non-authoritative datacenter, it must be resolved into the appropriate policy. This is done by reading the token from the authoritative server and caching the result for a configurable -[`acl_ttl`](/docs/agent/config/agent-config-files#acl_ttl). The implication of caching is that +[`acl_ttl`](/docs/agent/config/config-files#acl_ttl). The implication of caching is that the cache TTL is an upper bound on the staleness of policy that is enforced. It is possible to set a zero TTL, but this has adverse performance impacts, as every request requires refreshing the policy via an RPC call. During an outage of the ACL datacenter, or loss of connectivity, the cache will be used as long as the TTL is valid, or the cache may be extended if the -[`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) is set accordingly. +[`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy) is set accordingly. This configuration also allows the ACL system to fail open or closed. [ACL replication](#replication) is also available to allow for the full set of ACL tokens to be replicated for use during an outage. @@ -198,10 +198,10 @@ as to whether they are set on servers, clients, or both. 
| Configuration Option | Servers | Clients | Purpose | | --------------------------------------------------------------------- | ---------- | ---------- | ----------------------------------------------------------------------------------------- | -| [`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) | `REQUIRED` | `REQUIRED` | Master control that enables ACLs by defining the authoritative Consul datacenter for ACLs | -| [`acl_default_policy`](/docs/agent/config/agent-config-files#acl_default_policy_legacy) | `OPTIONAL` | `N/A` | Determines allowlist or denylist mode | -| [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy_legacy) | `OPTIONAL` | `OPTIONAL` | Determines what to do when the ACL datacenter is offline | -| [`acl_ttl`](/docs/agent/config/agent-config-files#acl_ttl_legacy) | `OPTIONAL` | `OPTIONAL` | Determines time-to-live for cached ACLs | +| [`acl_datacenter`](/docs/agent/config/config-files#acl_datacenter) | `REQUIRED` | `REQUIRED` | Master control that enables ACLs by defining the authoritative Consul datacenter for ACLs | +| [`acl_default_policy`](/docs/agent/config/config-files#acl_default_policy_legacy) | `OPTIONAL` | `N/A` | Determines allowlist or denylist mode | +| [`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy_legacy) | `OPTIONAL` | `OPTIONAL` | Determines what to do when the ACL datacenter is offline | +| [`acl_ttl`](/docs/agent/config/config-files#acl_ttl_legacy) | `OPTIONAL` | `OPTIONAL` | Determines time-to-live for cached ACLs | There are some additional configuration items related to [ACL replication](#replication) and [Version 8 ACL support](#version_8_acls). These are discussed in those respective sections @@ -210,19 +210,19 @@ below. A number of special tokens can also be configured which allow for bootstrapping the ACL system, or accessing Consul in special situations: -| Special Token | Servers | Clients | Purpose | -| ----------------------------------------------------------------------------------------------- | ---------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api-docs/agent) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | -| [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | -| [`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token_legacy) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see the [Bootstrapping ACLs](#bootstrapping-acls) section for more details | -| [`acl_token`](/docs/agent/config/agent-config-files#acl_token_legacy) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery 
on agents | +| Special Token | Servers | Clients | Purpose | +| ------------------------------------------------------------------------------------------| ---------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [`acl_agent_master_token`](/docs/agent/config/config-files#acl_agent_master_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api-docs/agent) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | +| [`acl_agent_token`](/docs/agent/config/config-files#acl_agent_token_legacy) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | +| [`acl_master_token`](/docs/agent/config/config-files#acl_master_token_legacy) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see the [Bootstrapping ACLs](#bootstrapping-acls) section for more details | +| [`acl_token`](/docs/agent/config/config-files#acl_token_legacy) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery on agents | In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the [/v1/agent/token API](/api-docs/agent#update-acl-tokens). #### ACL Agent Master Token -Since the [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token_legacy) is designed to be used when the Consul servers are not available, its policy is managed locally on the agent and does not need to have a token defined on the Consul servers via the ACL API. Once set, it implicitly has the following policy associated with it (the `node` policy was added in Consul 0.9.0): +Since the [`acl_agent_master_token`](/docs/agent/config/config-files#acl_agent_master_token_legacy) is designed to be used when the Consul servers are not available, its policy is managed locally on the agent and does not need to have a token defined on the Consul servers via the ACL API. Once set, it implicitly has the following policy associated with it (the `node` policy was added in Consul 0.9.0): ```hcl agent "" { @@ -238,7 +238,7 @@ In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via #### ACL Agent Token -The [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) is a special token that is used for an agent's internal operations. It isn't used directly for any user-initiated operations like the [`acl_token`](/docs/agent/config/agent-config-files#acl_token), though if the `acl_agent_token` isn't configured the `acl_token` will be used. The ACL agent token is used for the following operations by the agent: +The [`acl_agent_token`](/docs/agent/config/config-files#acl_agent_token) is a special token that is used for an agent's internal operations. It isn't used directly for any user-initiated operations like the [`acl_token`](/docs/agent/config/config-files#acl_token), though if the `acl_agent_token` isn't configured the `acl_token` will be used. 
The ACL agent token is used for the following operations by the agent: 1. Updating the agent's node entry using the [Catalog API](/api-docs/catalog), including updating its node metadata, tagged addresses, and network coordinates 2. Performing [anti-entropy](/docs/architecture/anti-entropy) syncing, in particular reading the node metadata and services registered with the catalog @@ -258,7 +258,7 @@ key "_rexec" { } ``` -The `service` policy needs `read` access for any services that can be registered on the agent. If [remote exec is disabled](/docs/agent/config/agent-config-files#disable_remote_exec), the default, then the `key` policy can be omitted. +The `service` policy needs `read` access for any services that can be registered on the agent. If [remote exec is disabled](/docs/agent/config/config-files#disable_remote_exec), the default, then the `key` policy can be omitted. In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the [/v1/agent/token API](/api-docs/agent#update-acl-tokens). @@ -294,12 +294,12 @@ The servers will need to be restarted to load the new configuration. Please take to start the servers one at a time, and ensure each server has joined and is operating correctly before starting another. -The [`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) will be created +The [`acl_master_token`](/docs/agent/config/config-files#acl_master_token) will be created as a "management" type token automatically. The -[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) is only installed when +[`acl_master_token`](/docs/agent/config/config-files#acl_master_token) is only installed when a server acquires cluster leadership. If you would like to install or change the -[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token), set the new value for -[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) in the configuration +[`acl_master_token`](/docs/agent/config/config-files#acl_master_token), set the new value for +[`acl_master_token`](/docs/agent/config/config-files#acl_master_token) in the configuration for all servers. Once this is done, restart the current leader to force a leader election. In Consul 0.9.1 and later, you can use the [/v1/acl/bootstrap API](/api-docs/acl#bootstrap-acls) @@ -332,7 +332,7 @@ servers related to permission denied errors: ``` These errors are because the agent doesn't yet have a properly configured -[`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) that it can use for its +[`acl_agent_token`](/docs/agent/config/config-files#acl_agent_token) that it can use for its own internal operations like updating its node information in the catalog and performing [anti-entropy](/docs/architecture/anti-entropy) syncing. We can create a token using the ACL API, and the ACL master token we set in the previous step: @@ -550,9 +550,9 @@ The next section shows an alternative to the anonymous token. #### Set Agent-Specific Default Tokens (Optional) -An alternative to the anonymous token is the [`acl_token`](/docs/agent/config/agent-config-files#acl_token) +An alternative to the anonymous token is the [`acl_token`](/docs/agent/config/config-files#acl_token) configuration item. 
When a request is made to a particular Consul agent and no token is -supplied, the [`acl_token`](/docs/agent/config/agent-config-files#acl_token) will be used for the token, +supplied, the [`acl_token`](/docs/agent/config/config-files#acl_token) will be used for the token, instead of being left empty which would normally invoke the anonymous token. In Consul 0.9.1 and later, the agent ACL tokens can be introduced or updated via the @@ -563,7 +563,7 @@ agent, if desired. For example, this allows more fine grained control of what DN given agent can service, or can give the agent read access to some key-value store prefixes by default. -If using [`acl_token`](/docs/agent/config/agent-config-files#acl_token), then it's likely the anonymous +If using [`acl_token`](/docs/agent/config/config-files#acl_token), then it's likely the anonymous token will have a more restrictive policy than shown in the examples here. #### Create Tokens for UI Use (Optional) @@ -727,7 +727,7 @@ starts with "bar". Since [Agent API](/api-docs/agent) utility operations may be required before an agent is joined to a cluster, or during an outage of the Consul servers or ACL datacenter, a special token may be -configured with [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token) to allow +configured with [`acl_agent_master_token`](/docs/agent/config/config-files#acl_agent_master_token) to allow write access to these operations even if no ACL resolution capability is available. #### Event Rules @@ -753,7 +753,7 @@ starts with "deploy". The [`consul exec`](/commands/exec) command uses events with the "\_rexec" prefix during operation, so to enable this feature in a Consul environment with ACLs enabled, you will need to give agents a token with access to this event prefix, in addition to configuring -[`disable_remote_exec`](/docs/agent/config/agent-config-files#disable_remote_exec) to `false`. +[`disable_remote_exec`](/docs/agent/config/config-files#disable_remote_exec) to `false`. #### Key/Value Rules @@ -861,13 +861,13 @@ the example above, the rules allow read-only access to any node name with the em read-write access to any node name that starts with "app", and deny all access to any node name that starts with "admin". -Agents need to be configured with an [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) +Agents need to be configured with an [`acl_agent_token`](/docs/agent/config/config-files#acl_agent_token) with at least "write" privileges to their own node name in order to register their information with the catalog, such as node metadata and tagged addresses. If this is configured incorrectly, the agent will print an error to the console when it tries to sync its state with the catalog. Consul's DNS interface is also affected by restrictions on node rules. If the -[`acl_token`](/docs/agent/config/agent-config-files#acl_token) used by the agent does not have "read" access to a +[`acl_token`](/docs/agent/config/config-files#acl_token) used by the agent does not have "read" access to a given node, then the DNS interface will return no records when queried for it. When reading from the catalog or retrieving information from the health endpoints, node rules are @@ -880,7 +880,7 @@ periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which may requir ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens to use for registration events: -1. 
Using the [acl_token](/docs/agent/config/agent-config-files#acl_token) configuration +1. Using the [acl_token](/docs/agent/config/config-files#acl_token) configuration directive. This allows a single token to be configured globally and used during all check registration operations. 2. Providing an ACL token with service and check definitions at @@ -891,7 +891,7 @@ to use for registration events: [HTTP API](/api) for operations that require them. In addition to ACLs, in Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/config/agent-config-files#enable_script_checks) set to `true` in order to enable +[`enable_script_checks`](/docs/agent/config/config-files#enable_script_checks) set to `true` in order to enable script checks. #### Operator Rules @@ -1025,7 +1025,7 @@ read-write access to any service name that starts with "app", and deny all acces starts with "admin". Consul's DNS interface is affected by restrictions on service rules. If the -[`acl_token`](/docs/agent/config/agent-config-files#acl_token) used by the agent does not have "read" access to a +[`acl_token`](/docs/agent/config/config-files#acl_token) used by the agent does not have "read" access to a given service, then the DNS interface will return no records when queried for it. When reading from the catalog or retrieving information from the health endpoints, service rules are @@ -1037,7 +1037,7 @@ performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which m ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens to use for registration events: -1. Using the [acl_token](/docs/agent/config/agent-config-files#acl_token) configuration +1. Using the [acl_token](/docs/agent/config/config-files#acl_token) configuration directive. This allows a single token to be configured globally and used during all service and check registration operations. 2. Providing an ACL token with service and check definitions at registration @@ -1048,12 +1048,12 @@ to use for registration events: API](/api) for operations that require them. **Note:** all tokens passed to an agent are persisted on local disk to allow recovery from restarts. See [`-data-dir` flag - documentation](/docs/agent/config/agent-config-files#acl_token) for notes on securing + documentation](/docs/agent/config/config-files#acl_token) for notes on securing access. In addition to ACLs, in Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/config/agent-config-files#enable_script_checks) or -[`enable_local_script_checks`](/docs/agent/config/agent-config-files#enable_local_script_checks) +[`enable_script_checks`](/docs/agent/config/config-files#enable_script_checks) or +[`enable_local_script_checks`](/docs/agent/config/config-files#enable_local_script_checks) set to `true` in order to enable script checks. #### Session Rules @@ -1084,20 +1084,20 @@ name that starts with "admin". #### Outages and ACL Replication ((#replication)) The Consul ACL system is designed with flexible rules to accommodate for an outage -of the [`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) or networking +of the [`acl_datacenter`](/docs/agent/config/config-files#acl_datacenter) or networking issues preventing access to it. In this case, it may be impossible for agents in non-authoritative datacenters to resolve tokens. 
Consul provides -a number of configurable [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) +a number of configurable [`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy) choices to tune behavior. It is possible to deny or permit all actions or to ignore cache TTLs and enter a fail-safe mode. The default is to ignore cache TTLs for any previously resolved tokens and to deny any uncached tokens. Consul 0.7 added an ACL Replication capability that can allow non-authoritative datacenter agents to resolve even uncached tokens. This is enabled by setting an -[`acl_replication_token`](/docs/agent/config/agent-config-files#acl_replication_token) in the +[`acl_replication_token`](/docs/agent/config/config-files#acl_replication_token) in the configuration on the servers in the non-authoritative datacenters. In Consul 0.9.1 and later you can enable ACL replication using -[`enable_acl_replication`](/docs/agent/config/agent-config-files#enable_acl_replication) and +[`enable_acl_replication`](/docs/agent/config/config-files#enable_acl_replication) and then set the token later using the [agent token API](/api-docs/agent#update-acl-tokens) on each server. This can also be used to rotate the token without restarting the Consul servers. @@ -1113,7 +1113,7 @@ every 30 seconds. Replicated changes are written at a rate that's throttled to a large set of ACLs. If there's a partition or other outage affecting the authoritative datacenter, -and the [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) +and the [`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy) is set to "extend-cache", tokens will be resolved during the outage using the replicated set of ACLs. An [ACL replication status](/api-docs/acl#check-acl-replication) endpoint is available to monitor the health of the replication process. @@ -1123,7 +1123,7 @@ already cached and is expired while similar semantics than "extend-cache". It allows to avoid having issues when connectivity with the authoritative is not completely broken, but very slow. -Locally-resolved ACLs will be cached using the [`acl_ttl`](/docs/agent/config/agent-config-files#acl_ttl) +Locally-resolved ACLs will be cached using the [`acl_ttl`](/docs/agent/config/config-files#acl_ttl) setting of the non-authoritative datacenter, so these entries may persist in the cache for up to the TTL, even after the authoritative datacenter comes back online. @@ -1149,7 +1149,7 @@ Consul 0.8 added many more ACL policy types and brought ACL enforcement to Consu agents for the first time. To ease the transition to Consul 0.8 for existing ACL users, there's a configuration option to disable these new features. To disable support for these new ACLs, set the -[`acl_enforce_version_8`](/docs/agent/config/agent-config-files#acl_enforce_version_8) configuration +[`acl_enforce_version_8`](/docs/agent/config/config-files#acl_enforce_version_8) configuration option to `false` on Consul clients and servers. 
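As a rough illustration, the legacy options discussed in this section might be combined in a server agent's configuration as in the following minimal sketch; every value shown is an assumption chosen for the example, not a recommendation:

```hcl
# Sketch only: legacy (pre-1.4) ACL options on a Consul server agent.
# All values below are illustrative assumptions.
acl_datacenter         = "dc1"          # authoritative datacenter for ACLs
acl_default_policy     = "deny"         # allowlist mode; rules must explicitly allow actions
acl_down_policy        = "extend-cache" # keep using cached tokens if the ACL datacenter is unreachable
acl_ttl                = "30s"          # upper bound on policy staleness
enable_acl_replication = true           # replicate tokens when running in a secondary datacenter
acl_enforce_version_8  = false          # opt out of the version 8 ACL features summarized below
```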
Here's a summary of the new features: @@ -1172,31 +1172,31 @@ Here's a summary of the new features: Two new configuration options are used once version 8 ACLs are enabled: -- [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token) is used as +- [`acl_agent_master_token`](/docs/agent/config/config-files#acl_agent_master_token) is used as a special access token that has `agent` ACL policy `write` privileges on each agent where it is configured, as well as `node` ACL policy `read` privileges for all nodes. This token should only be used by operators during outages when Consul servers aren't available to resolve ACL tokens. Applications should use regular ACL tokens during normal operation. -- [`acl_agent_token`](/docs/agent/config/agent-config-files#acl_agent_token) is used internally by +- [`acl_agent_token`](/docs/agent/config/config-files#acl_agent_token) is used internally by Consul agents to perform operations to the service catalog when registering themselves or sending network coordinates to the servers. This token must at least have `node` ACL policy `write` access to the node name it will register as in order to register any node-level information like metadata or tagged addresses. -Since clients now resolve ACLs locally, the [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) +Since clients now resolve ACLs locally, the [`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy) now applies to Consul clients as well as Consul servers. This will determine what the client will do in the event that the servers are down. -Consul clients must have [`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) configured +Consul clients must have [`acl_datacenter`](/docs/agent/config/config-files#acl_datacenter) configured in order to enable agent-level ACL features. If this is set, the agents will contact the Consul servers to determine if ACLs are enabled at the cluster level. If they detect that ACLs are not enabled, they will check at most every 2 minutes to see if they have become enabled, and will start enforcing ACLs automatically. If an agent has an `acl_datacenter` defined, operators will -need to use the [`acl_agent_master_token`](/docs/agent/config/agent-config-files#acl_agent_master_token) to +need to use the [`acl_agent_master_token`](/docs/agent/config/config-files#acl_agent_master_token) to perform agent-level operations if the Consul servers aren't present (such as for a manual join -to the cluster), unless the [`acl_down_policy`](/docs/agent/config/agent-config-files#acl_down_policy) on the +to the cluster), unless the [`acl_down_policy`](/docs/agent/config/config-files#acl_down_policy) on the agent is set to "allow". Non-server agents do not need to have the -[`acl_master_token`](/docs/agent/config/agent-config-files#acl_master_token) configured; it is not +[`acl_master_token`](/docs/agent/config/config-files#acl_master_token) configured; it is not used by agents in any way. diff --git a/website/content/docs/security/acl/acl-rules.mdx b/website/content/docs/security/acl/acl-rules.mdx index 59c24b31d..5e6762c74 100644 --- a/website/content/docs/security/acl/acl-rules.mdx +++ b/website/content/docs/security/acl/acl-rules.mdx @@ -227,7 +227,7 @@ with `bar`. 
Since [Agent API](/api-docs/agent) utility operations may be required before an agent is joined to a cluster, or during an outage of the Consul servers or ACL datacenter, a special token may be -configured with [`acl.tokens.agent_recovery`](/docs/agent/config/agent-config-files#acl_tokens_agent_recovery) to allow +configured with [`acl.tokens.agent_recovery`](/docs/agent/config/config-files#acl_tokens_agent_recovery) to allow write access to these operations even if no ACL resolution capability is available. ## Event Rules @@ -272,7 +272,7 @@ read-only access to any event, and firing of the "deploy" event. The [`consul exec`](/commands/exec) command uses events with the "\_rexec" prefix during operation, so to enable this feature in a Consul environment with ACLs enabled, you will need to give agents a token with access to this event prefix, in addition to configuring -[`disable_remote_exec`](/docs/agent/config/agent-config-files#disable_remote_exec) to `false`. +[`disable_remote_exec`](/docs/agent/config/config-files#disable_remote_exec) to `false`. ## Key/Value Rules @@ -640,16 +640,16 @@ node "admin" { Agents must be configured with `write` privileges for their own node name so that the agent can register their node metadata, tagged addresses, and other information in the catalog. If configured incorrectly, the agent will print an error to the console when it tries to sync its state with the catalog. -Configure `write` access in the [`acl.tokens.agent`](/docs/agent/config/agent-config-files#acl_tokens_agent) parameter. +Configure `write` access in the [`acl.tokens.agent`](/docs/agent/config/config-files#acl_tokens_agent) parameter. -The [`acl.token.default`](/docs/agent/config/agent-config-files#acl_tokens_default) used by the agent should have `read` access to a given node so that the DNS interface can be queried. +The [`acl.token.default`](/docs/agent/config/config-files#acl_tokens_default) used by the agent should have `read` access to a given node so that the DNS interface can be queried. Node rules are used to filter query results when reading from the catalog or retrieving information from the health endpoints. This allows for configurations where a token has access to a given service name, but only on an allowed subset of node names. Consul agents check tokens locally when health checks are registered and when Consul performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs. These actions may required an ACL token to complete. Use the following methods to configure ACL tokens for registration events: -* Configure a global token in the [acl.tokens.default](/docs/agent/config/agent-config-files#acl_tokens_default) parameter. +* Configure a global token in the [acl.tokens.default](/docs/agent/config/config-files#acl_tokens_default) parameter. This allows a single token to be used during all check registration operations. * Provide an ACL token with `service` and `check` definitions at registration time. This allows for greater flexibility and enables the use of multiple tokens on the same agent. @@ -835,7 +835,7 @@ service "admin" { Consul's DNS interface is affected by restrictions on service rules. If the -[`acl.tokens.default`](/docs/agent/config/agent-config-files#acl_tokens_default) used by the agent does not have `read` access to a +[`acl.tokens.default`](/docs/agent/config/config-files#acl_tokens_default) used by the agent does not have `read` access to a given service, then the DNS interface will return no records when queried for it. 
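A minimal sketch of how the `acl.tokens.agent` and `acl.tokens.default` settings referenced above might be wired into a client agent's configuration follows; the token values are placeholders and would normally be created through the ACL API and distributed securely:

```hcl
# Sketch only: ACL token configuration for a client agent (Consul 1.4+ syntax).
# Token values are placeholders, not real secrets.
acl {
  enabled = true
  tokens {
    agent   = "00000000-0000-0000-0000-000000000001" # needs node:write on this agent's own node name
    default = "00000000-0000-0000-0000-000000000002" # needs read access for DNS and catalog queries
  }
}
```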
When reading from the catalog or retrieving information from the health endpoints, service rules are @@ -847,7 +847,7 @@ performs periodic [anti-entropy](/docs/architecture/anti-entropy) syncs, which m ACL token to complete. To accommodate this, Consul provides two methods of configuring ACL tokens to use for registration events: -1. Using the [acl.tokens.default](/docs/agent/config/agent-config-files#acl_tokens_default) configuration +1. Using the [acl.tokens.default](/docs/agent/config/config-files#acl_tokens_default) configuration directive. This allows a single token to be configured globally and used during all service and check registration operations. 2. Providing an ACL token with service and check definitions at registration @@ -858,12 +858,12 @@ to use for registration events: API](/api) for operations that require them. **Note:** all tokens passed to an agent are persisted on local disk to allow recovery from restarts. See [`-data-dir` flag - documentation](/docs/agent/config/agent-config-files#acl_token) for notes on securing + documentation](/docs/agent/config/config-files#acl_token) for notes on securing access. In addition to ACLs, in Consul 0.9.0 and later, the agent must be configured with -[`enable_script_checks`](/docs/agent/config/agent-config-files#enable_script_checks) or -[`enable_local_script_checks`](/docs/agent/config/agent-config-files#enable_local_script_checks) +[`enable_script_checks`](/docs/agent/config/config-files#enable_script_checks) or +[`enable_local_script_checks`](/docs/agent/config/config-files#enable_local_script_checks) set to `true` in order to enable script checks. Service rules are also used to grant read or write access to intentions. The diff --git a/website/content/docs/security/acl/auth-methods/index.mdx b/website/content/docs/security/acl/auth-methods/index.mdx index 253f038a0..0c4750486 100644 --- a/website/content/docs/security/acl/auth-methods/index.mdx +++ b/website/content/docs/security/acl/auth-methods/index.mdx @@ -60,7 +60,7 @@ using the API or command line before they can be used by applications. endpoints](/api-docs/acl/binding-rules). -> **Note** - To configure auth methods in any connected secondary datacenter, -[ACL token replication](/docs/agent/config/agent-config-files#acl_enable_token_replication) +[ACL token replication](/docs/agent/config/config-files#acl_enable_token_replication) must be enabled. Auth methods require the ability to create local tokens which is restricted to the primary datacenter and any secondary datacenters with ACL token replication enabled. diff --git a/website/content/docs/security/encryption.mdx b/website/content/docs/security/encryption.mdx index 3f9c3659b..2b5652641 100644 --- a/website/content/docs/security/encryption.mdx +++ b/website/content/docs/security/encryption.mdx @@ -75,17 +75,17 @@ CA then signs keys for each of the agents, as in ~> Certificates need to be created with x509v3 extendedKeyUsage attributes for both clientAuth and serverAuth since Consul uses a single cert/key pair for both server and client communications. TLS can be used to verify the authenticity of the servers or verify the authenticity of clients. -These modes are controlled by the [`verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing), -[`verify_server_hostname`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname), -and [`verify_incoming`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_incoming) options, respectively. 
+These modes are controlled by the [`verify_outgoing`](/docs/agent/config/config-files#tls_internal_rpc_verify_outgoing), +[`verify_server_hostname`](/docs/agent/config/config-files#tls_internal_rpc_verify_server_hostname), +and [`verify_incoming`](/docs/agent/config/config-files#tls_internal_rpc_verify_incoming) options, respectively. -If [`verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing) is set, agents verify the +If [`verify_outgoing`](/docs/agent/config/config-files#tls_internal_rpc_verify_outgoing) is set, agents verify the authenticity of Consul for outgoing connections. Server nodes must present a certificate signed by a common certificate authority present on all agents, set via the agent's -[`ca_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_ca_file) and [`ca_path`](/docs/agent/config/agent-config-files#tls_internal_rpc_ca_path) -options. All server nodes must have an appropriate key pair set using [`cert_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_cert_file) and [`key_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_key_file). +[`ca_file`](/docs/agent/config/config-files#tls_internal_rpc_ca_file) and [`ca_path`](/docs/agent/config/config-files#tls_internal_rpc_ca_path) +options. All server nodes must have an appropriate key pair set using [`cert_file`](/docs/agent/config/config-files#tls_internal_rpc_cert_file) and [`key_file`](/docs/agent/config/config-files#tls_internal_rpc_key_file). -If [`verify_server_hostname`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname) is set, then +If [`verify_server_hostname`](/docs/agent/config/config-files#tls_internal_rpc_verify_server_hostname) is set, then outgoing connections perform hostname verification. All servers must have a certificate valid for `server..` or the client will reject the handshake. This is a new configuration as of 0.5.1, and it is used to prevent a compromised client from being @@ -93,12 +93,12 @@ able to restart in server mode and perform a MITM (Man-In-The-Middle) attack. Ne to true, and generate the proper certificates, but this is defaulted to false to avoid breaking existing deployments. -If [`verify_incoming`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_incoming) is set, the servers verify the +If [`verify_incoming`](/docs/agent/config/config-files#tls_internal_rpc_verify_incoming) is set, the servers verify the authenticity of all incoming connections. All clients must have a valid key pair set using -[`cert_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_cert_file) and -[`key_file`](/docs/agent/config/agent-config-files#tls_internal_rpc_key_file). Servers will +[`cert_file`](/docs/agent/config/config-files#tls_internal_rpc_cert_file) and +[`key_file`](/docs/agent/config/config-files#tls_internal_rpc_key_file). Servers will also disallow any non-TLS connections. To force clients to use TLS, -[`verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing) must also be set. +[`verify_outgoing`](/docs/agent/config/config-files#tls_internal_rpc_verify_outgoing) must also be set. TLS is used to secure the RPC calls between agents, but gossip between nodes is done over UDP and is secured using a symmetric key. See above for enabling gossip encryption. 
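To make the interaction of these verification options concrete, here is a minimal, illustrative sketch of a server agent's TLS and gossip settings using the flat option names this page refers to (newer releases expose the same settings under the nested `tls` stanza used on the following security-model page); the certificate paths and gossip key are placeholders:

```hcl
# Sketch only: flat-style TLS and gossip encryption options for a server agent.
# Certificate paths and the gossip key below are placeholders.
verify_incoming        = true   # require client certificates on incoming connections
verify_outgoing        = true   # require TLS for outgoing agent connections
verify_server_hostname = true   # outgoing connections must present a server.<dc>.<domain> certificate
ca_file   = "/etc/consul.d/tls/consul-agent-ca.pem"
cert_file = "/etc/consul.d/tls/dc1-server-consul-0.pem"
key_file  = "/etc/consul.d/tls/dc1-server-consul-0-key.pem"
encrypt   = "<32-byte-base64-key-from-consul-keygen>" # symmetric key securing gossip over UDP
```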
diff --git a/website/content/docs/security/security-models/core.mdx b/website/content/docs/security/security-models/core.mdx index 59b9a0cdc..815abdfd7 100644 --- a/website/content/docs/security/security-models/core.mdx +++ b/website/content/docs/security/security-models/core.mdx @@ -72,32 +72,32 @@ environment and adapt these configurations accordingly. - **mTLS** - Mutual authentication of both the TLS server and client x509 certificates prevents internal abuse through unauthorized access to Consul agents within the cluster. - - [`tls.defaults.verify_incoming`](/docs/agent/config/agent-config-files#tls_defaults_verify_incoming) - By default this is false, and + - [`tls.defaults.verify_incoming`](/docs/agent/config/config-files#tls_defaults_verify_incoming) - By default this is false, and should almost always be set to true to require TLS verification for incoming client connections. This applies to the internal RPC, HTTPS and gRPC APIs. - - [`tls.https.verify_incoming`](/docs/agent/config/agent-config-files#tls_https_verify_incoming) - By default this is false, and should + - [`tls.https.verify_incoming`](/docs/agent/config/config-files#tls_https_verify_incoming) - By default this is false, and should be set to true to require clients to provide a valid TLS certificate when the Consul HTTPS API is enabled. TLS for the API may be not be necessary if it is exclusively served over a loopback interface such as `localhost`. - - [`tls.internal_rpc.verify_incoming`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_incoming) - By default this is false, + - [`tls.internal_rpc.verify_incoming`](/docs/agent/config/config-files#tls_internal_rpc_verify_incoming) - By default this is false, and should almost always be set to true to require clients to provide a valid TLS certificate for Consul agent RPCs. - [`tls.grpc.verify_incoming`](/docs/agent/options#tls_grpc_verify_incoming) - By default this is false, and should be set to true to require clients to provide a valid TLS certificate when the Consul gRPC API is enabled. TLS for the API may be not be necessary if it is exclusively served over a loopback interface such as `localhost`. - - [`tls.internal_rpc.verify_outgoing`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_outgoing) - By default this is false, + - [`tls.internal_rpc.verify_outgoing`](/docs/agent/config/config-files#tls_internal_rpc_verify_outgoing) - By default this is false, and should be set to true to require TLS for outgoing connections from server or client agents. Servers that specify `verify_outgoing = true` will always talk to other servers over TLS, but they still accept non-TLS connections to allow for a transition of all clients to TLS. Currently the only way to enforce that no client can communicate with a server unencrypted is to also enable `verify_incoming` which requires client certificates too. - - [`enable_agent_tls_for_checks`](/docs/agent/config/agent-config-files#enable_agent_tls_for_checks) - By default this is false, and + - [`enable_agent_tls_for_checks`](/docs/agent/config/config-files#enable_agent_tls_for_checks) - By default this is false, and should almost always be set to true to require mTLS to set up the client for HTTP or gRPC health checks. This was added in Consul 1.0.1. 
- - [`tls.internal_rpc.verify_server_hostname`](/docs/agent/config/agent-config-files#tls_internal_rpc_verify_server_hostname) - By default + - [`tls.internal_rpc.verify_server_hostname`](/docs/agent/config/config-files#tls_internal_rpc_verify_server_hostname) - By default this is false, and should be set to true to require that the TLS certificate presented by the servers matches `server..` hostname for outgoing TLS connections. The default configuration does not verify the hostname of the certificate, only that it is signed by a trusted CA. This setting is critical to prevent a @@ -108,14 +108,14 @@ environment and adapt these configurations accordingly. [CVE-2018-19653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-19653) for more details. This is fixed in 1.4.1. - - [`auto_encrypt`](/docs/agent/config/agent-config-files#auto_encrypt) - Enables automated TLS certificate distribution for client - agent RPC communication using the Connect CA. Using this configuration a [`ca_file`](/docs/agent/config/agent-config-files#tls_defaults_ca_file) + - [`auto_encrypt`](/docs/agent/config/config-files#auto_encrypt) - Enables automated TLS certificate distribution for client + agent RPC communication using the Connect CA. Using this configuration a [`ca_file`](/docs/agent/config/config-files#tls_defaults_ca_file) and ACL token would still need to be distributed to client agents. - - [`allow_tls`](/docs/agent/config/agent-config-files#allow_tls) - By default this is false, and should be set to true on server + - [`allow_tls`](/docs/agent/config/config-files#allow_tls) - By default this is false, and should be set to true on server agents to allow certificates to be automatically generated and distributed from the Connect CA to client agents. - - [`tls`](/docs/agent/config/agent-config-files#tls) - By default this is false, and should be set to true on client agents to + - [`tls`](/docs/agent/config/config-files#tls) - By default this is false, and should be set to true on client agents to automatically request a client TLS certificate from the server's Connect CA. **Example Server Agent TLS Configuration** @@ -161,7 +161,7 @@ environment and adapt these configurations accordingly. } ``` - -> The client agent TLS configuration from above sets [`verify_incoming`](/docs/agent/config/agent-config-files#tls_defaults_verify_incoming) + -> The client agent TLS configuration from above sets [`verify_incoming`](/docs/agent/config/config-files#tls_defaults_verify_incoming) to false which assumes all incoming traffic is restricted to `localhost`. The primary benefit for this configuration would be to avoid provisioning client TLS certificates (in addition to ACL tokens) for all tools or applications using the local Consul agent. In this case ACLs should be enabled to provide authorization and only ACL tokens would @@ -169,7 +169,7 @@ environment and adapt these configurations accordingly. - **ACLs** - The access control list (ACL) system provides a security mechanism for Consul administrators to grant capabilities tied to an individual human, or machine operator identity. To ultimately secure the ACL system, - administrators should configure the [`default_policy`](/docs/agent/config/agent-config-files#acl_default_policy) to "deny". + administrators should configure the [`default_policy`](/docs/agent/config/config-files#acl_default_policy) to "deny". 
The [system](/docs/security/acl/acl-system) is comprised of five major components: @@ -196,10 +196,10 @@ environment and adapt these configurations accordingly. Two optional gossip encryption options enable Consul servers without gossip encryption to safely upgrade. After upgrading, the verification options should be enabled, or removed to set them to their default state: - - [`encrypt_verify_incoming`](/docs/agent/config/agent-config-files#encrypt_verify_incoming) - By default this is true to enforce + - [`encrypt_verify_incoming`](/docs/agent/config/config-files#encrypt_verify_incoming) - By default this is true to enforce encryption on _incoming_ gossip communications. - - [`encrypt_verify_outgoing`](/docs/agent/config/agent-config-files#encrypt_verify_outgoing) - By default this is true to enforce + - [`encrypt_verify_outgoing`](/docs/agent/config/config-files#encrypt_verify_outgoing) - By default this is true to enforce encryption on _outgoing_ gossip communications. - **Namespaces** - Read and write operations should be scoped to logical namespaces to @@ -240,16 +240,16 @@ environment and adapt these configurations accordingly. - **Linux Security Modules** - Use of security modules that can be directly integrated into operating systems such as AppArmor, SElinux, and Seccomp on Consul agent hosts. -- **Customize TLS Settings** - TLS settings such as the [available cipher suites](/docs/agent/config/agent-config-files#tls_defaults_tls_cipher_suites), +- **Customize TLS Settings** - TLS settings such as the [available cipher suites](/docs/agent/config/config-files#tls_defaults_tls_cipher_suites), should be tuned to fit the needs of your environment. - - [`tls_min_version`](/docs/agent/config/agent-config-files#tls_defaults_tls_min_version) - Used to specify the minimum TLS version to use. + - [`tls_min_version`](/docs/agent/config/config-files#tls_defaults_tls_min_version) - Used to specify the minimum TLS version to use. - - [`tls_cipher_suites`](/docs/agent/config/agent-config-files#tls_defaults_tls_cipher_suites) - Used to specify which TLS cipher suites are allowed. + - [`tls_cipher_suites`](/docs/agent/config/config-files#tls_defaults_tls_cipher_suites) - Used to specify which TLS cipher suites are allowed. - **Customize HTTP Response Headers** - Additional security headers, such as [`X-XSS-Protection`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection), can be - [configured](/docs/agent/config/agent-config-files#response_headers) for HTTP API responses. + [configured](/docs/agent/config/config-files#response_headers) for HTTP API responses. ```hcl http_config { @@ -262,28 +262,28 @@ environment and adapt these configurations accordingly. - **Customize Default Limits** - Consul has a number of builtin features with default connection limits that should be tuned to fit your environment. - - [`http_max_conns_per_client`](/docs/agent/config/agent-config-files#http_max_conns_per_client) - Used to limit concurrent access from + - [`http_max_conns_per_client`](/docs/agent/config/config-files#http_max_conns_per_client) - Used to limit concurrent access from a single client to the HTTP(S) endpoint on Consul agents. - - [`https_handshake_timeout`](/docs/agent/config/agent-config-files#https_handshake_timeout) - Used to timeout TLS connection for the + - [`https_handshake_timeout`](/docs/agent/config/config-files#https_handshake_timeout) - Used to timeout TLS connection for the HTTP(S) endpoint for Consul agents. 
- - [`rpc_handshake_timeout`](/docs/agent/config/agent-config-files#rpc_handshake_timeout) - Used to timeout TLS connections for the RPC + - [`rpc_handshake_timeout`](/docs/agent/config/config-files#rpc_handshake_timeout) - Used to timeout TLS connections for the RPC endpoint for Consul agents. - - [`rpc_max_conns_per_client`](/docs/agent/config/agent-config-files#rpc_max_conns_per_client) - Used to limit concurrent access from a + - [`rpc_max_conns_per_client`](/docs/agent/config/config-files#rpc_max_conns_per_client) - Used to limit concurrent access from a single client to the RPC endpoint on Consul agents. - - [`rpc_rate`](/docs/agent/config/agent-config-files#rpc_rate) - Disabled by default, this is used to limit (requests/second) for client + - [`rpc_rate`](/docs/agent/config/config-files#rpc_rate) - Disabled by default, this is used to limit (requests/second) for client agents making RPC calls to server agents. - - [`rpc_max_burst`](/docs/agent/config/agent-config-files#rpc_max_burst) - Used as the token bucket size for client agents making RPC + - [`rpc_max_burst`](/docs/agent/config/config-files#rpc_max_burst) - Used as the token bucket size for client agents making RPC calls to server agents. - - [`kv_max_value_size`](/docs/agent/config/agent-config-files#kv_max_value_size) - Used to configure the max number of bytes in a + - [`kv_max_value_size`](/docs/agent/config/config-files#kv_max_value_size) - Used to configure the max number of bytes in a key-value API request. - - [`txn_max_req_len`](/docs/agent/config/agent-config-files#txn_max_req_len) - Used to configure the max number of bytes in a + - [`txn_max_req_len`](/docs/agent/config/config-files#txn_max_req_len) - Used to configure the max number of bytes in a transaction API request. - **Secure UI Access** - Access to Consul’s builtin UI can be secured in various ways: @@ -303,7 +303,7 @@ environment and adapt these configurations accordingly. [Securing Consul with Access Control Lists (ACLs)](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production), which includes a section on [creating ACL tokens that provide a desired level UI access](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production#consul-ui-token). - - **Restrict HTTP Writes** - Using the [`allow_write_http_from`](/docs/agent/config/agent-config-files#allow_write_http_from) + - **Restrict HTTP Writes** - Using the [`allow_write_http_from`](/docs/agent/config/config-files#allow_write_http_from) configuration option to restrict write access for agent endpoints to hosts on the specified list of CIDRs. **Example Agent Configuration** diff --git a/website/content/docs/troubleshoot/common-errors.mdx b/website/content/docs/troubleshoot/common-errors.mdx index ca659196f..2ec8de87c 100644 --- a/website/content/docs/troubleshoot/common-errors.mdx +++ b/website/content/docs/troubleshoot/common-errors.mdx @@ -198,14 +198,14 @@ We recommend raising an issue with the CNI you're using to add support for `host and switching back to `hostPort` eventually. 
[troubleshooting]: https://learn.hashicorp.com/consul/day-2-operations/advanced-operations/troubleshooting -[node_name]: /docs/agent/config/agent-config-files#node_name -[retry_join]: /docs/agent/config/agent-config-cli#retry-join +[node_name]: /docs/agent/config/config-files#node_name +[retry_join]: /docs/agent/config/cli-flags#retry-join [license]: /commands/license [releases]: https://releases.hashicorp.com/consul/ [files]: https://easyengine.io/tutorials/linux/increase-open-files-limit [certificates]: https://learn.hashicorp.com/consul/advanced/day-1-operations/certificates [systemd]: https://learn.hashicorp.com/consul/advanced/day-1-operations/deployment-guide#configure-systemd [monitoring]: https://learn.hashicorp.com/consul/advanced/day-1-operations/monitoring -[bind]: /docs/agent/config/agent-config-cli#_bind +[bind]: /docs/agent/config/cli-flags#_bind [jq]: https://stedolan.github.io/jq/ [go-sockaddr]: https://godoc.org/github.com/hashicorp/go-sockaddr/template diff --git a/website/content/docs/troubleshoot/faq.mdx b/website/content/docs/troubleshoot/faq.mdx index 7e85ecd23..8c89582ec 100644 --- a/website/content/docs/troubleshoot/faq.mdx +++ b/website/content/docs/troubleshoot/faq.mdx @@ -62,8 +62,8 @@ messages. This anonymous ID can be disabled. In fact, using the Checkpoint service is optional and can be disabled. -See [`disable_anonymous_signature`](/docs/agent/config/agent-config-files#disable_anonymous_signature) -and [`disable_update_check`](/docs/agent/config/agent-config-files#disable_update_check). +See [`disable_anonymous_signature`](/docs/agent/config/config-files#disable_anonymous_signature) +and [`disable_update_check`](/docs/agent/config/config-files#disable_update_check). ### Q: Does Consul rely on UDP Broadcast or Multicast? @@ -116,7 +116,7 @@ as well as race conditions between data updates and watch registrations. ### Q: What network ports does Consul use? -The [Ports Used](/docs/agent/config/agent-config-files#ports) section of the Configuration +The [Ports Used](/docs/agent/config/config-files#ports) section of the Configuration documentation lists all ports that Consul uses. ### Q: Does Consul require certain user process resource limits? @@ -143,7 +143,7 @@ of any excessive resource utilization before arbitrarily increasing the limits. The default recommended limit on a key's value size is 512KB. This is strictly enforced and an HTTP 413 status will be returned to any client that attempts to store more than that limit in a value. The limit can be increased by using the -[`kv_max_value_size`](/docs/agent/config/agent-config-files#kv_max_value_size) configuration option. +[`kv_max_value_size`](/docs/agent/config/config-files#kv_max_value_size) configuration option. It should be noted that the Consul key/value store is not designed to be used as a general purpose database. See diff --git a/website/content/docs/upgrading/instructions/general-process.mdx b/website/content/docs/upgrading/instructions/general-process.mdx index eeed1069a..597279ab4 100644 --- a/website/content/docs/upgrading/instructions/general-process.mdx +++ b/website/content/docs/upgrading/instructions/general-process.mdx @@ -74,7 +74,7 @@ this snapshot somewhere safe. 
More documentation on snapshot usage is available - [consul.io/commands/snapshot](/commands/snapshot) - -**2.** Temporarily modify your Consul configuration so that its [log_level](/docs/agent/config/agent-config-cli#_log_level) +**2.** Temporarily modify your Consul configuration so that its [log_level](/docs/agent/config/cli-flags#_log_level) is set to `debug`. After doing this, issue the following command on your servers to reload the configuration: @@ -183,7 +183,7 @@ then the following options for further assistance are available: When contacting Hashicorp Support, please include the following information in your ticket: - Consul version you were upgrading FROM and TO. -- [Debug level logs](/docs/agent/config/agent-config-cli#_log_level) from all servers in the cluster +- [Debug level logs](/docs/agent/config/cli-flags#_log_level) from all servers in the cluster that you are having trouble with. These should include logs from prior to the upgrade attempt up through the current time. If your logs were not set at debug level prior to the upgrade, please include those logs as well. Also, update your config to use debug logs, diff --git a/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx b/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx index e1aa4a6b9..fe3531da1 100644 --- a/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx +++ b/website/content/docs/upgrading/instructions/upgrade-to-1-6-x.mdx @@ -51,7 +51,7 @@ Looking through these changes prior to upgrading is highly recommended. Two very notable items are: - 1.6.2 introduced more strict JSON decoding. Invalid JSON that was previously ignored might result in errors now (e.g., `Connect: null` in service definitions). See [[GH#6680](https://github.com/hashicorp/consul/pull/6680)]. -- 1.6.3 introduced the [http_max_conns_per_client](/docs/agent/config/agent-config-files#http_max_conns_per_client) limit. This defaults to 200. Prior to this, connections per client were unbounded. [[GH#7159](https://github.com/hashicorp/consul/issues/7159)] +- 1.6.3 introduced the [http_max_conns_per_client](/docs/agent/config/config-files#http_max_conns_per_client) limit. This defaults to 200. Prior to this, connections per client were unbounded. [[GH#7159](https://github.com/hashicorp/consul/issues/7159)] ## Procedure @@ -202,8 +202,8 @@ update those now to avoid issues when moving to newer versions. 
These are the changes you will need to make: -- `acl_datacenter` is now named `primary_datacenter` (review our [docs](/docs/agent/config/agent-config-files#primary_datacenter) for more info) -- `acl_default_policy`, `acl_down_policy`, `acl_ttl`, `acl_*_token` and `enable_acl_replication` options are now specified like this (review our [docs](/docs/agent/config/agent-config-files#acl) for more info): +- `acl_datacenter` is now named `primary_datacenter` (review our [docs](/docs/agent/config/config-files#primary_datacenter) for more info) +- `acl_default_policy`, `acl_down_policy`, `acl_ttl`, `acl_*_token` and `enable_acl_replication` options are now specified like this (review our [docs](/docs/agent/config/config-files#acl) for more info): ```hcl acl { enabled = true/false diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index 551979958..ceb16af11 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -54,7 +54,7 @@ Due to this rename the following endpoint is also deprecated: These config keys are now deprecated: - `audit.sink[].name` - - [`dns_config.dns_prefer_namespace`](/docs/agent/config/agent-config-files#dns_prefer_namespace) + - [`dns_config.dns_prefer_namespace`](/docs/agent/config/config-files#dns_prefer_namespace) ### Deprecated CLI Subcommands @@ -119,8 +119,8 @@ have a license loaded from a configuration file or from their environment the sa agents must have the license specified. Both agents can still perform automatic retrieval of their license but with a few extra stipulations. First, license auto-retrieval now requires that ACLs are on and that the client or snapshot agent is configured with a valid ACL token. Secondly, client -agents require that either the [`start_join`](/docs/agent/config/agent-config-files#start_join) or -[`retry_join`](/docs/agent/config/agent-config-files#retry_join) configurations are set and that they resolve to server +agents require that either the [`start_join`](/docs/agent/config/config-files#start_join) or +[`retry_join`](/docs/agent/config/config-files#retry_join) configurations are set and that they resolve to server agents. If those stipulations are not met, attempting to start the client or snapshot agent will result in it immediately shutting down. @@ -214,7 +214,7 @@ to Consul 1.9.0. ### Changes to Configuration Defaults -The [`enable_central_service_config`](/docs/agent/config/agent-config-files#enable_central_service_config) +The [`enable_central_service_config`](/docs/agent/config/config-files#enable_central_service_config) configuration now defaults to `true`. ### Changes to Intentions @@ -283,7 +283,7 @@ behavior: #### Removal of Deprecated Features -The [`acl_enforce_version_8`](/docs/agent/config/agent-config-files#acl_enforce_version_8) +The [`acl_enforce_version_8`](/docs/agent/config/config-files#acl_enforce_version_8) configuration has been removed (with version 8 ACL support by being on by default). @@ -326,7 +326,7 @@ to more precisely capture the view of _active_ blocking queries. ### Vault: default `http_max_conns_per_client` too low to run Vault properly -Consul 1.7.0 introduced [limiting of connections per client](/docs/agent/config/agent-config-files#http_max_conns_per_client). The default value +Consul 1.7.0 introduced [limiting of connections per client](/docs/agent/config/config-files#http_max_conns_per_client). 
The default value was 100, but Vault could use up to 128, which caused problems. If you want to use Vault with Consul 1.7.0, you should change the value to 200. Starting with Consul 1.7.1 this is the new default. @@ -334,7 +334,7 @@ Starting with Consul 1.7.1 this is the new default. ### Vault: default `http_max_conns_per_client` too low to run Vault properly -Consul 1.6.3 introduced [limiting of connections per client](/docs/agent/config/agent-config-files#http_max_conns_per_client). The default value +Consul 1.6.3 introduced [limiting of connections per client](/docs/agent/config/config-files#http_max_conns_per_client). The default value was 100, but Vault could use up to 128, which caused problems. If you want to use Vault with Consul 1.6.3 through 1.7.0, you should change the value to 200. Starting with Consul 1.7.1 this is the new default. @@ -373,7 +373,7 @@ datacenter". All configuration is backwards compatible and shouldn't need to change prior to upgrade although it's strongly recommended to migrate ACL configuration to the new syntax soon after upgrade. This includes moving to `primary_datacenter` rather than `acl_datacenter` and `acl_*` to the new [ACL -block](/docs/agent/config/agent-config-files#acl). +block](/docs/agent/config/config-files#acl). Datacenters can be upgraded in any order although secondaries will remain in [Legacy ACL mode](#legacy-acl-mode) until the primary datacenter is fully @@ -500,11 +500,11 @@ The following previously deprecated fields and config options have been removed: Consul 1.0.1 (and earlier versions of Consul) checked for raft snapshots every 5 seconds, and created new snapshots for every 8192 writes. These defaults cause constant disk IO in large busy clusters. Consul 1.1.0 increases these to larger values, -and makes them tunable via the [raft_snapshot_interval](/docs/agent/config/agent-config-files#_raft_snapshot_interval) and -[raft_snapshot_threshold](/docs/agent/config/agent-config-files#_raft_snapshot_threshold) parameters. We recommend +and makes them tunable via the [raft_snapshot_interval](/docs/agent/config/config-files#_raft_snapshot_interval) and +[raft_snapshot_threshold](/docs/agent/config/config-files#_raft_snapshot_threshold) parameters. We recommend keeping the new defaults. However, operators can go back to the old defaults by changing their -config if they prefer more frequent snapshots. See the documentation for [raft_snapshot_interval](/docs/agent/config/agent-config-files#_raft_snapshot_interval) -and [raft_snapshot_threshold](/docs/agent/config/agent-config-files#_raft_snapshot_threshold) to understand the trade-offs +config if they prefer more frequent snapshots. See the documentation for [raft_snapshot_interval](/docs/agent/config/config-files#_raft_snapshot_interval) +and [raft_snapshot_threshold](/docs/agent/config/config-files#_raft_snapshot_threshold) to understand the trade-offs when tuning these. ## Consul 1.0.7 @@ -532,7 +532,7 @@ before proceeding. #### Carefully Check and Remove Stale Servers During Rolling Upgrades Consul 1.0 (and earlier versions of Consul when running with [Raft protocol -3](/docs/agent/config/agent-config-files#_raft_protocol) had an issue where performing +3](/docs/agent/config/config-files#_raft_protocol) had an issue where performing rolling updates of Consul servers could result in an outage from old servers remaining in the cluster. 
[Autopilot](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations) @@ -553,7 +553,7 @@ Please be sure to read over all the details here before upgrading. #### Raft Protocol Now Defaults to 3 -The [`-raft-protocol`](/docs/agent/config/agent-config-cli#_raft_protocol) default has +The [`-raft-protocol`](/docs/agent/config/cli-flags#_raft_protocol) default has been changed from 2 to 3, enabling all [Autopilot](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations) features by default. @@ -582,7 +582,7 @@ servers, and then slowly stand down each of the older servers in a similar fashion. When using Raft protocol version 3, servers are identified by their -[`-node-id`](/docs/agent/config/agent-config-cli#_node_id) instead of their IP address +[`-node-id`](/docs/agent/config/cli-flags#_node_id) instead of their IP address when Consul makes changes to its internal Raft quorum configuration. This means that once a cluster has been upgraded with servers all running Raft protocol version 3, it will no longer allow servers running any older Raft protocol @@ -597,7 +597,7 @@ to map the server to its node ID in the Raft quorum configuration. As part of supporting the [HCL](https://github.com/hashicorp/hcl#syntax) format for Consul's config files, an `.hcl` or `.json` extension is required for all config files loaded by Consul, even when using the -[`-config-file`](/docs/agent/config/agent-config-cli#_config_file) argument to specify a +[`-config-file`](/docs/agent/config/cli-flags#_config_file) argument to specify a file directly. #### Service Definition Parameter Case changed @@ -614,31 +614,31 @@ upgrading. Here's the complete list of removed options and their equivalents: | Removed Option | Equivalent | | ------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `-dc` | [`-datacenter`](/docs/agent/config/agent-config-cli#_datacenter) | -| `-retry-join-azure-tag-name` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-azure-tag-value` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-ec2-region` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-ec2-tag-key` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-ec2-tag-value` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-gce-credentials-file` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-gce-project-name` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-gce-tag-name` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | -| `-retry-join-gce-zone-pattern` | [`-retry-join`](/docs/agent/config/agent-config-cli#_retry_join) | +| `-dc` | [`-datacenter`](/docs/agent/config/cli-flags#_datacenter) | +| `-retry-join-azure-tag-name` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-azure-tag-value` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-ec2-region` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-ec2-tag-key` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-ec2-tag-value` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| 
`-retry-join-gce-credentials-file` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-gce-project-name` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-gce-tag-name` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | +| `-retry-join-gce-zone-pattern` | [`-retry-join`](/docs/agent/config/cli-flags#_retry_join) | | `addresses.rpc` | None, the RPC server for CLI commands is no longer supported. | -| `advertise_addrs` | [`ports`](/docs/agent/config/agent-config-files#ports) with [`advertise_addr`](/docs/agent/config/agent-config-files#advertise_addr) and/or [`advertise_addr_wan`](/docs/agent/config/agent-config-files#advertise_addr_wan) | -| `dogstatsd_addr` | [`telemetry.dogstatsd_addr`](/docs/agent/config/agent-config-files#telemetry-dogstatsd_addr) | -| `dogstatsd_tags` | [`telemetry.dogstatsd_tags`](/docs/agent/config/agent-config-files#telemetry-dogstatsd_tags) | -| `http_api_response_headers` | [`http_config.response_headers`](/docs/agent/config/agent-config-files#response_headers) | +| `advertise_addrs` | [`ports`](/docs/agent/config/config-files#ports) with [`advertise_addr`](/docs/agent/config/config-files#advertise_addr) and/or [`advertise_addr_wan`](/docs/agent/config/config-files#advertise_addr_wan) | +| `dogstatsd_addr` | [`telemetry.dogstatsd_addr`](/docs/agent/config/config-files#telemetry-dogstatsd_addr) | +| `dogstatsd_tags` | [`telemetry.dogstatsd_tags`](/docs/agent/config/config-files#telemetry-dogstatsd_tags) | +| `http_api_response_headers` | [`http_config.response_headers`](/docs/agent/config/config-files#response_headers) | | `ports.rpc` | None, the RPC server for CLI commands is no longer supported. | -| `recursor` | [`recursors`](/docs/agent/config/agent-config-files#recursors) | -| `retry_join_azure` | [`retry-join`](/docs/agent/config/agent-config-files#retry_join) | -| `retry_join_ec2` | [`retry-join`](/docs/agent/config/agent-config-files#retry_join) | -| `retry_join_gce` | [`retry-join`](/docs/agent/config/agent-config-files#retry_join) | -| `statsd_addr` | [`telemetry.statsd_address`](/docs/agent/config/agent-config-files#telemetry-statsd_address) | -| `statsite_addr` | [`telemetry.statsite_address`](/docs/agent/config/agent-config-files#telemetry-statsite_address) | -| `statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/config/agent-config-files#telemetry-metrics_prefix) | -| `telemetry.statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/config/agent-config-files#telemetry-metrics_prefix) | +| `recursor` | [`recursors`](/docs/agent/config/config-files#recursors) | +| `retry_join_azure` | [`retry-join`](/docs/agent/config/config-files#retry_join) | +| `retry_join_ec2` | [`retry-join`](/docs/agent/config/config-files#retry_join) | +| `retry_join_gce` | [`retry-join`](/docs/agent/config/config-files#retry_join) | +| `statsd_addr` | [`telemetry.statsd_address`](/docs/agent/config/config-files#telemetry-statsd_address) | +| `statsite_addr` | [`telemetry.statsite_address`](/docs/agent/config/config-files#telemetry-statsite_address) | +| `statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/config/config-files#telemetry-metrics_prefix) | +| `telemetry.statsite_prefix` | [`telemetry.metrics_prefix`](/docs/agent/config/config-files#telemetry-metrics_prefix) | | (service definitions) `serviceid` | [`id`](/api-docs/agent/service#id) | | (service definitions) `dockercontainerid` | [`docker_container_id`](/api-docs/agent/check#dockercontainerid) | | (service definitions) 
`tlsskipverify` | [`tls_skip_verify`](/api-docs/agent/check#tlsskipverify) | @@ -648,7 +648,7 @@ upgrading. Here's the complete list of removed options and their equivalents: Since the `statsite_prefix` configuration option applied to all telemetry providers, `statsite_prefix` was renamed to -[`metrics_prefix`](/docs/agent/config/agent-config-files#telemetry-metrics_prefix). +[`metrics_prefix`](/docs/agent/config/config-files#telemetry-metrics_prefix). Configuration files will need to be updated when upgrading to this version of Consul. @@ -660,8 +660,8 @@ wrongly stated that you could configure both host and port. #### Escaping Behavior Changed for go-discover Configs -The format for [`-retry-join`](/docs/agent/config/agent-config-cli#retry-join) and -[`-retry-join-wan`](/docs/agent/config/agent-config-cli#retry-join-wan) values that use +The format for [`-retry-join`](/docs/agent/config/cli-flags#retry-join) and +[`-retry-join-wan`](/docs/agent/config/cli-flags#retry-join-wan) values that use [go-discover](https://github.com/hashicorp/go-discover) cloud auto joining has changed. Values in `key=val` sequences must no longer be URL encoded and can be provided as literals as long as they do not contain spaces, backslashes `\` or @@ -779,7 +779,7 @@ invalid health checks would get skipped. #### Script Checks Are Now Opt-In -A new [`enable_script_checks`](/docs/agent/config/agent-config-cli#_enable_script_checks) +A new [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) configuration option was added, and defaults to `false`, meaning that in order to allow an agent to run health checks that execute scripts, this will need to be configured and set to `true`. This provides a safer out-of-the-box @@ -801,10 +801,10 @@ for more information. Consul releases will no longer include a `web_ui.zip` file with the compiled web assets. These have been built in to the Consul binary since the 0.7.x -series and can be enabled with the [`-ui`](/docs/agent/config/agent-config-cli#_ui) +series and can be enabled with the [`-ui`](/docs/agent/config/cli-flags#_ui) configuration option. These built-in web assets have always been identical to the contents of the `web_ui.zip` file for each release. The -[`-ui-dir`](/docs/agent/config/agent-config-cli#_ui_dir) option is still available for +[`-ui-dir`](/docs/agent/config/cli-flags#_ui_dir) option is still available for hosting customized versions of the web assets, but the vast majority of Consul users can just use the built in web assets. @@ -836,12 +836,12 @@ to the following commands: #### Version 8 ACLs Are Now Opt-Out -The [`acl_enforce_version_8`](/docs/agent/config/agent-config-files#acl_enforce_version_8) +The [`acl_enforce_version_8`](/docs/agent/config/config-files#acl_enforce_version_8) configuration now defaults to `true` to enable full version 8 ACL support by default. If you are upgrading an existing cluster with ACLs enabled, you will need to set this to `false` during the upgrade on **both Consul agents and Consul servers**. Version 8 ACLs were also changed so that -[`acl_datacenter`](/docs/agent/config/agent-config-files#acl_datacenter) must be set on +[`acl_datacenter`](/docs/agent/config/config-files#acl_datacenter) must be set on agents in order to enable the agent-side enforcement of ACLs. This makes for a smoother experience in clusters where ACLs aren't enabled at all, but where the agents would have to wait to contact a Consul server before learning that. 
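As a sketch only of the legacy options referenced in the 0.8 upgrade notes above (both predate the modern `acl` block, and the datacenter value here is a hypothetical example), an agent mid-way through the 0.8.x rolling upgrade might temporarily carry:

```hcl
# Temporary settings during the 0.8.x rolling upgrade described above
acl_datacenter        = "dc1"   # hypothetical ACL datacenter
acl_enforce_version_8 = false   # flip back to true once all agents and servers are upgraded
```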
@@ -849,14 +849,14 @@ agents would have to wait to contact a Consul server before learning that. #### Remote Exec Is Now Opt-In The default for -[`disable_remote_exec`](/docs/agent/config/agent-config-files#disable_remote_exec) was +[`disable_remote_exec`](/docs/agent/config/config-files#disable_remote_exec) was changed to "true", so now operators need to opt-in to having agents support running commands remotely via [`consul exec`](/commands/exec). #### Raft Protocol Version Compatibility When upgrading to Consul 0.8.0 from a version lower than 0.7.0, users will need -to set the [`-raft-protocol`](/docs/agent/config/agent-config-cli#_raft_protocol) option +to set the [`-raft-protocol`](/docs/agent/config/cli-flags#_raft_protocol) option to 1 in order to maintain backwards compatibility with the old servers during the upgrade. After the servers have been migrated to version 0.8.0, `-raft-protocol` can be moved up to 2 and the servers restarted to match the @@ -891,7 +891,7 @@ process to reap child processes. #### DNS Resiliency Defaults -The default for [`max_stale`](/docs/agent/config/agent-config-files#max_stale) has been +The default for [`max_stale`](/docs/agent/config/config-files#max_stale) has been increased from 5 seconds to a near-indefinite threshold (10 years) to allow DNS queries to continue to be served in the event of a long outage with no leader. A new telemetry counter was added at `consul.dns.stale_queries` to track when @@ -905,7 +905,7 @@ to be aware of during an upgrade are categorized below. #### Performance Timing Defaults and Tuning Consul 0.7 now defaults the DNS configuration to allow for stale queries by -defaulting [`allow_stale`](/docs/agent/config/agent-config-files#allow_stale) to true for +defaulting [`allow_stale`](/docs/agent/config/config-files#allow_stale) to true for better utilization of available servers. If you want to retain the previous behavior, set the following configuration: @@ -918,7 +918,7 @@ behavior, set the following configuration: ``` Consul also 0.7 introduced support for tuning Raft performance using a new -[performance configuration block](/docs/agent/config/agent-config-files#performance). Also, +[performance configuration block](/docs/agent/config/config-files#performance). Also, the default Raft timing is set to a lower-performance mode suitable for [minimal Consul servers](/docs/install/performance#minimum). @@ -938,8 +938,8 @@ See the [Server Performance](/docs/install/performance) guide for more details. #### Leave-Related Configuration Defaults -The default behavior of [`leave_on_terminate`](/docs/agent/config/agent-config-files#leave_on_terminate) -and [`skip_leave_on_interrupt`](/docs/agent/config/agent-config-files#skip_leave_on_interrupt) +The default behavior of [`leave_on_terminate`](/docs/agent/config/config-files#leave_on_terminate) +and [`skip_leave_on_interrupt`](/docs/agent/config/config-files#skip_leave_on_interrupt) are now dependent on whether or not the agent is acting as a server or client: - For servers, `leave_on_terminate` defaults to "false" and `skip_leave_on_interrupt` @@ -978,7 +978,7 @@ using this feature. #### WAN Address Translation in HTTP Endpoints Consul version 0.7 added support for translating WAN addresses in certain -[HTTP endpoints](/docs/agent/config/agent-config-files#translate_wan_addrs). The servers +[HTTP endpoints](/docs/agent/config/config-files#translate_wan_addrs). The servers and the agents need to be running version 0.7 or later in order to use this feature. 
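A minimal sketch of the WAN address translation feature mentioned above, assuming a hypothetical routable WAN address, might be:

```hcl
# Advertise a distinct WAN address and translate it in HTTP/DNS responses
advertise_addr_wan  = "203.0.113.10"  # hypothetical example address
translate_wan_addrs = true
```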
@@ -1060,7 +1060,7 @@ which require it: } When the DNS interface is queried, the agent's -[`acl_token`](/docs/agent/config/agent-config-files#acl_token) is used, so be sure +[`acl_token`](/docs/agent/config/config-files#acl_token) is used, so be sure that token has sufficient privileges to return the DNS records you expect to retrieve from it. diff --git a/website/content/partials/http_api_options_client.mdx b/website/content/partials/http_api_options_client.mdx index 516579bba..472c250e6 100644 --- a/website/content/partials/http_api_options_client.mdx +++ b/website/content/partials/http_api_options_client.mdx @@ -20,7 +20,7 @@ used instead. The scheme can also be set to HTTPS by setting the environment variable `CONSUL_HTTP_SSL=true`. This may be a unix domain socket using `unix:///path/to/socket` if the [agent is configured to - listen](/docs/agent/config/agent-config-files#addresses) that way. + listen](/docs/agent/config/config-files#addresses) that way. - `-tls-server-name=` - The server name to use as the SNI host when connecting via TLS. This can also be specified via the `CONSUL_TLS_SERVER_NAME` diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index f23305dbb..1538aa9ac 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -895,11 +895,11 @@ }, { "title": "CLI Reference", - "path": "agent/config/agent-config-cli" + "path": "agent/config/cli-flags" }, { "title": "Configuration Reference", - "path": "agent/config/agent-config-files" + "path": "agent/config/config-files" } ] }, From cd17e98800b706b67eac21349763e19a4dee3e10 Mon Sep 17 00:00:00 2001 From: Natalie Smith Date: Mon, 10 Jan 2022 17:50:06 -0800 Subject: [PATCH 126/785] docs: fix yet more references to agent/options --- .../linux/package/etc/consul.d/consul.hcl | 2 +- CHANGELOG.md | 80 +++++++++---------- agent/config/runtime.go | 6 +- agent/consul/server.go | 2 +- agent/kvs_endpoint.go | 2 +- agent/txn_endpoint.go | 4 +- 6 files changed, 48 insertions(+), 48 deletions(-) diff --git a/.release/linux/package/etc/consul.d/consul.hcl b/.release/linux/package/etc/consul.d/consul.hcl index e1b8e6e19..a064e95af 100644 --- a/.release/linux/package/etc/consul.d/consul.hcl +++ b/.release/linux/package/etc/consul.d/consul.hcl @@ -1,4 +1,4 @@ -# Fullconfiguration options can be found at https://www.consul.io/docs/agent/options.html +# Full configuration options can be found at https://www.consul.io/docs/agent/config # datacenter # This flag controls the datacenter in which the agent is running. If not provided, diff --git a/CHANGELOG.md b/CHANGELOG.md index fd5776250..c9e452af0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -473,7 +473,7 @@ token. [[GH-10795](https://github.com/hashicorp/consul/issues/10795)] KNOWN ISSUES: -* The change to enable streaming by default uncovered an incompatibility between streaming and WAN federation over mesh gateways causing traffic to fall back to attempting a direct WAN connection rather than transiting through the gateways. We currently suggest explicitly setting [`use_streaming_backend=false`](https://www.consul.io/docs/agent/options#use_streaming_backend) if using WAN federation over mesh gateways when upgrading to 1.10.1 and are working to address this issue in a future patch release. +* The change to enable streaming by default uncovered an incompatibility between streaming and WAN federation over mesh gateways causing traffic to fall back to attempting a direct WAN connection rather than transiting through the gateways. 
We currently suggest explicitly setting [`use_streaming_backend=false`](https://www.consul.io/docs/agent/config/config-files#use_streaming_backend) if using WAN federation over mesh gateways when upgrading to 1.10.1 and are working to address this issue in a future patch release. SECURITY: @@ -1891,7 +1891,7 @@ FEATURES: * **Connect Envoy Supports L7 Routing:** Additional configuration entry types `service-router`, `service-resolver`, and `service-splitter`, allow for configuring Envoy sidecars to enable reliability and deployment patterns at L7 such as HTTP path-based routing, traffic shifting, and advanced failover capabilities. For more information see the [L7 traffic management](https://www.consul.io/docs/connect/l7-traffic-management.html) docs. * **Mesh Gateways:** Envoy can now be run as a gateway to route Connect traffic across datacenters using SNI headers, allowing connectivty across platforms and clouds and other complex network topologies. Read more in the [mesh gateway docs](https://www.consul.io/docs/connect/mesh_gateway.html). -* **Intention & CA Replication:** In order to enable connecitivty for services across datacenters, Connect intentions are now replicated and the Connect CA cross-signs from the [primary_datacenter](/docs/agent/options.html#primary_datacenter). This feature was previously part of Consul Enterprise. +* **Intention & CA Replication:** In order to enable connecitivty for services across datacenters, Connect intentions are now replicated and the Connect CA cross-signs from the [primary_datacenter](/docs/agent/config/config-files.html#primary_datacenter). This feature was previously part of Consul Enterprise. * agent: add `local-only` parameter to operator/keyring list requests to force queries to only hit local servers. [[GH-6279](https://github.com/hashicorp/consul/pull/6279)] * connect: expose an API endpoint to compile the discovery chain [[GH-6248](https://github.com/hashicorp/consul/issues/6248)] * connect: generate the full SNI names for discovery targets in the compiler rather than in the xds package [[GH-6340](https://github.com/hashicorp/consul/issues/6340)] @@ -2327,7 +2327,7 @@ FEATURES: IMPROVEMENTS: * proxy: With `-register` flag, heartbeat failures will only log once service registration succeeds. [[GH-4314](https://github.com/hashicorp/consul/pull/4314)] -* http: 1.0.3 introduced rejection of non-printable chars in HTTP URLs due to a security vulnerability. Some users who had keys written with an older version which are now dissallowed were unable to delete them. A new config option [disable_http_unprintable_char_filter](https://www.consul.io/docs/agent/options.html#disable_http_unprintable_char_filter) is added to allow those users to remove the offending keys. Leaving this new option set long term is strongly discouraged as it bypasses filtering necessary to prevent some known vulnerabilities. [[GH-4442](https://github.com/hashicorp/consul/pull/4442)] +* http: 1.0.3 introduced rejection of non-printable chars in HTTP URLs due to a security vulnerability. Some users who had keys written with an older version which are now dissallowed were unable to delete them. A new config option [disable_http_unprintable_char_filter](https://www.consul.io/docs/agent/config/config-files.html#disable_http_unprintable_char_filter) is added to allow those users to remove the offending keys. Leaving this new option set long term is strongly discouraged as it bypasses filtering necessary to prevent some known vulnerabilities. 
[[GH-4442](https://github.com/hashicorp/consul/pull/4442)] * agent: Allow for advanced configuration of some gossip related parameters. [[GH-4058](https://github.com/hashicorp/consul/issues/4058)] * agent: Make some Gossip tuneables configurable via the config file [[GH-4444](https://github.com/hashicorp/consul/pull/4444)] * ui: Included searching on `.Tags` when using the freetext search field. [[GH-4383](https://github.com/hashicorp/consul/pull/4383)] @@ -2559,13 +2559,13 @@ IMPROVEMENTS: * agent: (Consul Enterprise) Added [AWS KMS support](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) for S3 snapshots using the snapshot agent. * agent: Watches in the Consul agent can now be configured to invoke an HTTP endpoint instead of an executable. [[GH-3305](https://github.com/hashicorp/consul/issues/3305)] -* agent: Added a new [`-config-format`](https://www.consul.io/docs/agent/options.html#_config_format) command line option which can be set to `hcl` or `json` to specify the format of configuration files. This is useful for cases where the file name cannot be controlled in order to provide the required extension. [[GH-3620](https://github.com/hashicorp/consul/issues/3620)] +* agent: Added a new [`-config-format`](https://www.consul.io/docs/agent/config/cli-flags.html#_config_format) command line option which can be set to `hcl` or `json` to specify the format of configuration files. This is useful for cases where the file name cannot be controlled in order to provide the required extension. [[GH-3620](https://github.com/hashicorp/consul/issues/3620)] * agent: DNS recursors can now be specified as [go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr/template) templates. [[GH-2932](https://github.com/hashicorp/consul/issues/2932)] * agent: Serf snapshots no longer save network coordinate information. This enables recovery from errors upon agent restart. [[GH-489](https://github.com/hashicorp/serf/issues/489)] * agent: Added defensive code to prevent out of range ping times from infecting network coordinates. Updates to the coordinate system with negative round trip times or round trip times higher than 10 seconds will log an error but will be ignored. * agent: The agent now warns when there are extra unparsed command line arguments and refuses to start. [[GH-3397](https://github.com/hashicorp/consul/issues/3397)] * agent: Updated go-sockaddr library to get CoreOS route detection fixes and the new `mask` functionality. [[GH-3633](https://github.com/hashicorp/consul/issues/3633)] -* agent: Added a new [`enable_agent_tls_for_checks`](https://www.consul.io/docs/agent/options.html#enable_agent_tls_for_checks) configuration option that allows HTTP health checks for services requiring 2-way TLS to be checked using the agent's credentials. [[GH-3364](https://github.com/hashicorp/consul/issues/3364)] +* agent: Added a new [`enable_agent_tls_for_checks`](https://www.consul.io/docs/agent/config/config-files.html#enable_agent_tls_for_checks) configuration option that allows HTTP health checks for services requiring 2-way TLS to be checked using the agent's credentials. [[GH-3364](https://github.com/hashicorp/consul/issues/3364)] * agent: Made logging of health check status more uniform and moved log entries with full check output from DEBUG to TRACE level for less noise. [[GH-3683](https://github.com/hashicorp/consul/issues/3683)] * build: Consul is now built with Go 1.9.2. 
[[GH-3663](https://github.com/hashicorp/consul/issues/3663)] @@ -2590,8 +2590,8 @@ SECURITY: BREAKING CHANGES: -* **Raft Protocol Now Defaults to 3:** The [`-raft-protocol`](https://www.consul.io/docs/agent/options.html#_raft_protocol) default has been changed from 2 to 3, enabling all [Autopilot](https://www.consul.io/docs/guides/autopilot.html) features by default. Version 3 requires Consul running 0.8.0 or newer on all servers in order to work, so if you are upgrading with older servers in a cluster then you will need to set this back to 2 in order to upgrade. See [Raft Protocol Version Compatibility](https://www.consul.io/docs/upgrade-specific.html#raft-protocol-version-compatibility) for more details. Also the format of `peers.json` used for outage recovery is different when running with the lastest Raft protocol. See [Manual Recovery Using peers.json](https://www.consul.io/docs/guides/outage.html#manual-recovery-using-peers-json) for a description of the required format. [[GH-3477](https://github.com/hashicorp/consul/issues/3477)] -* **Config Files Require an Extension:** As part of supporting the [HCL](https://github.com/hashicorp/hcl#syntax) format for Consul's config files, an `.hcl` or `.json` extension is required for all config files loaded by Consul, even when using the [`-config-file`](https://www.consul.io/docs/agent/options.html#_config_file) argument to specify a file directly. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] +* **Raft Protocol Now Defaults to 3:** The [`-raft-protocol`](https://www.consul.io/docs/agent/config/cli-flags.html#_raft_protocol) default has been changed from 2 to 3, enabling all [Autopilot](https://www.consul.io/docs/guides/autopilot.html) features by default. Version 3 requires Consul running 0.8.0 or newer on all servers in order to work, so if you are upgrading with older servers in a cluster then you will need to set this back to 2 in order to upgrade. See [Raft Protocol Version Compatibility](https://www.consul.io/docs/upgrade-specific.html#raft-protocol-version-compatibility) for more details. Also the format of `peers.json` used for outage recovery is different when running with the lastest Raft protocol. See [Manual Recovery Using peers.json](https://www.consul.io/docs/guides/outage.html#manual-recovery-using-peers-json) for a description of the required format. [[GH-3477](https://github.com/hashicorp/consul/issues/3477)] +* **Config Files Require an Extension:** As part of supporting the [HCL](https://github.com/hashicorp/hcl#syntax) format for Consul's config files, an `.hcl` or `.json` extension is required for all config files loaded by Consul, even when using the [`-config-file`](https://www.consul.io/docs/agent/config/cli-flags.html#_config_file) argument to specify a file directly. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] * **Deprecated Options Have Been Removed:** All of Consul's previously deprecated command line flags and config options have been removed, so these will need to be mapped to their equivalents before upgrading. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)]
Detailed List of Removed Options and their Equivalents @@ -2602,35 +2602,35 @@ BREAKING CHANGES: | `-atlas-token`| None, Atlas is no longer supported. | | `-atlas-join` | None, Atlas is no longer supported. | | `-atlas-endpoint` | None, Atlas is no longer supported. | - | `-dc` | [`-datacenter`](https://www.consul.io/docs/agent/options.html#_datacenter) | - | `-retry-join-azure-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#microsoft-azure) | - | `-retry-join-azure-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#microsoft-azure) | - | `-retry-join-ec2-region` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#amazon-ec2) | - | `-retry-join-ec2-tag-key` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#amazon-ec2) | - | `-retry-join-ec2-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#amazon-ec2) | - | `-retry-join-gce-credentials-file` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#google-compute-engine) | - | `-retry-join-gce-project-name` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#google-compute-engine) | - | `-retry-join-gce-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#google-compute-engine) | - | `-retry-join-gce-zone-pattern` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#google-compute-engine) | + | `-dc` | [`-datacenter`](https://www.consul.io/docs/agent/config/cli-flags.html#_datacenter) | + | `-retry-join-azure-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-azure-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-ec2-region` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-ec2-tag-key` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-ec2-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-gce-credentials-file` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-gce-project-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-gce-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-retry-join-gce-zone-pattern` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | | `addresses.rpc` | None, the RPC server for CLI commands is no longer supported. | - | `advertise_addrs` | [`ports`](https://www.consul.io/docs/agent/options.html#ports) with [`advertise_addr`](https://www.consul/io/docs/agent/options.html#advertise_addr) and/or [`advertise_addr_wan`](https://www.consul.io/docs/agent/options.html#advertise_addr_wan) | + | `advertise_addrs` | [`ports`](https://www.consul.io/docs/agent/config/config-files.html#ports) with [`advertise_addr`](https://www.consul/io/docs/agent/config/config-files.html#advertise_addr) and/or [`advertise_addr_wan`](https://www.consul.io/docs/agent/config/config-files.html#advertise_addr_wan) | | `atlas_infrastructure` | None, Atlas is no longer supported. | | `atlas_token` | None, Atlas is no longer supported. | | `atlas_acl_token` | None, Atlas is no longer supported. | | `atlas_join` | None, Atlas is no longer supported. | | `atlas_endpoint` | None, Atlas is no longer supported. 
| - | `dogstatsd_addr` | [`telemetry.dogstatsd_addr`](https://www.consul.io/docs/agent/options.html#telemetry-dogstatsd_addr) | - | `dogstatsd_tags` | [`telemetry.dogstatsd_tags`](https://www.consul.io/docs/agent/options.html#telemetry-dogstatsd_tags) | - | `http_api_response_headers` | [`http_config.response_headers`](https://www.consul.io/docs/agent/options.html#response_headers) | + | `dogstatsd_addr` | [`telemetry.dogstatsd_addr`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-dogstatsd_addr) | + | `dogstatsd_tags` | [`telemetry.dogstatsd_tags`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-dogstatsd_tags) | + | `http_api_response_headers` | [`http_config.response_headers`](https://www.consul.io/docs/agent/config/config-files.html#response_headers) | | `ports.rpc` | None, the RPC server for CLI commands is no longer supported. | - | `recursor` | [`recursors`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/options.html.md#recursors) | - | `retry_join_azure` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#microsoft-azure) | - | `retry_join_ec2` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#amazon-ec2) | - | `retry_join_gce` | [`-retry-join`](https://www.consul.io/docs/agent/options.html#google-compute-engine) | - | `statsd_addr` | [`telemetry.statsd_address`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/options.html.md#telemetry-statsd_address) | - | `statsite_addr` | [`telemetry.statsite_address`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/options.html.md#telemetry-statsite_address) | - | `statsite_prefix` | [`telemetry.metrics_prefix`](https://www.consul.io/docs/agent/options.html#telemetry-metrics_prefix) | - | `telemetry.statsite_prefix` | [`telemetry.metrics_prefix`](https://www.consul.io/docs/agent/options.html#telemetry-metrics_prefix) | + | `recursor` | [`recursors`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/config/config-files.html.md#recursors) | + | `retry_join_azure` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `retry_join_ec2` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `retry_join_gce` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `statsd_addr` | [`telemetry.statsd_address`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/config/config-files.html.md#telemetry-statsd_address) | + | `statsite_addr` | [`telemetry.statsite_address`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/config/config-files.html.md#telemetry-statsite_address) | + | `statsite_prefix` | [`telemetry.metrics_prefix`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-metrics_prefix) | + | `telemetry.statsite_prefix` | [`telemetry.metrics_prefix`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-metrics_prefix) | | (service definitions) `serviceid` | [`service_id`](https://www.consul.io/docs/agent/services.html) | | (service definitions) `dockercontainerid` | [`docker_container_id`](https://www.consul.io/docs/agent/services.html) | | (service definitions) `tlsskipverify` | [`tls_skip_verify`](https://www.consul.io/docs/agent/services.html) | @@ -2638,9 +2638,9 @@ BREAKING CHANGES:
-* **`statsite_prefix` Renamed to `metrics_prefix`:** Since the `statsite_prefix` configuration option applied to all telemetry providers, `statsite_prefix` was renamed to [`metrics_prefix`](https://www.consul.io/docs/agent/options.html#telemetry-metrics_prefix). Configuration files will need to be updated when upgrading to this version of Consul. [[GH-3498](https://github.com/hashicorp/consul/issues/3498)] +* **`statsite_prefix` Renamed to `metrics_prefix`:** Since the `statsite_prefix` configuration option applied to all telemetry providers, `statsite_prefix` was renamed to [`metrics_prefix`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-metrics_prefix). Configuration files will need to be updated when upgrading to this version of Consul. [[GH-3498](https://github.com/hashicorp/consul/issues/3498)] * **`advertise_addrs` Removed:** This configuration option was removed since it was redundant with `advertise_addr` and `advertise_addr_wan` in combination with `ports` and also wrongly stated that you could configure both host and port. [[GH-3516](https://github.com/hashicorp/consul/issues/3516)] -* **Escaping Behavior Changed for go-discover Configs:** The format for [`-retry-join`](https://www.consul.io/docs/agent/options.html#retry-join) and [`-retry-join-wan`](https://www.consul.io/docs/agent/options.html#retry-join-wan) values that use [go-discover](https://github.com/hashicorp/go-discover) Cloud auto joining has changed. Values in `key=val` sequences must no longer be URL encoded and can be provided as literals as long as they do not contain spaces, backslashes `\` or double quotes `"`. If values contain these characters then use double quotes as in `"some key"="some value"`. Special characters within a double quoted string can be escaped with a backslash `\`. [[GH-3417](https://github.com/hashicorp/consul/issues/3417)] +* **Escaping Behavior Changed for go-discover Configs:** The format for [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) and [`-retry-join-wan`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join_wan) values that use [go-discover](https://github.com/hashicorp/go-discover) Cloud auto joining has changed. Values in `key=val` sequences must no longer be URL encoded and can be provided as literals as long as they do not contain spaces, backslashes `\` or double quotes `"`. If values contain these characters then use double quotes as in `"some key"="some value"`. Special characters within a double quoted string can be escaped with a backslash `\`. [[GH-3417](https://github.com/hashicorp/consul/issues/3417)] * **HTTP Verbs are Enforced in Many HTTP APIs:** Many endpoints in the HTTP API that previously took any HTTP verb now check for specific HTTP verbs and enforce them. This may break clients relying on the old behavior. [[GH-3405](https://github.com/hashicorp/consul/issues/3405)]
Detailed List of Updated Endpoints and Required HTTP Verbs @@ -2721,7 +2721,7 @@ BREAKING CHANGES: FEATURES: * **Support for HCL Config Files:** Consul now supports HashiCorp's [HCL](https://github.com/hashicorp/hcl#syntax) format for config files. This is easier to work with than JSON and supports comments. As part of this change, all config files will need to have either an `.hcl` or `.json` extension in order to specify their format. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] -* **Support for Binding to Multiple Addresses:** Consul now supports binding to multiple addresses for its HTTP, HTTPS, and DNS services. You can provide a space-separated list of addresses to [`-client`](https://www.consul.io/docs/agent/options.html#_client) and [`addresses`](https://www.consul.io/docs/agent/options.html#addresses) configurations, or specify a [go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr/template) template that resolves to multiple addresses. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] +* **Support for Binding to Multiple Addresses:** Consul now supports binding to multiple addresses for its HTTP, HTTPS, and DNS services. You can provide a space-separated list of addresses to [`-client`](https://www.consul.io/docs/agent/config/cli-flags.html#_client) and [`addresses`](https://www.consul.io/docs/agent/config/config-files.html#addresses) configurations, or specify a [go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr/template) template that resolves to multiple addresses. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] * **Support for RFC1464 DNS TXT records:** Consul DNS responses now contain the node meta data encoded according to RFC1464 as TXT records. [[GH-3343](https://github.com/hashicorp/consul/issues/3343)] * **Support for Running Subproccesses Directly Without a Shell:** Consul agent checks and watches now support an `args` configuration which is a list of arguments to run for the subprocess, which runs the subprocess directly without a shell. The old `script` and `handler` configurations are now deprecated (specify a shell explicitly if you require one). A `-shell=false` option is also available on `consul lock`, `consul watch`, and `consul exec` to run the subprocesses associated with those without a shell. [[GH-3509](https://github.com/hashicorp/consul/issues/3509)] * **Sentinel Integration:** (Consul Enterprise) Consul's ACL system integrates with [Sentinel](https://www.consul.io/docs/guides/sentinel.html) to enable code policies that apply to KV writes. @@ -2732,7 +2732,7 @@ IMPROVEMENTS: * agent: Improved /v1/operator/raft/configuration endpoint which allows Consul to avoid an extra agent RPC call for the `consul operator raft list-peers` command. [[GH-3449](https://github.com/hashicorp/consul/issues/3449)] * agent: Improved ACL system for the KV store to support list permissions. This behavior can be opted in. For more information, see the [ACL Guide](https://www.consul.io/docs/guides/acl.html#list-policy-for-keys). [[GH-3511](https://github.com/hashicorp/consul/issues/3511)] * agent: Updates miekg/dns library to later version to pick up bug fixes and improvements. [[GH-3547](https://github.com/hashicorp/consul/issues/3547)] -* agent: Added automatic retries to the RPC path, and a brief RPC drain time when servers leave. These changes make Consul more robust during graceful leaves of Consul servers, such as during upgrades, and help shield applications from "no leader" errors. 
These are configured with new [`performance`](https://www.consul.io/docs/agent/options.html#performance) options. [[GH-3514](https://github.com/hashicorp/consul/issues/3514)] +* agent: Added automatic retries to the RPC path, and a brief RPC drain time when servers leave. These changes make Consul more robust during graceful leaves of Consul servers, such as during upgrades, and help shield applications from "no leader" errors. These are configured with new [`performance`](https://www.consul.io/docs/agent/config/config-files.html#performance) options. [[GH-3514](https://github.com/hashicorp/consul/issues/3514)] * agent: Added a new `discard_check_output` agent-level configuration option that can be used to trade off write load to the Consul servers vs. visibility of health check output. This is reloadable so it can be toggled without fully restarting the agent. [[GH-3562](https://github.com/hashicorp/consul/issues/3562)] * api: Updated the API client to ride out network errors when monitoring locks and semaphores. [[GH-3553](https://github.com/hashicorp/consul/issues/3553)] * build: Updated Go toolchain to version 1.9.1. [[GH-3537](https://github.com/hashicorp/consul/issues/3537)] @@ -2760,7 +2760,7 @@ SECURITY: FEATURES: * **LAN Network Segments:** (Consul Enterprise) Added a new [Network Segments](https://www.consul.io/docs/guides/segments.html) capability which allows users to configure Consul to support segmented LAN topologies with multiple, distinct gossip pools. [[GH-3431](https://github.com/hashicorp/consul/issues/3431)] * **WAN Join for Cloud Providers:** Added WAN support for retry join for Cloud providers via go-discover, including Amazon AWS, Microsoft Azure, Google Cloud, and SoftLayer. This uses the same "provider" syntax supported for `-retry-join` via the `-retry-join-wan` configuration. [[GH-3406](https://github.com/hashicorp/consul/issues/3406)] -* **RPC Rate Limiter:** Consul agents in client mode have a new [`limits`](https://www.consul.io/docs/agent/options.html#limits) configuration that enables a rate limit on RPC calls the agent makes to Consul servers. [[GH-3140](https://github.com/hashicorp/consul/issues/3140)] +* **RPC Rate Limiter:** Consul agents in client mode have a new [`limits`](https://www.consul.io/docs/agent/config/config-files.html#limits) configuration that enables a rate limit on RPC calls the agent makes to Consul servers. [[GH-3140](https://github.com/hashicorp/consul/issues/3140)] IMPROVEMENTS: @@ -2790,13 +2790,13 @@ BUG FIXES: FEATURES: * **Secure ACL Token Introduction:** It's now possible to manage Consul's ACL tokens without having to place any tokens inside configuration files. This supports introduction of tokens as well as rotating. This is enabled with two new APIs: - * A new [`/v1/agent/token`](https://www.consul.io/api/agent.html#update-acl-tokens) API allows an agent's ACL tokens to be introduced without placing them into config files, and to update them without restarting the agent. See the [ACL Guide](https://www.consul.io/docs/guides/acl.html#create-an-agent-token) for an example. This was extended to ACL replication as well, along with a new [`enable_acl_replication`](https://www.consul.io/docs/agent/options.html#enable_acl_replication) config option. [GH-3324,GH-3357] + * A new [`/v1/agent/token`](https://www.consul.io/api/agent.html#update-acl-tokens) API allows an agent's ACL tokens to be introduced without placing them into config files, and to update them without restarting the agent. 
See the [ACL Guide](https://www.consul.io/docs/guides/acl.html#create-an-agent-token) for an example. This was extended to ACL replication as well, along with a new [`enable_acl_replication`](https://www.consul.io/docs/agent/config/config-files.html#enable_acl_replication) config option. [GH-3324,GH-3357] * A new [`/v1/acl/bootstrap`](https://www.consul.io/api/acl.html#bootstrap-acls) allows a cluster's first management token to be created without using the `acl_master_token` configuration. See the [ACL Guide](https://www.consul.io/docs/guides/acl.html#bootstrapping-acls) for an example. [[GH-3349](https://github.com/hashicorp/consul/issues/3349)] * **Metrics Viewing Endpoint:** A new [`/v1/agent/metrics`](https://www.consul.io/api/agent.html#view-metrics) API displays the current values of internally tracked metrics. [[GH-3369](https://github.com/hashicorp/consul/issues/3369)] IMPROVEMENTS: -* agent: Retry Join for Amazon AWS, Microsoft Azure, Google Cloud, and (new) SoftLayer is now handled through the https://github.com/hashicorp/go-discover library. With this all `-retry-join-{ec2,azure,gce}-*` parameters have been deprecated in favor of a unified configuration. See [`-retry-join`](https://www.consul.io/docs/agent/options.html#_retry_join) for details. [GH-3282,GH-3351] +* agent: Retry Join for Amazon AWS, Microsoft Azure, Google Cloud, and (new) SoftLayer is now handled through the https://github.com/hashicorp/go-discover library. With this all `-retry-join-{ec2,azure,gce}-*` parameters have been deprecated in favor of a unified configuration. See [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) for details. [GH-3282,GH-3351] * agent: Reports a more detailed error message if the LAN or WAN Serf instance fails to bind to an address. [[GH-3312](https://github.com/hashicorp/consul/issues/3312)] * agent: Added NS records and corrected SOA records to allow Consul's DNS interface to work properly with zone delegation. [[GH-1301](https://github.com/hashicorp/consul/issues/1301)] * agent: Added support for sending metrics with labels/tags to supported backends. [[GH-3369](https://github.com/hashicorp/consul/issues/3369)] @@ -2820,13 +2820,13 @@ BUG FIXES: BREAKING CHANGES: -* agent: Added a new [`enable_script_checks`](https://www.consul.io/docs/agent/options.html#_enable_script_checks) configuration option that defaults to `false`, meaning that in order to allow an agent to run health checks that execute scripts, this will need to be configured and set to `true`. This provides a safer out-of-the-box configuration for Consul where operators must opt-in to allow script-based health checks. [[GH-3087](https://github.com/hashicorp/consul/issues/3087)] +* agent: Added a new [`enable_script_checks`](https://www.consul.io/docs/agent/config/cli-flags.html#_enable_script_checks) configuration option that defaults to `false`, meaning that in order to allow an agent to run health checks that execute scripts, this will need to be configured and set to `true`. This provides a safer out-of-the-box configuration for Consul where operators must opt-in to allow script-based health checks. [[GH-3087](https://github.com/hashicorp/consul/issues/3087)] * api: Reworked `context` support in the API client to more closely match the Go standard library, and added context support to write requests in addition to read requests. [GH-3273, GH-2992] * ui: Since the UI is now bundled with the application we no longer provide a separate UI package for downloading. 
[[GH-3292](https://github.com/hashicorp/consul/issues/3292)] FEATURES: -* agent: Added a new [`block_endpoints`](https://www.consul.io/docs/agent/options.html#block_endpoints) configuration option that allows blocking HTTP API endpoints by prefix. This allows operators to completely disallow access to specific endpoints on a given agent. [[GH-3252](https://github.com/hashicorp/consul/issues/3252)] +* agent: Added a new [`block_endpoints`](https://www.consul.io/docs/agent/config/config-files.html#block_endpoints) configuration option that allows blocking HTTP API endpoints by prefix. This allows operators to completely disallow access to specific endpoints on a given agent. [[GH-3252](https://github.com/hashicorp/consul/issues/3252)] * cli: Added a new [`consul catalog`](https://www.consul.io/docs/commands/catalog.html) command for reading datacenters, nodes, and services from the catalog. [[GH-3204](https://github.com/hashicorp/consul/issues/3204)] * server: (Consul Enterprise) Added a new [`consul operator area update`](https://www.consul.io/docs/commands/operator/area.html#update) command and corresponding HTTP endpoint to allow for transitioning the TLS setting of network areas at runtime. [[GH-3075](https://github.com/hashicorp/consul/issues/3075)] * server: (Consul Enterprise) Added a new `UpgradeVersionTag` field to the Autopilot config to allow for using the migration feature to roll out configuration or cluster changes, without having to upgrade Consul itself. @@ -2854,7 +2854,7 @@ BUG FIXES: * agent: Fixed an issue in the Docker client where Docker checks would get EOF errors trying to connect to a volume-mounted Docker socket. [[GH-3254](https://github.com/hashicorp/consul/issues/3254)] * agent: Fixed a crash when using Azure auto discovery. [[GH-3193](https://github.com/hashicorp/consul/issues/3193)] * agent: Added `node` read privileges to the `acl_agent_master_token` by default so it can see all nodes, which enables it to be used with operations like `consul members`. [[GH-3113](https://github.com/hashicorp/consul/issues/3113)] -* agent: Fixed an issue where enabling [`-disable-keyring-file`](https://www.consul.io/docs/agent/options.html#_disable_keyring_file) would cause gossip encryption to be disabled. [[GH-3243](https://github.com/hashicorp/consul/issues/3243)] +* agent: Fixed an issue where enabling [`-disable-keyring-file`](https://www.consul.io/docs/agent/config/cli-flags.html#_disable_keyring_file) would cause gossip encryption to be disabled. [[GH-3243](https://github.com/hashicorp/consul/issues/3243)] * agent: Fixed a race condition where checks that are not associated with any existing services were allowed to persist. [[GH-3297](https://github.com/hashicorp/consul/issues/3297)] * agent: Stop docker checks on service deregistration and on shutdown. [GH-3265, GH-3295] * server: Updated the Raft library to pull in a fix where servers that are very far behind in replication can get stuck in a loop trying to install snapshots. [[GH-3201](https://github.com/hashicorp/consul/issues/3201)] @@ -2871,7 +2871,7 @@ BUG FIXES: BREAKING CHANGES: * agent: Parse values given to `?passing` for health endpoints. Previously Consul only checked for the existence of the querystring, not the value. That means using `?passing=false` would actually still include passing values. Consul now parses the value given to passing as a boolean. If no value is provided, the old behavior remains. 
This may be a breaking change for some users, but the old experience was incorrect and caused enough confusion to warrant changing it. [GH-2212, GH-3136] -* agent: The default value of [`-disable-host-node-id`](https://www.consul.io/docs/agent/options.html#_disable_host_node_id) has been changed from false to true. This means you need to opt-in to host-based node IDs and by default Consul will generate a random node ID. A high number of users struggled to deploy newer versions of Consul with host-based IDs because of various edge cases of how the host IDs work in Docker, on specially-provisioned machines, etc. so changing this from opt-out to opt-in will ease operations for many Consul users. [[GH-3171](https://github.com/hashicorp/consul/issues/3171)] +* agent: The default value of [`-disable-host-node-id`](https://www.consul.io/docs/agent/config/cli-flags.html#_disable_host_node_id) has been changed from false to true. This means you need to opt-in to host-based node IDs and by default Consul will generate a random node ID. A high number of users struggled to deploy newer versions of Consul with host-based IDs because of various edge cases of how the host IDs work in Docker, on specially-provisioned machines, etc. so changing this from opt-out to opt-in will ease operations for many Consul users. [[GH-3171](https://github.com/hashicorp/consul/issues/3171)] IMPROVEMENTS: diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 241f7ca53..548bfe78f 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -822,7 +822,7 @@ type RuntimeConfig struct { // PrimaryGateways is a list of addresses and/or go-discover expressions to // discovery the mesh gateways in the primary datacenter. See - // https://www.consul.io/docs/agent/options.html#cloud-auto-joining for + // https://www.consul.io/docs/agent/config/cli-flags.html#cloud-auto-joining for // details. // // hcl: primary_gateways = []string @@ -978,7 +978,7 @@ type RuntimeConfig struct { // RetryJoinLAN is a list of addresses and/or go-discover expressions to // join with retry enabled. See - // https://www.consul.io/docs/agent/options.html#cloud-auto-joining for + // https://www.consul.io/docs/agent/config/cli-flags.html#cloud-auto-joining for // details. // // hcl: retry_join = []string @@ -1003,7 +1003,7 @@ type RuntimeConfig struct { // RetryJoinWAN is a list of addresses and/or go-discover expressions to // join -wan with retry enabled. See - // https://www.consul.io/docs/agent/options.html#cloud-auto-joining for + // https://www.consul.io/docs/agent/config/cli-flags.html#cloud-auto-joining for // details. // // hcl: retry_join_wan = []string diff --git a/agent/consul/server.go b/agent/consul/server.go index a3effba97..fb9255f54 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -1591,7 +1591,7 @@ const peersInfoContent = ` As of Consul 0.7.0, the peers.json file is only used for recovery after an outage. The format of this file depends on what the server has configured for its Raft protocol version. Please see the agent configuration -page at https://www.consul.io/docs/agent/options.html#_raft_protocol for more +page at https://www.consul.io/docs/agent/config/cli-flags.html#_raft_protocol for more details about this parameter. 
For Raft protocol version 2 and earlier, this should be formatted as a JSON diff --git a/agent/kvs_endpoint.go b/agent/kvs_endpoint.go index 4b8cc3348..85273aa8e 100644 --- a/agent/kvs_endpoint.go +++ b/agent/kvs_endpoint.go @@ -210,7 +210,7 @@ func (s *HTTPHandlers) KVSPut(resp http.ResponseWriter, req *http.Request, args if req.ContentLength > int64(s.agent.config.KVMaxValueSize) { return nil, EntityTooLargeError{ Reason: fmt.Sprintf("Request body(%d bytes) too large, max size: %d bytes. See %s.", - req.ContentLength, s.agent.config.KVMaxValueSize, "https://www.consul.io/docs/agent/options.html#kv_max_value_size"), + req.ContentLength, s.agent.config.KVMaxValueSize, "https://www.consul.io/docs/agent/config/config-files#kv_max_value_size"), } } diff --git a/agent/txn_endpoint.go b/agent/txn_endpoint.go index 54338c86b..c75d30bcc 100644 --- a/agent/txn_endpoint.go +++ b/agent/txn_endpoint.go @@ -90,7 +90,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( if req.ContentLength > maxTxnLen { return nil, 0, EntityTooLargeError{ Reason: fmt.Sprintf("Request body(%d bytes) too large, max size: %d bytes. See %s.", - req.ContentLength, maxTxnLen, "https://www.consul.io/docs/agent/options.html#txn_max_req_len"), + req.ContentLength, maxTxnLen, "https://www.consul.io/docs/agent/config/config-files#txn_max_req_len"), } } @@ -102,7 +102,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( // if the Content-Length header was not set by the client. return nil, 0, EntityTooLargeError{ Reason: fmt.Sprintf("Request body too large, max size: %d bytes. See %s.", - maxTxnLen, "https://www.consul.io/docs/agent/options.html#txn_max_req_len"), + maxTxnLen, "https://www.consul.io/docs/agent/config/config-files#txn_max_req_len"), } } else { // Note the body is in API format, and not the RPC format. If we can't From 3175bf6b1bcb3db245f0ca91fd3be433e313f219 Mon Sep 17 00:00:00 2001 From: Blake Covarrubias Date: Tue, 15 Mar 2022 18:10:49 -0700 Subject: [PATCH 127/785] Remove .html extensions from docs URLs --- CHANGELOG.md | 46 ++++++++++++++++++++--------------------- agent/config/runtime.go | 6 +++--- agent/consul/server.go | 2 +- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9e452af0..b4188590d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1241,7 +1241,7 @@ BUG FIXES: * server: When wan federating via mesh gateways only do heuristic primary DC bypass on the leader. [[GH-9366](https://github.com/hashicorp/consul/issues/9366)] * xds: deduplicate mesh gateway listeners by address in a stable way to prevent some LDS churn [[GH-9650](https://github.com/hashicorp/consul/issues/9650)] * xds: prevent LDS flaps in mesh gateways due to unstable datacenter lists; also prevent some flaps in terminating gateways as well [[GH-9651](https://github.com/hashicorp/consul/issues/9651)] -* +* ## 1.8.8 (January 22, 2021) BUG FIXES: @@ -2559,7 +2559,7 @@ IMPROVEMENTS: * agent: (Consul Enterprise) Added [AWS KMS support](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) for S3 snapshots using the snapshot agent. * agent: Watches in the Consul agent can now be configured to invoke an HTTP endpoint instead of an executable. [[GH-3305](https://github.com/hashicorp/consul/issues/3305)] -* agent: Added a new [`-config-format`](https://www.consul.io/docs/agent/config/cli-flags.html#_config_format) command line option which can be set to `hcl` or `json` to specify the format of configuration files. 
This is useful for cases where the file name cannot be controlled in order to provide the required extension. [[GH-3620](https://github.com/hashicorp/consul/issues/3620)] +* agent: Added a new [`-config-format`](https://www.consul.io/docs/agent/config/cli-flags#_config_format) command line option which can be set to `hcl` or `json` to specify the format of configuration files. This is useful for cases where the file name cannot be controlled in order to provide the required extension. [[GH-3620](https://github.com/hashicorp/consul/issues/3620)] * agent: DNS recursors can now be specified as [go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr/template) templates. [[GH-2932](https://github.com/hashicorp/consul/issues/2932)] * agent: Serf snapshots no longer save network coordinate information. This enables recovery from errors upon agent restart. [[GH-489](https://github.com/hashicorp/serf/issues/489)] * agent: Added defensive code to prevent out of range ping times from infecting network coordinates. Updates to the coordinate system with negative round trip times or round trip times higher than 10 seconds will log an error but will be ignored. @@ -2590,8 +2590,8 @@ SECURITY: BREAKING CHANGES: -* **Raft Protocol Now Defaults to 3:** The [`-raft-protocol`](https://www.consul.io/docs/agent/config/cli-flags.html#_raft_protocol) default has been changed from 2 to 3, enabling all [Autopilot](https://www.consul.io/docs/guides/autopilot.html) features by default. Version 3 requires Consul running 0.8.0 or newer on all servers in order to work, so if you are upgrading with older servers in a cluster then you will need to set this back to 2 in order to upgrade. See [Raft Protocol Version Compatibility](https://www.consul.io/docs/upgrade-specific.html#raft-protocol-version-compatibility) for more details. Also the format of `peers.json` used for outage recovery is different when running with the lastest Raft protocol. See [Manual Recovery Using peers.json](https://www.consul.io/docs/guides/outage.html#manual-recovery-using-peers-json) for a description of the required format. [[GH-3477](https://github.com/hashicorp/consul/issues/3477)] -* **Config Files Require an Extension:** As part of supporting the [HCL](https://github.com/hashicorp/hcl#syntax) format for Consul's config files, an `.hcl` or `.json` extension is required for all config files loaded by Consul, even when using the [`-config-file`](https://www.consul.io/docs/agent/config/cli-flags.html#_config_file) argument to specify a file directly. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] +* **Raft Protocol Now Defaults to 3:** The [`-raft-protocol`](https://www.consul.io/docs/agent/config/cli-flags#_raft_protocol) default has been changed from 2 to 3, enabling all [Autopilot](https://www.consul.io/docs/guides/autopilot.html) features by default. Version 3 requires Consul running 0.8.0 or newer on all servers in order to work, so if you are upgrading with older servers in a cluster then you will need to set this back to 2 in order to upgrade. See [Raft Protocol Version Compatibility](https://www.consul.io/docs/upgrade-specific.html#raft-protocol-version-compatibility) for more details. Also the format of `peers.json` used for outage recovery is different when running with the lastest Raft protocol. See [Manual Recovery Using peers.json](https://www.consul.io/docs/guides/outage.html#manual-recovery-using-peers-json) for a description of the required format. 
[[GH-3477](https://github.com/hashicorp/consul/issues/3477)] +* **Config Files Require an Extension:** As part of supporting the [HCL](https://github.com/hashicorp/hcl#syntax) format for Consul's config files, an `.hcl` or `.json` extension is required for all config files loaded by Consul, even when using the [`-config-file`](https://www.consul.io/docs/agent/config/cli-flags#_config_file) argument to specify a file directly. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] * **Deprecated Options Have Been Removed:** All of Consul's previously deprecated command line flags and config options have been removed, so these will need to be mapped to their equivalents before upgrading. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)]
Detailed List of Removed Options and their Equivalents @@ -2602,16 +2602,16 @@ BREAKING CHANGES: | `-atlas-token`| None, Atlas is no longer supported. | | `-atlas-join` | None, Atlas is no longer supported. | | `-atlas-endpoint` | None, Atlas is no longer supported. | - | `-dc` | [`-datacenter`](https://www.consul.io/docs/agent/config/cli-flags.html#_datacenter) | - | `-retry-join-azure-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-azure-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-ec2-region` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-ec2-tag-key` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-ec2-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-gce-credentials-file` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-gce-project-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-gce-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `-retry-join-gce-zone-pattern` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `-dc` | [`-datacenter`](https://www.consul.io/docs/agent/config/cli-flags#_datacenter) | + | `-retry-join-azure-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-azure-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-ec2-region` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-ec2-tag-key` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-ec2-tag-value` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-gce-credentials-file` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-gce-project-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-gce-tag-name` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `-retry-join-gce-zone-pattern` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | | `addresses.rpc` | None, the RPC server for CLI commands is no longer supported. | | `advertise_addrs` | [`ports`](https://www.consul.io/docs/agent/config/config-files.html#ports) with [`advertise_addr`](https://www.consul/io/docs/agent/config/config-files.html#advertise_addr) and/or [`advertise_addr_wan`](https://www.consul.io/docs/agent/config/config-files.html#advertise_addr_wan) | | `atlas_infrastructure` | None, Atlas is no longer supported. | @@ -2624,9 +2624,9 @@ BREAKING CHANGES: | `http_api_response_headers` | [`http_config.response_headers`](https://www.consul.io/docs/agent/config/config-files.html#response_headers) | | `ports.rpc` | None, the RPC server for CLI commands is no longer supported. 
| | `recursor` | [`recursors`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/config/config-files.html.md#recursors) | - | `retry_join_azure` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `retry_join_ec2` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | - | `retry_join_gce` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) | + | `retry_join_azure` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `retry_join_ec2` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | + | `retry_join_gce` | [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) | | `statsd_addr` | [`telemetry.statsd_address`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/config/config-files.html.md#telemetry-statsd_address) | | `statsite_addr` | [`telemetry.statsite_address`](https://github.com/hashicorp/consul/blob/main/website/source/docs/agent/config/config-files.html.md#telemetry-statsite_address) | | `statsite_prefix` | [`telemetry.metrics_prefix`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-metrics_prefix) | @@ -2640,7 +2640,7 @@ BREAKING CHANGES: * **`statsite_prefix` Renamed to `metrics_prefix`:** Since the `statsite_prefix` configuration option applied to all telemetry providers, `statsite_prefix` was renamed to [`metrics_prefix`](https://www.consul.io/docs/agent/config/config-files.html#telemetry-metrics_prefix). Configuration files will need to be updated when upgrading to this version of Consul. [[GH-3498](https://github.com/hashicorp/consul/issues/3498)] * **`advertise_addrs` Removed:** This configuration option was removed since it was redundant with `advertise_addr` and `advertise_addr_wan` in combination with `ports` and also wrongly stated that you could configure both host and port. [[GH-3516](https://github.com/hashicorp/consul/issues/3516)] -* **Escaping Behavior Changed for go-discover Configs:** The format for [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) and [`-retry-join-wan`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join_wan) values that use [go-discover](https://github.com/hashicorp/go-discover) Cloud auto joining has changed. Values in `key=val` sequences must no longer be URL encoded and can be provided as literals as long as they do not contain spaces, backslashes `\` or double quotes `"`. If values contain these characters then use double quotes as in `"some key"="some value"`. Special characters within a double quoted string can be escaped with a backslash `\`. [[GH-3417](https://github.com/hashicorp/consul/issues/3417)] +* **Escaping Behavior Changed for go-discover Configs:** The format for [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) and [`-retry-join-wan`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join_wan) values that use [go-discover](https://github.com/hashicorp/go-discover) Cloud auto joining has changed. Values in `key=val` sequences must no longer be URL encoded and can be provided as literals as long as they do not contain spaces, backslashes `\` or double quotes `"`. If values contain these characters then use double quotes as in `"some key"="some value"`. Special characters within a double quoted string can be escaped with a backslash `\`. 
[[GH-3417](https://github.com/hashicorp/consul/issues/3417)] * **HTTP Verbs are Enforced in Many HTTP APIs:** Many endpoints in the HTTP API that previously took any HTTP verb now check for specific HTTP verbs and enforce them. This may break clients relying on the old behavior. [[GH-3405](https://github.com/hashicorp/consul/issues/3405)]
Detailed List of Updated Endpoints and Required HTTP Verbs @@ -2721,7 +2721,7 @@ BREAKING CHANGES: FEATURES: * **Support for HCL Config Files:** Consul now supports HashiCorp's [HCL](https://github.com/hashicorp/hcl#syntax) format for config files. This is easier to work with than JSON and supports comments. As part of this change, all config files will need to have either an `.hcl` or `.json` extension in order to specify their format. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] -* **Support for Binding to Multiple Addresses:** Consul now supports binding to multiple addresses for its HTTP, HTTPS, and DNS services. You can provide a space-separated list of addresses to [`-client`](https://www.consul.io/docs/agent/config/cli-flags.html#_client) and [`addresses`](https://www.consul.io/docs/agent/config/config-files.html#addresses) configurations, or specify a [go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr/template) template that resolves to multiple addresses. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] +* **Support for Binding to Multiple Addresses:** Consul now supports binding to multiple addresses for its HTTP, HTTPS, and DNS services. You can provide a space-separated list of addresses to [`-client`](https://www.consul.io/docs/agent/config/cli-flags#_client) and [`addresses`](https://www.consul.io/docs/agent/config/config-files.html#addresses) configurations, or specify a [go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr/template) template that resolves to multiple addresses. [[GH-3480](https://github.com/hashicorp/consul/issues/3480)] * **Support for RFC1464 DNS TXT records:** Consul DNS responses now contain the node meta data encoded according to RFC1464 as TXT records. [[GH-3343](https://github.com/hashicorp/consul/issues/3343)] * **Support for Running Subproccesses Directly Without a Shell:** Consul agent checks and watches now support an `args` configuration which is a list of arguments to run for the subprocess, which runs the subprocess directly without a shell. The old `script` and `handler` configurations are now deprecated (specify a shell explicitly if you require one). A `-shell=false` option is also available on `consul lock`, `consul watch`, and `consul exec` to run the subprocesses associated with those without a shell. [[GH-3509](https://github.com/hashicorp/consul/issues/3509)] * **Sentinel Integration:** (Consul Enterprise) Consul's ACL system integrates with [Sentinel](https://www.consul.io/docs/guides/sentinel.html) to enable code policies that apply to KV writes. @@ -2796,7 +2796,7 @@ FEATURES: IMPROVEMENTS: -* agent: Retry Join for Amazon AWS, Microsoft Azure, Google Cloud, and (new) SoftLayer is now handled through the https://github.com/hashicorp/go-discover library. With this all `-retry-join-{ec2,azure,gce}-*` parameters have been deprecated in favor of a unified configuration. See [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags.html#_retry_join) for details. [GH-3282,GH-3351] +* agent: Retry Join for Amazon AWS, Microsoft Azure, Google Cloud, and (new) SoftLayer is now handled through the https://github.com/hashicorp/go-discover library. With this all `-retry-join-{ec2,azure,gce}-*` parameters have been deprecated in favor of a unified configuration. See [`-retry-join`](https://www.consul.io/docs/agent/config/cli-flags#_retry_join) for details. [GH-3282,GH-3351] * agent: Reports a more detailed error message if the LAN or WAN Serf instance fails to bind to an address. 
[[GH-3312](https://github.com/hashicorp/consul/issues/3312)] * agent: Added NS records and corrected SOA records to allow Consul's DNS interface to work properly with zone delegation. [[GH-1301](https://github.com/hashicorp/consul/issues/1301)] * agent: Added support for sending metrics with labels/tags to supported backends. [[GH-3369](https://github.com/hashicorp/consul/issues/3369)] @@ -2820,7 +2820,7 @@ BUG FIXES: BREAKING CHANGES: -* agent: Added a new [`enable_script_checks`](https://www.consul.io/docs/agent/config/cli-flags.html#_enable_script_checks) configuration option that defaults to `false`, meaning that in order to allow an agent to run health checks that execute scripts, this will need to be configured and set to `true`. This provides a safer out-of-the-box configuration for Consul where operators must opt-in to allow script-based health checks. [[GH-3087](https://github.com/hashicorp/consul/issues/3087)] +* agent: Added a new [`enable_script_checks`](https://www.consul.io/docs/agent/config/cli-flags#_enable_script_checks) configuration option that defaults to `false`, meaning that in order to allow an agent to run health checks that execute scripts, this will need to be configured and set to `true`. This provides a safer out-of-the-box configuration for Consul where operators must opt-in to allow script-based health checks. [[GH-3087](https://github.com/hashicorp/consul/issues/3087)] * api: Reworked `context` support in the API client to more closely match the Go standard library, and added context support to write requests in addition to read requests. [GH-3273, GH-2992] * ui: Since the UI is now bundled with the application we no longer provide a separate UI package for downloading. [[GH-3292](https://github.com/hashicorp/consul/issues/3292)] @@ -2854,7 +2854,7 @@ BUG FIXES: * agent: Fixed an issue in the Docker client where Docker checks would get EOF errors trying to connect to a volume-mounted Docker socket. [[GH-3254](https://github.com/hashicorp/consul/issues/3254)] * agent: Fixed a crash when using Azure auto discovery. [[GH-3193](https://github.com/hashicorp/consul/issues/3193)] * agent: Added `node` read privileges to the `acl_agent_master_token` by default so it can see all nodes, which enables it to be used with operations like `consul members`. [[GH-3113](https://github.com/hashicorp/consul/issues/3113)] -* agent: Fixed an issue where enabling [`-disable-keyring-file`](https://www.consul.io/docs/agent/config/cli-flags.html#_disable_keyring_file) would cause gossip encryption to be disabled. [[GH-3243](https://github.com/hashicorp/consul/issues/3243)] +* agent: Fixed an issue where enabling [`-disable-keyring-file`](https://www.consul.io/docs/agent/config/cli-flags#_disable_keyring_file) would cause gossip encryption to be disabled. [[GH-3243](https://github.com/hashicorp/consul/issues/3243)] * agent: Fixed a race condition where checks that are not associated with any existing services were allowed to persist. [[GH-3297](https://github.com/hashicorp/consul/issues/3297)] * agent: Stop docker checks on service deregistration and on shutdown. [GH-3265, GH-3295] * server: Updated the Raft library to pull in a fix where servers that are very far behind in replication can get stuck in a loop trying to install snapshots. [[GH-3201](https://github.com/hashicorp/consul/issues/3201)] @@ -2871,7 +2871,7 @@ BUG FIXES: BREAKING CHANGES: * agent: Parse values given to `?passing` for health endpoints. 
Previously Consul only checked for the existence of the querystring, not the value. That means using `?passing=false` would actually still include passing values. Consul now parses the value given to passing as a boolean. If no value is provided, the old behavior remains. This may be a breaking change for some users, but the old experience was incorrect and caused enough confusion to warrant changing it. [GH-2212, GH-3136] -* agent: The default value of [`-disable-host-node-id`](https://www.consul.io/docs/agent/config/cli-flags.html#_disable_host_node_id) has been changed from false to true. This means you need to opt-in to host-based node IDs and by default Consul will generate a random node ID. A high number of users struggled to deploy newer versions of Consul with host-based IDs because of various edge cases of how the host IDs work in Docker, on specially-provisioned machines, etc. so changing this from opt-out to opt-in will ease operations for many Consul users. [[GH-3171](https://github.com/hashicorp/consul/issues/3171)] +* agent: The default value of [`-disable-host-node-id`](https://www.consul.io/docs/agent/config/cli-flags#_disable_host_node_id) has been changed from false to true. This means you need to opt-in to host-based node IDs and by default Consul will generate a random node ID. A high number of users struggled to deploy newer versions of Consul with host-based IDs because of various edge cases of how the host IDs work in Docker, on specially-provisioned machines, etc. so changing this from opt-out to opt-in will ease operations for many Consul users. [[GH-3171](https://github.com/hashicorp/consul/issues/3171)] IMPROVEMENTS: diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 548bfe78f..a71dbc387 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -822,7 +822,7 @@ type RuntimeConfig struct { // PrimaryGateways is a list of addresses and/or go-discover expressions to // discovery the mesh gateways in the primary datacenter. See - // https://www.consul.io/docs/agent/config/cli-flags.html#cloud-auto-joining for + // https://www.consul.io/docs/agent/config/cli-flags#cloud-auto-joining for // details. // // hcl: primary_gateways = []string @@ -978,7 +978,7 @@ type RuntimeConfig struct { // RetryJoinLAN is a list of addresses and/or go-discover expressions to // join with retry enabled. See - // https://www.consul.io/docs/agent/config/cli-flags.html#cloud-auto-joining for + // https://www.consul.io/docs/agent/config/cli-flags#cloud-auto-joining for // details. // // hcl: retry_join = []string @@ -1003,7 +1003,7 @@ type RuntimeConfig struct { // RetryJoinWAN is a list of addresses and/or go-discover expressions to // join -wan with retry enabled. See - // https://www.consul.io/docs/agent/config/cli-flags.html#cloud-auto-joining for + // https://www.consul.io/docs/agent/config/cli-flags#cloud-auto-joining for // details. // // hcl: retry_join_wan = []string diff --git a/agent/consul/server.go b/agent/consul/server.go index fb9255f54..545fe5e1d 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -1591,7 +1591,7 @@ const peersInfoContent = ` As of Consul 0.7.0, the peers.json file is only used for recovery after an outage. The format of this file depends on what the server has configured for its Raft protocol version. 
Please see the agent configuration -page at https://www.consul.io/docs/agent/config/cli-flags.html#_raft_protocol for more +page at https://www.consul.io/docs/agent/config/cli-flags#_raft_protocol for more details about this parameter. For Raft protocol version 2 and earlier, this should be formatted as a JSON From 19cb9779e22f6af535a5026c9228018527b25a7a Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 12 Apr 2022 09:56:54 +0100 Subject: [PATCH 128/785] ui: Adds licensing overview tab (#12706) * Add some utilities/helpers to temporal for formatting etc * Enable the licensing tab * Add licensing page * Add CSS for licensing page * Fixup typo * Remove box shadow from panel --- .../consul-ui/app/helpers/temporal-format.js | 9 ++ .../consul-ui/app/helpers/temporal-within.js | 9 ++ .../consul-ui/app/services/temporal.js | 32 +++- .../app/styles/base/icons/icons/index.scss | 2 +- ui/packages/consul-ui/app/styles/routes.scss | 1 + .../styles/routes/dc/overview/license.scss | 62 +++++++ .../consul-ui/app/templates/dc/show.hbs | 24 ++- .../app/templates/dc/show/license.hbs | 153 ++++++++++++++++++ .../consul-ui/mock-api/v1/operator/license | 4 +- .../consul-ui/translations/routes/en-us.yaml | 34 ++++ 10 files changed, 321 insertions(+), 9 deletions(-) create mode 100644 ui/packages/consul-ui/app/helpers/temporal-format.js create mode 100644 ui/packages/consul-ui/app/helpers/temporal-within.js create mode 100644 ui/packages/consul-ui/app/styles/routes/dc/overview/license.scss create mode 100644 ui/packages/consul-ui/app/templates/dc/show/license.hbs diff --git a/ui/packages/consul-ui/app/helpers/temporal-format.js b/ui/packages/consul-ui/app/helpers/temporal-format.js new file mode 100644 index 000000000..17e7dd2f9 --- /dev/null +++ b/ui/packages/consul-ui/app/helpers/temporal-format.js @@ -0,0 +1,9 @@ +import Helper from '@ember/component/helper'; +import { inject as service } from '@ember/service'; + +export default class TemporalFormatHelper extends Helper { + @service('temporal') temporal; + compute([value], hash) { + return this.temporal.format(value, hash); + } +} diff --git a/ui/packages/consul-ui/app/helpers/temporal-within.js b/ui/packages/consul-ui/app/helpers/temporal-within.js new file mode 100644 index 000000000..a1f328ca8 --- /dev/null +++ b/ui/packages/consul-ui/app/helpers/temporal-within.js @@ -0,0 +1,9 @@ +import Helper from '@ember/component/helper'; +import { inject as service } from '@ember/service'; + +export default class TemporalWithinHelper extends Helper { + @service('temporal') temporal; + compute(params, hash) { + return this.temporal.within(params, hash); + } +} diff --git a/ui/packages/consul-ui/app/services/temporal.js b/ui/packages/consul-ui/app/services/temporal.js index a5b9884ad..82778ddaf 100644 --- a/ui/packages/consul-ui/app/services/temporal.js +++ b/ui/packages/consul-ui/app/services/temporal.js @@ -1,9 +1,31 @@ import format from 'pretty-ms'; +import parse from 'parse-duration'; import { assert } from '@ember/debug'; - +import dayjs from 'dayjs'; +import relativeTime from 'dayjs/plugin/relativeTime'; import Service from '@ember/service'; +dayjs.extend(relativeTime); + export default class TemporalService extends Service { + + format(value, options) { + const djs = dayjs(value); + if(dayjs().isBefore(djs)) { + return dayjs().to(djs, true); + } else { + return dayjs().from(djs, true); + } + } + + within([value, d], options) { + return dayjs(value).isBefore(dayjs().add(d, 'ms')); + } + + parse(value, options) { + return parse(value); + } + 
durationFrom(value, options = {}) { switch (true) { case typeof value === 'number': @@ -14,8 +36,12 @@ export default class TemporalService extends Service { return format(value / 1000000, { formatSubMilliseconds: true }) .split(' ') .join(''); + case typeof value === 'string': + return value; + default: + assert(`${value} is not a valid type`, false); + return value; + } - assert(`${value} is not a valid type`, false); - return value; } } diff --git a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss index 8eb65d292..064b8a4df 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss @@ -304,7 +304,7 @@ // @import './docker-color/index.scss'; // @import './docs/index.scss'; // @import './docs-download/index.scss'; -// @import './docs-link/index.scss'; +@import './docs-link/index.scss'; // @import './dollar-sign/index.scss'; // @import './dot/index.scss'; // @import './dot-half/index.scss'; diff --git a/ui/packages/consul-ui/app/styles/routes.scss b/ui/packages/consul-ui/app/styles/routes.scss index 238790812..624ff4570 100644 --- a/ui/packages/consul-ui/app/styles/routes.scss +++ b/ui/packages/consul-ui/app/styles/routes.scss @@ -4,3 +4,4 @@ @import 'routes/dc/acls/index'; @import 'routes/dc/intentions/index'; @import 'routes/dc/overview/serverstatus'; +@import 'routes/dc/overview/license'; diff --git a/ui/packages/consul-ui/app/styles/routes/dc/overview/license.scss b/ui/packages/consul-ui/app/styles/routes/dc/overview/license.scss new file mode 100644 index 000000000..b07132aa7 --- /dev/null +++ b/ui/packages/consul-ui/app/styles/routes/dc/overview/license.scss @@ -0,0 +1,62 @@ +section[data-route='dc.show.license'] { + @extend %license-route; +} +%license-route .validity { + @extend %license-validity; +} +%license-route aside { + @extend %license-route-learn-more; +} + +%license-route h2 { + @extend %h200; +} + +%license-validity dl { + @extend %horizontal-kv-list; + font-size: var(--typo-size-400); +} +%license-validity dl .expired + dd { + @extend %visually-hidden; +} +%license-validity dl dt::before { + content: ''; + margin-right: 0.250rem; /* 4px */ +} +%license-validity dl .expired::before { + --icon-name: icon-x-circle; + --icon-color: rgb(var(--red-500)); +} +%license-validity dl .warning::before { + --icon-name: icon-alert-circle; + --icon-color: rgb(var(--orange-500)); +} +%license-validity dl .valid:not(.warning)::before { + --icon-name: icon-check-circle; + --icon-color: rgb(var(--green-500)); +} + + +%license-route-learn-more { + @extend %panel; + box-shadow: var(--decor-elevation-000); + padding: var(--padding-y) var(--padding-x); + width: 40%; + min-width: 413px; + margin-top: 1rem; /* 16px */ +} +%license-route-learn-more header > :first-child { + @extend %h300; +} +%license-route-learn-more header { + margin-bottom: 1rem; /* 16px */ +} +%license-route-learn-more li { + margin-bottom: 0.250rem; /* 4px */ +} +%license-route-learn-more a::before { + --icon-name: icon-docs-link; + content: ''; + margin-right: 0.375rem; /* 6px */ +} + diff --git a/ui/packages/consul-ui/app/templates/dc/show.hbs b/ui/packages/consul-ui/app/templates/dc/show.hbs index 6089703a7..1391997da 100644 --- a/ui/packages/consul-ui/app/templates/dc/show.hbs +++ b/ui/packages/consul-ui/app/templates/dc/show.hbs @@ -10,23 +10,38 @@ as |route|> -{{#if false}} + +{{#let + (from-entries (array + (array 'serverstatus' true) + (array 'cataloghealth' 
false) + (array 'license' (can 'read license')) + )) +as |tabs|}} + + {{#let (without false + (values tabs) + ) as |tabsEnabled|}} + +{{#if (gt tabsEnabled.length 1)}} '') }}/> {{/if}} + + {{/let}} +{{/let}} + + +{{#let + loader.data +as |item|}} + + + + + + {{#if (eq loader.error.status "404")}} + + + Warning! + + +

+ This service has been deregistered and no longer exists in the catalog. +

+
+
+ {{else if (eq loader.error.status "403")}} + + + Error! + + +

+ You no longer have access to this service +

+
+
+ {{else}} + + + Warning! + + +

+ An error was returned whilst loading this data, refresh to try again. +

+
+
+ {{/if}} +
+ + +
+
+
+

+ {{compute (fn route.t 'expiry.header')}} +

+
+ +

+ {{compute (fn route.t 'expiry.${type}.body' + (hash + type=(if item.Valid 'valid' 'expired') + date=(format-time item.License.expiration_time + year='numeric' + month='long' + day='numeric' + ) + time=(format-time item.License.expiration_time + hour12=true + hour='numeric' + hourCycle='h12' + minute='numeric' + second='numeric' + timeZoneName='short' + ) + htmlSafe=true + ) + )}} +

+ +
+
+ {{compute (fn route.t 'expiry.${type}.header' + (hash + type=(if item.Valid 'valid' 'expired') + ) + )}} +
+
+ {{temporal-format item.License.expiration_time}} +
+
+ + + +
+
+
+{{/let}} +
+ + diff --git a/ui/packages/consul-ui/mock-api/v1/operator/license b/ui/packages/consul-ui/mock-api/v1/operator/license index 08d485fd2..ece7927f1 100644 --- a/ui/packages/consul-ui/mock-api/v1/operator/license +++ b/ui/packages/consul-ui/mock-api/v1/operator/license @@ -1,12 +1,12 @@ { - "Valid": ${fake.random.boolean()}, + "Valid": true, "License": { "license_id": "${fake.random.uuid()}", "customer_id": "${fake.random.uuid()}", "installation_id": "*", "issue_time": "2021-01-13T15:25:19.052900132Z", "start_time": "2021-01-13T00:00:00Z", - "expiration_time": "${env('CONSUL_LICENSE_EXPIRATION', '2022-01-13T23:59:59.999Z')}", + "expiration_time": "${env('CONSUL_LICENSE_EXPIRATION', '2022-05-02T23:59:59.999Z')}", "termination_time": "${env('CONSUL_LICENSE_TERMINATION', '2022-01-13T23:59:59.999Z')}", "product": "consul", "flags": { diff --git a/ui/packages/consul-ui/translations/routes/en-us.yaml b/ui/packages/consul-ui/translations/routes/en-us.yaml index e1b7d3559..735a33f96 100644 --- a/ui/packages/consul-ui/translations/routes/en-us.yaml +++ b/ui/packages/consul-ui/translations/routes/en-us.yaml @@ -16,6 +16,40 @@ dc: title: Health license: title: License + expiry: + header: Expiry + expired: + header: Expired + body: | +

+ Your license expired on {date} at {time}. +

+ valid: + header: '' + body: | +

+ Your license expires on {date} at {time}. +

+ documentation: + title: Learn More + body: | +
nodes: index: From 2a4ca71d3fd6da8c8779d46db91b416d82690831 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 12 Apr 2022 09:47:42 -0400 Subject: [PATCH 129/785] Move to using a shared EventPublisher (#12673) Previously we had 1 EventPublisher per state.Store. When a state store was closed/abandoned such as during a consul snapshot restore, this had the behavior of force closing subscriptions for that topic and evicting event snapshots from the cache. The intention of this commit is to keep all that behavior. To that end, the shared EventPublisher now supports the ability to refresh a topic. That will perform the force close + eviction. The FSM upon abandoning the previous state.Store will call RefreshTopic for all the topics with events generated by the state.Store. --- agent/consul/fsm/fsm.go | 52 ++++++++++ agent/consul/server.go | 38 ++++---- agent/consul/state/catalog_events.go | 96 +++++++++---------- agent/consul/state/catalog_events_test.go | 22 ++--- agent/consul/state/connect_ca_events.go | 32 +++---- agent/consul/state/connect_ca_events_test.go | 5 +- agent/consul/state/memdb.go | 15 +-- agent/consul/state/state_store.go | 34 ++----- agent/consul/state/store_integration_test.go | 52 +++++----- agent/consul/stream/event_publisher.go | 64 ++++++++++++- agent/consul/stream/event_publisher_test.go | 68 +++++++------ agent/consul/stream/noop.go | 4 + agent/consul/subscribe_backend.go | 2 +- .../services/subscribe/subscribe_test.go | 57 ++++++----- .../grpc/public/services/connectca/server.go | 8 +- .../public/services/connectca/server_test.go | 55 ++++++++++- .../public/services/connectca/watch_roots.go | 2 +- .../services/connectca/watch_roots_test.go | 58 +++++------ agent/submatview/store_integration_test.go | 6 +- 19 files changed, 413 insertions(+), 257 deletions(-) diff --git a/agent/consul/fsm/fsm.go b/agent/consul/fsm/fsm.go index 9dcc5f64f..a9de91935 100644 --- a/agent/consul/fsm/fsm.go +++ b/agent/consul/fsm/fsm.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/raft" "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/logging" ) @@ -56,6 +57,8 @@ type FSM struct { // Raft side, so doesn't need to lock this. stateLock sync.RWMutex state *state.Store + + publisher *stream.EventPublisher } // New is used to construct a new FSM with a blank state. @@ -77,6 +80,8 @@ type Deps struct { // NewStateStore will be called once when the FSM is created and again any // time Restore() is called. NewStateStore func() *state.Store + + Publisher *stream.EventPublisher } // NewFromDeps creates a new FSM from its dependencies. @@ -101,6 +106,10 @@ func NewFromDeps(deps Deps) *FSM { } fsm.chunker = raftchunking.NewChunkingFSM(fsm, nil) + + // register the streaming snapshot handlers if an event publisher was provided. + fsm.registerStreamSnapshotHandlers() + return fsm } @@ -204,12 +213,28 @@ func (c *FSM) Restore(old io.ReadCloser) error { c.stateLock.Lock() stateOld := c.state c.state = stateNew + + // Tell the EventPublisher to cycle anything watching these topics. Replacement + // of the state store means that indexes could have gone backwards and data changed. + // + // This needs to happen while holding the state lock to ensure its not racey. 
If we
+	// did this outside of the locked section closer to where we abandon the old store
+	// then there would be a possibility for new streams to be opened that would get
+	// a snapshot from the cache sourced from old data but would be receiving events
+	// for new data. To prevent that inconsistency we refresh the topics while holding
+	// the lock, which ensures that any subscriptions to topics for FSM generated events
+	// are forced to close and resubscribe against the new state store.
+	if c.deps.Publisher != nil {
+		c.deps.Publisher.RefreshTopic(state.EventTopicServiceHealth)
+		c.deps.Publisher.RefreshTopic(state.EventTopicServiceHealthConnect)
+		c.deps.Publisher.RefreshTopic(state.EventTopicCARoots)
+	}
 	c.stateLock.Unlock()

 	// Signal that the old state store has been abandoned. This is required
 	// because we don't operate on it any more, we just throw it away, so
 	// blocking queries won't see any changes and need to be woken up.
 	stateOld.Abandon()
+
 	return nil
 }

@@ -244,3 +269,30 @@ func ReadSnapshot(r io.Reader, handler func(header *SnapshotHeader, msg structs.
 		}
 	}
 }
+
+func (c *FSM) registerStreamSnapshotHandlers() {
+	if c.deps.Publisher == nil {
+		return
+	}
+
+	err := c.deps.Publisher.RegisterHandler(state.EventTopicServiceHealth, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().ServiceHealthSnapshot(req, buf)
+	})
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+
+	err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceHealthConnect, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().ServiceHealthSnapshot(req, buf)
+	})
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+
+	err = c.deps.Publisher.RegisterHandler(state.EventTopicCARoots, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().CARootsSnapshot(req, buf)
+	})
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+}
diff --git a/agent/consul/server.go b/agent/consul/server.go
index a3effba97..401954d85 100644
--- a/agent/consul/server.go
+++ b/agent/consul/server.go
@@ -39,6 +39,7 @@ import (
 	"github.com/hashicorp/consul/agent/consul/authmethod/ssoauth"
 	"github.com/hashicorp/consul/agent/consul/fsm"
 	"github.com/hashicorp/consul/agent/consul/state"
+	"github.com/hashicorp/consul/agent/consul/stream"
 	"github.com/hashicorp/consul/agent/consul/usagemetrics"
 	"github.com/hashicorp/consul/agent/consul/wanfed"
 	agentgrpc "github.com/hashicorp/consul/agent/grpc/private"
@@ -343,6 +344,12 @@ type Server struct {
 	// Manager to handle starting/stopping go routines when establishing/revoking raft leadership
 	leaderRoutineManager *routine.Manager

+	// publisher is the EventPublisher to be shared amongst various server components. Events from
+	// modifications to the FSM, autopilot and others will flow through here. If in the future we
+	// need Events generated outside of the Server and all its components, then we could move
+	// this into the Deps struct and create it much earlier on.
+ publisher *stream.EventPublisher + // embedded struct to hold all the enterprise specific data EnterpriseServer } @@ -397,6 +404,16 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve insecureRPCServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) } + eventPublisher := stream.NewEventPublisher(10 * time.Second) + + fsmDeps := fsm.Deps{ + Logger: flat.Logger, + NewStateStore: func() *state.Store { + return state.NewStateStoreWithEventPublisher(gc, eventPublisher) + }, + Publisher: eventPublisher, + } + // Create server. s := &Server{ config: config, @@ -422,9 +439,12 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve shutdownCh: shutdownCh, leaderRoutineManager: routine.NewManager(logger.Named(logging.Leader)), aclAuthMethodValidators: authmethod.NewCache(), - fsm: newFSMFromConfig(flat.Logger, gc, config), + fsm: fsm.NewFromDeps(fsmDeps), + publisher: eventPublisher, } + go s.publisher.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) + if s.config.ConnectMeshGatewayWANFederationEnabled { s.gatewayLocator = NewGatewayLocator( s.logger, @@ -652,6 +672,7 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve // Initialize public gRPC server - register services on public gRPC server. connectca.NewServer(connectca.Config{ + Publisher: s.publisher, GetStore: func() connectca.StateStore { return s.FSM().State() }, Logger: logger.Named("grpc-api.connect-ca"), ACLResolver: plainACLResolver{s.ACLResolver}, @@ -684,21 +705,6 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve return s, nil } -func newFSMFromConfig(logger hclog.Logger, gc *state.TombstoneGC, config *Config) *fsm.FSM { - deps := fsm.Deps{Logger: logger} - if config.RPCConfig.EnableStreaming { - deps.NewStateStore = func() *state.Store { - return state.NewStateStoreWithEventPublisher(gc) - } - return fsm.NewFromDeps(deps) - } - - deps.NewStateStore = func() *state.Store { - return state.NewStateStore(gc) - } - return fsm.NewFromDeps(deps) -} - func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler { register := func(srv *grpc.Server) { if config.RPCConfig.EnableStreaming { diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 91e1bf361..13c5c4ba0 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -78,50 +78,48 @@ func (e EventPayloadCheckServiceNode) Subject() stream.Subject { // serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot // of stream.Events that describe the current state of a service health query. 
-func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc { - return func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { - tx := db.ReadTxn() - defer tx.Abort() +func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { + tx := s.db.ReadTxn() + defer tx.Abort() - connect := topic == topicServiceHealthConnect + connect := req.Topic == EventTopicServiceHealthConnect - subject, ok := req.Subject.(EventSubjectService) - if !ok { - return 0, fmt.Errorf("expected SubscribeRequest.Subject to be a: state.EventSubjectService, was a: %T", req.Subject) - } - - idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta) - if err != nil { - return 0, err - } - - for i := range nodes { - n := nodes[i] - event := stream.Event{ - Index: idx, - Topic: topic, - Payload: EventPayloadCheckServiceNode{ - Op: pbsubscribe.CatalogOp_Register, - Value: &n, - }, - } - - if !connect { - // append each event as a separate item so that they can be serialized - // separately, to prevent the encoding of one massive message. - buf.Append([]stream.Event{event}) - continue - } - - events, err := connectEventsByServiceKind(tx, event) - if err != nil { - return idx, err - } - buf.Append(events) - } - - return idx, err + subject, ok := req.Subject.(EventSubjectService) + if !ok { + return 0, fmt.Errorf("expected SubscribeRequest.Subject to be a: state.EventSubjectService, was a: %T", req.Subject) } + + idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta) + if err != nil { + return 0, err + } + + for i := range nodes { + n := nodes[i] + event := stream.Event{ + Index: idx, + Topic: req.Topic, + Payload: EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Register, + Value: &n, + }, + } + + if !connect { + // append each event as a separate item so that they can be serialized + // separately, to prevent the encoding of one massive message. 
+ buf.Append([]stream.Event{event}) + continue + } + + events, err := connectEventsByServiceKind(tx, event) + if err != nil { + return idx, err + } + buf.Append(events) + } + + return idx, err } // TODO: this could use NodeServiceQuery @@ -355,7 +353,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event for _, sn := range nodes { e := newServiceHealthEventDeregister(changes.Index, sn) - e.Topic = topicServiceHealthConnect + e.Topic = EventTopicServiceHealthConnect payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = serviceName.Name if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() { @@ -388,7 +386,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event return nil, err } - e.Topic = topicServiceHealthConnect + e.Topic = EventTopicServiceHealthConnect payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = serviceName.Name if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() { @@ -426,7 +424,7 @@ func isConnectProxyDestinationServiceChange(idx uint64, before, after *structs.S } e := newServiceHealthEventDeregister(idx, before) - e.Topic = topicServiceHealthConnect + e.Topic = EventTopicServiceHealthConnect payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = payload.Value.Service.Proxy.DestinationServiceName e.Payload = payload @@ -467,7 +465,7 @@ func serviceHealthToConnectEvents( ) ([]stream.Event, error) { var result []stream.Event for _, event := range events { - if event.Topic != topicServiceHealth { // event.Topic == topicServiceHealthConnect + if event.Topic != EventTopicServiceHealth { // event.Topic == topicServiceHealthConnect // Skip non-health or any events already emitted to Connect topic continue } @@ -490,7 +488,7 @@ func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Ev } event := origEvent // shallow copy the event - event.Topic = topicServiceHealthConnect + event.Topic = EventTopicServiceHealthConnect if node.Service.Connect.Native { return []stream.Event{event}, nil @@ -527,7 +525,7 @@ func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Ev } func copyEventForService(event stream.Event, service structs.ServiceName) stream.Event { - event.Topic = topicServiceHealthConnect + event.Topic = EventTopicServiceHealthConnect payload := event.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = service.Name if payload.Value.Service.EnterpriseMeta.NamespaceOrDefault() != service.EnterpriseMeta.NamespaceOrDefault() { @@ -666,7 +664,7 @@ func newServiceHealthEventRegister( Checks: checks, } return stream.Event{ - Topic: topicServiceHealth, + Topic: EventTopicServiceHealth, Index: idx, Payload: EventPayloadCheckServiceNode{ Op: pbsubscribe.CatalogOp_Register, @@ -697,7 +695,7 @@ func newServiceHealthEventDeregister(idx uint64, sn *structs.ServiceNode) stream } return stream.Event{ - Topic: topicServiceHealth, + Topic: EventTopicServiceHealth, Index: idx, Payload: EventPayloadCheckServiceNode{ Op: pbsubscribe.CatalogOp_Deregister, diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index b85ea5f76..1f6f6d885 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -70,11 +70,10 @@ func TestServiceHealthSnapshot(t *testing.T) { err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "web", 
regNode2)) require.NoError(t, err) - fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealth) buf := &snapshotAppender{} - req := stream.SubscribeRequest{Subject: EventSubjectService{Key: "web"}} + req := stream.SubscribeRequest{Topic: EventTopicServiceHealth, Subject: EventSubjectService{Key: "web"}} - idx, err := fn(req, buf) + idx, err := store.ServiceHealthSnapshot(req, buf) require.NoError(t, err) require.Equal(t, counter.Last(), idx) @@ -147,11 +146,10 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) { err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "tgate1", regTerminatingGateway)) require.NoError(t, err) - fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealthConnect) buf := &snapshotAppender{} - req := stream.SubscribeRequest{Subject: EventSubjectService{Key: "web"}, Topic: topicServiceHealthConnect} + req := stream.SubscribeRequest{Subject: EventSubjectService{Key: "web"}, Topic: EventTopicServiceHealthConnect} - idx, err := fn(req, buf) + idx, err := store.ServiceHealthSnapshot(req, buf) require.NoError(t, err) require.Equal(t, counter.Last(), idx) @@ -1743,7 +1741,7 @@ func evServiceTermingGateway(name string) func(e *stream.Event) error { } } - if e.Topic == topicServiceHealthConnect { + if e.Topic == EventTopicServiceHealthConnect { payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = name e.Payload = payload @@ -2096,7 +2094,7 @@ func evConnectNative(e *stream.Event) error { // depending on which topic they are published to and they determine this from // the event. func evConnectTopic(e *stream.Event) error { - e.Topic = topicServiceHealthConnect + e.Topic = EventTopicServiceHealthConnect return nil } @@ -2135,7 +2133,7 @@ func evSidecar(e *stream.Event) error { csn.Checks[1].ServiceName = svc + "_sidecar_proxy" } - if e.Topic == topicServiceHealthConnect { + if e.Topic == EventTopicServiceHealthConnect { payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = svc e.Payload = payload @@ -2238,7 +2236,7 @@ func evRenameService(e *stream.Event) error { taggedAddr.Address = "240.0.0.2" csn.Service.TaggedAddresses[structs.TaggedAddressVirtualIP] = taggedAddr - if e.Topic == topicServiceHealthConnect { + if e.Topic == EventTopicServiceHealthConnect { payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = csn.Service.Proxy.DestinationServiceName e.Payload = payload @@ -2350,7 +2348,7 @@ func newTestEventServiceHealthRegister(index uint64, nodeNum int, svc string) st addr := fmt.Sprintf("10.10.%d.%d", nodeNum/256, nodeNum%256) return stream.Event{ - Topic: topicServiceHealth, + Topic: EventTopicServiceHealth, Index: index, Payload: EventPayloadCheckServiceNode{ Op: pbsubscribe.CatalogOp_Register, @@ -2421,7 +2419,7 @@ func newTestEventServiceHealthRegister(index uint64, nodeNum int, svc string) st // adding too many options to callers. 
func newTestEventServiceHealthDeregister(index uint64, nodeNum int, svc string) stream.Event { return stream.Event{ - Topic: topicServiceHealth, + Topic: EventTopicServiceHealth, Index: index, Payload: EventPayloadCheckServiceNode{ Op: pbsubscribe.CatalogOp_Deregister, diff --git a/agent/consul/state/connect_ca_events.go b/agent/consul/state/connect_ca_events.go index c6bd135be..6a0bdb974 100644 --- a/agent/consul/state/connect_ca_events.go +++ b/agent/consul/state/connect_ca_events.go @@ -65,23 +65,21 @@ func caRootsChangeEvents(tx ReadTxn, changes Changes) ([]stream.Event, error) { // caRootsSnapshot returns a stream.SnapshotFunc that provides a snapshot of // the current active list of CA Roots. -func caRootsSnapshot(db ReadDB) stream.SnapshotFunc { - return func(_ stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { - tx := db.ReadTxn() - defer tx.Abort() +func (s *Store) CARootsSnapshot(_ stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + tx := s.db.ReadTxn() + defer tx.Abort() - idx, roots, err := caRootsTxn(tx, nil) - if err != nil { - return 0, err - } - - buf.Append([]stream.Event{ - { - Topic: EventTopicCARoots, - Index: idx, - Payload: EventPayloadCARoots{CARoots: roots}, - }, - }) - return idx, nil + idx, roots, err := caRootsTxn(tx, nil) + if err != nil { + return 0, err } + + buf.Append([]stream.Event{ + { + Topic: EventTopicCARoots, + Index: idx, + Payload: EventPayloadCARoots{CARoots: roots}, + }, + }) + return idx, nil } diff --git a/agent/consul/state/connect_ca_events_test.go b/agent/consul/state/connect_ca_events_test.go index 9651e2a47..b5062340a 100644 --- a/agent/consul/state/connect_ca_events_test.go +++ b/agent/consul/state/connect_ca_events_test.go @@ -51,14 +51,13 @@ func TestCARootsEvents(t *testing.T) { func TestCARootsSnapshot(t *testing.T) { store := testStateStore(t) - fn := caRootsSnapshot((*readDB)(store.db.db)) var req stream.SubscribeRequest t.Run("no roots", func(t *testing.T) { buf := &snapshotAppender{} - idx, err := fn(req, buf) + idx, err := store.CARootsSnapshot(req, buf) require.NoError(t, err) require.Equal(t, uint64(0), idx) @@ -77,7 +76,7 @@ func TestCARootsSnapshot(t *testing.T) { _, err := store.CARootSetCAS(1, 0, structs.CARoots{root}) require.NoError(t, err) - idx, err := fn(req, buf) + idx, err := store.CARootsSnapshot(req, buf) require.NoError(t, err) require.Equal(t, uint64(1), idx) diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 936375eb4..3edca1438 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -1,7 +1,6 @@ package state import ( - "context" "fmt" "github.com/hashicorp/go-memdb" @@ -58,7 +57,7 @@ type changeTrackerDB struct { type EventPublisher interface { Publish([]stream.Event) - Run(context.Context) + RegisterHandler(stream.Topic, stream.SnapshotFunc) error Subscribe(*stream.SubscribeRequest) (*stream.Subscription, error) } @@ -179,8 +178,8 @@ func (db *readDB) ReadTxn() AbortTxn { } var ( - topicServiceHealth = pbsubscribe.Topic_ServiceHealth - topicServiceHealthConnect = pbsubscribe.Topic_ServiceHealthConnect + EventTopicServiceHealth = pbsubscribe.Topic_ServiceHealth + EventTopicServiceHealthConnect = pbsubscribe.Topic_ServiceHealthConnect ) func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { @@ -200,11 +199,3 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { } return events, nil } - -func newSnapshotHandlers(db ReadDB) stream.SnapshotHandlers { - return 
stream.SnapshotHandlers{ - topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth), - topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect), - EventTopicCARoots: caRootsSnapshot(db), - } -} diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index 39a4371ef..e795b6857 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -1,10 +1,8 @@ package state import ( - "context" "errors" "fmt" - "time" memdb "github.com/hashicorp/go-memdb" @@ -109,10 +107,6 @@ type Store struct { // abandoned (usually during a restore). This is only ever closed. abandonCh chan struct{} - // TODO: refactor abondonCh to use a context so that both can use the same - // cancel mechanism. - stopEventPublisher func() - // kvsGraveyard manages tombstones for the key value store. kvsGraveyard *Graveyard @@ -159,11 +153,10 @@ func NewStateStore(gc *TombstoneGC) *Store { panic(fmt.Sprintf("failed to create state store: %v", err)) } s := &Store{ - schema: schema, - abandonCh: make(chan struct{}), - kvsGraveyard: NewGraveyard(gc), - lockDelay: NewDelay(), - stopEventPublisher: func() {}, + schema: schema, + abandonCh: make(chan struct{}), + kvsGraveyard: NewGraveyard(gc), + lockDelay: NewDelay(), db: &changeTrackerDB{ db: db, publisher: stream.NoOpEventPublisher{}, @@ -173,24 +166,13 @@ func NewStateStore(gc *TombstoneGC) *Store { return s } -func NewStateStoreWithEventPublisher(gc *TombstoneGC) *Store { +func NewStateStoreWithEventPublisher(gc *TombstoneGC, publisher EventPublisher) *Store { store := NewStateStore(gc) - ctx, cancel := context.WithCancel(context.TODO()) - store.stopEventPublisher = cancel + store.db.publisher = publisher - pub := stream.NewEventPublisher(newSnapshotHandlers((*readDB)(store.db.db)), 10*time.Second) - store.db.publisher = pub - - go pub.Run(ctx) return store } -// EventPublisher returns the stream.EventPublisher used by the Store to -// publish events. -func (s *Store) EventPublisher() EventPublisher { - return s.db.publisher -} - // Snapshot is used to create a point-in-time snapshot of the entire db. func (s *Store) Snapshot() *Snapshot { tx := s.db.Txn(false) @@ -277,11 +259,7 @@ func (s *Store) AbandonCh() <-chan struct{} { // Abandon is used to signal that the given state store has been abandoned. // Calling this more than one time will panic. func (s *Store) Abandon() { - // Note: the order of these operations matters. Subscribers may receive on - // abandonCh to determine whether their subscription was closed because the - // store was abandoned, therefore it's important abandonCh is closed first. 
close(s.abandonCh) - s.stopEventPublisher() } // maxIndex is a helper used to retrieve the highest known index diff --git a/agent/consul/state/store_integration_test.go b/agent/consul/state/store_integration_test.go index 55c3059ce..421205e14 100644 --- a/agent/consul/state/store_integration_test.go +++ b/agent/consul/state/store_integration_test.go @@ -32,7 +32,8 @@ func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := stream.NewEventPublisher(newTestSnapshotHandlers(s), 0) + publisher := stream.NewEventPublisher(0) + registerTestSnapshotHandlers(t, s, publisher) go publisher.Run(ctx) s.db.publisher = publisher sub, err := publisher.Subscribe(subscription) @@ -119,7 +120,8 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := stream.NewEventPublisher(newTestSnapshotHandlers(s), 0) + publisher := stream.NewEventPublisher(0) + registerTestSnapshotHandlers(t, s, publisher) go publisher.Run(ctx) s.db.publisher = publisher sub, err := publisher.Subscribe(subscription) @@ -240,7 +242,8 @@ func TestStore_IntegrationWithEventPublisher_ACLRoleUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := stream.NewEventPublisher(newTestSnapshotHandlers(s), 0) + publisher := stream.NewEventPublisher(0) + registerTestSnapshotHandlers(t, s, publisher) go publisher.Run(ctx) s.db.publisher = publisher sub, err := publisher.Subscribe(subscription) @@ -393,27 +396,29 @@ func (t topic) String() string { var topicService topic = "test-topic-service" -func newTestSnapshotHandlers(s *Store) stream.SnapshotHandlers { - return stream.SnapshotHandlers{ - topicService: func(req stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { - key := req.Subject.String() +func (s *Store) topicServiceTestHandler(req stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { + key := req.Subject.String() - idx, nodes, err := s.ServiceNodes(nil, key, nil) - if err != nil { - return idx, err - } - - for _, node := range nodes { - event := stream.Event{ - Topic: req.Topic, - Index: node.ModifyIndex, - Payload: nodePayload{node: node, key: key}, - } - snap.Append([]stream.Event{event}) - } - return idx, nil - }, + idx, nodes, err := s.ServiceNodes(nil, key, nil) + if err != nil { + return idx, err } + + for _, node := range nodes { + event := stream.Event{ + Topic: req.Topic, + Index: node.ModifyIndex, + Payload: nodePayload{node: node, key: key}, + } + snap.Append([]stream.Event{event}) + } + return idx, nil +} + +func registerTestSnapshotHandlers(t *testing.T, s *Store, publisher EventPublisher) { + t.Helper() + err := publisher.RegisterHandler(topicService, s.topicServiceTestHandler) + require.NoError(t, err) } type nodePayload struct { @@ -460,7 +465,8 @@ func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLTo ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := stream.NewEventPublisher(newTestSnapshotHandlers(s), 0) + publisher := stream.NewEventPublisher(0) + registerTestSnapshotHandlers(t, s, publisher) go publisher.Run(ctx) s.db.publisher = publisher diff --git a/agent/consul/stream/event_publisher.go b/agent/consul/stream/event_publisher.go index 06b7b03a2..2cd0564ff 100644 --- 
a/agent/consul/stream/event_publisher.go +++ b/agent/consul/stream/event_publisher.go @@ -91,7 +91,7 @@ type SnapshotAppender interface { // A goroutine is run in the background to publish events to all subscribes. // Cancelling the context will shutdown the goroutine, to free resources, // and stop all publishing. -func NewEventPublisher(handlers SnapshotHandlers, snapCacheTTL time.Duration) *EventPublisher { +func NewEventPublisher(snapCacheTTL time.Duration) *EventPublisher { e := &EventPublisher{ snapCacheTTL: snapCacheTTL, topicBuffers: make(map[topicSubject]*topicBuffer), @@ -100,12 +100,41 @@ func NewEventPublisher(handlers SnapshotHandlers, snapCacheTTL time.Duration) *E subscriptions: &subscriptions{ byToken: make(map[string]map[*SubscribeRequest]*Subscription), }, - snapshotHandlers: handlers, + snapshotHandlers: make(map[Topic]SnapshotFunc), } return e } +// RegisterHandler will register a new snapshot handler function. The expectation is +// that all handlers get registered prior to the event publisher being Run. Handler +// registration is therefore not concurrency safe and access to handlers is internally +// not synchronized. +func (e *EventPublisher) RegisterHandler(topic Topic, handler SnapshotFunc) error { + if topic.String() == "" { + return fmt.Errorf("the topic cannot be empty") + } + + if _, found := e.snapshotHandlers[topic]; found { + return fmt.Errorf("a handler is already registered for the topic: %s", topic.String()) + } + + e.snapshotHandlers[topic] = handler + + return nil +} + +func (e *EventPublisher) RefreshTopic(topic Topic) error { + if _, found := e.snapshotHandlers[topic]; !found { + return fmt.Errorf("topic %s is not registered", topic) + } + + e.forceEvictByTopic(topic) + e.subscriptions.closeAllByTopic(topic) + + return nil +} + // Publish events to all subscribers of the event Topic. The events will be shared // with all subscriptions, so the Payload used in Event.Payload must be immutable. func (e *EventPublisher) Publish(events []Event) { @@ -196,14 +225,14 @@ func (e *EventPublisher) bufferForPublishing(key topicSubject) *eventBuffer { // When the caller is finished with the subscription for any reason, it must // call Subscription.Unsubscribe to free ACL tracking resources. func (e *EventPublisher) Subscribe(req *SubscribeRequest) (*Subscription, error) { + e.lock.Lock() + defer e.lock.Unlock() + handler, ok := e.snapshotHandlers[req.Topic] if !ok || req.Topic == nil { return nil, fmt.Errorf("unknown topic %v", req.Topic) } - e.lock.Lock() - defer e.lock.Unlock() - topicBuf := e.bufferForSubscription(req.topicSubject()) topicBuf.refs++ @@ -327,6 +356,19 @@ func (s *subscriptions) closeAll() { } } +func (s *subscriptions) closeAllByTopic(topic Topic) { + s.lock.Lock() + defer s.lock.Unlock() + + for _, byRequest := range s.byToken { + for _, sub := range byRequest { + if sub.req.Topic == topic { + sub.forceClose() + } + } + } +} + // EventPublisher.lock must be held to call this method. func (e *EventPublisher) getCachedSnapshotLocked(req *SubscribeRequest) *eventSnapshot { snap, ok := e.snapCache[req.topicSubject()] @@ -350,3 +392,15 @@ func (e *EventPublisher) setCachedSnapshotLocked(req *SubscribeRequest, snap *ev delete(e.snapCache, req.topicSubject()) }) } + +// forceEvictByTopic will remove all entries from the snapshot cache for a given topic. +// This method should be called while holding the publisher's lock.
+func (e *EventPublisher) forceEvictByTopic(topic Topic) { + e.lock.Lock() + for key := range e.snapCache { + if key.Topic == topic.String() { + delete(e.snapCache, key) + } + } + e.lock.Unlock() +} diff --git a/agent/consul/stream/event_publisher_test.go b/agent/consul/stream/event_publisher_test.go index c718d5853..fbd253830 100644 --- a/agent/consul/stream/event_publisher_test.go +++ b/agent/consul/stream/event_publisher_test.go @@ -27,7 +27,8 @@ func TestEventPublisher_SubscribeWithIndex0(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := NewEventPublisher(newTestSnapshotHandlers(), 0) + publisher := NewEventPublisher(0) + registerTestSnapshotHandlers(t, publisher) go publisher.Run(ctx) sub, err := publisher.Subscribe(req) @@ -83,16 +84,18 @@ func (p simplePayload) HasReadPermission(acl.Authorizer) bool { func (p simplePayload) Subject() Subject { return stringer(p.key) } -func newTestSnapshotHandlers() SnapshotHandlers { - return SnapshotHandlers{ - testTopic: func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { - if req.Topic != testTopic { - return 0, fmt.Errorf("unexpected topic: %v", req.Topic) - } - buf.Append([]Event{testSnapshotEvent}) - return 1, nil - }, +func registerTestSnapshotHandlers(t *testing.T, publisher *EventPublisher) { + t.Helper() + + testTopicHandler := func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { + if req.Topic != testTopic { + return 0, fmt.Errorf("unexpected topic: %v", req.Topic) + } + buf.Append([]Event{testSnapshotEvent}) + return 1, nil } + + require.NoError(t, publisher.RegisterHandler(testTopic, testTopicHandler)) } func runSubscription(ctx context.Context, sub *Subscription) <-chan eventOrErr { @@ -143,14 +146,14 @@ func TestEventPublisher_ShutdownClosesSubscriptions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - handlers := newTestSnapshotHandlers() fn := func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { return 0, nil } - handlers[intTopic(22)] = fn - handlers[intTopic(33)] = fn - publisher := NewEventPublisher(handlers, time.Second) + publisher := NewEventPublisher(time.Second) + registerTestSnapshotHandlers(t, publisher) + publisher.RegisterHandler(intTopic(22), fn) + publisher.RegisterHandler(intTopic(33), fn) go publisher.Run(ctx) sub1, err := publisher.Subscribe(&SubscribeRequest{Topic: intTopic(22), Subject: SubjectNone}) @@ -190,7 +193,8 @@ func TestEventPublisher_SubscribeWithIndex0_FromCache(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := NewEventPublisher(newTestSnapshotHandlers(), time.Second) + publisher := NewEventPublisher(time.Second) + registerTestSnapshotHandlers(t, publisher) go publisher.Run(ctx) sub, err := publisher.Subscribe(req) @@ -235,7 +239,8 @@ func TestEventPublisher_SubscribeWithIndexNotZero_CanResume(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := NewEventPublisher(newTestSnapshotHandlers(), time.Second) + publisher := NewEventPublisher(time.Second) + registerTestSnapshotHandlers(t, publisher) go publisher.Run(ctx) simulateExistingSubscriber(t, publisher, req) @@ -288,7 +293,8 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := NewEventPublisher(newTestSnapshotHandlers(), 0) + publisher := 
NewEventPublisher(0) + registerTestSnapshotHandlers(t, publisher) go publisher.Run(ctx) // Include the same event in the topicBuffer publisher.publishEvent([]Event{testSnapshotEvent}) @@ -344,7 +350,8 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshotFromCache(t *testin ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := NewEventPublisher(newTestSnapshotHandlers(), time.Second) + publisher := NewEventPublisher(time.Second) + registerTestSnapshotHandlers(t, publisher) go publisher.Run(ctx) simulateExistingSubscriber(t, publisher, req) @@ -417,21 +424,20 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot_WithCache(t *testi Payload: simplePayload{key: "sub-key", value: "event-3"}, } - handlers := SnapshotHandlers{ - testTopic: func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { - if req.Topic != testTopic { - return 0, fmt.Errorf("unexpected topic: %v", req.Topic) - } - buf.Append([]Event{testSnapshotEvent}) - buf.Append([]Event{nextEvent}) - return 3, nil - }, + testTopicHandler := func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { + if req.Topic != testTopic { + return 0, fmt.Errorf("unexpected topic: %v", req.Topic) + } + buf.Append([]Event{testSnapshotEvent}) + buf.Append([]Event{nextEvent}) + return 3, nil } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - publisher := NewEventPublisher(handlers, time.Second) + publisher := NewEventPublisher(time.Second) + publisher.RegisterHandler(testTopic, testTopicHandler) go publisher.Run(ctx) simulateExistingSubscriber(t, publisher, req) @@ -498,7 +504,8 @@ func TestEventPublisher_Unsubscribe_ClosesSubscription(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - publisher := NewEventPublisher(newTestSnapshotHandlers(), time.Second) + publisher := NewEventPublisher(time.Second) + registerTestSnapshotHandlers(t, publisher) sub, err := publisher.Subscribe(req) require.NoError(t, err) @@ -518,7 +525,8 @@ func TestEventPublisher_Unsubscribe_FreesResourcesWhenThereAreNoSubscribers(t *t Subject: stringer("sub-key"), } - publisher := NewEventPublisher(newTestSnapshotHandlers(), time.Second) + publisher := NewEventPublisher(time.Second) + registerTestSnapshotHandlers(t, publisher) sub1, err := publisher.Subscribe(req) require.NoError(t, err) diff --git a/agent/consul/stream/noop.go b/agent/consul/stream/noop.go index 1b3282dbf..84d6a648d 100644 --- a/agent/consul/stream/noop.go +++ b/agent/consul/stream/noop.go @@ -9,6 +9,10 @@ type NoOpEventPublisher struct{} func (NoOpEventPublisher) Publish([]Event) {} +func (NoOpEventPublisher) RegisterHandler(Topic, SnapshotFunc) error { + return fmt.Errorf("stream event publisher is disabled") +} + func (NoOpEventPublisher) Run(context.Context) {} func (NoOpEventPublisher) Subscribe(*SubscribeRequest) (*Subscription, error) { diff --git a/agent/consul/subscribe_backend.go b/agent/consul/subscribe_backend.go index 94b8671f4..bddbb2e5f 100644 --- a/agent/consul/subscribe_backend.go +++ b/agent/consul/subscribe_backend.go @@ -31,5 +31,5 @@ func (s subscribeBackend) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) } func (s subscribeBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) { - return s.srv.fsm.State().EventPublisher().Subscribe(req) + return s.srv.publisher.Subscribe(req) } diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go 
b/agent/grpc/private/services/subscribe/subscribe_test.go index d9d8d162d..c31959057 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -32,8 +32,7 @@ import ( ) func TestServer_Subscribe_KeyIsRequired(t *testing.T) { - backend, err := newTestBackend() - require.NoError(t, err) + backend := newTestBackend(t) addr := runTestServer(t, NewServer(backend, hclog.New(nil))) @@ -59,8 +58,7 @@ func TestServer_Subscribe_KeyIsRequired(t *testing.T) { } func TestServer_Subscribe_IntegrationWithBackend(t *testing.T) { - backend, err := newTestBackend() - require.NoError(t, err) + backend := newTestBackend(t) addr := runTestServer(t, NewServer(backend, hclog.New(nil))) ids := newCounter() @@ -312,6 +310,7 @@ func getEvent(t *testing.T, ch chan eventOrError) *pbsubscribe.Event { } type testBackend struct { + publisher *stream.EventPublisher store *state.Store authorizer func(token string, entMeta *acl.EnterpriseMeta) acl.Authorizer forwardConn *gogrpc.ClientConn @@ -333,19 +332,33 @@ func (b testBackend) Forward(_ structs.RPCInfo, fn func(*gogrpc.ClientConn) erro } func (b testBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) { - return b.store.EventPublisher().Subscribe(req) + return b.publisher.Subscribe(req) } -func newTestBackend() (*testBackend, error) { +func newTestBackend(t *testing.T) *testBackend { + t.Helper() gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) - if err != nil { - return nil, err - } - store := state.NewStateStoreWithEventPublisher(gc) + require.NoError(t, err) + + publisher := stream.NewEventPublisher(10 * time.Second) + + store := state.NewStateStoreWithEventPublisher(gc, publisher) + + // normally the handlers are registered on the FSM as state stores may come + // and go during snapshot restores. 
For the purposes of this test backend though we + // just register them directly to the publisher. + require.NoError(t, publisher.RegisterHandler(state.EventTopicCARoots, store.CARootsSnapshot)) + require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealth, store.ServiceHealthSnapshot)) + require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealthConnect, store.ServiceHealthSnapshot)) + + ctx, cancel := context.WithCancel(context.Background()) + go publisher.Run(ctx) + t.Cleanup(cancel) + allowAll := func(string, *acl.EnterpriseMeta) acl.Authorizer { return acl.AllowAll() } - return &testBackend{store: store, authorizer: allowAll}, nil + return &testBackend{publisher: publisher, store: store, authorizer: allowAll} } var _ Backend = (*testBackend)(nil) @@ -409,12 +422,10 @@ func raftIndex(ids *counter, created, modified string) *pbcommon.RaftIndex { } func TestServer_Subscribe_IntegrationWithBackend_ForwardToDC(t *testing.T) { - backendLocal, err := newTestBackend() - require.NoError(t, err) + backendLocal := newTestBackend(t) addrLocal := runTestServer(t, NewServer(backendLocal, hclog.New(nil))) - backendRemoteDC, err := newTestBackend() - require.NoError(t, err) + backendRemoteDC := newTestBackend(t) srvRemoteDC := NewServer(backendRemoteDC, hclog.New(nil)) addrRemoteDC := runTestServer(t, srvRemoteDC) @@ -642,8 +653,7 @@ func TestServer_Subscribe_IntegrationWithBackend_FilterEventsByACLToken(t *testi t.Skip("too slow for -short run") } - backend, err := newTestBackend() - require.NoError(t, err) + backend := newTestBackend(t) addr := runTestServer(t, NewServer(backend, hclog.New(nil))) token := "this-token-is-good" @@ -839,8 +849,7 @@ node "node1" { } func TestServer_Subscribe_IntegrationWithBackend_ACLUpdate(t *testing.T) { - backend, err := newTestBackend() - require.NoError(t, err) + backend := newTestBackend(t) addr := runTestServer(t, NewServer(backend, hclog.New(nil))) token := "this-token-is-good" @@ -1100,12 +1109,12 @@ func newPayloadEvents(items ...stream.Event) *stream.PayloadEvents { func newEventFromSubscription(t *testing.T, index uint64) stream.Event { t.Helper() - handlers := map[stream.Topic]stream.SnapshotFunc{ - pbsubscribe.Topic_ServiceHealthConnect: func(stream.SubscribeRequest, stream.SnapshotAppender) (index uint64, err error) { - return 1, nil - }, + serviceHealthConnectHandler := func(stream.SubscribeRequest, stream.SnapshotAppender) (index uint64, err error) { + return 1, nil } - ep := stream.NewEventPublisher(handlers, 0) + + ep := stream.NewEventPublisher(0) + ep.RegisterHandler(pbsubscribe.Topic_ServiceHealthConnect, serviceHealthConnectHandler) req := &stream.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealthConnect, Subject: stream.SubjectNone, Index: index} sub, err := ep.Subscribe(req) diff --git a/agent/grpc/public/services/connectca/server.go b/agent/grpc/public/services/connectca/server.go index 002f8e344..86edfdb54 100644 --- a/agent/grpc/public/services/connectca/server.go +++ b/agent/grpc/public/services/connectca/server.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbconnectca" ) @@ -17,13 +17,17 @@ type Server struct { } type Config struct { + Publisher EventPublisher GetStore func() StateStore Logger hclog.Logger ACLResolver ACLResolver } +type EventPublisher interface { + 
Subscribe(*stream.SubscribeRequest) (*stream.Subscription, error) +} + type StateStore interface { - EventPublisher() state.EventPublisher CAConfig(memdb.WatchSet) (uint64, *structs.CAConfiguration, error) AbandonCh() <-chan struct{} } diff --git a/agent/grpc/public/services/connectca/server_test.go b/agent/grpc/public/services/connectca/server_test.go index 6a4d42fa0..e74b7c094 100644 --- a/agent/grpc/public/services/connectca/server_test.go +++ b/agent/grpc/public/services/connectca/server_test.go @@ -3,6 +3,7 @@ package connectca import ( "context" "net" + "sync" "testing" "time" @@ -10,16 +11,66 @@ import ( "google.golang.org/grpc" "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/proto-public/pbconnectca" ) -func testStateStore(t *testing.T) *state.Store { +func testStateStore(t *testing.T, publisher state.EventPublisher) *state.Store { t.Helper() gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) require.NoError(t, err) - return state.NewStateStoreWithEventPublisher(gc) + return state.NewStateStoreWithEventPublisher(gc, publisher) +} + +type FakeFSM struct { + lock sync.Mutex + store *state.Store + publisher *stream.EventPublisher +} + +func newFakeFSM(t *testing.T, publisher *stream.EventPublisher) *FakeFSM { + t.Helper() + + store := testStateStore(t, publisher) + + fsm := FakeFSM{store: store, publisher: publisher} + + // register handlers + publisher.RegisterHandler(state.EventTopicCARoots, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + return fsm.GetStore().CARootsSnapshot(req, buf) + }) + + return &fsm +} + +func (f *FakeFSM) GetStore() *state.Store { + f.lock.Lock() + defer f.lock.Unlock() + return f.store +} + +func (f *FakeFSM) ReplaceStore(store *state.Store) { + f.lock.Lock() + defer f.lock.Unlock() + oldStore := f.store + f.store = store + oldStore.Abandon() + f.publisher.RefreshTopic(state.EventTopicCARoots) +} + +func setupFSMAndPublisher(t *testing.T) (*FakeFSM, state.EventPublisher) { + t.Helper() + publisher := stream.NewEventPublisher(10 * time.Second) + + fsm := newFakeFSM(t, publisher) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + go publisher.Run(ctx) + + return fsm, publisher } func testClient(t *testing.T, server *Server) pbconnectca.ConnectCAServiceClient { diff --git a/agent/grpc/public/services/connectca/watch_roots.go b/agent/grpc/public/services/connectca/watch_roots.go index eeaf2d8c8..7a7430783 100644 --- a/agent/grpc/public/services/connectca/watch_roots.go +++ b/agent/grpc/public/services/connectca/watch_roots.go @@ -68,7 +68,7 @@ func (s *Server) serveRoots( } // Start the subscription. 
- sub, err := store.EventPublisher().Subscribe(&stream.SubscribeRequest{ + sub, err := s.Publisher.Subscribe(&stream.SubscribeRequest{ Topic: state.EventTopicCARoots, Subject: stream.SubjectNone, Token: token, diff --git a/agent/grpc/public/services/connectca/watch_roots_test.go b/agent/grpc/public/services/connectca/watch_roots_test.go index d650a4d13..7bce07e1a 100644 --- a/agent/grpc/public/services/connectca/watch_roots_test.go +++ b/agent/grpc/public/services/connectca/watch_roots_test.go @@ -13,7 +13,6 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/consul/acl" @@ -22,19 +21,20 @@ import ( "github.com/hashicorp/consul/agent/grpc/public/testutils" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbconnectca" + "github.com/hashicorp/consul/sdk/testutil" ) const testACLToken = "acl-token" func TestWatchRoots_Success(t *testing.T) { - store := testStateStore(t) + fsm, publisher := setupFSMAndPublisher(t) // Set the initial roots and CA configuration. rootA := connect.TestCA(t, nil) - _, err := store.CARootSetCAS(1, 0, structs.CARoots{rootA}) + _, err := fsm.GetStore().CARootSetCAS(1, 0, structs.CARoots{rootA}) require.NoError(t, err) - err = store.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-id"}) + err = fsm.GetStore().CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-id"}) require.NoError(t, err) // Mock the ACL Resolver to return an authorizer with `service:write`. @@ -45,8 +45,9 @@ func TestWatchRoots_Success(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - GetStore: func() StateStore { return store }, - Logger: hclog.NewNullLogger(), + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), ACLResolver: aclResolver, }) @@ -65,7 +66,7 @@ func TestWatchRoots_Success(t *testing.T) { // Rotate the roots. rootB := connect.TestCA(t, nil) - _, err = store.CARootSetCAS(2, 1, structs.CARoots{rootB}) + _, err = fsm.GetStore().CARootSetCAS(2, 1, structs.CARoots{rootB}) require.NoError(t, err) // Expect another event containing the new roots. @@ -77,10 +78,10 @@ func TestWatchRoots_Success(t *testing.T) { } func TestWatchRoots_InvalidACLToken(t *testing.T) { - store := testStateStore(t) + fsm, publisher := setupFSMAndPublisher(t) // Set the initial CA configuration. - err := store.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-id"}) + err := fsm.GetStore().CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-id"}) require.NoError(t, err) // Mock the ACL resolver to return ErrNotFound. @@ -91,8 +92,9 @@ func TestWatchRoots_InvalidACLToken(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - GetStore: func() StateStore { return store }, - Logger: hclog.NewNullLogger(), + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), ACLResolver: aclResolver, }) @@ -108,14 +110,14 @@ func TestWatchRoots_InvalidACLToken(t *testing.T) { } func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { - store := testStateStore(t) + fsm, publisher := setupFSMAndPublisher(t) // Set the initial roots and CA configuration. 
rootA := connect.TestCA(t, nil) - _, err := store.CARootSetCAS(1, 0, structs.CARoots{rootA}) + _, err := fsm.GetStore().CARootSetCAS(1, 0, structs.CARoots{rootA}) require.NoError(t, err) - err = store.CASetConfig(2, &structs.CAConfiguration{ClusterID: "cluster-id"}) + err = fsm.GetStore().CASetConfig(2, &structs.CAConfiguration{ClusterID: "cluster-id"}) require.NoError(t, err) // Mock the ACL Resolver to return an authorizer with `service:write` the @@ -127,8 +129,9 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - GetStore: func() StateStore { return store }, - Logger: hclog.NewNullLogger(), + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), ACLResolver: aclResolver, }) @@ -144,7 +147,7 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { // Update the ACL token to cause the subscription to be force-closed. accessorID, err := uuid.GenerateUUID() require.NoError(t, err) - err = store.ACLTokenSet(1, &structs.ACLToken{ + err = fsm.GetStore().ACLTokenSet(1, &structs.ACLToken{ AccessorID: accessorID, SecretID: testACLToken, }) @@ -152,7 +155,7 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { // Update the roots. rootB := connect.TestCA(t, nil) - _, err = store.CARootSetCAS(3, 1, structs.CARoots{rootB}) + _, err = fsm.GetStore().CARootSetCAS(3, 1, structs.CARoots{rootB}) require.NoError(t, err) // Expect the stream to remain open and to receive the new roots. @@ -163,7 +166,7 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { Return(acl.DenyAll(), nil) // Update the ACL token to cause the subscription to be force-closed. - err = store.ACLTokenSet(1, &structs.ACLToken{ + err = fsm.GetStore().ACLTokenSet(1, &structs.ACLToken{ AccessorID: accessorID, SecretID: testACLToken, }) @@ -175,14 +178,14 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { } func TestWatchRoots_StateStoreAbandoned(t *testing.T) { - storeA := testStateStore(t) + fsm, publisher := setupFSMAndPublisher(t) // Set the initial roots and CA configuration. rootA := connect.TestCA(t, nil) - _, err := storeA.CARootSetCAS(1, 0, structs.CARoots{rootA}) + _, err := fsm.GetStore().CARootSetCAS(1, 0, structs.CARoots{rootA}) require.NoError(t, err) - err = storeA.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-a"}) + err = fsm.GetStore().CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-a"}) require.NoError(t, err) // Mock the ACL Resolver to return an authorizer with `service:write`. @@ -193,8 +196,9 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - GetStore: func() StateStore { return storeA }, - Logger: hclog.NewNullLogger(), + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), ACLResolver: aclResolver, }) @@ -208,7 +212,7 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { mustGetRoots(t, rspCh) // Simulate a snapshot restore. 
- storeB := testStateStore(t) + storeB := testStateStore(t, publisher) rootB := connect.TestCA(t, nil) _, err = storeB.CARootSetCAS(1, 0, structs.CARoots{rootB}) @@ -217,9 +221,7 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { err = storeB.CASetConfig(0, &structs.CAConfiguration{ClusterID: "cluster-b"}) require.NoError(t, err) - server.GetStore = func() StateStore { return storeB } - - storeA.Abandon() + fsm.ReplaceStore(storeB) // Expect to get the new store's roots. newRoots := mustGetRoots(t, rspCh) diff --git a/agent/submatview/store_integration_test.go b/agent/submatview/store_integration_test.go index 49cb67677..e8247b818 100644 --- a/agent/submatview/store_integration_test.go +++ b/agent/submatview/store_integration_test.go @@ -43,10 +43,8 @@ func TestStore_IntegrationWithBackend(t *testing.T) { } sh := snapshotHandler{producers: producers} - handlers := map[stream.Topic]stream.SnapshotFunc{ - pbsubscribe.Topic_ServiceHealth: sh.Snapshot, - } - pub := stream.NewEventPublisher(handlers, 10*time.Millisecond) + pub := stream.NewEventPublisher(10 * time.Millisecond) + pub.RegisterHandler(pbsubscribe.Topic_ServiceHealth, sh.Snapshot) ctx, cancel := context.WithCancel(context.Background()) defer cancel() From a2a9c963d6150284939eb88980ec203c3e8db578 Mon Sep 17 00:00:00 2001 From: John Murret Date: Tue, 12 Apr 2022 10:44:41 -0600 Subject: [PATCH 130/785] Correcting an uncapitalized word setup at the beginning of titles to be capitalized in vault section. (#12759) --- build-support/functions/20-build.sh | 2 +- .../installation/vault/data-integration/bootstrap-token.mdx | 4 ++-- .../k8s/installation/vault/data-integration/connect-ca.mdx | 4 ++-- .../vault/data-integration/enterprise-license.mdx | 4 ++-- .../docs/k8s/installation/vault/data-integration/gossip.mdx | 4 ++-- .../docs/k8s/installation/vault/data-integration/index.mdx | 4 ++-- .../installation/vault/data-integration/partition-token.mdx | 4 ++-- .../installation/vault/data-integration/replication-token.mdx | 4 ++-- .../k8s/installation/vault/data-integration/server-tls.mdx | 4 ++-- .../vault/data-integration/snapshot-agent-config.mdx | 4 ++-- 10 files changed, 19 insertions(+), 19 deletions(-) diff --git a/build-support/functions/20-build.sh b/build-support/functions/20-build.sh index dff71a6bf..c2452bae4 100644 --- a/build-support/functions/20-build.sh +++ b/build-support/functions/20-build.sh @@ -316,7 +316,7 @@ function build_consul { status "Ensuring Go modules are up to date" # ensure our go module cache is correct go_mod_assert || return 1 - # setup to bind mount our hosts module cache into the container + # Setup to bind mount our hosts module cache into the container volume_mount="--mount=type=bind,source=${MAIN_GOPATH}/pkg/mod,target=/go/pkg/mod" fi diff --git a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx index a3145d775..cf2d47df4 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx @@ -20,7 +20,7 @@ To use an ACL bootstrap token stored in Vault, we will follow the steps outlined 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. 
Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -55,7 +55,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write boostrap-token-policy boostrap-token-policy.hcl ``` -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, you will create Kubernetes auth roles for the Consul `server-acl-init` container that runs as part of the Consul server statefulset: diff --git a/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx b/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx index ad7b9c863..6cfc276d2 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx @@ -24,7 +24,7 @@ To use an Vault as the Service Mesh Certificate Provider on Kubernetes, we will ### One time setup in Vault 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -40,7 +40,7 @@ you will first need to decide on the type of policy that is suitable for you. To see the permissions that Consul would need in Vault, please see [Vault ACL policies](/docs/connect/ca/vault#vault-acl-policies) documentation. -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, you will create Kubernetes auth roles for the Consul servers: diff --git a/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx b/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx index 8f89e4f54..a744f700d 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx @@ -19,7 +19,7 @@ To use an enterprise license stored in Vault, we will follow the steps outlined 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. 
@@ -54,7 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write license-policy license-policy.hcl ``` -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, you will create Kubernetes auth roles for the Consul server and client: diff --git a/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx b/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx index e0360cc3c..2482559b0 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx @@ -19,7 +19,7 @@ To use a gossip encryption key stored in Vault, we will follow the steps outline 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -52,7 +52,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write gossip-policy gossip-policy.hcl ``` -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, we will create Kubernetes auth roles for the Consul server and client: diff --git a/website/content/docs/k8s/installation/vault/data-integration/index.mdx b/website/content/docs/k8s/installation/vault/data-integration/index.mdx index e8dea2183..3f9060a81 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/index.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/index.mdx @@ -20,7 +20,7 @@ Generally, for each secret you wish to store in Vault, the process to integrate 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -#### setup per Consul datacenter +#### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -35,7 +35,7 @@ Following the general integraiton steps, a more detailed workflow for integratio 1. Create a Vault policy that authorizes the desired level of access to the secret. - Create a Vault policy that you name `gossip-policy` which allows `read` access to the path `secret/consul/gossip`. -#### setup per Consul datacenter +#### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 
- Both Consul servers and Consul clients need access to the gossip encryption key, so you create two Vault Kubernetes: diff --git a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx index 0d0f9bb84..704797564 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx @@ -19,7 +19,7 @@ To use an ACL partition token stored in Vault, we will follow the steps outlined 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -54,7 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write partition-token-policy partition-token-policy.hcl ``` -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, you will create Kubernetes auth roles for the Consul `server-acl-init` job: diff --git a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx index 74b854748..90534df40 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx @@ -19,7 +19,7 @@ To use an ACL replication token stored in Vault, we will follow the steps outlin 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -54,7 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write replication-token-policy replication-token-policy.hcl ``` -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, you will create Kubernetes auth roles for the Consul `server-acl-init` job: diff --git a/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx b/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx index 35b187f00..ca362f807 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx @@ -47,7 +47,7 @@ To use an Vault as the Server TLS Certificate Provider on Kubernetes, we will ne ### One time setup in Vault 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. (Added) Create a Vault PKI role that establishes the domains that it is allowed to issue certificates for 1. 
Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -108,7 +108,7 @@ $ vault policy write ca-policy ca-policy.hcl -> **Note:** The PKI secret path referenced by the above Policy will be your `global.tls.caCert.secretName` Helm value. -## setup per Consul datacenter +## Setup per Consul datacenter ### Create a Vault PKI role that establishes the domains that it is allowed to issue certificates for Next, a Vault role for the PKI engine will set the default certificate issuance parameters: diff --git a/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx b/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx index fee102dcb..6e6d4970c 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx @@ -20,7 +20,7 @@ To use an ACL replication token stored in Vault, we will follow the steps outlin 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### setup per Consul datacenter +### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. @@ -56,7 +56,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write snapshot-agent-config-policy snapshot-agent-config-policy.hcl ``` -## setup per Consul datacenter +## Setup per Consul datacenter ### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access Next, you will create a Kubernetes auth role for the Consul snapshot agent: From cf7e6484aa4a9222aa6079e37ca64d720c9b14aa Mon Sep 17 00:00:00 2001 From: FFMMM Date: Tue, 12 Apr 2022 10:50:25 -0700 Subject: [PATCH 131/785] add more labels to RequestRecorder (#12727) Co-authored-by: Daniel Nephin Signed-off-by: FFMMM --- .changelog/12727.txt | 4 + agent/consul/options.go | 2 +- agent/consul/server.go | 41 ++-- agent/consul/server_test.go | 15 +- agent/metrics_test.go | 2 +- agent/rpc/middleware/interceptors.go | 90 ++++++- agent/rpc/middleware/interceptors_test.go | 271 ++++++++++++++++------ 7 files changed, 320 insertions(+), 105 deletions(-) create mode 100644 .changelog/12727.txt diff --git a/.changelog/12727.txt b/.changelog/12727.txt new file mode 100644 index 000000000..9ec5da457 --- /dev/null +++ b/.changelog/12727.txt @@ -0,0 +1,4 @@ +```release-note:improvement +telemetry: Add new `leader` label to `consul.rpc.server.call` and optional `target_datacenter`, `locality`, +`allow_stale`, and `blocking` optional labels. +``` \ No newline at end of file diff --git a/agent/consul/options.go b/agent/consul/options.go index e253864a5..a4cd299e3 100644 --- a/agent/consul/options.go +++ b/agent/consul/options.go @@ -25,7 +25,7 @@ type Deps struct { // the rpc server. 
GetNetRPCInterceptorFunc func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor // NewRequestRecorderFunc provides a middleware.RequestRecorder for the server to use; it cannot be nil - NewRequestRecorderFunc func(logger hclog.Logger) *middleware.RequestRecorder + NewRequestRecorderFunc func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder EnterpriseDeps } diff --git a/agent/consul/server.go b/agent/consul/server.go index 401954d85..eaf974032 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -386,24 +386,6 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve serverLogger := flat.Logger.NamedIntercept(logging.ConsulServer) loggers := newLoggerStore(serverLogger) - var recorder *middleware.RequestRecorder - if flat.NewRequestRecorderFunc == nil { - return nil, fmt.Errorf("cannot initialize server without an RPC request recorder provider") - } - recorder = flat.NewRequestRecorderFunc(serverLogger) - if recorder == nil { - return nil, fmt.Errorf("cannot initialize server without a non nil RPC request recorder") - } - - var rpcServer, insecureRPCServer *rpc.Server - if flat.GetNetRPCInterceptorFunc == nil { - rpcServer = rpc.NewServer() - insecureRPCServer = rpc.NewServer() - } else { - rpcServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) - insecureRPCServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) - } - eventPublisher := stream.NewEventPublisher(10 * time.Second) fsmDeps := fsm.Deps{ @@ -427,9 +409,6 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve leaveCh: make(chan struct{}), reconcileCh: make(chan serf.Member, reconcileChSize), router: flat.Router, - rpcRecorder: recorder, - rpcServer: rpcServer, - insecureRPCServer: insecureRPCServer, tlsConfigurator: flat.TLSConfigurator, publicGRPCServer: publicGRPCServer, reassertLeaderCh: make(chan chan error), @@ -443,6 +422,26 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve publisher: eventPublisher, } + var recorder *middleware.RequestRecorder + if flat.NewRequestRecorderFunc != nil { + recorder = flat.NewRequestRecorderFunc(serverLogger, s.IsLeader, s.config.Datacenter) + } else { + return nil, fmt.Errorf("cannot initialize server without an RPC request recorder provider") + } + if recorder == nil { + return nil, fmt.Errorf("cannot initialize server with a nil RPC request recorder") + } + + if flat.GetNetRPCInterceptorFunc == nil { + s.rpcServer = rpc.NewServer() + s.insecureRPCServer = rpc.NewServer() + } else { + s.rpcServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) + s.insecureRPCServer = rpc.NewServerWithOpts(rpc.WithServerServiceCallInterceptor(flat.GetNetRPCInterceptorFunc(recorder))) + } + + s.rpcRecorder = recorder + go s.publisher.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) if s.config.ConnectMeshGatewayWANFederationEnabled { diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 5c06fb4d9..0d6a4925b 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -1172,7 +1172,8 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { // note that there will be "internal" net/rpc calls made // that will still show up; those don't go thru the net/rpc interceptor; // see consul.agent.rpc.middleware.RPCTypeInternal for context - 
deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder { + // for the purposes of this test, we don't need isLeader or localDC return &middleware.RequestRecorder{ Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), RecorderFunc: simpleRecorderFunc, @@ -1205,7 +1206,8 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { // note that there will be "internal" net/rpc calls made // that will still show up; those don't go thru the net/rpc interceptor; // see consul.agent.rpc.middleware.RPCTypeInternal for context - deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder { + // for the purposes of this test, we don't need isLeader or localDC return &middleware.RequestRecorder{ Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), RecorderFunc: simpleRecorderFunc, @@ -1265,14 +1267,14 @@ func TestServer_RPC_RequestRecorder(t *testing.T) { t.Run("test nil RequestRecorder", func(t *testing.T) { _, conf := testServerConfig(t) deps := newDefaultDeps(t, conf) - deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder { return nil } s2, err := NewServer(conf, deps, grpc.NewServer()) require.Error(t, err, "need err when RequestRecorder is nil") - require.Equal(t, err.Error(), "cannot initialize server without a non nil RPC request recorder") + require.Equal(t, err.Error(), "cannot initialize server with a nil RPC request recorder") t.Cleanup(func() { if s2 != nil { @@ -1308,7 +1310,8 @@ func TestServer_RPC_MetricsIntercept(t *testing.T) { simpleRecorderFunc := func(key []string, val float32, labels []metrics.Label) { storage[keyMakingFunc(key, labels)] = val } - deps.NewRequestRecorderFunc = func(logger hclog.Logger) *middleware.RequestRecorder { + deps.NewRequestRecorderFunc = func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder { + // for the purposes of this test, we don't need isLeader or localDC return &middleware.RequestRecorder{ Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), RecorderFunc: simpleRecorderFunc, @@ -1344,11 +1347,13 @@ func TestServer_RPC_MetricsIntercept(t *testing.T) { {Name: "errored", Value: "false"}, {Name: "request_type", Value: "read"}, {Name: "rpc_type", Value: "test"}, + {Name: "server_role", Value: "unreported"}, } key := keyMakingFunc(middleware.OneTwelveRPCSummary[0].Name, expectedLabels) if _, ok := storage[key]; !ok { + // the compound key will look like: "rpc+server+call+Status.Ping+false+read+test+unreported" t.Fatalf("Did not find key %s in the metrics log, ", key) } }) diff --git a/agent/metrics_test.go b/agent/metrics_test.go index 448694e3e..76b3fff28 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -199,7 +199,7 @@ func TestAgent_OneTwelveRPCMetrics(t *testing.T) { recordPromMetrics(t, a, respRec) // make sure the labels exist for this metric - assertMetricExistsWithLabels(t, respRec, metricsPrefix+"_rpc_server_call", []string{"errored", "method", "request_type", "rpc_type"}) + assertMetricExistsWithLabels(t, respRec, metricsPrefix+"_rpc_server_call", []string{"errored", "method", "request_type", "rpc_type", "leader"}) // make 
sure we see 3 Status.Ping metrics corresponding to the calls we made above assertLabelWithValueForMetricExistsNTime(t, respRec, metricsPrefix+"_rpc_server_call", "method", "Status.Ping", 3) }) diff --git a/agent/rpc/middleware/interceptors.go b/agent/rpc/middleware/interceptors.go index ba6747c3a..049283ac2 100644 --- a/agent/rpc/middleware/interceptors.go +++ b/agent/rpc/middleware/interceptors.go @@ -33,33 +33,79 @@ var OneTwelveRPCSummary = []prometheus.SummaryDefinition{ } type RequestRecorder struct { - Logger hclog.Logger - RecorderFunc func(key []string, val float32, labels []metrics.Label) + Logger hclog.Logger + RecorderFunc func(key []string, val float32, labels []metrics.Label) + serverIsLeader func() bool + localDC string } -func NewRequestRecorder(logger hclog.Logger) *RequestRecorder { - return &RequestRecorder{Logger: logger, RecorderFunc: metrics.AddSampleWithLabels} +func NewRequestRecorder(logger hclog.Logger, isLeader func() bool, localDC string) *RequestRecorder { + return &RequestRecorder{ + Logger: logger, + RecorderFunc: metrics.AddSampleWithLabels, + serverIsLeader: isLeader, + localDC: localDC, + } } func (r *RequestRecorder) Record(requestName string, rpcType string, start time.Time, request interface{}, respErrored bool) { elapsed := time.Since(start).Milliseconds() reqType := requestType(request) + isLeader := r.getServerLeadership() labels := []metrics.Label{ {Name: "method", Value: requestName}, {Name: "errored", Value: strconv.FormatBool(respErrored)}, {Name: "request_type", Value: reqType}, {Name: "rpc_type", Value: rpcType}, + {Name: "leader", Value: isLeader}, } + labels = r.addOptionalLabels(request, labels) + // math.MaxInt64 < math.MaxFloat32 is true so we should be good! r.RecorderFunc(metricRPCRequest, float32(elapsed), labels) - r.Logger.Trace(requestLogName, - "method", requestName, - "errored", respErrored, - "request_type", reqType, - "rpc_type", rpcType, - "elapsed", elapsed) + + labelsArr := flattenLabels(labels) + r.Logger.Trace(requestLogName, labelsArr...) + +} + +func flattenLabels(labels []metrics.Label) []interface{} { + + var labelArr []interface{} + for _, label := range labels { + labelArr = append(labelArr, label.Name, label.Value) + } + + return labelArr +} + +func (r *RequestRecorder) addOptionalLabels(request interface{}, labels []metrics.Label) []metrics.Label { + if rq, ok := request.(readQuery); ok { + labels = append(labels, + metrics.Label{ + Name: "allow_stale", + Value: strconv.FormatBool(rq.AllowStaleRead()), + }, + metrics.Label{ + Name: "blocking", + Value: strconv.FormatBool(rq.GetMinQueryIndex() > 0), + }) + } + + if td, ok := request.(targetDC); ok { + requestDC := td.RequestDatacenter() + labels = append(labels, metrics.Label{Name: "target_datacenter", Value: requestDC}) + + if r.localDC == requestDC { + labels = append(labels, metrics.Label{Name: "locality", Value: "local"}) + } else { + labels = append(labels, metrics.Label{Name: "locality", Value: "forwarded"}) + } + } + + return labels } func requestType(req interface{}) string { @@ -77,6 +123,30 @@ func requestType(req interface{}) string { return "unreported" } +func (r *RequestRecorder) getServerLeadership() string { + if r.serverIsLeader != nil { + if r.serverIsLeader() { + return "true" + } else { + return "false" + } + } + + // This logical branch should not happen. 
If it happens + // it means that we have not plumbed down a way to verify + // whether the server handling the request was a leader or not + return "unreported" +} + +type readQuery interface { + GetMinQueryIndex() uint64 + AllowStaleRead() bool +} + +type targetDC interface { + RequestDatacenter() string +} + func GetNetRPCInterceptor(recorder *RequestRecorder) rpc.ServerServiceCallInterceptor { return func(reqServiceMethod string, argv, replyv reflect.Value, handler func() error) { reqStart := time.Now() diff --git a/agent/rpc/middleware/interceptors_test.go b/agent/rpc/middleware/interceptors_test.go index 63fbefecb..d9676846b 100644 --- a/agent/rpc/middleware/interceptors_test.go +++ b/agent/rpc/middleware/interceptors_test.go @@ -48,6 +48,8 @@ var simpleRecorderFunc = func(key []string, val float32, labels []metrics.Label) type readRequest struct{} type writeRequest struct{} +type readReqWithTD struct{} +type writeReqWithTD struct{} func (rr readRequest) IsRead() bool { return true @@ -57,75 +59,210 @@ func (wr writeRequest) IsRead() bool { return false } -// TestRequestRecorder_SimpleOK tests that the RequestRecorder can record a simple request. -func TestRequestRecorder_SimpleOK(t *testing.T) { - t.Parallel() - - r := RequestRecorder{ - Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), - RecorderFunc: simpleRecorderFunc, - } - - start := time.Now() - r.Record("A.B", RPCTypeInternal, start, struct{}{}, false) - - expectedLabels := []metrics.Label{ - {Name: "method", Value: "A.B"}, - {Name: "errored", Value: "false"}, - {Name: "request_type", Value: "unreported"}, - {Name: "rpc_type", Value: RPCTypeInternal}, - } - - o := store.get(append(metricRPCRequest, expectedLabels[0].Value)) - require.Equal(t, o.key, metricRPCRequest) - require.LessOrEqual(t, o.elapsed, float32(start.Sub(time.Now()).Milliseconds())) - require.Equal(t, o.labels, expectedLabels) +func (r readReqWithTD) IsRead() bool { + return true } -// TestRequestRecorder_ReadRequest tests that RequestRecorder can record a read request AND a responseErrored arg. -func TestRequestRecorder_ReadRequest(t *testing.T) { - t.Parallel() - - r := RequestRecorder{ - Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), - RecorderFunc: simpleRecorderFunc, - } - - start := time.Now() - - r.Record("B.A", RPCTypeNetRPC, start, readRequest{}, true) - - expectedLabels := []metrics.Label{ - {Name: "method", Value: "B.A"}, - {Name: "errored", Value: "true"}, - {Name: "request_type", Value: "read"}, - {Name: "rpc_type", Value: RPCTypeNetRPC}, - } - - o := store.get(append(metricRPCRequest, expectedLabels[0].Value)) - require.Equal(t, o.labels, expectedLabels) +func (r readReqWithTD) RequestDatacenter() string { + return "dc3" } -// TestRequestRecorder_WriteRequest tests that RequestRecorder can record a write request. 
-func TestRequestRecorder_WriteRequest(t *testing.T) { - t.Parallel() - - r := RequestRecorder{ - Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), - RecorderFunc: simpleRecorderFunc, - } - - start := time.Now() - - r.Record("B.C", RPCTypeNetRPC, start, writeRequest{}, true) - - expectedLabels := []metrics.Label{ - {Name: "method", Value: "B.C"}, - {Name: "errored", Value: "true"}, - {Name: "request_type", Value: "write"}, - {Name: "rpc_type", Value: RPCTypeNetRPC}, - } - - o := store.get(append(metricRPCRequest, expectedLabels[0].Value)) - require.Equal(t, o.labels, expectedLabels) +func (r readReqWithTD) GetMinQueryIndex() uint64 { + return 1 +} +func (r readReqWithTD) AllowStaleRead() bool { + return false +} + +func (w writeReqWithTD) IsRead() bool { + return false +} + +func (w writeReqWithTD) RequestDatacenter() string { + return "dc2" +} + +type testCase struct { + name string + // description is meant for human friendliness + description string + // requestName is encouraged to be unique across tests to + // avoid lock contention + requestName string + requestI interface{} + rpcType string + errored bool + isLeader func() bool + dc string + // the first element in expectedLabels should be the method name + expectedLabels []metrics.Label +} + +var testCases = []testCase{ + { + name: "simple ok", + description: "This is a simple happy path test case. We check for pass through and normal request processing", + requestName: "A.B", + requestI: struct{}{}, + rpcType: RPCTypeInternal, + errored: false, + dc: "dc1", + expectedLabels: []metrics.Label{ + {Name: "method", Value: "A.B"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "unreported"}, + {Name: "rpc_type", Value: RPCTypeInternal}, + {Name: "leader", Value: "unreported"}, + }, + }, + { + name: "simple ok errored", + description: "Checks that the errored value is populated right.", + requestName: "A.C", + requestI: struct{}{}, + rpcType: "test", + errored: true, + dc: "dc1", + expectedLabels: []metrics.Label{ + {Name: "method", Value: "A.C"}, + {Name: "errored", Value: "true"}, + {Name: "request_type", Value: "unreported"}, + {Name: "rpc_type", Value: "test"}, + {Name: "leader", Value: "unreported"}, + }, + }, + { + name: "read request, rpc type internal", + description: "Checks for read request interface parsing", + requestName: "B.C", + requestI: readRequest{}, + rpcType: RPCTypeInternal, + errored: false, + dc: "dc1", + expectedLabels: []metrics.Label{ + {Name: "method", Value: "B.C"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "read"}, + {Name: "rpc_type", Value: RPCTypeInternal}, + {Name: "leader", Value: "unreported"}, + }, + }, + { + name: "write request, rpc type net/rpc", + description: "Checks for write request interface, different RPC type", + requestName: "D.E", + requestI: writeRequest{}, + rpcType: RPCTypeNetRPC, + errored: false, + dc: "dc1", + expectedLabels: []metrics.Label{ + {Name: "method", Value: "D.E"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "write"}, + {Name: "rpc_type", Value: RPCTypeNetRPC}, + {Name: "leader", Value: "unreported"}, + }, + }, + { + name: "read request with blocking stale and target dc", + description: "Checks for locality, blocking status and target dc", + requestName: "E.F", + requestI: readReqWithTD{}, + rpcType: RPCTypeNetRPC, + errored: false, + dc: "dc1", + expectedLabels: []metrics.Label{ + {Name: "method", Value: "E.F"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "read"}, + 
{Name: "rpc_type", Value: RPCTypeNetRPC}, + {Name: "leader", Value: "unreported"}, + {Name: "allow_stale", Value: "false"}, + {Name: "blocking", Value: "true"}, + {Name: "target_datacenter", Value: "dc3"}, + {Name: "locality", Value: "forwarded"}, + }, + }, + { + name: "write request with TD, locality local", + description: "Checks for write request with local forwarding and target dc", + requestName: "F.G", + requestI: writeReqWithTD{}, + rpcType: RPCTypeNetRPC, + errored: false, + dc: "dc2", + expectedLabels: []metrics.Label{ + {Name: "method", Value: "F.G"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "write"}, + {Name: "rpc_type", Value: RPCTypeNetRPC}, + {Name: "leader", Value: "unreported"}, + {Name: "target_datacenter", Value: "dc2"}, + {Name: "locality", Value: "local"}, + }, + }, + { + name: "is leader", + description: "checks for is leader", + requestName: "G.H", + requestI: struct{}{}, + rpcType: "test", + errored: false, + isLeader: func() bool { + return true + }, + expectedLabels: []metrics.Label{ + {Name: "method", Value: "G.H"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "unreported"}, + {Name: "rpc_type", Value: "test"}, + {Name: "leader", Value: "true"}, + }, + }, + { + name: "is not leader", + description: "checks for is not leader", + requestName: "H.I", + requestI: struct{}{}, + rpcType: "test", + errored: false, + isLeader: func() bool { + return false + }, + expectedLabels: []metrics.Label{ + {Name: "method", Value: "H.I"}, + {Name: "errored", Value: "false"}, + {Name: "request_type", Value: "unreported"}, + {Name: "rpc_type", Value: "test"}, + {Name: "leader", Value: "false"}, + }, + }, +} + +// TestRequestRecorder goes over all the parsing and reporting that RequestRecorder +// is expected to perform. +func TestRequestRecorder(t *testing.T) { + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + r := RequestRecorder{ + Logger: hclog.NewInterceptLogger(&hclog.LoggerOptions{}), + RecorderFunc: simpleRecorderFunc, + serverIsLeader: tc.isLeader, + localDC: tc.dc, + } + + start := time.Now() + r.Record(tc.requestName, tc.rpcType, start, tc.requestI, tc.errored) + + key := append(metricRPCRequest, tc.expectedLabels[0].Value) + o := store.get(key) + + require.Equal(t, o.key, metricRPCRequest) + require.LessOrEqual(t, o.elapsed, float32(start.Sub(time.Now()).Milliseconds())) + require.Equal(t, o.labels, tc.expectedLabels) + + }) + } } From 005e5f12d802534e2f7e4ba7055e3f9e42dd7e01 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Tue, 12 Apr 2022 13:41:12 -0500 Subject: [PATCH 132/785] deps: update to latest go-discover (#12739) Fixes #11253 $ go mod why -m github.com/dgrijalva/jwt-go # github.com/dgrijalva/jwt-go (main module does not need module github.com/dgrijalva/jwt-go) $ go mod why -m github.com/form3tech-oss/jwt-go # github.com/form3tech-oss/jwt-go github.com/hashicorp/consul/agent github.com/hashicorp/go-discover github.com/hashicorp/go-discover/provider/azure github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/adal github.com/form3tech-oss/jwt-go --- .changelog/12739.txt | 3 +++ go.mod | 2 +- go.sum | 20 +++++++++++++------- 3 files changed, 17 insertions(+), 8 deletions(-) create mode 100644 .changelog/12739.txt diff --git a/.changelog/12739.txt b/.changelog/12739.txt new file mode 100644 index 000000000..83ea33c2c --- /dev/null +++ b/.changelog/12739.txt @@ -0,0 +1,3 @@ +```release-note:improvement +deps: update to latest go-discover to fix vulnerable transitive jwt-go dependency +``` diff --git a/go.mod b/go.mod index 47b494c46..8d7134af3 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/hashicorp/go-checkpoint v0.5.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-connlimit v0.3.0 - github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 + github.com/hashicorp/go-discover v0.0.0-20220411141802-20db45f7f0f9 github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/go-memdb v1.3.2 github.com/hashicorp/go-multierror v1.1.1 diff --git a/go.sum b/go.sum index fb093ee1e..9311ff481 100644 --- a/go.sum +++ b/go.sum @@ -35,11 +35,13 @@ github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.11.0 h1:tnO41Uo+/0sxTMFY/U7aKg2abek3JOnnXcuSuba74jI= github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 h1:nSMjYIe24eBYasAIxt859TxyXef/IqoH+8/g4+LmcVs= github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU= @@ -49,15 +51,17 @@ github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8K github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss= github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= @@ -143,7 +147,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= @@ -174,6 +177,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.0 h1:Yyrghcw93e1jKo4DTZkRFTTFvBsVhzbblBUPNU1vW6Q= github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -301,8 +306,8 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp 
v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-connlimit v0.3.0 h1:oAojHGjFxUTTTA8c5XXnDqWJ2HLuWbDiBPTpWvNzvqM= github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= -github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 h1:eje2KOX8Sf7aYPiAsLnpWdAIrGRMcpFjN/Go/Exb7Zo= -github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= +github.com/hashicorp/go-discover v0.0.0-20220411141802-20db45f7f0f9 h1:2GsEkBZf1q4LKZjtd4cO+V0xd85xGCMolX3ebC2+xd4= +github.com/hashicorp/go-discover v0.0.0-20220411141802-20db45f7f0f9/go.mod h1:1xfdKvc3pe5WKxfUUHHOGaKMk7NLGhHY1jkyhKo6098= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -641,6 +646,7 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= From 0c05f6d2e377a809f35ebfe4155417fcea4b4b5b Mon Sep 17 00:00:00 2001 From: FFMMM Date: Tue, 12 Apr 2022 11:53:30 -0700 Subject: [PATCH 133/785] add docs for new labels (#12757) --- website/content/docs/agent/telemetry.mdx | 25 ++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 7296ed208..208e3bc5d 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -537,12 +537,13 @@ Note that values of the `consul.rpc.server.call` may emit as `0 ms`. That means The the server workload metrics above come with the following labels: -| Label Name | Description | Possible values | -| ------------------------------------- | --------------------------------------------------------- | --------------------------------------- | -| `method` | The name of the RPC method. | The value of any RPC request in Consul. | -| `errored` | Indicates whether the RPC call errored. | `True` or `False`. | -| `request_type` | Whether it is a `read` or `write` request. | `read`, `write` or `unreported`. | -| `rpc_type` | The RPC implementation. | `net/rpc` or `internal`. | +| Label Name | Description | Possible values | +| ------------------------------------- | -------------------------------------------------------------------- | --------------------------------------- | +| `method` | The name of the RPC method. | The value of any RPC request in Consul. | +| `errored` | Indicates whether the RPC call errored. | `true` or `false`. | +| `request_type` | Whether it is a `read` or `write` request. 
| `read`, `write` or `unreported`. | +| `rpc_type` | The RPC implementation. | `net/rpc` or `internal`. | +| `leader` | Whether the server was a `leader` or not at the time of the request. | `true`, `false` or `unreported`. | #### Label Explanations @@ -551,6 +552,18 @@ Historically, `internal` RPC operation metrics were accounted under the same met The `unreported` value for the `request_type` in the table above refers to RPC requests within Consul where it is difficult to ascertain whether a request is `read` or `write` type. +The `unreported` value for the `leader` label in the table above refers to RPC requests where Consul cannot determine the leadership status for a server. + +#### Read Request Labels + +In addition to the labels above, for read requests, the following may be populated: + +| Label Name | Description | Possible values | +| ------------------------------------- | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `blocking` | Whether the read request passed in a `MinQueryIndex`. | `true` if a MinQueryIndex was passed, `false` otherwise. | +| `target_datacenter` | The target datacenter for the read request. | The string value of the target datacenter for the request. | +| `locality` | Gives an indication of whether the RPC request is local or has been forwarded. | `local` if current server data center is the same as `target_datacenter`, otherwise `forwarded`. | + Here is a Prometheus style example of an RPC metric and its labels: From 1dd8e24bd11f0b928c0e9a5a0333510fe62334f7 Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Tue, 12 Apr 2022 16:01:35 -0400 Subject: [PATCH 134/785] add changelog for enterprise fix (#12761) --- .changelog/_1728.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/_1728.txt diff --git a/.changelog/_1728.txt b/.changelog/_1728.txt new file mode 100644 index 000000000..b85f9a0ff --- /dev/null +++ b/.changelog/_1728.txt @@ -0,0 +1,3 @@ +```release-note:bug +usagemetrics: **(Enterprise only)** Fix a bug where Consul usage metrics stopped being reported when upgrading servers from 1.10 to 1.11 or later. 
+``` From bd1e80f0c6e0298c0a4e69f2c2c560ffc2e05f10 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 13 Apr 2022 10:33:31 +0100 Subject: [PATCH 135/785] ui: Don't automatically move rz read-replicas out of the rz (#12740) --- ui/packages/consul-ui/app/services/repository/dc.js | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/ui/packages/consul-ui/app/services/repository/dc.js b/ui/packages/consul-ui/app/services/repository/dc.js index f47dfc683..59887967e 100644 --- a/ui/packages/consul-ui/app/services/repository/dc.js +++ b/ui/packages/consul-ui/app/services/repository/dc.js @@ -130,16 +130,9 @@ export default class DcService extends RepositoryService { // convert the string[] to Server[] Servers: value.Servers.reduce((prev, item) => { const server = body.Servers[item]; - // TODO: It is not currently clear whether we should be - // taking ReadReplicas out of the RedundancyZones when we - // encounter one in a Zone once this is cleared up either - // way we can either remove this comment or make any - // necessary amends here - if(!server.ReadReplica) { - // keep a record of things - grouped.push(server.ID); - prev.push(server); - } + // keep a record of things + grouped.push(server.ID); + prev.push(server); return prev; }, []), } From 83799eaea5d9e7ae2d76eb5d56062231e07e4702 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 13 Apr 2022 10:34:09 +0100 Subject: [PATCH 136/785] ui: Show read-replica health status (#12758) --- .../consul-ui/app/components/consul/server/card/index.hbs | 2 -- 1 file changed, 2 deletions(-) diff --git a/ui/packages/consul-ui/app/components/consul/server/card/index.hbs b/ui/packages/consul-ui/app/components/consul/server/card/index.hbs index 333ebd667..7c838d829 100644 --- a/ui/packages/consul-ui/app/components/consul/server/card/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/server/card/index.hbs @@ -24,7 +24,6 @@ {{@item.Name}} -{{#if (not @item.ReadReplica)}}
{{if (contains @item.Status (array 'leader' 'voter')) 'Active voter' 'Backup voter'}} -{{/if}}
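
The RequestRecorder changes in [PATCH 131/785] above derive the new optional RPC labels from two small, unexported interfaces rather than from concrete request structs. The following is a minimal illustrative sketch and is not part of the patch series: it re-implements that derivation in isolation, and every name in it other than the label names (`allow_stale`, `blocking`, `target_datacenter`, `locality`) and the mirrored interface methods is invented for the example.

```go
package main

import (
	"fmt"
	"strconv"
)

// readQuery and targetDC mirror the unexported interfaces added to
// agent/rpc/middleware/interceptors.go in the patch above.
type readQuery interface {
	GetMinQueryIndex() uint64
	AllowStaleRead() bool
}

type targetDC interface {
	RequestDatacenter() string
}

// exampleRequest is an invented request type for illustration only;
// it is not a real Consul RPC request.
type exampleRequest struct{}

func (exampleRequest) GetMinQueryIndex() uint64  { return 42 } // > 0 is reported as blocking=true
func (exampleRequest) AllowStaleRead() bool      { return true }
func (exampleRequest) RequestDatacenter() string { return "dc2" }

// optionalLabels re-implements, in simplified form, what
// RequestRecorder.addOptionalLabels does in the patch: type-assert the
// request against the two interfaces and derive labels from whichever
// interfaces it satisfies.
func optionalLabels(req interface{}, localDC string) map[string]string {
	labels := map[string]string{}
	if rq, ok := req.(readQuery); ok {
		labels["allow_stale"] = strconv.FormatBool(rq.AllowStaleRead())
		labels["blocking"] = strconv.FormatBool(rq.GetMinQueryIndex() > 0)
	}
	if td, ok := req.(targetDC); ok {
		labels["target_datacenter"] = td.RequestDatacenter()
		if td.RequestDatacenter() == localDC {
			labels["locality"] = "local"
		} else {
			labels["locality"] = "forwarded"
		}
	}
	return labels
}

func main() {
	// A server in dc1 handling this request would report:
	// allow_stale=true blocking=true target_datacenter=dc2 locality=forwarded
	fmt.Println(optionalLabels(exampleRequest{}, "dc1"))
}
```

Under this sketch, a blocking, stale-allowed read aimed at a remote datacenter reports `allow_stale=true`, `blocking=true`, `target_datacenter=dc2` and `locality=forwarded`, which is the behaviour the label table added to telemetry.mdx in [PATCH 133/785] documents.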
From ef4a250203f3d809254918ebcb6a5a0407537ca1 Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Wed, 13 Apr 2022 10:42:32 -0400 Subject: [PATCH 137/785] add changelog for enterprise bug fix (#12772) --- .changelog/_1737.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/_1737.txt diff --git a/.changelog/_1737.txt b/.changelog/_1737.txt new file mode 100644 index 000000000..ec5b4ed53 --- /dev/null +++ b/.changelog/_1737.txt @@ -0,0 +1,3 @@ +```release-note:bug +namespace: **(Enterprise Only)** Unreserve `consul` namespace to allow K8s namespace mirroring when deploying in `consul` K8s namespace. +``` From 4d57b7d5e036a5a1dd9ab42c50e89dd907b604bc Mon Sep 17 00:00:00 2001 From: John Murret Date: Wed, 13 Apr 2022 09:24:35 -0600 Subject: [PATCH 138/785] Setting DOCKER_DEFAULT_PLATFORM in make dev-docker so arm64 can build an amd64 container with an amd64 binary. (#12769) --- GNUmakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GNUmakefile b/GNUmakefile index 2fd96a9ec..665f1d0a1 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -158,7 +158,7 @@ dev-docker: linux @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" - @docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile + @DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile # In CircleCI, the linux binary will be attached from a previous step at bin/. This make target # should only run in CI and not locally.
From b062f8c2aa3aa98ad7080ce107d3ee90c881824a Mon Sep 17 00:00:00 2001 From: Eric Date: Wed, 13 Apr 2022 11:45:25 -0400 Subject: [PATCH 139/785] Implement routing and intentions for AWS Lambdas --- agent/proxycfg/testing_terminating_gateway.go | 45 +- agent/xds/serverless_plugin_oss_test.go | 13 +- agent/xds/serverlessplugin/lambda_patcher.go | 24 +- ...with-service-resolvers.envoy-1-20-x.golden | 249 ++++++++++ ...with-service-resolvers.envoy-1-20-x.golden | 439 ++++++++++++++++++ ...da-terminating-gateway.envoy-1-20-x.golden | 11 +- ...with-service-resolvers.envoy-1-20-x.golden | 76 +++ agent/xds/xdscommon/xdscommon.go | 8 + agent/xds/xdscommon/xdscommon_oss_test.go | 12 +- 9 files changed, 852 insertions(+), 25 deletions(-) create mode 100644 agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden create mode 100644 agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden diff --git a/agent/proxycfg/testing_terminating_gateway.go b/agent/proxycfg/testing_terminating_gateway.go index 60f27ca84..38c0245a2 100644 --- a/agent/proxycfg/testing_terminating_gateway.go +++ b/agent/proxycfg/testing_terminating_gateway.go @@ -666,20 +666,41 @@ func TestConfigSnapshotTerminatingGatewayIgnoreExtraResolvers(t testing.T) *Conf }) } -func TestConfigSnapshotTerminatingGatewayWithLambdaService(t testing.T) *ConfigSnapshot { +func TestConfigSnapshotTerminatingGatewayWithLambdaService(t testing.T, extraUpdateEvents ...agentcache.UpdateEvent) *ConfigSnapshot { web := structs.NewServiceName("web", nil) - return TestConfigSnapshotTerminatingGateway(t, true, nil, []agentcache.UpdateEvent{ - { - CorrelationID: serviceConfigIDPrefix + web.String(), - Result: &structs.ServiceConfigResponse{ - ProxyConfig: map[string]interface{}{"protocol": "http"}, - Meta: map[string]string{ - "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled": "true", - "serverless.consul.hashicorp.com/v1alpha1/lambda/arn": "lambda-arn", - "serverless.consul.hashicorp.com/v1alpha1/lambda/payload-passthrough": "true", - "serverless.consul.hashicorp.com/v1alpha1/lambda/region": "us-east-1", - }, + updateEvents := append(extraUpdateEvents, agentcache.UpdateEvent{ + CorrelationID: serviceConfigIDPrefix + web.String(), + Result: &structs.ServiceConfigResponse{ + ProxyConfig: map[string]interface{}{"protocol": "http"}, + Meta: map[string]string{ + "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/arn": "lambda-arn", + "serverless.consul.hashicorp.com/v1alpha1/lambda/payload-passthrough": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/region": "us-east-1", }, }, }) + return TestConfigSnapshotTerminatingGateway(t, true, nil, updateEvents) +} + +func TestConfigSnapshotTerminatingGatewayWithLambdaServiceAndServiceResolvers(t testing.T) *ConfigSnapshot { + web := structs.NewServiceName("web", nil) + + return TestConfigSnapshotTerminatingGatewayWithLambdaService(t, + agentcache.UpdateEvent{ + CorrelationID: serviceResolverIDPrefix + web.String(), + Result: &structs.IndexedConfigEntries{ + Kind: structs.ServiceResolver, + Entries: []structs.ConfigEntry{ + &structs.ServiceResolverConfigEntry{ + Kind: structs.ServiceResolver, + Name: web.String(), + Subsets: map[string]structs.ServiceResolverSubset{ + "canary1": {}, + 
"canary2": {}, + }, + }, + }, + }, + }) } diff --git a/agent/xds/serverless_plugin_oss_test.go b/agent/xds/serverless_plugin_oss_test.go index 5fbfdc1c6..0f6fdd382 100644 --- a/agent/xds/serverless_plugin_oss_test.go +++ b/agent/xds/serverless_plugin_oss_test.go @@ -10,6 +10,7 @@ import ( envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "github.com/golang/protobuf/proto" testinf "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" @@ -27,8 +28,14 @@ func TestServerlessPluginFromSnapshot(t *testing.T) { create func(t testinf.T) *proxycfg.ConfigSnapshot }{ { - name: "lambda-terminating-gateway", - create: proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaService, + name: "lambda-terminating-gateway", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaService(t) + }, + }, + { + name: "lambda-terminating-gateway-with-service-resolvers", + create: proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaServiceAndServiceResolvers, }, } @@ -85,7 +92,7 @@ func TestServerlessPluginFromSnapshot(t *testing.T) { key: xdscommon.RouteType, sorter: func(msgs []proto.Message) func(int, int) bool { return func(i, j int) bool { - return msgs[i].(*envoy_listener_v3.Listener).Name < msgs[j].(*envoy_listener_v3.Listener).Name + return msgs[i].(*envoy_route_v3.RouteConfiguration).Name < msgs[j].(*envoy_route_v3.RouteConfiguration).Name } }, }, diff --git a/agent/xds/serverlessplugin/lambda_patcher.go b/agent/xds/serverlessplugin/lambda_patcher.go index c5d54d9cf..415ae822f 100644 --- a/agent/xds/serverlessplugin/lambda_patcher.go +++ b/agent/xds/serverlessplugin/lambda_patcher.go @@ -167,7 +167,7 @@ func (p lambdaPatcher) PatchFilter(filter *envoy_listener_v3.Filter) (*envoy_lis if config == nil { return filter, false, errors.New("error unmarshalling filter") } - httpFilter, err := makeEnvoyHTTPFilter( + lambdaHttpFilter, err := makeEnvoyHTTPFilter( "envoy.filters.http.aws_lambda", &envoy_lambda_v3.Config{ Arn: p.arn, @@ -179,10 +179,26 @@ func (p lambdaPatcher) PatchFilter(filter *envoy_listener_v3.Filter) (*envoy_lis return filter, false, err } - config.HttpFilters = []*envoy_http_v3.HttpFilter{ - httpFilter, - {Name: "envoy.filters.http.router"}, + var ( + changedFilters = make([]*envoy_http_v3.HttpFilter, 0, len(config.HttpFilters)+1) + changed bool + ) + + // We need to be careful about overwriting http filters completely because + // http filters validates intentions with the RBAC filter. This inserts the + // lambda filter before `envoy.filters.http.router` while keeping everything + // else intact. 
+ for _, httpFilter := range config.HttpFilters { + if httpFilter.Name == "envoy.filters.http.router" { + changedFilters = append(changedFilters, lambdaHttpFilter) + changed = true + } + changedFilters = append(changedFilters, httpFilter) } + if changed { + config.HttpFilters = changedFilters + } + config.StripPortMode = &envoy_http_v3.HttpConnectionManager_StripAnyHostPort{ StripAnyHostPort: true, } diff --git a/agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden new file mode 100644 index 000000000..13cde2ac9 --- /dev/null +++ b/agent/xds/testdata/serverless_plugin/clusters/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden @@ -0,0 +1,249 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "api.altdomain", + "portValue": 8081 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "filename": "api.cert.pem" + }, + "privateKey": { + "filename": "api.key.pem" + } + } + ], + "validationContext": { + "trustedCa": { + "filename": "ca.cert.pem" + } + } + } + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "cache.mydomain", + "portValue": 8081 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "lambda.us-east-1.amazonaws.com", + "portValue": 443 + } + } + } + } + ] + } + ] + }, + "dnsLookupFamily": "V4_ONLY", + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "sni": "*.amazonaws.com" + } + }, + "metadata": { + "filterMetadata": { + "com.amazonaws.lambda": { + "egress_gateway": true + } + } + } + }, + { + "@type": 
"type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "lambda.us-east-1.amazonaws.com", + "portValue": 443 + } + } + } + } + ] + } + ] + }, + "dnsLookupFamily": "V4_ONLY", + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "sni": "*.amazonaws.com" + } + }, + "metadata": { + "filterMetadata": { + "com.amazonaws.lambda": { + "egress_gateway": true + } + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "db.mydomain", + "portValue": 8081 + } + } + }, + "healthStatus": "UNHEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "lambda.us-east-1.amazonaws.com", + "portValue": 443 + } + } + } + } + ] + } + ] + }, + "dnsLookupFamily": "V4_ONLY", + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "sni": "*.amazonaws.com" + } + }, + "metadata": { + "filterMetadata": { + "com.amazonaws.lambda": { + "egress_gateway": true + } + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden new file mode 100644 index 000000000..a64481e5e --- /dev/null +++ b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden @@ -0,0 +1,439 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": 
"envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api.default.default.dc1", + "cluster": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.cache.default.default.dc1", + "cluster": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": 
"tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICmjCCAkGgAwIBAgIQe1ZmC0rzRwer6jaH1YIUIjAKBggqhkjOPQQDAjCBuDEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg\nODE5ODAwNjg0MDM0MTM3ODkyNDYxNTA1MDk0NDU3OTU1MTQxNjEwHhcNMjAwNjE5\nMTU1MjAzWhcNMjEwNjE5MTU1MjAzWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH2aWaaa3fpQLBayheHiKlrH\n+z53m0frfGknKjOhOPVYDVHV8x0OE01negswVQbKHAtxPf1M8Zy+WbI9rK7Ua1mj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDf9CPBSUwwZvpeW73oJLTmgQE2\ntW1NKpL5t1uq9WFcqDArBgNVHSMEJDAigCCPPd/NxgZB0tq2M8pdVpPj3Cr79iTv\ni4/T1ysodfMb7zAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIFCjFZAoXq0s2ied2eIBv0i1KoW5\nIhCylnKFt6iHkyDeAiBBCByTcjHRgEQmqyPojQKoO584EFiczTub9aWdnf9tEw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEINsen3S8xzxMrKcRZIvxXzhKDn43Tw9ttqWEFU9TqS5hoAoGCCqGSM49\nAwEHoUQDQgAEfZpZpprd+lAsFrKF4eIqWsf7PnebR+t8aScqM6E49VgNUdXzHQ4T\nTWd6CzBVBsocC3E9/UzxnL5Zsj2srtRrWQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "upstream.web.default.default.dc1", + "rds": { + "configSource": { + "ads": { + + }, + "resourceApiVersion": "V3" + }, + "routeConfigName": "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, + { + "name": "envoy.filters.http.aws_lambda", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config", + "arn": "lambda-arn" + } + }, + { + "name": "envoy.filters.http.router" + } + ], + "tracing": { + 
"randomSampling": { + + } + }, + "stripAnyHostPort": true + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "upstream.web.default.default.dc1", + "rds": { + "configSource": { + "ads": { + + }, + "resourceApiVersion": "V3" + }, + "routeConfigName": "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, + { + "name": "envoy.filters.http.aws_lambda", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config", + "arn": 
"lambda-arn" + } + }, + { + "name": "envoy.filters.http.router" + } + ], + "tracing": { + "randomSampling": { + + } + }, + "stripAnyHostPort": true + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + 
+ }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkOgAwIBAgIRAKF+qDJbaOULNL1TIatrsBowCgYIKoZIzj0EAwIwgbkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB\nIDE4Nzg3MDAwNjUzMDcxOTYzNTk1ODkwNTE1ODY1NjEzMDA2MTU0NDAeFw0yMDA2\nMTkxNTMxMzRaFw0yMTA2MTkxNTMxMzRaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu\nY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdQ8Igci5f7ZvvCVsxXt9\ntLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZbz/82EwPoS7Dqo3LTK4IuelOimoNNxuk\nkaOBxzCBxDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\nAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEILzTLkfJcdWQnTMKUcai/YJq\n0RqH1pjCqtY7SOU4gGOTMCsGA1UdIwQkMCKAIMa2vNcTEC5AGfHIYARJ/4sodX0o\nLzCj3lpw7BcEzPTcMC0GA1UdEQQmMCSCEXNlcnZlci5kYzEuY29uc3Vsgglsb2Nh\nbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgBZ/Z4GSLEc98WvT/qjTVCNTG\n1WNaAaesVbkRx+J0yl8CIQDAVoqY9ByA5vKHjnQrxWlc/JUtJz8wudg7e/OCRriP\nSg==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIN1v14FaNxgY4MgjDOOWthen8dgwB0lNMs9/j2TfrnxzoAoGCCqGSM49\nAwEHoUQDQgAEdQ8Igci5f7ZvvCVsxXt9tLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZ\nbz/82EwPoS7Dqo3LTK4IuelOimoNNxukkQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "upstream.web.default.default.dc1", + "rds": { + "configSource": { + "ads": { + + }, + "resourceApiVersion": "V3" + }, + "routeConfigName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, + { + "name": "envoy.filters.http.aws_lambda", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config", + "arn": "lambda-arn" + } + }, + { + "name": "envoy.filters.http.router" + } + ], + "tracing": { + "randomSampling": { + + } + }, + "stripAnyHostPort": true + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster" + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "terminating_gateway.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector" + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden index d412ef5ce..dea85717f 100644 --- a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden +++ b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.envoy-1-20-x.golden @@ -195,6 +195,15 @@ "routeConfigName": 
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" }, "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, { "name": "envoy.filters.http.aws_lambda", "typedConfig": { @@ -269,4 +278,4 @@ ], "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", "nonce": "00000001" -} +} \ No newline at end of file diff --git a/agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden b/agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden new file mode 100644 index 000000000..d7ed2ec78 --- /dev/null +++ b/agent/xds/testdata/serverless_plugin/routes/lambda-terminating-gateway-with-service-resolvers.envoy-1-20-x.golden @@ -0,0 +1,76 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "virtualHosts": [ + { + "name": "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "validateClusters": true + }, + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "virtualHosts": [ + { + "name": "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "validateClusters": true + }, + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "virtualHosts": [ + { + "name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "validateClusters": true + } + ], + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/xdscommon/xdscommon.go b/agent/xds/xdscommon/xdscommon.go index b1b7da121..0cccba7d8 100644 --- a/agent/xds/xdscommon/xdscommon.go +++ b/agent/xds/xdscommon/xdscommon.go @@ -109,6 +109,14 @@ func MakePluginConfiguration(cfgSnap *proxycfg.ConfigSnapshot) PluginConfigurati envoyID := proxycfg.NewUpstreamIDFromServiceName(svc) envoyIDMappings[envoyID.EnvoyID()] = compoundServiceName + + resolver, hasResolver := cfgSnap.TerminatingGateway.ServiceResolvers[svc] + if hasResolver { + for subsetName := range resolver.Subsets { + sni := connect.ServiceSNI(svc.Name, subsetName, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, trustDomain) + sniMappings[sni] = compoundServiceName + } + } } } diff --git a/agent/xds/xdscommon/xdscommon_oss_test.go b/agent/xds/xdscommon/xdscommon_oss_test.go index c92be3ba5..a1182f201 100644 --- a/agent/xds/xdscommon/xdscommon_oss_test.go +++ 
b/agent/xds/xdscommon/xdscommon_oss_test.go @@ -13,7 +13,7 @@ import ( ) func TestMakePluginConfiguration_TerminatingGateway(t *testing.T) { - snap := proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaService(t) + snap := proxycfg.TestConfigSnapshotTerminatingGatewayWithLambdaServiceAndServiceResolvers(t) webService := api.CompoundServiceName{ Name: "web", @@ -59,10 +59,12 @@ func TestMakePluginConfiguration_TerminatingGateway(t *testing.T) { }, }, SNIToServiceName: map[string]api.CompoundServiceName{ - "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": apiService, - "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": cacheService, - "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": dbService, - "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": webService, + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": apiService, + "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": cacheService, + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": dbService, + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": webService, + "canary1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": webService, + "canary2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": webService, }, EnvoyIDToServiceName: map[string]api.CompoundServiceName{ "web": webService, From 5eea62b47a9e3ae499cc7bd3364556a70831592d Mon Sep 17 00:00:00 2001 From: Paul Glass Date: Wed, 13 Apr 2022 14:31:37 -0500 Subject: [PATCH 140/785] acl: Adjust region handling in AWS IAM auth method (#12774) * acl: Adjust region handling in AWS IAM auth method --- .changelog/12774.txt | 3 + agent/consul/authmethod/awsauth/aws.go | 4 - agent/consul/authmethod/awsauth/aws_test.go | 13 +-- internal/iamauth/config.go | 13 ++- internal/iamauth/token.go | 78 +++++++++++-- internal/iamauth/token_test.go | 119 ++++++++++++++++++++ internal/iamauth/util.go | 23 +--- 7 files changed, 212 insertions(+), 41 deletions(-) create mode 100644 .changelog/12774.txt diff --git a/.changelog/12774.txt b/.changelog/12774.txt new file mode 100644 index 000000000..d5aca735d --- /dev/null +++ b/.changelog/12774.txt @@ -0,0 +1,3 @@ +```release-note:improvement +acl: Improve handling of region-specific endpoints in the AWS IAM auth method. As part of this, the `STSRegion` field was removed from the auth method config. +``` diff --git a/agent/consul/authmethod/awsauth/aws.go b/agent/consul/authmethod/awsauth/aws.go index 32320e3f7..f3995cdc5 100644 --- a/agent/consul/authmethod/awsauth/aws.go +++ b/agent/consul/authmethod/awsauth/aws.go @@ -57,9 +57,6 @@ type Config struct { // STSEndpoint is the AWS STS endpoint where sts:GetCallerIdentity requests will be sent. // Note that the Host header in a signed request cannot be changed. STSEndpoint string `json:",omitempty"` - // STSRegion is the region for the AWS STS service. This should only be set if STSEndpoint - // is set, and must match the region of the STSEndpoint. - STSRegion string `json:",omitempty"` // AllowedSTSHeaderValues is a list of additional allowed headers on the sts:GetCallerIdentity // request in the bearer token. A default list of necessary headers is allowed in any case. 
@@ -75,7 +72,6 @@ func (c *Config) convertForLibrary() *iamauth.Config { MaxRetries: c.MaxRetries, IAMEndpoint: c.IAMEndpoint, STSEndpoint: c.STSEndpoint, - STSRegion: c.STSRegion, AllowedSTSHeaderValues: c.AllowedSTSHeaderValues, ServerIDHeaderName: IAMServerIDHeaderName, diff --git a/agent/consul/authmethod/awsauth/aws_test.go b/agent/consul/authmethod/awsauth/aws_test.go index 8ee507692..3025275cf 100644 --- a/agent/consul/authmethod/awsauth/aws_test.go +++ b/agent/consul/authmethod/awsauth/aws_test.go @@ -24,9 +24,8 @@ func TestNewValidator(t *testing.T) { IAMEntityTags: []string{"tag-1"}, ServerIDHeaderValue: "x-some-header", MaxRetries: 3, - IAMEndpoint: "iam-endpoint", - STSEndpoint: "sts-endpoint", - STSRegion: "sts-region", + IAMEndpoint: "http://iam-endpoint", + STSEndpoint: "http://sts-endpoint", AllowedSTSHeaderValues: []string{"header-value"}, ServerIDHeaderName: "X-Consul-IAM-ServerID", GetEntityMethodHeader: "X-Consul-IAM-GetEntity-Method", @@ -44,9 +43,8 @@ func TestNewValidator(t *testing.T) { "IAMEntityTags": []string{"tag-1"}, "ServerIDHeaderValue": "x-some-header", "MaxRetries": 3, - "IAMEndpoint": "iam-endpoint", - "STSEndpoint": "sts-endpoint", - "STSRegion": "sts-region", + "IAMEndpoint": "http://iam-endpoint", + "STSEndpoint": "http://sts-endpoint", "AllowedSTSHeaderValues": []string{"header-value"}, } @@ -224,7 +222,6 @@ func setup(t *testing.T, config map[string]interface{}, server *iamauthtest.Serv fakeAws := iamauthtest.NewTestServer(t, server) config["STSEndpoint"] = fakeAws.URL + "/sts" - config["STSRegion"] = "fake-region" config["IAMEndpoint"] = fakeAws.URL + "/iam" method := &structs.ACLAuthMethod{ @@ -241,7 +238,7 @@ func setup(t *testing.T, config map[string]interface{}, server *iamauthtest.Serv Creds: credentials.NewStaticCredentials("fake", "fake", ""), IncludeIAMEntity: v.config.EnableIAMEntityDetails, STSEndpoint: v.config.STSEndpoint, - STSRegion: v.config.STSRegion, + STSRegion: "fake-region", Logger: nullLogger, ServerIDHeaderValue: v.config.ServerIDHeaderValue, ServerIDHeaderName: v.config.ServerIDHeaderName, diff --git a/internal/iamauth/config.go b/internal/iamauth/config.go index a8a6b61d5..d3c722c55 100644 --- a/internal/iamauth/config.go +++ b/internal/iamauth/config.go @@ -15,7 +15,6 @@ type Config struct { MaxRetries int IAMEndpoint string STSEndpoint string - STSRegion string AllowedSTSHeaderValues []string // Customizable header names @@ -65,5 +64,17 @@ func (c *Config) Validate() error { "GetEntityHeadersHeader, and GetEntityBodyHeader when EnableIAMEntityDetails=true") } + if c.STSEndpoint != "" { + if _, err := parseUrl(c.STSEndpoint); err != nil { + return fmt.Errorf("STSEndpoint is invalid: %s", err) + } + } + + if c.IAMEndpoint != "" { + if _, err := parseUrl(c.IAMEndpoint); err != nil { + return fmt.Errorf("IAMEndpoint is invalid: %s", err) + } + } + return nil } diff --git a/internal/iamauth/token.go b/internal/iamauth/token.go index 91994b510..10422ca6c 100644 --- a/internal/iamauth/token.go +++ b/internal/iamauth/token.go @@ -13,9 +13,7 @@ import ( ) const ( - amzHeaderPrefix = "X-Amz-" - defaultIAMEndpoint = "https://iam.amazonaws.com" - defaultSTSEndpoint = "https://sts.amazonaws.com" + amzHeaderPrefix = "X-Amz-" ) var defaultAllowedSTSRequestHeaders = []string{ @@ -98,6 +96,10 @@ func NewBearerToken(loginToken string, config *Config) (*BearerToken, error) { token.getIAMEntityHeader = header token.parsedIAMEntityURL = parsedUrl + if err := token.validateIAMHostname(); err != nil { + return nil, err + } + reqType, err := 
token.validateIAMEntityBody() if err != nil { return nil, err @@ -112,6 +114,9 @@ func (t *BearerToken) validate() error { if t.getCallerIdentityMethod != "POST" { return fmt.Errorf("iam_http_request_method must be POST") } + if err := t.validateSTSHostname(); err != nil { + return err + } if err := t.validateGetCallerIdentityBody(); err != nil { return err } @@ -121,6 +126,62 @@ func (t *BearerToken) validate() error { return nil } +// validateSTSHostname checks the CallerIdentityURL in the BearerToken +// either matches the admin configured STSEndpoint or, if STSEndpoint is not set, +// that the URL matches a known Amazon AWS hostname for the STS service, one of: +// +// sts.amazonaws.com +// sts.*.amazonaws.com +// sts-fips.amazonaws.com +// sts-fips.*.amazonaws.com +// +// See https://docs.aws.amazon.com/general/latest/gr/sts.html +func (t *BearerToken) validateSTSHostname() error { + if t.config.STSEndpoint != "" { + // If an STS endpoint is configured, we (elsewhere) send the request to that endpoint. + return nil + } + if t.parsedCallerIdentityURL == nil { + return fmt.Errorf("invalid GetCallerIdentity URL: %v", t.getCallerIdentityURL) + } + + // Otherwise, validate the hostname looks like a known STS endpoint. + host := t.parsedCallerIdentityURL.Hostname() + if strings.HasSuffix(host, ".amazonaws.com") && + (strings.HasPrefix(host, "sts.") || strings.HasPrefix(host, "sts-fips.")) { + return nil + } + return fmt.Errorf("invalid STS hostname: %q", host) +} + +// validateIAMHostname checks the IAMEntityURL in the BearerToken +// either matches the admin configured IAMEndpoint or, if IAMEndpoint is not set, +// that the URL matches a known Amazon AWS hostname for the IAM service, one of: +// +// iam.amazonaws.com +// iam.*.amazonaws.com +// iam-fips.amazonaws.com +// iam-fips.*.amazonaws.com +// +// See https://docs.aws.amazon.com/general/latest/gr/iam-service.html +func (t *BearerToken) validateIAMHostname() error { + if t.config.IAMEndpoint != "" { + // If an IAM endpoint is configured, we (elsewhere) send the request to that endpoint. + return nil + } + if t.parsedIAMEntityURL == nil { + return fmt.Errorf("invalid IAM URL: %v", t.getIAMEntityURL) + } + + // Otherwise, validate the hostname looks like a known IAM endpoint. + host := t.parsedIAMEntityURL.Hostname() + if strings.HasSuffix(host, ".amazonaws.com") && + (strings.HasPrefix(host, "iam.") || strings.HasPrefix(host, "iam-fips.")) { + return nil + } + return fmt.Errorf("invalid IAM hostname: %q", host) +} + // https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1439 func (t *BearerToken) validateGetCallerIdentityBody() error { allowedValues := url.Values{ @@ -265,7 +326,7 @@ func parseUrl(s string) (*url.URL, error) { return nil, err } // url.Parse doesn't error on empty string - if u == nil || u.Scheme == "" || u.Host == "" || u.Path == "" { + if u == nil || u.Scheme == "" || u.Host == "" { return nil, fmt.Errorf("url is invalid: %q", s) } return u, nil @@ -275,10 +336,9 @@ func parseUrl(s string) (*url.URL, error) { // from the bearer token. 
func (t *BearerToken) GetCallerIdentityRequest() (*http.Request, error) { // NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy - // The protection against this is that this method will only call the endpoint specified in the - // client config (defaulting to sts.amazonaws.com), so it would require an admin to override - // the endpoint to talk to alternate web addresses - endpoint := defaultSTSEndpoint + // We validate up-front that t.getCallerIdentityURL is a known AWS STS hostname. + // Otherwise, we send to the admin-configured STSEndpoint. + endpoint := t.getCallerIdentityURL if t.config.STSEndpoint != "" { endpoint = t.config.STSEndpoint } @@ -295,7 +355,7 @@ func (t *BearerToken) GetCallerIdentityRequest() (*http.Request, error) { // GetEntityRequest returns the iam:GetUser or iam:GetRole request from the request details, // if present, embedded in the headers of the sts:GetCallerIdentity request. func (t *BearerToken) GetEntityRequest() (*http.Request, error) { - endpoint := defaultIAMEndpoint + endpoint := t.getIAMEntityURL if t.config.IAMEndpoint != "" { endpoint = t.config.IAMEndpoint } diff --git a/internal/iamauth/token_test.go b/internal/iamauth/token_test.go index 4de7ba715..42f81151d 100644 --- a/internal/iamauth/token_test.go +++ b/internal/iamauth/token_test.go @@ -27,6 +27,7 @@ func TestNewBearerToken(t *testing.T) { GetEntityURLHeader: "X-Consul-IAM-GetEntity-URL", GetEntityHeadersHeader: "X-Consul-IAM-GetEntity-Headers", GetEntityBodyHeader: "X-Consul-IAM-GetEntity-Body", + STSEndpoint: validBearerTokenParsed.getCallerIdentityURL, }, expToken: validBearerTokenWithRoleParsed, }, @@ -268,6 +269,124 @@ func TestValidateIAMEntityBody(t *testing.T) { } } +func TestValidateSTSHostname(t *testing.T) { + cases := []struct { + url string + ok bool + }{ + // https://docs.aws.amazon.com/general/latest/gr/sts.html + {"sts.us-east-2.amazonaws.com", true}, + {"sts-fips.us-east-2.amazonaws.com", true}, + {"sts.us-east-1.amazonaws.com", true}, + {"sts-fips.us-east-1.amazonaws.com", true}, + {"sts.us-west-1.amazonaws.com", true}, + {"sts-fips.us-west-1.amazonaws.com", true}, + {"sts.us-west-2.amazonaws.com", true}, + {"sts-fips.us-west-2.amazonaws.com", true}, + {"sts.af-south-1.amazonaws.com", true}, + {"sts.ap-east-1.amazonaws.com", true}, + {"sts.ap-southeast-3.amazonaws.com", true}, + {"sts.ap-south-1.amazonaws.com", true}, + {"sts.ap-northeast-3.amazonaws.com", true}, + {"sts.ap-northeast-2.amazonaws.com", true}, + {"sts.ap-southeast-1.amazonaws.com", true}, + {"sts.ap-southeast-2.amazonaws.com", true}, + {"sts.ap-northeast-1.amazonaws.com", true}, + {"sts.ca-central-1.amazonaws.com", true}, + {"sts.eu-central-1.amazonaws.com", true}, + {"sts.eu-west-1.amazonaws.com", true}, + {"sts.eu-west-2.amazonaws.com", true}, + {"sts.eu-south-1.amazonaws.com", true}, + {"sts.eu-west-3.amazonaws.com", true}, + {"sts.eu-north-1.amazonaws.com", true}, + {"sts.me-south-1.amazonaws.com", true}, + {"sts.sa-east-1.amazonaws.com", true}, + {"sts.us-gov-east-1.amazonaws.com", true}, + {"sts.us-gov-west-1.amazonaws.com", true}, + + // prefix must be either 'sts.' or 'sts-fips.' 
+ {".amazonaws.com", false}, + {"iam.amazonaws.com", false}, + {"other.amazonaws.com", false}, + // suffix must be '.amazonaws.com' and not some other domain + {"stsamazonaws.com", false}, + {"sts-fipsamazonaws.com", false}, + {"sts.stsamazonaws.com", false}, + {"sts.notamazonaws.com", false}, + {"sts-fips.stsamazonaws.com", false}, + {"sts-fips.notamazonaws.com", false}, + {"sts.amazonaws.com.spoof", false}, + {"sts.amazonaws.spoof.com", false}, + {"xyz.sts.amazonaws.com", false}, + } + for _, c := range cases { + t.Run(c.url, func(t *testing.T) { + url := "https://" + c.url + parsedUrl, err := parseUrl(url) + require.NoError(t, err) + + token := &BearerToken{ + config: &Config{}, + getCallerIdentityURL: url, + parsedCallerIdentityURL: parsedUrl, + } + err = token.validateSTSHostname() + if c.ok { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestValidateIAMHostname(t *testing.T) { + cases := []struct { + url string + ok bool + }{ + // https://docs.aws.amazon.com/general/latest/gr/iam-service.html + {"iam.amazonaws.com", true}, + {"iam-fips.amazonaws.com", true}, + {"iam.us-gov.amazonaws.com", true}, + {"iam-fips.us-gov.amazonaws.com", true}, + + // prefix must be either 'iam.' or 'aim-fips.' + {".amazonaws.com", false}, + {"sts.amazonaws.com", false}, + {"other.amazonaws.com", false}, + // suffix must be '.amazonaws.com' and not some other domain + {"iamamazonaws.com", false}, + {"iam-fipsamazonaws.com", false}, + {"iam.iamamazonaws.com", false}, + {"iam.notamazonaws.com", false}, + {"iam-fips.iamamazonaws.com", false}, + {"iam-fips.notamazonaws.com", false}, + {"iam.amazonaws.com.spoof", false}, + {"iam.amazonaws.spoof.com", false}, + {"xyz.iam.amazonaws.com", false}, + } + for _, c := range cases { + t.Run(c.url, func(t *testing.T) { + url := "https://" + c.url + parsedUrl, err := parseUrl(url) + require.NoError(t, err) + + token := &BearerToken{ + config: &Config{}, + getCallerIdentityURL: url, + parsedIAMEntityURL: parsedUrl, + } + err = token.validateIAMHostname() + if c.ok { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + var ( validBearerTokenJson = `{ "iam_http_request_method":"POST", diff --git a/internal/iamauth/util.go b/internal/iamauth/util.go index bfd5f22d7..b92270cfd 100644 --- a/internal/iamauth/util.go +++ b/internal/iamauth/util.go @@ -39,12 +39,10 @@ type LoginInput struct { func GenerateLoginData(in *LoginInput) (map[string]interface{}, error) { cfg := aws.Config{ Credentials: in.Creds, - Region: aws.String(in.STSRegion), - } - if in.STSEndpoint != "" { - cfg.Endpoint = aws.String(in.STSEndpoint) - } else { - cfg.EndpointResolver = endpoints.ResolverFunc(stsSigningResolver) + // These are empty strings by default (i.e. not enabled) + Region: aws.String(in.STSRegion), + Endpoint: aws.String(in.STSEndpoint), + STSRegionalEndpoint: endpoints.RegionalSTSEndpoint, } stsSession, err := session.NewSessionWithOptions(session.Options{Config: cfg}) @@ -102,19 +100,6 @@ func GenerateLoginData(in *LoginInput) (map[string]interface{}, error) { }, nil } -// STS is a really weird service that used to only have global endpoints but now has regional endpoints as well. -// For backwards compatibility, even if you request a region other than us-east-1, it'll still sign for us-east-1. 
-// See, e.g., https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code -// So we have to shim in this EndpointResolver to force it to sign for the right region -func stsSigningResolver(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { - defaultEndpoint, err := endpoints.DefaultResolver().EndpointFor(service, region, optFns...) - if err != nil { - return defaultEndpoint, err - } - defaultEndpoint.SigningRegion = region - return defaultEndpoint, nil -} - func formatSignedEntityRequest(svc *sts.STS, in *LoginInput) (*request.Request, error) { // We need to retrieve the IAM user or role for the iam:GetRole or iam:GetUser request. // GetCallerIdentity returns this and requires no permissions. From 199f1c72003716ada9424bb9000993ae964ff038 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Tue, 12 Apr 2022 10:44:22 -0700 Subject: [PATCH 141/785] Fix namespace default field names in expanded token output --- agent/consul/acl_endpoint.go | 6 ++++++ api/acl.go | 4 ++-- command/acl/token/formatter.go | 6 +++--- command/acl/token/formatter_test.go | 10 +++++----- .../testdata/FormatTokenExpanded/oss/basic.json.golden | 4 ++-- .../FormatTokenExpanded/oss/complex.json.golden | 4 ++-- 6 files changed, 20 insertions(+), 14 deletions(-) diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index 77ca6edf3..658f72a42 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -381,6 +381,9 @@ func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, tok if err != nil { return tokenInfo, err } + if role == nil { + continue + } for _, policy := range role.Policies { policyIDs[policy.ID] = struct{}{} @@ -404,6 +407,9 @@ func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, tok if err != nil { return tokenInfo, err } + if policy == nil { + continue + } policies = append(policies, policy) } for _, policy := range identityPolicies { diff --git a/api/acl.go b/api/acl.go index 9989a50b2..bd6d82563 100644 --- a/api/acl.go +++ b/api/acl.go @@ -66,8 +66,8 @@ type ACLTokenExpanded struct { ExpandedPolicies []ACLPolicy ExpandedRoles []ACLRole - NamespaceDefaultPolicies []string - NamespaceDefaultRoles []string + NamespaceDefaultPolicyIDs []string + NamespaceDefaultRoleIDs []string AgentACLDefaultPolicy string AgentACLDownPolicy string diff --git a/command/acl/token/formatter.go b/command/acl/token/formatter.go index cc5671002..7844c9cc4 100644 --- a/command/acl/token/formatter.go +++ b/command/acl/token/formatter.go @@ -239,17 +239,17 @@ func (f *prettyFormatter) FormatTokenExpanded(token *api.ACLTokenExpanded) (stri buffer.WriteString("=== End of Authorizer Layer 0: Token ===\n") - if len(token.NamespaceDefaultPolicies) > 0 || len(token.NamespaceDefaultRoles) > 0 { + if len(token.NamespaceDefaultPolicyIDs) > 0 || len(token.NamespaceDefaultRoleIDs) > 0 { buffer.WriteString("=== Start of Authorizer Layer 1: Token Namespace’s Defaults (Inherited) ===\n") buffer.WriteString(fmt.Sprintf("Description: ACL Roles inherited by all Tokens in Namespace %q\n\n", token.Namespace)) buffer.WriteString("Namespace Policy Defaults:\n") - for _, policyID := range token.NamespaceDefaultPolicies { + for _, policyID := range token.NamespaceDefaultPolicyIDs { formatPolicy(policies[policyID], WHITESPACE_2) } buffer.WriteString("Namespace Role Defaults:\n") - for _, roleID := range token.NamespaceDefaultRoles { + for _, roleID := range 
token.NamespaceDefaultRoleIDs { formatRole(roles[roleID], WHITESPACE_2) } diff --git a/command/acl/token/formatter_test.go b/command/acl/token/formatter_test.go index ba93e9dc0..aafe1fcfb 100644 --- a/command/acl/token/formatter_test.go +++ b/command/acl/token/formatter_test.go @@ -408,11 +408,11 @@ var expandedTokenTestCases = map[string]testCase{ }, }, }, - NamespaceDefaultPolicies: []string{"2b582ff1-4a43-457f-8a2b-30a8265e29a5"}, - NamespaceDefaultRoles: []string{"56033f2b-e1a6-4905-b71d-e011c862bc65"}, - AgentACLDefaultPolicy: "deny", - AgentACLDownPolicy: "extend-cache", - ResolvedByAgent: "server-1", + NamespaceDefaultPolicyIDs: []string{"2b582ff1-4a43-457f-8a2b-30a8265e29a5"}, + NamespaceDefaultRoleIDs: []string{"56033f2b-e1a6-4905-b71d-e011c862bc65"}, + AgentACLDefaultPolicy: "deny", + AgentACLDownPolicy: "extend-cache", + ResolvedByAgent: "server-1", ACLToken: api.ACLToken{ AccessorID: "fbd2447f-7479-4329-ad13-b021d74f86ba", SecretID: "869c6e91-4de9-4dab-b56e-87548435f9c6", diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden index cba80e455..d03e47d64 100644 --- a/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/basic.json.golden @@ -22,8 +22,8 @@ } ], "ExpandedRoles": null, - "NamespaceDefaultPolicies": null, - "NamespaceDefaultRoles": null, + "NamespaceDefaultPolicyIDs": null, + "NamespaceDefaultRoleIDs": null, "AgentACLDefaultPolicy": "allow", "AgentACLDownPolicy": "deny", "ResolvedByAgent": "leader", diff --git a/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden index 36931e219..b0ed45c0d 100644 --- a/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden +++ b/command/acl/token/testdata/FormatTokenExpanded/oss/complex.json.golden @@ -133,10 +133,10 @@ "ModifyIndex": 0 } ], - "NamespaceDefaultPolicies": [ + "NamespaceDefaultPolicyIDs": [ "2b582ff1-4a43-457f-8a2b-30a8265e29a5" ], - "NamespaceDefaultRoles": [ + "NamespaceDefaultRoleIDs": [ "56033f2b-e1a6-4905-b71d-e011c862bc65" ], "AgentACLDefaultPolicy": "deny", From edfbbf1063d7aabefd19b4dbb39330ba4698e7c0 Mon Sep 17 00:00:00 2001 From: FFMMM Date: Wed, 13 Apr 2022 17:38:31 -0700 Subject: [PATCH 142/785] Update latest version on website to 1.11.5 Per the latest release --- website/data/version.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/data/version.js b/website/data/version.js index 27580004a..a5beaddf6 100644 --- a/website/data/version.js +++ b/website/data/version.js @@ -1 +1 @@ -export default '1.11.4' +export default '1.11.5' From 769d1d6e8e8c2e9ee34d310e9ea2f4745ebb3eb3 Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Thu, 14 Apr 2022 14:26:14 +0100 Subject: [PATCH 143/785] ConnectCA.Sign gRPC Endpoint (#12787) Introduces a gRPC endpoint for signing Connect leaf certificates. It's also the first of the public gRPC endpoints to perform leader-forwarding, so establishes the pattern of forwarding over the multiplexed internal RPC port. 
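
For reviewers, a minimal client-side sketch of how the new endpoint is meant to be consumed, mirroring the integration test added in this patch. The address, the `csrPEM` variable, and the `signLeaf` helper are illustrative only (an ACL token, if required, would be attached to the outgoing context and is not shown); error codes follow the mapping in sign.go.

    // Sketch only: dial a server's public gRPC port and request a leaf
    // certificate for a previously generated, PEM-encoded CSR whose SAN
    // carries a Consul service SPIFFE ID.
    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"

        "github.com/hashicorp/consul/agent/connect"
        "github.com/hashicorp/consul/proto-public/pbconnectca"
    )

    func signLeaf(csrPEM string) {
        // 127.0.0.1:8502 is a placeholder for the agent's gRPC address.
        conn, err := grpc.Dial("127.0.0.1:8502", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := pbconnectca.NewConnectCAServiceClient(conn)

        // If this server is a follower, the request is forwarded to the
        // leader internally over the multiplexed RPC port.
        rsp, err := client.Sign(context.Background(), &pbconnectca.SignRequest{Csr: csrPEM})
        if err != nil {
            // e.g. InvalidArgument for a malformed CSR, PermissionDenied for
            // an insufficient token, ResourceExhausted when rate limited.
            log.Fatal(err)
        }

        // The response carries the signed leaf certificate in PEM form.
        if _, err := connect.ParseCert(rsp.CertPem); err != nil {
            log.Fatal(err)
        }
        log.Println("received signed leaf certificate")
    }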
--- .changelog/12787.txt | 3 + agent/connect/csr.go | 22 ++ agent/consul/connect_ca_endpoint.go | 2 +- agent/consul/grpc_integration_test.go | 84 ++++++ agent/consul/leader_connect_ca.go | 10 +- agent/consul/server.go | 44 ++- .../services/connectca/mock_ACLResolver.go | 13 +- .../services/connectca/mock_CAManager.go | 40 +++ .../grpc/public/services/connectca/server.go | 29 +- .../public/services/connectca/server_test.go | 5 + agent/grpc/public/services/connectca/sign.go | 95 +++++++ .../public/services/connectca/sign_test.go | 252 ++++++++++++++++++ .../public/services/connectca/watch_roots.go | 9 +- .../services/connectca/watch_roots_test.go | 50 ++-- proto-public/pbconnectca/ca.pb.binary.go | 20 ++ proto-public/pbconnectca/ca.pb.go | 219 +++++++++++++-- proto-public/pbconnectca/ca.proto | 18 ++ 17 files changed, 845 insertions(+), 70 deletions(-) create mode 100644 .changelog/12787.txt create mode 100644 agent/consul/grpc_integration_test.go create mode 100644 agent/grpc/public/services/connectca/mock_CAManager.go create mode 100644 agent/grpc/public/services/connectca/sign.go create mode 100644 agent/grpc/public/services/connectca/sign_test.go diff --git a/.changelog/12787.txt b/.changelog/12787.txt new file mode 100644 index 000000000..0e6d7fc6c --- /dev/null +++ b/.changelog/12787.txt @@ -0,0 +1,3 @@ +```release-note:feature +ca: Leaf certificates can now be obtained via the gRPC API: `Sign` +``` diff --git a/agent/connect/csr.go b/agent/connect/csr.go index cc01f991e..f699a5879 100644 --- a/agent/connect/csr.go +++ b/agent/connect/csr.go @@ -9,6 +9,7 @@ import ( "crypto/x509/pkix" "encoding/asn1" "encoding/pem" + "fmt" "net" "net/url" ) @@ -100,3 +101,24 @@ func CreateCAExtension() (pkix.Extension, error) { Value: bitstr, }, nil } + +// InvalidCSRError returns an error with the given fmt.Sprintf-formatted message +// indicating certificate signing failed because the user supplied an invalid CSR. +// +// See: IsInvalidCSRError. +func InvalidCSRError(format string, args ...interface{}) error { + return invalidCSRError{fmt.Sprintf(format, args...)} +} + +// IsInvalidCSRError returns whether the given error indicates that certificate +// signing failed because the user supplied an invalid CSR. +func IsInvalidCSRError(err error) bool { + _, ok := err.(invalidCSRError) + return ok +} + +type invalidCSRError struct { + s string +} + +func (e invalidCSRError) Error() string { return e.s } diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index c325ff123..29cfc38be 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -24,7 +24,7 @@ var ( // consul.ErrRateLimited.Error()` which is very sad. Short of replacing our // RPC mechanism it's hard to know how to make that much better though. ErrConnectNotEnabled = errors.New("Connect must be enabled in order to use this endpoint") - ErrRateLimited = errors.New("Rate limit reached, try again later") + ErrRateLimited = errors.New("Rate limit reached, try again later") // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError). 
ErrNotPrimaryDatacenter = errors.New("not the primary datacenter") ErrStateReadOnly = errors.New("CA Provider State is read-only") ) diff --git a/agent/consul/grpc_integration_test.go b/agent/consul/grpc_integration_test.go new file mode 100644 index 000000000..c243ebfee --- /dev/null +++ b/agent/consul/grpc_integration_test.go @@ -0,0 +1,84 @@ +package consul + +import ( + "context" + "net" + "os" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/proto-public/pbconnectca" + "github.com/hashicorp/consul/testrpc" +) + +func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + // The gRPC endpoint itself well-tested with mocks. This test checks we're + // correctly wiring everything up in the server by: + // + // * Starting a cluster with multiple servers. + // * Making a request to a follower's public gRPC port. + // * Ensuring that the request is correctly forwarded to the leader. + // * Ensuring we get a valid certificate back (so it went through the CAManager). + dir1, server1 := testServerWithConfig(t, func(c *Config) { + c.Bootstrap = false + c.BootstrapExpect = 2 + }) + defer os.RemoveAll(dir1) + defer server1.Shutdown() + + dir2, server2 := testServerWithConfig(t, func(c *Config) { + c.Bootstrap = false + }) + defer os.RemoveAll(dir2) + defer server2.Shutdown() + + joinLAN(t, server2, server1) + + testrpc.WaitForLeader(t, server1.RPC, "dc1") + + var follower *Server + if server1.IsLeader() { + follower = server2 + } else { + follower = server1 + } + + // publicGRPCServer is bound to a listener by the wrapping agent code, so we + // need to do it ourselves here. + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + go func() { + require.NoError(t, follower.publicGRPCServer.Serve(lis)) + }() + t.Cleanup(follower.publicGRPCServer.Stop) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + require.NoError(t, err) + + client := pbconnectca.NewConnectCAServiceClient(conn) + + csr, _ := connect.TestCSR(t, &connect.SpiffeIDService{ + Host: connect.TestClusterID + ".consul", + Namespace: "default", + Datacenter: "dc1", + Service: "foo", + }) + + // This would fail if it wasn't forwarded to the leader. + rsp, err := client.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.NoError(t, err) + + _, err = connect.ParseCert(rsp.CertPem) + require.NoError(t, err) +} diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index 88d3c5d42..91da428ac 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -1382,7 +1382,7 @@ func (l *connectSignRateLimiter) getCSRRateLimiterWithLimit(limit rate.Limit) *r func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, authz acl.Authorizer) (*structs.IssuedCert, error) { // Parse the SPIFFE ID from the CSR SAN. if len(csr.URIs) == 0 { - return nil, errors.New("CSR SAN does not contain a SPIFFE ID") + return nil, connect.InvalidCSRError("CSR SAN does not contain a SPIFFE ID") } spiffeID, err := connect.ParseCertURI(csr.URIs[0]) if err != nil { @@ -1403,7 +1403,7 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au // requirement later but being restrictive for now is safer. 
dc := c.serverConf.Datacenter if v.Datacenter != dc { - return nil, fmt.Errorf("SPIFFE ID in CSR from a different datacenter: %s, "+ + return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+ "we are %s", v.Datacenter, dc) } case *connect.SpiffeIDAgent: @@ -1412,7 +1412,7 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au return nil, err } default: - return nil, errors.New("SPIFFE ID in CSR must be a service or agent ID") + return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID") } return c.SignCertificate(csr, spiffeID) @@ -1436,13 +1436,13 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne serviceID, isService := spiffeID.(*connect.SpiffeIDService) agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent) if !isService && !isAgent { - return nil, fmt.Errorf("SPIFFE ID in CSR must be a service or agent ID") + return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID") } var entMeta acl.EnterpriseMeta if isService { if !signingID.CanSign(spiffeID) { - return nil, fmt.Errorf("SPIFFE ID in CSR from a different trust domain: %s, "+ + return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+ "we are %s", serviceID.Host, signingID.Host()) } entMeta.Merge(serviceID.GetEnterpriseMeta()) diff --git a/agent/consul/server.go b/agent/consul/server.go index e278c2011..d9b4aed6b 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -238,6 +238,11 @@ type Server struct { // is only ever closed. leaveCh chan struct{} + // publicConnectCAServer serves the Connect CA service exposed on the public + // gRPC port. It is also exposed on the private multiplexed "server" port to + // enable RPC forwarding. + publicConnectCAServer *connectca.Server + // publicGRPCServer is the gRPC server exposed on the dedicated gRPC port, as // opposed to the multiplexed "server" port which is served by grpcHandler. publicGRPCServer *grpc.Server @@ -657,6 +662,29 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve s.overviewManager = NewOverviewManager(s.logger, s.fsm, s.config.MetricsReportingInterval) go s.overviewManager.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) + // Initialize public gRPC server - register services on public gRPC server. + s.publicConnectCAServer = connectca.NewServer(connectca.Config{ + Publisher: s.publisher, + GetStore: func() connectca.StateStore { return s.FSM().State() }, + Logger: logger.Named("grpc-api.connect-ca"), + ACLResolver: plainACLResolver{s.ACLResolver}, + CAManager: s.caManager, + ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + return s.ForwardGRPC(s.grpcConnPool, info, fn) + }, + ConnectEnabled: s.config.ConnectEnabled, + }) + s.publicConnectCAServer.Register(s.publicGRPCServer) + + dataplane.NewServer(dataplane.Config{ + Logger: logger.Named("grpc-api.dataplane"), + ACLResolver: plainACLResolver{s.ACLResolver}, + }).Register(s.publicGRPCServer) + + // Initialize private gRPC server. + // + // Note: some "public" gRPC services are also exposed on the private gRPC server + // to enable RPC forwarding. s.grpcHandler = newGRPCHandlerFromConfig(flat, config, s) s.grpcLeaderForwarder = flat.LeaderForwarder go s.trackLeaderChanges() @@ -669,18 +697,6 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve // since it can fire events when leadership is obtained. 
go s.monitorLeadership() - // Initialize public gRPC server - register services on public gRPC server. - connectca.NewServer(connectca.Config{ - Publisher: s.publisher, - GetStore: func() connectca.StateStore { return s.FSM().State() }, - Logger: logger.Named("grpc-api.connect-ca"), - ACLResolver: plainACLResolver{s.ACLResolver}, - }).Register(s.publicGRPCServer) - dataplane.NewServer(dataplane.Config{ - Logger: logger.Named("grpc-api.dataplane"), - ACLResolver: plainACLResolver{s.ACLResolver}, - }).Register(s.publicGRPCServer) - // Start listening for RPC requests. go func() { if err := s.grpcHandler.Run(); err != nil { @@ -712,6 +728,10 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler deps.Logger.Named("grpc-api.subscription"))) } s.registerEnterpriseGRPCServices(deps, srv) + + // Note: this public gRPC service is also exposed on the private server to + // enable RPC forwarding. + s.publicConnectCAServer.Register(srv) } return agentgrpc.NewHandler(deps.Logger, config.RPCAddr, register) diff --git a/agent/grpc/public/services/connectca/mock_ACLResolver.go b/agent/grpc/public/services/connectca/mock_ACLResolver.go index 6b6a6a771..ce21ffdeb 100644 --- a/agent/grpc/public/services/connectca/mock_ACLResolver.go +++ b/agent/grpc/public/services/connectca/mock_ACLResolver.go @@ -3,9 +3,8 @@ package connectca import ( - mock "github.com/stretchr/testify/mock" - acl "github.com/hashicorp/consul/acl" + mock "github.com/stretchr/testify/mock" ) // MockACLResolver is an autogenerated mock type for the ACLResolver type @@ -13,13 +12,13 @@ type MockACLResolver struct { mock.Mock } -// ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2 -func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { - ret := _m.Called(_a0, _a1, _a2) +// ResolveTokenAndDefaultMeta provides a mock function with given fields: token, entMeta, authzContext +func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error) { + ret := _m.Called(token, entMeta, authzContext) var r0 acl.Authorizer if rf, ok := ret.Get(0).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { - r0 = rf(_a0, _a1, _a2) + r0 = rf(token, entMeta, authzContext) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(acl.Authorizer) @@ -28,7 +27,7 @@ func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.Enter var r1 error if rf, ok := ret.Get(1).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) error); ok { - r1 = rf(_a0, _a1, _a2) + r1 = rf(token, entMeta, authzContext) } else { r1 = ret.Error(1) } diff --git a/agent/grpc/public/services/connectca/mock_CAManager.go b/agent/grpc/public/services/connectca/mock_CAManager.go new file mode 100644 index 000000000..1034c4b97 --- /dev/null +++ b/agent/grpc/public/services/connectca/mock_CAManager.go @@ -0,0 +1,40 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package connectca + +import ( + acl "github.com/hashicorp/consul/acl" + mock "github.com/stretchr/testify/mock" + + structs "github.com/hashicorp/consul/agent/structs" + + x509 "crypto/x509" +) + +// MockCAManager is an autogenerated mock type for the CAManager type +type MockCAManager struct { + mock.Mock +} + +// AuthorizeAndSignCertificate provides a mock function with given fields: csr, authz +func (_m *MockCAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, authz acl.Authorizer) (*structs.IssuedCert, error) { + ret := _m.Called(csr, authz) + + var r0 *structs.IssuedCert + if rf, ok := ret.Get(0).(func(*x509.CertificateRequest, acl.Authorizer) *structs.IssuedCert); ok { + r0 = rf(csr, authz) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*structs.IssuedCert) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*x509.CertificateRequest, acl.Authorizer) error); ok { + r1 = rf(csr, authz) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/agent/grpc/public/services/connectca/server.go b/agent/grpc/public/services/connectca/server.go index 86edfdb54..1407e42d6 100644 --- a/agent/grpc/public/services/connectca/server.go +++ b/agent/grpc/public/services/connectca/server.go @@ -1,7 +1,11 @@ package connectca import ( + "crypto/x509" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" @@ -17,10 +21,13 @@ type Server struct { } type Config struct { - Publisher EventPublisher - GetStore func() StateStore - Logger hclog.Logger - ACLResolver ACLResolver + Publisher EventPublisher + GetStore func() StateStore + Logger hclog.Logger + ACLResolver ACLResolver + CAManager CAManager + ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) + ConnectEnabled bool } type EventPublisher interface { @@ -34,7 +41,12 @@ type StateStore interface { //go:generate mockery -name ACLResolver -inpkg type ACLResolver interface { - ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) + ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error) +} + +//go:generate mockery -name CAManager -inpkg +type CAManager interface { + AuthorizeAndSignCertificate(csr *x509.CertificateRequest, authz acl.Authorizer) (*structs.IssuedCert, error) } func NewServer(cfg Config) *Server { @@ -44,3 +56,10 @@ func NewServer(cfg Config) *Server { func (s *Server) Register(grpcServer *grpc.Server) { pbconnectca.RegisterConnectCAServiceServer(grpcServer, s) } + +func (s *Server) requireConnect() error { + if s.ConnectEnabled { + return nil + } + return status.Error(codes.FailedPrecondition, "Connect must be enabled in order to use this endpoint") +} diff --git a/agent/grpc/public/services/connectca/server_test.go b/agent/grpc/public/services/connectca/server_test.go index e74b7c094..def654bf8 100644 --- a/agent/grpc/public/services/connectca/server_test.go +++ b/agent/grpc/public/services/connectca/server_test.go @@ -12,9 +12,14 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbconnectca" ) +func noopForwardRPC(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) { + return false, nil +} + func testStateStore(t *testing.T, publisher state.EventPublisher) *state.Store { t.Helper() diff --git 
a/agent/grpc/public/services/connectca/sign.go b/agent/grpc/public/services/connectca/sign.go new file mode 100644 index 000000000..d6a21d616 --- /dev/null +++ b/agent/grpc/public/services/connectca/sign.go @@ -0,0 +1,95 @@ +package connectca + +import ( + "context" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbconnectca" +) + +// Sign a leaf certificate for the service or agent identified by the SPIFFE +// ID in the given CSR's SAN. +func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbconnectca.SignResponse, error) { + if err := s.requireConnect(); err != nil { + return nil, err + } + + logger := s.Logger.Named("sign").With("request_id", traceID()) + logger.Trace("request received") + + token := public.TokenFromContext(ctx) + + if req.Csr == "" { + return nil, status.Error(codes.InvalidArgument, "CSR is required") + } + + // For private/internal gRPC handlers, protoc-gen-rpc-glue generates the + // requisite methods to satisfy the structs.RPCInfo interface using fields + // from the pbcommon package. This service is public, so we can't use those + // fields in our proto definition. Instead, we construct our RPCInfo manually. + // + // Embedding WriteRequest ensures RPCs are forwarded to the leader, embedding + // DCSpecificRequest adds the RequestDatacenter method (but as we're not + // setting Datacenter it has the effect of *not* doing DC forwarding). + var rpcInfo struct { + structs.WriteRequest + structs.DCSpecificRequest + } + rpcInfo.Token = token + + var rsp *pbconnectca.SignResponse + handled, err := s.ForwardRPC(&rpcInfo, func(conn *grpc.ClientConn) error { + logger.Trace("forwarding RPC") + var err error + rsp, err = pbconnectca.NewConnectCAServiceClient(conn).Sign(ctx, req) + return err + }) + if handled || err != nil { + return rsp, err + } + + csr, err := connect.ParseCSR(req.Csr) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, nil, nil) + if err != nil { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + + cert, err := s.CAManager.AuthorizeAndSignCertificate(csr, authz) + switch { + case connect.IsInvalidCSRError(err): + return nil, status.Error(codes.InvalidArgument, err.Error()) + case acl.IsErrPermissionDenied(err): + return nil, status.Error(codes.PermissionDenied, err.Error()) + case isRateLimitError(err): + return nil, status.Error(codes.ResourceExhausted, err.Error()) + case err != nil: + logger.Error("failed to sign leaf certificate", "error", err.Error()) + return nil, status.Error(codes.Internal, "failed to sign leaf certificate") + } + + return &pbconnectca.SignResponse{ + CertPem: cert.CertPEM, + }, nil +} + +// TODO(agentless): CAManager currently lives in the `agent/consul` package and +// returns ErrRateLimited which we can't reference directly here because it'd +// create an import cycle. Checking the error message like this is fragile, but +// because of net/rpc's limited error handling support it's what we already do +// on the client. We should either move the error constant so that can use it +// here, or perhaps make it a typed error? 
+func isRateLimitError(err error) bool { + return err != nil && strings.Contains(err.Error(), "limit reached") +} diff --git a/agent/grpc/public/services/connectca/sign_test.go b/agent/grpc/public/services/connectca/sign_test.go new file mode 100644 index 000000000..600b1056c --- /dev/null +++ b/agent/grpc/public/services/connectca/sign_test.go @@ -0,0 +1,252 @@ +package connectca + +import ( + "context" + "errors" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + acl "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbconnectca" +) + +func TestSign_ConnectDisabled(t *testing.T) { + server := NewServer(Config{ConnectEnabled: false}) + + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{}) + require.Error(t, err) + require.Equal(t, codes.FailedPrecondition.String(), status.Code(err).String()) + require.Contains(t, status.Convert(err).Message(), "Connect") +} + +func TestSign_Validation(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + testCases := map[string]struct { + csr, err string + }{ + "no csr": { + csr: "", + err: "CSR is required", + }, + "invalid csr": { + csr: "bogus", + err: "no PEM-encoded data found", + }, + } + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: tc.csr, + }) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.Equal(t, tc.err, status.Convert(err).Message()) + }) + } +} + +func TestSign_Unauthenticated(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(nil, acl.ErrNotFound) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.Error(t, err) + require.Equal(t, codes.Unauthenticated.String(), status.Code(err).String()) +} + +func TestSign_PermissionDenied(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + caManager := &MockCAManager{} + caManager.On("AuthorizeAndSignCertificate", mock.Anything, mock.Anything). 
+ Return(nil, acl.ErrPermissionDenied) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + CAManager: caManager, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.Error(t, err) + require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) +} + +func TestSign_InvalidCSR(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + caManager := &MockCAManager{} + caManager.On("AuthorizeAndSignCertificate", mock.Anything, mock.Anything). + Return(nil, connect.InvalidCSRError("nope")) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + CAManager: caManager, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) +} + +func TestSign_RateLimited(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + caManager := &MockCAManager{} + caManager.On("AuthorizeAndSignCertificate", mock.Anything, mock.Anything). + Return(nil, errors.New("Rate limit reached, try again later")) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + CAManager: caManager, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.Error(t, err) + require.Equal(t, codes.ResourceExhausted.String(), status.Code(err).String()) +} + +func TestSign_InternalError(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + caManager := &MockCAManager{} + caManager.On("AuthorizeAndSignCertificate", mock.Anything, mock.Anything). + Return(nil, errors.New("something went very wrong")) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + CAManager: caManager, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + _, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.Error(t, err) + require.Equal(t, codes.Internal.String(), status.Code(err).String()) +} + +func TestSign_Success(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + caManager := &MockCAManager{} + caManager.On("AuthorizeAndSignCertificate", mock.Anything, mock.Anything). 
+ Return(&structs.IssuedCert{CertPEM: "this is the PEM"}, nil) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + CAManager: caManager, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + rsp, err := server.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.NoError(t, err) + require.Equal(t, "this is the PEM", rsp.CertPem) +} + +func TestSign_RPCForwarding(t *testing.T) { + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(acl.AllowAll(), nil) + + caManager := &MockCAManager{} + caManager.On("AuthorizeAndSignCertificate", mock.Anything, mock.Anything). + Return(&structs.IssuedCert{CertPEM: "leader response"}, nil) + + leader := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + CAManager: caManager, + ForwardRPC: noopForwardRPC, + ConnectEnabled: true, + }) + leaderConn, err := grpc.Dial(runTestServer(t, leader).String(), grpc.WithInsecure()) + require.NoError(t, err) + + follower := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ForwardRPC: func(_ structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + return true, fn(leaderConn) + }, + ConnectEnabled: true, + }) + + csr, _ := connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")) + + rsp, err := follower.Sign(context.Background(), &pbconnectca.SignRequest{ + Csr: csr, + }) + require.NoError(t, err) + require.Equal(t, "leader response", rsp.CertPem) +} diff --git a/agent/grpc/public/services/connectca/watch_roots.go b/agent/grpc/public/services/connectca/watch_roots.go index 7a7430783..1d458b558 100644 --- a/agent/grpc/public/services/connectca/watch_roots.go +++ b/agent/grpc/public/services/connectca/watch_roots.go @@ -26,8 +26,11 @@ import ( // Connect CA roots. Current roots are sent immediately at the start of the // stream, and new lists will be sent whenever the roots are rotated. func (s *Server) WatchRoots(_ *emptypb.Empty, serverStream pbconnectca.ConnectCAService_WatchRootsServer) error { - logger := s.Logger.Named("watch-roots").With("stream_id", streamID()) + if err := s.requireConnect(); err != nil { + return err + } + logger := s.Logger.Named("watch-roots").With("stream_id", traceID()) logger.Trace("starting stream") defer logger.Trace("stream closed") @@ -179,8 +182,8 @@ func (s *Server) authorize(token string) error { } // We tag logs with a unique identifier to ease debugging. In the future this -// should probably be an Open Telemetry trace ID. -func streamID() string { +// should probably be a real Open Telemetry trace ID. +func traceID() string { id, err := uuid.GenerateUUID() if err != nil { return "" diff --git a/agent/grpc/public/services/connectca/watch_roots_test.go b/agent/grpc/public/services/connectca/watch_roots_test.go index 7bce07e1a..1106aa35d 100644 --- a/agent/grpc/public/services/connectca/watch_roots_test.go +++ b/agent/grpc/public/services/connectca/watch_roots_test.go @@ -26,6 +26,20 @@ import ( const testACLToken = "acl-token" +func TestWatchRoots_ConnectDisabled(t *testing.T) { + server := NewServer(Config{ConnectEnabled: false}) + + // Begin the stream. 
+ client := testClient(t, server) + stream, err := client.WatchRoots(context.Background(), &emptypb.Empty{}) + require.NoError(t, err) + rspCh := handleRootsStream(t, stream) + + err = mustGetError(t, rspCh) + require.Equal(t, codes.FailedPrecondition.String(), status.Code(err).String()) + require.Contains(t, status.Convert(err).Message(), "Connect") +} + func TestWatchRoots_Success(t *testing.T) { fsm, publisher := setupFSMAndPublisher(t) @@ -45,10 +59,11 @@ func TestWatchRoots_Success(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - Publisher: publisher, - GetStore: func() StateStore { return fsm.GetStore() }, - Logger: testutil.Logger(t), - ACLResolver: aclResolver, + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), + ACLResolver: aclResolver, + ConnectEnabled: true, }) // Begin the stream. @@ -92,10 +107,11 @@ func TestWatchRoots_InvalidACLToken(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - Publisher: publisher, - GetStore: func() StateStore { return fsm.GetStore() }, - Logger: testutil.Logger(t), - ACLResolver: aclResolver, + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), + ACLResolver: aclResolver, + ConnectEnabled: true, }) // Start the stream. @@ -129,10 +145,11 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - Publisher: publisher, - GetStore: func() StateStore { return fsm.GetStore() }, - Logger: testutil.Logger(t), - ACLResolver: aclResolver, + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), + ACLResolver: aclResolver, + ConnectEnabled: true, }) // Start the stream. @@ -196,10 +213,11 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ - Publisher: publisher, - GetStore: func() StateStore { return fsm.GetStore() }, - Logger: testutil.Logger(t), - ACLResolver: aclResolver, + Publisher: publisher, + GetStore: func() StateStore { return fsm.GetStore() }, + Logger: testutil.Logger(t), + ACLResolver: aclResolver, + ConnectEnabled: true, }) // Begin the stream. 
diff --git a/proto-public/pbconnectca/ca.pb.binary.go b/proto-public/pbconnectca/ca.pb.binary.go index e373db9b5..3db6ad209 100644 --- a/proto-public/pbconnectca/ca.pb.binary.go +++ b/proto-public/pbconnectca/ca.pb.binary.go @@ -26,3 +26,23 @@ func (msg *CARoot) MarshalBinary() ([]byte, error) { func (msg *CARoot) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *SignRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *SignRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *SignResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *SignResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto-public/pbconnectca/ca.pb.go b/proto-public/pbconnectca/ca.pb.go index bb966a4de..a3f1d8777 100644 --- a/proto-public/pbconnectca/ca.pb.go +++ b/proto-public/pbconnectca/ca.pb.go @@ -228,6 +228,106 @@ func (x *CARoot) GetRotatedOutAt() *timestamppb.Timestamp { return nil } +type SignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // csr is the PEM-encoded Certificate Signing Request (CSR). + // + // The CSR's SAN must include a SPIFFE ID that identifies a service or agent + // to which the ACL token provided in the `x-consul-token` metadata has write + // access. + Csr string `protobuf:"bytes,1,opt,name=csr,proto3" json:"csr,omitempty"` +} + +func (x *SignRequest) Reset() { + *x = SignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignRequest) ProtoMessage() {} + +func (x *SignRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignRequest.ProtoReflect.Descriptor instead. +func (*SignRequest) Descriptor() ([]byte, []int) { + return file_proto_public_pbconnectca_ca_proto_rawDescGZIP(), []int{2} +} + +func (x *SignRequest) GetCsr() string { + if x != nil { + return x.Csr + } + return "" +} + +type SignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // cert_pem is the PEM-encoded leaf certificate. 
+ CertPem string `protobuf:"bytes,2,opt,name=cert_pem,json=certPem,proto3" json:"cert_pem,omitempty"` +} + +func (x *SignResponse) Reset() { + *x = SignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignResponse) ProtoMessage() {} + +func (x *SignResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbconnectca_ca_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignResponse.ProtoReflect.Descriptor instead. +func (*SignResponse) Descriptor() ([]byte, []int) { + return file_proto_public_pbconnectca_ca_proto_rawDescGZIP(), []int{3} +} + +func (x *SignResponse) GetCertPem() string { + if x != nil { + return x.CertPem + } + return "" +} + var File_proto_public_pbconnectca_ca_proto protoreflect.FileDescriptor var file_proto_public_pbconnectca_ca_proto_rawDesc = []byte{ @@ -264,17 +364,25 @@ var file_proto_public_pbconnectca_ca_proto_rawDesc = []byte{ 0x75, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, - 0x4f, 0x75, 0x74, 0x41, 0x74, 0x32, 0x5b, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x43, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 0x74, 0x63, - 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, - 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x4f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x63, 0x73, 0x72, 0x22, 0x29, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x70, + 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x65, 0x72, 0x74, 0x50, 0x65, + 0x6d, 0x32, 0x96, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x41, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 
0x74, 0x63, 0x68, 0x52, 0x6f, + 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x39, 0x0a, 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x63, 0x61, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -289,20 +397,24 @@ func file_proto_public_pbconnectca_ca_proto_rawDescGZIP() []byte { return file_proto_public_pbconnectca_ca_proto_rawDescData } -var file_proto_public_pbconnectca_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_public_pbconnectca_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_proto_public_pbconnectca_ca_proto_goTypes = []interface{}{ (*WatchRootsResponse)(nil), // 0: connectca.WatchRootsResponse (*CARoot)(nil), // 1: connectca.CARoot - (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp - (*emptypb.Empty)(nil), // 3: google.protobuf.Empty + (*SignRequest)(nil), // 2: connectca.SignRequest + (*SignResponse)(nil), // 3: connectca.SignResponse + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 5: google.protobuf.Empty } var file_proto_public_pbconnectca_ca_proto_depIdxs = []int32{ 1, // 0: connectca.WatchRootsResponse.roots:type_name -> connectca.CARoot - 2, // 1: connectca.CARoot.rotated_out_at:type_name -> google.protobuf.Timestamp - 3, // 2: connectca.ConnectCAService.WatchRoots:input_type -> google.protobuf.Empty - 0, // 3: connectca.ConnectCAService.WatchRoots:output_type -> connectca.WatchRootsResponse - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type + 4, // 1: connectca.CARoot.rotated_out_at:type_name -> google.protobuf.Timestamp + 5, // 2: connectca.ConnectCAService.WatchRoots:input_type -> google.protobuf.Empty + 2, // 3: connectca.ConnectCAService.Sign:input_type -> connectca.SignRequest + 0, // 4: connectca.ConnectCAService.WatchRoots:output_type -> connectca.WatchRootsResponse + 3, // 5: connectca.ConnectCAService.Sign:output_type -> connectca.SignResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name 2, // [2:2] is the sub-list for extension extendee 0, // [0:2] is the sub-list for field type_name @@ -338,6 +450,30 @@ func file_proto_public_pbconnectca_ca_proto_init() { return nil } } + file_proto_public_pbconnectca_ca_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_public_pbconnectca_ca_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := 
protoimpl.TypeBuilder{ @@ -345,7 +481,7 @@ func file_proto_public_pbconnectca_ca_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_public_pbconnectca_ca_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, @@ -375,6 +511,9 @@ type ConnectCAServiceClient interface { // Connect CA roots. Current roots are sent immediately at the start of the // stream, and new lists will be sent whenever the roots are rotated. WatchRoots(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (ConnectCAService_WatchRootsClient, error) + // Sign a leaf certificate for the service or agent identified by the SPIFFE + // ID in the given CSR's SAN. + Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) } type connectCAServiceClient struct { @@ -417,12 +556,24 @@ func (x *connectCAServiceWatchRootsClient) Recv() (*WatchRootsResponse, error) { return m, nil } +func (c *connectCAServiceClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) { + out := new(SignResponse) + err := c.cc.Invoke(ctx, "/connectca.ConnectCAService/Sign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ConnectCAServiceServer is the server API for ConnectCAService service. type ConnectCAServiceServer interface { // WatchRoots provides a stream on which you can receive the list of active // Connect CA roots. Current roots are sent immediately at the start of the // stream, and new lists will be sent whenever the roots are rotated. WatchRoots(*emptypb.Empty, ConnectCAService_WatchRootsServer) error + // Sign a leaf certificate for the service or agent identified by the SPIFFE + // ID in the given CSR's SAN. + Sign(context.Context, *SignRequest) (*SignResponse, error) } // UnimplementedConnectCAServiceServer can be embedded to have forward compatible implementations. 
@@ -432,6 +583,9 @@ type UnimplementedConnectCAServiceServer struct { func (*UnimplementedConnectCAServiceServer) WatchRoots(*emptypb.Empty, ConnectCAService_WatchRootsServer) error { return status.Errorf(codes.Unimplemented, "method WatchRoots not implemented") } +func (*UnimplementedConnectCAServiceServer) Sign(context.Context, *SignRequest) (*SignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented") +} func RegisterConnectCAServiceServer(s *grpc.Server, srv ConnectCAServiceServer) { s.RegisterService(&_ConnectCAService_serviceDesc, srv) @@ -458,10 +612,33 @@ func (x *connectCAServiceWatchRootsServer) Send(m *WatchRootsResponse) error { return x.ServerStream.SendMsg(m) } +func _ConnectCAService_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnectCAServiceServer).Sign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/connectca.ConnectCAService/Sign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnectCAServiceServer).Sign(ctx, req.(*SignRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _ConnectCAService_serviceDesc = grpc.ServiceDesc{ ServiceName: "connectca.ConnectCAService", HandlerType: (*ConnectCAServiceServer)(nil), - Methods: []grpc.MethodDesc{}, + Methods: []grpc.MethodDesc{ + { + MethodName: "Sign", + Handler: _ConnectCAService_Sign_Handler, + }, + }, Streams: []grpc.StreamDesc{ { StreamName: "WatchRoots", diff --git a/proto-public/pbconnectca/ca.proto b/proto-public/pbconnectca/ca.proto index fef15fbc1..216a6e43c 100644 --- a/proto-public/pbconnectca/ca.proto +++ b/proto-public/pbconnectca/ca.proto @@ -12,6 +12,10 @@ service ConnectCAService { // Connect CA roots. Current roots are sent immediately at the start of the // stream, and new lists will be sent whenever the roots are rotated. rpc WatchRoots(google.protobuf.Empty) returns (stream WatchRootsResponse) {}; + + // Sign a leaf certificate for the service or agent identified by the SPIFFE + // ID in the given CSR's SAN. + rpc Sign(SignRequest) returns (SignResponse) {}; } message WatchRootsResponse { @@ -70,3 +74,17 @@ message CARoot { // active root. google.protobuf.Timestamp rotated_out_at = 8; } + +message SignRequest { + // csr is the PEM-encoded Certificate Signing Request (CSR). + // + // The CSR's SAN must include a SPIFFE ID that identifies a service or agent + // to which the ACL token provided in the `x-consul-token` metadata has write + // access. + string csr = 1; +} + +message SignResponse { + // cert_pem is the PEM-encoded leaf certificate. 
+ string cert_pem = 2; +} From 7c748851806c46688ed2902d32b69a8829a8bf34 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 14 Apr 2022 16:58:19 +0100 Subject: [PATCH 144/785] ui: Only show optimistic details in Ent (#12788) --- .../app/styles/routes/dc/overview/serverstatus.scss | 2 +- .../consul-ui/app/templates/dc/show/serverstatus.hbs | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss b/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss index 0e035f8a2..7b36bec25 100644 --- a/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss +++ b/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss @@ -22,7 +22,7 @@ section[data-route='dc.show.serverstatus'] { @extend %panel; box-shadow: var(--decor-elevation-000); padding: var(--padding-y) var(--padding-x); - width: 770px; + max-width: 770px; display: flex; flex-wrap: wrap; } diff --git a/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs b/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs index 211289c72..36fa0c86e 100644 --- a/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs +++ b/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs @@ -119,6 +119,7 @@ as |item|}} + {{#if (can 'read zones')}}

{{compute (fn route.t 'tolerance.optimistic.header')}} - {{#if (not (can 'read zones'))}} - - {{t 'common.ui.enterprisefeature'}} - - {{/if}} 30 seconds between server failures, Consul can restore the Immediate Fault Tolerance by replacing failed active voters with healthy back-up voters when using redundancy zones.'}} > @@ -152,7 +148,7 @@ as |item|}}

- + {{/if}} {{#if (gt item.RedundancyZones.length 0)}} From 8edb19f97a5e41316783891b4f078ed30546966d Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 14 Apr 2022 17:13:13 +0100 Subject: [PATCH 145/785] ui: Adds support for AWS-IAM Auth Methods (#12786) * ui: Adds support for AWS-IAM Auth Methods * Changelog --- .changelog/12786.txt | 3 + .../components/consul/auth-method/index.scss | 6 ++ .../consul/auth-method/view/index.hbs | 60 +++++++++++++++++-- .../consul-ui/app/components/pill/index.scss | 4 ++ .../dc/acls/auth-methods/show/auth-method.hbs | 2 +- .../acls/auth-methods/show/binding-rules.hbs | 2 +- .../acls/auth-methods/show/nspace-rules.hbs | 8 +-- .../consul-ui/mock-api/v1/acl/auth-methods | 4 +- ui/packages/consul-ui/package.json | 1 + .../consul-ui/translations/common/en-us.yaml | 1 + ui/yarn.lock | 7 +++ 11 files changed, 84 insertions(+), 14 deletions(-) create mode 100644 .changelog/12786.txt diff --git a/.changelog/12786.txt b/.changelog/12786.txt new file mode 100644 index 000000000..ca772829e --- /dev/null +++ b/.changelog/12786.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Added support for AWS IAM Auth Methods +``` diff --git a/ui/packages/consul-ui/app/components/consul/auth-method/index.scss b/ui/packages/consul-ui/app/components/consul/auth-method/index.scss index c1b862381..beff18a65 100644 --- a/ui/packages/consul-ui/app/components/consul/auth-method/index.scss +++ b/ui/packages/consul-ui/app/components/consul/auth-method/index.scss @@ -42,6 +42,12 @@ section dl { @extend %tabular-dl; } + section dt { + width: 30%; + } + section dd { + width: 70%; + } } // Binding List diff --git a/ui/packages/consul-ui/app/components/consul/auth-method/view/index.hbs b/ui/packages/consul-ui/app/components/consul/auth-method/view/index.hbs index 396aea1d7..e7f8f319c 100644 --- a/ui/packages/consul-ui/app/components/consul/auth-method/view/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/auth-method/view/index.hbs @@ -37,16 +37,62 @@
Type
-
+
+ +
+ +{{#each (array "MaxTokenTTL" "TokenLocality" "DisplayName" "Description") as |value|}} + {{#if (get @item value)}} - {{#each (array "MaxTokenTTL" "TokenLocality" "DisplayName" "Description") as |value|}} - {{#if (get @item value)}}
{{t (concat "models.auth-method." value)}}
{{get @item value}}
- {{/if}} - {{/each}} - {{#if (eq @item.Type 'jwt')}} + {{/if}} +{{/each}} + + {{#if (eq @item.Type 'aws-iam')}} + +{{#let + @item.Config +as |config|}} + {{#each (array + "BoundIAMPrincipalARNs" + "EnableIAMEntityDetails" + "IAMEntityTags" + "IAMEndpoint" + "MaxRetries" + "STSEndpoint" + "STSRegion" + "AllowedSTSHeaderValues" + "ServerIDHeaderValue" + ) as |value|}} + {{#if (get config value)}} + +
{{t (concat "models.auth-method." value)}}
+
+{{#let + (get config value) +as |item|}} + {{#if (array-is-array item)}} +
    + {{#each item as |jtem|}} +
  • + {{jtem}} +
  • + {{/each}} +
+ {{else}} + {{item}} + {{/if}} +{{/let}} +
+ + {{/if}} + {{/each}} + +{{/let}} + + {{else if (eq @item.Type 'jwt')}} {{#if @item.Config.JWKSURL}}
{{t 'models.auth-method.Config.JWKSURL'}}
@@ -164,6 +210,7 @@
+ {{#if (not (eq @item.Type 'aws-iam'))}}
@@ -250,4 +297,5 @@ {{/if}}
{{/if}} +{{/if}} \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/pill/index.scss b/ui/packages/consul-ui/app/components/pill/index.scss index d7da1f0a5..cbeabfff1 100644 --- a/ui/packages/consul-ui/app/components/pill/index.scss +++ b/ui/packages/consul-ui/app/components/pill/index.scss @@ -32,3 +32,7 @@ span.policy-service-identity::before { %pill.kubernetes::before { @extend %with-logo-kubernetes-color-icon, %as-pseudo; } +%pill.aws-iam::before { + --icon-name: icon-aws-color; + content: ''; +} diff --git a/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/auth-method.hbs b/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/auth-method.hbs index 248122f06..cc28d28f6 100644 --- a/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/auth-method.hbs +++ b/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/auth-method.hbs @@ -1,5 +1,5 @@
diff --git a/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/binding-rules.hbs b/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/binding-rules.hbs index f12f996a0..f1e09e636 100644 --- a/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/binding-rules.hbs +++ b/ui/packages/consul-ui/app/templates/dc/acls/auth-methods/show/binding-rules.hbs @@ -1,5 +1,5 @@ {{#let route.model.item @@ -14,13 +14,13 @@ as |item|}}

- {{t 'routes.dc.acls.auth-methods.show.nspace-rules.index.empty.header'}} + {{compute (fn route.t 'empty.header')}}

- {{t 'routes.dc.acls.auth-methods.show.nspace-rules.index.empty.body' + {{compute (fn route.t 'empty.body' (hash htmlSafe=true - }} + ))}}