Merge branch 'main' into fix-kv_entries-metric

This commit is contained in:
Max Bowsher 2022-06-27 18:57:03 +01:00 committed by GitHub
commit 728cd03e24
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
234 changed files with 8152 additions and 2254 deletions

3
.changelog/12399.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:enhancement
catalog: Add per-node indexes to reduce watchset firing for unrelated nodes and services.
```

4
.changelog/13481.txt Normal file
View File

@ -0,0 +1,4 @@
```release-note:improvement
command: Add support for enabling TLS in the Envoy Prometheus endpoint via the `consul connect envoy` command.
Adds the `-prometheus-ca-file`, `-prometheus-ca-path`, `-prometheus-cert-file` and `-prometheus-key-file` flags.
```

View File

@ -930,21 +930,6 @@ jobs:
path: *TEST_RESULTS_DIR path: *TEST_RESULTS_DIR
- run: *notify-slack-failure - run: *notify-slack-failure
trigger-oss-merge:
docker:
- image: docker.mirror.hashicorp.services/alpine:3.12
steps:
- run: apk add --no-cache --no-progress curl jq
- run:
name: trigger oss merge
command: |
curl -s -X POST \
--header "Circle-Token: ${CIRCLECI_API_TOKEN}" \
--header "Content-Type: application/json" \
-d '{"build_parameters": {"CIRCLE_JOB": "oss-merge"}}' \
"https://circleci.com/api/v1.1/project/github/hashicorp/consul-enterprise/tree/${CIRCLE_BRANCH}" | jq -r '.build_url'
- run: *notify-slack-failure
# Run load tests against a commit # Run load tests against a commit
load-test: load-test:
docker: docker:
@ -1180,16 +1165,6 @@ workflows:
requires: requires:
- ember-build-ent - ember-build-ent
- noop - noop
workflow-automation:
unless: << pipeline.parameters.trigger-load-test >>
jobs:
- trigger-oss-merge:
context: team-consul
filters:
branches:
only:
- main
- /release\/\d+\.\d+\.x$/
load-test: load-test:
when: << pipeline.parameters.trigger-load-test >> when: << pipeline.parameters.trigger-load-test >>

View File

@ -232,6 +232,14 @@ jobs:
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
# Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix
# This naming convention will be used ONLY for per-commit dev images
- name: Set docker dev tag
run: |
version="${{ env.version }}"
echo "dev_tag=${version%.*}-dev" >> $GITHUB_ENV
- name: Docker Build (Action) - name: Docker Build (Action)
uses: hashicorp/actions-docker-build@v1 uses: hashicorp/actions-docker-build@v1
with: with:
@ -242,8 +250,8 @@ jobs:
docker.io/hashicorp/${{env.repo}}:${{env.version}} docker.io/hashicorp/${{env.repo}}:${{env.version}}
public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}} public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}
dev_tags: | dev_tags: |
docker.io/hashicorppreview/${{ env.repo }}:${{ env.version }} docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}
docker.io/hashicorppreview/${{ env.repo }}:${{ env.version }}-${{ github.sha }} docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-${{ github.sha }}
smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}
build-docker-redhat: build-docker-redhat:

View File

@ -1,3 +1,58 @@
## 1.13.0-alpha2 (June 21, 2022)
IMPROVEMENTS:
* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13450](https://github.com/hashicorp/consul/issues/13450)]
* connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5) [[GH-13431](https://github.com/hashicorp/consul/issues/13431)]
BUG FIXES:
* ui: Fix incorrect text on certain page empty states [[GH-13409](https://github.com/hashicorp/consul/issues/13409)]
## 1.13.0-alpha1 (June 15, 2022)
BREAKING CHANGES:
* config-entry: Exporting a specific service name across all namespace is invalid.
FEATURES:
* acl: It is now possible to login and logout using the gRPC API [[GH-12935](https://github.com/hashicorp/consul/issues/12935)]
* agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands
to report this. Agent also reports build date in log on startup. [[GH-13357](https://github.com/hashicorp/consul/issues/13357)]
* ca: Leaf certificates can now be obtained via the gRPC API: `Sign` [[GH-12787](https://github.com/hashicorp/consul/issues/12787)]
* checks: add UDP health checks.. [[GH-12722](https://github.com/hashicorp/consul/issues/12722)]
* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-12825](https://github.com/hashicorp/consul/issues/12825)]
* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-1717](https://github.com/hashicorp/consul/issues/1717)]
* grpc: New gRPC service and endpoint to return the list of supported consul dataplane features [[GH-12695](https://github.com/hashicorp/consul/issues/12695)]
IMPROVEMENTS:
* api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13001](https://github.com/hashicorp/consul/issues/13001)]
* api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway [[GH-12914](https://github.com/hashicorp/consul/issues/12914)]
* connect: add validation to ensure connect native services have a port or socketpath specified on catalog registration.
This was the only missing piece to ensure all mesh services are validated for a port (or socketpath) specification on catalog registration. [[GH-12881](https://github.com/hashicorp/consul/issues/12881)]
* Support Vault namespaces in Connect CA by adding RootPKINamespace and
IntermediatePKINamespace fields to the config. [[GH-12904](https://github.com/hashicorp/consul/issues/12904)]
* acl: Clarify node/service identities must be lowercase [[GH-12807](https://github.com/hashicorp/consul/issues/12807)]
* connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance. [[GH-13143](https://github.com/hashicorp/consul/issues/13143)]
* dns: Added support for specifying admin partition in node lookups. [[GH-13421](https://github.com/hashicorp/consul/issues/13421)]
* grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. [[GH-12819](https://github.com/hashicorp/consul/issues/12819)]
* telemetry: Added `consul.raft.thread.main.saturation` and `consul.raft.thread.fsm.saturation` metrics to measure approximate saturation of the Raft goroutines [[GH-12865](https://github.com/hashicorp/consul/issues/12865)]
* telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not. [[GH-13304](https://github.com/hashicorp/consul/issues/13304)]
* ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities [[GH-10996](https://github.com/hashicorp/consul/issues/10996)]
* ui: upgrade ember-composable-helpers to v5.x [[GH-13394](https://github.com/hashicorp/consul/issues/13394)]
BUG FIXES:
* acl: Fixed a bug where the ACL down policy wasn't being applied on remote errors from the primary datacenter. [[GH-12885](https://github.com/hashicorp/consul/issues/12885)]
* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13256](https://github.com/hashicorp/consul/issues/13256)]
* deps: Update go-grpc/grpc, resolving connection memory leak [[GH-13051](https://github.com/hashicorp/consul/issues/13051)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
* proxycfg: Fixed a minor bug that would cause configuring a terminating gateway to watch too many service resolvers and waste resources doing filtering. [[GH-13012](https://github.com/hashicorp/consul/issues/13012)]
* raft: upgrade to v1.3.8 which fixes a bug where non cluster member can still be able to participate in an election. [[GH-12844](https://github.com/hashicorp/consul/issues/12844)]
* serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed [[GH-13062](https://github.com/hashicorp/consul/issues/13062)]
## 1.12.2 (June 3, 2022) ## 1.12.2 (June 3, 2022)
BUG FIXES: BUG FIXES:

View File

@ -333,12 +333,12 @@ ifeq ("$(GOTAGS)","")
@docker tag consul-dev:latest consul:local @docker tag consul-dev:latest consul:local
@docker run --rm -t consul:local consul version @docker run --rm -t consul:local consul version
@cd ./test/integration/consul-container && \ @cd ./test/integration/consul-container && \
go test -v -timeout=30m ./upgrade --target-version local --latest-version latest go test -v -timeout=30m ./... --target-version local --latest-version latest
else else
@docker tag consul-dev:latest hashicorp/consul-enterprise:local @docker tag consul-dev:latest hashicorp/consul-enterprise:local
@docker run --rm -t hashicorp/consul-enterprise:local consul version @docker run --rm -t hashicorp/consul-enterprise:local consul version
@cd ./test/integration/consul-container && \ @cd ./test/integration/consul-container && \
go test -v -timeout=30m ./upgrade --tags $(GOTAGS) --target-version local --latest-version latest go test -v -timeout=30m ./... --tags $(GOTAGS) --target-version local --latest-version latest
endif endif
.PHONY: test-metrics-integ .PHONY: test-metrics-integ

View File

@ -8,7 +8,7 @@ import (
"github.com/armon/go-metrics" "github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus" "github.com/armon/go-metrics/prometheus"
bexpr "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-hclog" "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid" "github.com/hashicorp/go-uuid"
@ -1036,6 +1036,7 @@ func (c *Catalog) VirtualIPForService(args *structs.ServiceSpecificRequest, repl
} }
state := c.srv.fsm.State() state := c.srv.fsm.State()
*reply, err = state.VirtualIPForService(structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta)) psn := structs.PeeredServiceName{Peer: args.PeerName, ServiceName: structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta)}
*reply, err = state.VirtualIPForService(psn)
return err return err
} }

View File

@ -451,7 +451,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
Port: 8000, Port: 8000,
Connect: connectConf, Connect: connectConf,
}) })
vip, err := fsm.state.VirtualIPForService(structs.NewServiceName("frontend", nil)) psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName("frontend", nil)}
vip, err := fsm.state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, vip, "240.0.0.1") require.Equal(t, vip, "240.0.0.1")
@ -462,7 +463,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
Port: 9000, Port: 9000,
Connect: connectConf, Connect: connectConf,
}) })
vip, err = fsm.state.VirtualIPForService(structs.NewServiceName("backend", nil)) psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("backend", nil)}
vip, err = fsm.state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, vip, "240.0.0.2") require.Equal(t, vip, "240.0.0.2")
@ -476,6 +478,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
// Peerings // Peerings
require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{ require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{
ID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Name: "baz", Name: "baz",
})) }))
@ -591,10 +594,12 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
require.Equal(t, uint64(25), checks[0].ModifyIndex) require.Equal(t, uint64(25), checks[0].ModifyIndex)
// Verify virtual IPs are consistent. // Verify virtual IPs are consistent.
vip, err = fsm2.state.VirtualIPForService(structs.NewServiceName("frontend", nil)) psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("frontend", nil)}
vip, err = fsm2.state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, vip, "240.0.0.1") require.Equal(t, vip, "240.0.0.1")
vip, err = fsm2.state.VirtualIPForService(structs.NewServiceName("backend", nil)) psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("backend", nil)}
vip, err = fsm2.state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, vip, "240.0.0.2") require.Equal(t, vip, "240.0.0.2")

View File

@ -69,18 +69,60 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
&args.QueryOptions, &args.QueryOptions,
&reply.QueryMeta, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error { func(ws memdb.WatchSet, state *state.Store) error {
index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, args.PeerName) // we don't support calling this endpoint for a specific peer
if err != nil { if args.PeerName != "" {
return err return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
} }
reply.Index, reply.Dump = index, dump
// this maxIndex will be the max of the NodeDump calls and the PeeringList call
var maxIndex uint64
// Get data for local nodes
index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {
return fmt.Errorf("could not get a node dump for local nodes: %w", err)
}
if index > maxIndex {
maxIndex = index
}
reply.Dump = dump
// get a list of all peerings
index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("could not list peers for node dump %w", err)
}
if index > maxIndex {
maxIndex = index
}
// get node dumps for all peerings
for _, p := range listedPeerings {
index, importedDump, err := state.NodeDump(ws, &args.EnterpriseMeta, p.Name)
if err != nil {
return fmt.Errorf("could not get a node dump for peer %q: %w", p.Name, err)
}
reply.ImportedDump = append(reply.ImportedDump, importedDump...)
if index > maxIndex {
maxIndex = index
}
}
reply.Index = maxIndex
raw, err := filter.Execute(reply.Dump) raw, err := filter.Execute(reply.Dump)
if err != nil { if err != nil {
return err return fmt.Errorf("could not filter local node dump: %w", err)
} }
reply.Dump = raw.(structs.NodeDump) reply.Dump = raw.(structs.NodeDump)
importedRaw, err := filter.Execute(reply.ImportedDump)
if err != nil {
return fmt.Errorf("could not filter peer node dump: %w", err)
}
reply.ImportedDump = importedRaw.(structs.NodeDump)
// Note: we filter the results with ACLs *after* applying the user-supplied // Note: we filter the results with ACLs *after* applying the user-supplied
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
// results that would be filtered out even if the user did have permission. // results that would be filtered out even if the user did have permission.
@ -111,13 +153,47 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
&args.QueryOptions, &args.QueryOptions,
&reply.QueryMeta, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error { func(ws memdb.WatchSet, state *state.Store) error {
// Get, store, and filter nodes // we don't support calling this endpoint for a specific peer
maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, args.PeerName) if args.PeerName != "" {
return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
}
// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
var maxIndex uint64
// get a local dump for services
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil { if err != nil {
return err return fmt.Errorf("could not get a service dump for local nodes: %w", err)
}
if index > maxIndex {
maxIndex = index
} }
reply.Nodes = nodes reply.Nodes = nodes
// get a list of all peerings
index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("could not list peers for service dump %w", err)
}
if index > maxIndex {
maxIndex = index
}
for _, p := range listedPeerings {
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
}
if index > maxIndex {
maxIndex = index
}
reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
}
// Get, store, and filter gateway services // Get, store, and filter gateway services
idx, gatewayServices, err := state.DumpGatewayServices(ws) idx, gatewayServices, err := state.DumpGatewayServices(ws)
if err != nil { if err != nil {
@ -125,17 +201,23 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
} }
reply.Gateways = gatewayServices reply.Gateways = gatewayServices
if idx > maxIdx { if idx > maxIndex {
maxIdx = idx maxIndex = idx
} }
reply.Index = maxIdx reply.Index = maxIndex
raw, err := filter.Execute(reply.Nodes) raw, err := filter.Execute(reply.Nodes)
if err != nil { if err != nil {
return err return fmt.Errorf("could not filter local service dump: %w", err)
} }
reply.Nodes = raw.(structs.CheckServiceNodes) reply.Nodes = raw.(structs.CheckServiceNodes)
importedRaw, err := filter.Execute(reply.ImportedNodes)
if err != nil {
return fmt.Errorf("could not filter peer service dump: %w", err)
}
reply.ImportedNodes = importedRaw.(structs.CheckServiceNodes)
// Note: we filter the results with ACLs *after* applying the user-supplied // Note: we filter the results with ACLs *after* applying the user-supplied
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
// results that would be filtered out even if the user did have permission. // results that would be filtered out even if the user did have permission.

View File

@ -8,6 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -17,6 +18,7 @@ import (
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/lib/stringslice"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/testrpc"
@ -29,56 +31,79 @@ func TestInternal_NodeInfo(t *testing.T) {
} }
t.Parallel() t.Parallel()
dir1, s1 := testServer(t) _, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1) codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1") testrpc.WaitForLeader(t, s1.RPC, "dc1")
arg := structs.RegisterRequest{ args := []*structs.RegisterRequest{
Datacenter: "dc1", {
Node: "foo", Datacenter: "dc1",
Address: "127.0.0.1", Node: "foo",
Service: &structs.NodeService{ Address: "127.0.0.1",
ID: "db", Service: &structs.NodeService{
Service: "db", ID: "db",
Tags: []string{"primary"}, Service: "db",
Tags: []string{"primary"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}, },
Check: &structs.HealthCheck{ {
Name: "db connect", Datacenter: "dc1",
Status: api.HealthPassing, Node: "foo",
ServiceID: "db", Address: "127.0.0.3",
PeerName: "peer1",
}, },
} }
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil { for _, reg := range args {
t.Fatalf("err: %v", err) err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
require.NoError(t, err)
} }
var out2 structs.IndexedNodeDump t.Run("get local node", func(t *testing.T) {
req := structs.NodeSpecificRequest{ var out structs.IndexedNodeDump
Datacenter: "dc1", req := structs.NodeSpecificRequest{
Node: "foo", Datacenter: "dc1",
} Node: "foo",
if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out2); err != nil { }
t.Fatalf("err: %v", err) if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out); err != nil {
} t.Fatalf("err: %v", err)
}
nodes := out2.Dump nodes := out.Dump
if len(nodes) != 1 { if len(nodes) != 1 {
t.Fatalf("Bad: %v", nodes) t.Fatalf("Bad: %v", nodes)
} }
if nodes[0].Node != "foo" { if nodes[0].Node != "foo" {
t.Fatalf("Bad: %v", nodes[0]) t.Fatalf("Bad: %v", nodes[0])
} }
if !stringslice.Contains(nodes[0].Services[0].Tags, "primary") { if !stringslice.Contains(nodes[0].Services[0].Tags, "primary") {
t.Fatalf("Bad: %v", nodes[0]) t.Fatalf("Bad: %v", nodes[0])
} }
if nodes[0].Checks[0].Status != api.HealthPassing { if nodes[0].Checks[0].Status != api.HealthPassing {
t.Fatalf("Bad: %v", nodes[0]) t.Fatalf("Bad: %v", nodes[0])
} }
})
t.Run("get peered node", func(t *testing.T) {
var out structs.IndexedNodeDump
req := structs.NodeSpecificRequest{
Datacenter: "dc1",
Node: "foo",
PeerName: "peer1",
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out))
nodes := out.Dump
require.Equal(t, 1, len(nodes))
require.Equal(t, "foo", nodes[0].Node)
require.Equal(t, "peer1", nodes[0].PeerName)
})
} }
func TestInternal_NodeDump(t *testing.T) { func TestInternal_NodeDump(t *testing.T) {
@ -87,53 +112,61 @@ func TestInternal_NodeDump(t *testing.T) {
} }
t.Parallel() t.Parallel()
dir1, s1 := testServer(t) _, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1) codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1") testrpc.WaitForLeader(t, s1.RPC, "dc1")
arg := structs.RegisterRequest{ args := []*structs.RegisterRequest{
Datacenter: "dc1", {
Node: "foo", Datacenter: "dc1",
Address: "127.0.0.1", Node: "foo",
Service: &structs.NodeService{ Address: "127.0.0.1",
ID: "db", Service: &structs.NodeService{
Service: "db", ID: "db",
Tags: []string{"primary"}, Service: "db",
Tags: []string{"primary"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}, },
Check: &structs.HealthCheck{ {
Name: "db connect", Datacenter: "dc1",
Status: api.HealthPassing, Node: "bar",
ServiceID: "db", Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"replica"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthWarning,
ServiceID: "db",
},
},
{
Datacenter: "dc1",
Node: "foo-peer",
Address: "127.0.0.3",
PeerName: "peer1",
}, },
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
} }
arg = structs.RegisterRequest{ for _, reg := range args {
Datacenter: "dc1", err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
Node: "bar", require.NoError(t, err)
Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"replica"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthWarning,
ServiceID: "db",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
} }
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
Name: "peer1",
})
require.NoError(t, err)
var out2 structs.IndexedNodeDump var out2 structs.IndexedNodeDump
req := structs.DCSpecificRequest{ req := structs.DCSpecificRequest{
Datacenter: "dc1", Datacenter: "dc1",
@ -175,6 +208,10 @@ func TestInternal_NodeDump(t *testing.T) {
if !foundFoo || !foundBar { if !foundFoo || !foundBar {
t.Fatalf("missing foo or bar") t.Fatalf("missing foo or bar")
} }
require.Len(t, out2.ImportedDump, 1)
require.Equal(t, "peer1", out2.ImportedDump[0].PeerName)
require.Equal(t, "foo-peer", out2.ImportedDump[0].Node)
} }
func TestInternal_NodeDump_Filter(t *testing.T) { func TestInternal_NodeDump_Filter(t *testing.T) {
@ -183,60 +220,107 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
} }
t.Parallel() t.Parallel()
dir1, s1 := testServer(t) _, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1) codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1") testrpc.WaitForLeader(t, s1.RPC, "dc1")
arg := structs.RegisterRequest{ args := []*structs.RegisterRequest{
Datacenter: "dc1", {
Node: "foo", Datacenter: "dc1",
Address: "127.0.0.1", Node: "foo",
Service: &structs.NodeService{ Address: "127.0.0.1",
ID: "db", Service: &structs.NodeService{
Service: "db", ID: "db",
Tags: []string{"primary"}, Service: "db",
Tags: []string{"primary"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}, },
Check: &structs.HealthCheck{ {
Name: "db connect", Datacenter: "dc1",
Status: api.HealthPassing, Node: "bar",
ServiceID: "db", Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"replica"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthWarning,
ServiceID: "db",
},
}, },
} {
var out struct{} Datacenter: "dc1",
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) Node: "foo-peer",
Address: "127.0.0.3",
arg = structs.RegisterRequest{ PeerName: "peer1",
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"replica"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthWarning,
ServiceID: "db",
}, },
} }
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) for _, reg := range args {
err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
var out2 structs.IndexedNodeDump require.NoError(t, err)
req := structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Filter: "primary in Services.Tags"},
} }
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))
nodes := out2.Dump err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
require.Len(t, nodes, 1) ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
require.Equal(t, "foo", nodes[0].Node) Name: "peer1",
})
require.NoError(t, err)
t.Run("filter on the local node", func(t *testing.T) {
var out2 structs.IndexedNodeDump
req := structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Filter: "primary in Services.Tags"},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))
nodes := out2.Dump
require.Len(t, nodes, 1)
require.Equal(t, "foo", nodes[0].Node)
})
t.Run("filter on imported dump", func(t *testing.T) {
var out3 structs.IndexedNodeDump
req2 := structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Filter: "friend in PeerName"},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req2, &out3))
require.Len(t, out3.Dump, 0)
require.Len(t, out3.ImportedDump, 0)
})
t.Run("filter look for peer nodes (non local nodes)", func(t *testing.T) {
var out3 structs.IndexedNodeDump
req2 := structs.DCSpecificRequest{
QueryOptions: structs.QueryOptions{Filter: "PeerName != \"\""},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req2, &out3))
require.Len(t, out3.Dump, 0)
require.Len(t, out3.ImportedDump, 1)
})
t.Run("filter look for a specific peer", func(t *testing.T) {
var out3 structs.IndexedNodeDump
req2 := structs.DCSpecificRequest{
QueryOptions: structs.QueryOptions{Filter: "PeerName == peer1"},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req2, &out3))
require.Len(t, out3.Dump, 0)
require.Len(t, out3.ImportedDump, 1)
})
} }
func TestInternal_KeyringOperation(t *testing.T) { func TestInternal_KeyringOperation(t *testing.T) {
@ -1665,6 +1749,89 @@ func TestInternal_GatewayServiceDump_Ingress_ACL(t *testing.T) {
require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning) require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning)
} }
func TestInternal_ServiceDump_Peering(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
_, s1 := testServer(t)
codec := rpcClient(t, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// prep the cluster with some data we can use in our filters
registerTestCatalogEntries(t, codec)
doRequest := func(t *testing.T, filter string) structs.IndexedNodesWithGateways {
t.Helper()
args := structs.DCSpecificRequest{
QueryOptions: structs.QueryOptions{Filter: filter},
}
var out structs.IndexedNodesWithGateways
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out))
return out
}
t.Run("No peerings", func(t *testing.T) {
nodes := doRequest(t, "")
// redis (3), web (3), critical (1), warning (1) and consul (1)
require.Len(t, nodes.Nodes, 9)
require.Len(t, nodes.ImportedNodes, 0)
})
addPeerService(t, codec)
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
Name: "peer1",
})
require.NoError(t, err)
t.Run("peerings", func(t *testing.T) {
nodes := doRequest(t, "")
// redis (3), web (3), critical (1), warning (1) and consul (1)
require.Len(t, nodes.Nodes, 9)
// service (1)
require.Len(t, nodes.ImportedNodes, 1)
})
t.Run("peerings w filter", func(t *testing.T) {
nodes := doRequest(t, "Node.PeerName == foo")
require.Len(t, nodes.Nodes, 0)
require.Len(t, nodes.ImportedNodes, 0)
nodes2 := doRequest(t, "Node.PeerName == peer1")
require.Len(t, nodes2.Nodes, 0)
require.Len(t, nodes2.ImportedNodes, 1)
})
}
func addPeerService(t *testing.T, codec rpc.ClientCodec) {
// prep the cluster with some data we can use in our filters
registrations := map[string]*structs.RegisterRequest{
"Peer node foo with peer service": {
Datacenter: "dc1",
Node: "foo",
ID: types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"),
Address: "127.0.0.2",
PeerName: "peer1",
Service: &structs.NodeService{
Kind: structs.ServiceKindTypical,
ID: "serviceID",
Service: "service",
Port: 1235,
Address: "198.18.1.2",
PeerName: "peer1",
},
},
}
registerTestCatalogEntriesMap(t, codec, registrations)
}
func TestInternal_GatewayIntentions(t *testing.T) { func TestInternal_GatewayIntentions(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")

View File

@ -7,13 +7,13 @@ import (
"testing" "testing"
"time" "time"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/testrpc"
@ -62,6 +62,10 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
_, found := s1.peeringService.StreamStatus(token.PeerID) _, found := s1.peeringService.StreamStatus(token.PeerID)
require.False(t, found) require.False(t, found)
var (
s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
)
// Bring up s2 and store s1's token so that it attempts to dial. // Bring up s2 and store s1's token so that it attempts to dial.
_, s2 := testServerWithConfig(t, func(c *Config) { _, s2 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "s2.dc2" c.NodeName = "s2.dc2"
@ -73,6 +77,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
// Simulate a peering initiation event by writing a peering with data from a peering token. // Simulate a peering initiation event by writing a peering with data from a peering token.
// Eventually the leader in dc2 should dial and connect to the leader in dc1. // Eventually the leader in dc2 should dial and connect to the leader in dc1.
p := &pbpeering.Peering{ p := &pbpeering.Peering{
ID: s2PeerID,
Name: "my-peer-s1", Name: "my-peer-s1",
PeerID: token.PeerID, PeerID: token.PeerID,
PeerCAPems: token.CA, PeerCAPems: token.CA,
@ -92,6 +97,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
// Delete the peering to trigger the termination sequence. // Delete the peering to trigger the termination sequence.
deleted := &pbpeering.Peering{ deleted := &pbpeering.Peering{
ID: s2PeerID,
Name: "my-peer-s1", Name: "my-peer-s1",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
} }
@ -151,6 +157,11 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
var token structs.PeeringToken var token structs.PeeringToken
require.NoError(t, json.Unmarshal(tokenJSON, &token)) require.NoError(t, json.Unmarshal(tokenJSON, &token))
var (
s1PeerID = token.PeerID
s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
)
// Bring up s2 and store s1's token so that it attempts to dial. // Bring up s2 and store s1's token so that it attempts to dial.
_, s2 := testServerWithConfig(t, func(c *Config) { _, s2 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "s2.dc2" c.NodeName = "s2.dc2"
@ -162,6 +173,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
// Simulate a peering initiation event by writing a peering with data from a peering token. // Simulate a peering initiation event by writing a peering with data from a peering token.
// Eventually the leader in dc2 should dial and connect to the leader in dc1. // Eventually the leader in dc2 should dial and connect to the leader in dc1.
p := &pbpeering.Peering{ p := &pbpeering.Peering{
ID: s2PeerID,
Name: "my-peer-s1", Name: "my-peer-s1",
PeerID: token.PeerID, PeerID: token.PeerID,
PeerCAPems: token.CA, PeerCAPems: token.CA,
@ -181,6 +193,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
// Delete the peering from the server peer to trigger the termination sequence. // Delete the peering from the server peer to trigger the termination sequence.
deleted := &pbpeering.Peering{ deleted := &pbpeering.Peering{
ID: s1PeerID,
Name: "my-peer-s2", Name: "my-peer-s2",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
} }
@ -216,6 +229,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
testrpc.WaitForLeader(t, s1.RPC, "dc1") testrpc.WaitForLeader(t, s1.RPC, "dc1")
var ( var (
peerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
peerName = "my-peer-s2" peerName = "my-peer-s2"
defaultMeta = acl.DefaultEnterpriseMeta() defaultMeta = acl.DefaultEnterpriseMeta()
lastIdx = uint64(0) lastIdx = uint64(0)
@ -224,6 +238,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
// Simulate a peering initiation event by writing a peering to the state store. // Simulate a peering initiation event by writing a peering to the state store.
lastIdx++ lastIdx++
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
ID: peerID,
Name: peerName, Name: peerName,
})) }))
@ -233,6 +248,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
// Mark the peering for deletion to trigger the termination sequence. // Mark the peering for deletion to trigger the termination sequence.
lastIdx++ lastIdx++
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
ID: peerID,
Name: peerName, Name: peerName,
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
})) }))

View File

@ -2258,7 +2258,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
}) })
require.NoError(t, err) require.NoError(t, err)
vip, err := state.VirtualIPForService(structs.NewServiceName("api", nil)) psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName("api", nil)}
vip, err := state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "", vip) require.Equal(t, "", vip)
@ -2287,7 +2288,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
// Make sure the service referenced in the terminating gateway config doesn't have // Make sure the service referenced in the terminating gateway config doesn't have
// a virtual IP yet. // a virtual IP yet.
vip, err = state.VirtualIPForService(structs.NewServiceName("bar", nil)) psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("bar", nil)}
vip, err = state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "", vip) require.Equal(t, "", vip)
@ -2316,8 +2318,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
}, },
}) })
require.NoError(t, err) require.NoError(t, err)
psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("api", nil)}
vip, err = state.VirtualIPForService(structs.NewServiceName("api", nil)) vip, err = state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "240.0.0.1", vip) require.Equal(t, "240.0.0.1", vip)
@ -2345,7 +2347,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
// Make sure the baz service (only referenced in the config entry so far) // Make sure the baz service (only referenced in the config entry so far)
// has a virtual IP. // has a virtual IP.
vip, err = state.VirtualIPForService(structs.NewServiceName("baz", nil)) psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("baz", nil)}
vip, err = state.VirtualIPForService(psn)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "240.0.0.2", vip) require.Equal(t, "240.0.0.2", vip)
} }

View File

@ -143,6 +143,17 @@ type peeringApply struct {
srv *Server srv *Server
} }
func (a *peeringApply) CheckPeeringUUID(id string) (bool, error) {
state := a.srv.fsm.State()
if _, existing, err := state.PeeringReadByID(nil, id); err != nil {
return false, err
} else if existing != nil {
return false, nil
}
return true, nil
}
func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error { func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req) _, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req)
return err return err

View File

@ -7,7 +7,7 @@ import (
"fmt" "fmt"
"strings" "strings"
memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
@ -209,18 +209,13 @@ func (s *Store) ACLAuthMethodUpsertValidateEnterprise(method *structs.ACLAuthMet
return nil return nil
} }
func indexAuthMethodFromACLToken(raw interface{}) ([]byte, error) { func indexAuthMethodFromACLToken(t *structs.ACLToken) ([]byte, error) {
p, ok := raw.(*structs.ACLToken) if t.AuthMethod == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
}
if p.AuthMethod == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(p.AuthMethod)) b.String(strings.ToLower(t.AuthMethod))
return b.Bytes(), nil return b.Bytes(), nil
} }

View File

@ -6,7 +6,6 @@ import (
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
) )
@ -36,18 +35,18 @@ func tokensTableSchema() *memdb.TableSchema {
// DEPRECATED (ACL-Legacy-Compat) - we should not AllowMissing here once legacy compat is removed // DEPRECATED (ACL-Legacy-Compat) - we should not AllowMissing here once legacy compat is removed
AllowMissing: true, AllowMissing: true,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[string, *structs.ACLToken]{
readIndex: readIndex(indexFromUUIDString), readIndex: indexFromUUIDString,
writeIndex: writeIndex(indexAccessorIDFromACLToken), writeIndex: indexAccessorIDFromACLToken,
}, },
}, },
indexID: { indexID: {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[string, *structs.ACLToken]{
readIndex: readIndex(indexFromStringCaseSensitive), readIndex: indexFromStringCaseSensitive,
writeIndex: writeIndex(indexSecretIDFromACLToken), writeIndex: indexSecretIDFromACLToken,
}, },
}, },
indexPolicies: { indexPolicies: {
@ -55,58 +54,58 @@ func tokensTableSchema() *memdb.TableSchema {
// Need to allow missing for the anonymous token // Need to allow missing for the anonymous token
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerMulti{ Indexer: indexerMulti[Query, *structs.ACLToken]{
readIndex: readIndex(indexFromUUIDQuery), readIndex: indexFromUUIDQuery,
writeIndexMulti: writeIndexMulti(indexPoliciesFromACLToken), writeIndexMulti: indexPoliciesFromACLToken,
}, },
}, },
indexRoles: { indexRoles: {
Name: indexRoles, Name: indexRoles,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerMulti{ Indexer: indexerMulti[Query, *structs.ACLToken]{
readIndex: readIndex(indexFromUUIDQuery), readIndex: indexFromUUIDQuery,
writeIndexMulti: writeIndexMulti(indexRolesFromACLToken), writeIndexMulti: indexRolesFromACLToken,
}, },
}, },
indexAuthMethod: { indexAuthMethod: {
Name: indexAuthMethod, Name: indexAuthMethod,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[AuthMethodQuery, *structs.ACLToken]{
readIndex: readIndex(indexFromAuthMethodQuery), readIndex: indexFromAuthMethodQuery,
writeIndex: writeIndex(indexAuthMethodFromACLToken), writeIndex: indexAuthMethodFromACLToken,
}, },
}, },
indexLocality: { indexLocality: {
Name: indexLocality, Name: indexLocality,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[BoolQuery, *structs.ACLToken]{
readIndex: readIndex(indexFromBoolQuery), readIndex: indexFromBoolQuery,
writeIndex: writeIndex(indexLocalFromACLToken), writeIndex: indexLocalFromACLToken,
}, },
}, },
indexExpiresGlobal: { indexExpiresGlobal: {
Name: indexExpiresGlobal, Name: indexExpiresGlobal,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[*TimeQuery, *structs.ACLToken]{
readIndex: readIndex(indexFromTimeQuery), readIndex: indexFromTimeQuery,
writeIndex: writeIndex(indexExpiresGlobalFromACLToken), writeIndex: indexExpiresGlobalFromACLToken,
}, },
}, },
indexExpiresLocal: { indexExpiresLocal: {
Name: indexExpiresLocal, Name: indexExpiresLocal,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[*TimeQuery, *structs.ACLToken]{
readIndex: readIndex(indexFromTimeQuery), readIndex: indexFromTimeQuery,
writeIndex: writeIndex(indexExpiresLocalFromACLToken), writeIndex: indexExpiresLocalFromACLToken,
}, },
}, },
//DEPRECATED (ACL-Legacy-Compat) - This index is only needed while we support upgrading v1 to v2 acls // DEPRECATED (ACL-Legacy-Compat) - This index is only needed while we support upgrading v1 to v2 acls
// This table indexes all the ACL tokens that do not have an AccessorID // This table indexes all the ACL tokens that do not have an AccessorID
// TODO(ACL-Legacy-Compat): remove in phase 2 // TODO(ACL-Legacy-Compat): remove in phase 2
"needs-upgrade": { "needs-upgrade": {
@ -142,7 +141,7 @@ func policiesTableSchema() *memdb.TableSchema {
Name: indexName, Name: indexName,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[Query, *structs.ACLPolicy, any]{
readIndex: indexFromQuery, readIndex: indexFromQuery,
writeIndex: indexNameFromACLPolicy, writeIndex: indexNameFromACLPolicy,
prefixIndex: prefixIndexFromQuery, prefixIndex: prefixIndexFromQuery,
@ -152,12 +151,7 @@ func policiesTableSchema() *memdb.TableSchema {
} }
} }
func indexNameFromACLPolicy(raw interface{}) ([]byte, error) { func indexNameFromACLPolicy(p *structs.ACLPolicy) ([]byte, error) {
p, ok := raw.(*structs.ACLPolicy)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLPolicy index", raw)
}
if p.Name == "" { if p.Name == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -183,7 +177,7 @@ func rolesTableSchema() *memdb.TableSchema {
Name: indexName, Name: indexName,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[Query, *structs.ACLRole, any]{
readIndex: indexFromQuery, readIndex: indexFromQuery,
writeIndex: indexNameFromACLRole, writeIndex: indexNameFromACLRole,
prefixIndex: prefixIndexFromQuery, prefixIndex: prefixIndexFromQuery,
@ -194,7 +188,7 @@ func rolesTableSchema() *memdb.TableSchema {
// Need to allow missing for the anonymous token // Need to allow missing for the anonymous token
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerMulti{ Indexer: indexerMulti[Query, *structs.ACLRole]{
readIndex: indexFromUUIDQuery, readIndex: indexFromUUIDQuery,
writeIndexMulti: multiIndexPolicyFromACLRole, writeIndexMulti: multiIndexPolicyFromACLRole,
}, },
@ -203,75 +197,43 @@ func rolesTableSchema() *memdb.TableSchema {
} }
} }
func indexNameFromACLRole(raw interface{}) ([]byte, error) { func indexNameFromACLRole(r *structs.ACLRole) ([]byte, error) {
p, ok := raw.(*structs.ACLRole) if r.Name == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLRole index", raw)
}
if p.Name == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(p.Name)) b.String(strings.ToLower(r.Name))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromUUIDQuery(raw interface{}) ([]byte, error) { func indexFromUUIDQuery(q Query) ([]byte, error) {
q, ok := raw.(Query)
if !ok {
return nil, fmt.Errorf("unexpected type %T for UUIDQuery index", raw)
}
return uuidStringToBytes(q.Value) return uuidStringToBytes(q.Value)
} }
func prefixIndexFromUUIDQuery(arg interface{}) ([]byte, error) { func prefixIndexFromUUIDWithPeerQuery(q Query) ([]byte, error) {
switch v := arg.(type) { var b indexBuilder
case *acl.EnterpriseMeta: peername := q.PeerOrEmpty()
return nil, nil if peername == "" {
case acl.EnterpriseMeta: b.String(structs.LocalPeerKeyword)
return nil, nil } else {
case Query: b.String(strings.ToLower(peername))
return variableLengthUUIDStringToBytes(v.Value)
} }
uuidBytes, err := variableLengthUUIDStringToBytes(q.Value)
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) if err != nil {
return nil, err
}
return append(b.Bytes(), uuidBytes...), nil
} }
func prefixIndexFromUUIDWithPeerQuery(arg interface{}) ([]byte, error) { func multiIndexPolicyFromACLRole(r *structs.ACLRole) ([][]byte, error) {
switch v := arg.(type) { count := len(r.Policies)
case Query:
var b indexBuilder
peername := v.PeerOrEmpty()
if peername == "" {
b.String(structs.LocalPeerKeyword)
} else {
b.String(strings.ToLower(peername))
}
uuidBytes, err := variableLengthUUIDStringToBytes(v.Value)
if err != nil {
return nil, err
}
return append(b.Bytes(), uuidBytes...), nil
}
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
}
func multiIndexPolicyFromACLRole(raw interface{}) ([][]byte, error) {
role, ok := raw.(*structs.ACLRole)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLRole index", raw)
}
count := len(role.Policies)
if count == 0 { if count == 0 {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
vals := make([][]byte, 0, count) vals := make([][]byte, 0, count)
for _, link := range role.Policies { for _, link := range r.Policies {
v, err := uuidStringToBytes(link.ID) v, err := uuidStringToBytes(link.ID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -290,16 +252,16 @@ func bindingRulesTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[string, *structs.ACLBindingRule]{
readIndex: readIndex(indexFromUUIDString), readIndex: indexFromUUIDString,
writeIndex: writeIndex(indexIDFromACLBindingRule), writeIndex: indexIDFromACLBindingRule,
}, },
}, },
indexAuthMethod: { indexAuthMethod: {
Name: indexAuthMethod, Name: indexAuthMethod,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.ACLBindingRule]{
readIndex: indexFromQuery, readIndex: indexFromQuery,
writeIndex: indexAuthMethodFromACLBindingRule, writeIndex: indexAuthMethodFromACLBindingRule,
}, },
@ -308,12 +270,8 @@ func bindingRulesTableSchema() *memdb.TableSchema {
} }
} }
func indexIDFromACLBindingRule(raw interface{}) ([]byte, error) { func indexIDFromACLBindingRule(r *structs.ACLBindingRule) ([]byte, error) {
p, ok := raw.(*structs.ACLBindingRule) vv, err := uuidStringToBytes(r.ID)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLBindingRule index", raw)
}
vv, err := uuidStringToBytes(p.ID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -321,27 +279,18 @@ func indexIDFromACLBindingRule(raw interface{}) ([]byte, error) {
return vv, err return vv, err
} }
func indexAuthMethodFromACLBindingRule(raw interface{}) ([]byte, error) { func indexAuthMethodFromACLBindingRule(r *structs.ACLBindingRule) ([]byte, error) {
p, ok := raw.(*structs.ACLBindingRule) if r.AuthMethod == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLBindingRule index", raw)
}
if p.AuthMethod == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(p.AuthMethod)) b.String(strings.ToLower(r.AuthMethod))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromUUIDString(raw interface{}) ([]byte, error) { func indexFromUUIDString(raw string) ([]byte, error) {
index, ok := raw.(string) uuid, err := uuidStringToBytes(raw)
if !ok {
return nil, fmt.Errorf("unexpected type %T for UUID string index", raw)
}
uuid, err := uuidStringToBytes(index)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -350,17 +299,12 @@ func indexFromUUIDString(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexAccessorIDFromACLToken(raw interface{}) ([]byte, error) { func indexAccessorIDFromACLToken(t *structs.ACLToken) ([]byte, error) {
p, ok := raw.(*structs.ACLToken) if t.AccessorID == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
}
if p.AccessorID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
uuid, err := uuidStringToBytes(p.AccessorID) uuid, err := uuidStringToBytes(t.AccessorID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -369,37 +313,23 @@ func indexAccessorIDFromACLToken(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexSecretIDFromACLToken(raw interface{}) ([]byte, error) { func indexSecretIDFromACLToken(t *structs.ACLToken) ([]byte, error) {
p, ok := raw.(*structs.ACLToken) if t.SecretID == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
}
if p.SecretID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(p.SecretID) b.String(t.SecretID)
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromStringCaseSensitive(raw interface{}) ([]byte, error) { func indexFromStringCaseSensitive(s string) ([]byte, error) {
q, ok := raw.(string)
if !ok {
return nil, fmt.Errorf("unexpected type %T for string prefix query", raw)
}
var b indexBuilder var b indexBuilder
b.String(q) b.String(s)
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexPoliciesFromACLToken(raw interface{}) ([][]byte, error) { func indexPoliciesFromACLToken(token *structs.ACLToken) ([][]byte, error) {
token, ok := raw.(*structs.ACLToken)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
}
links := token.Policies links := token.Policies
numLinks := len(links) numLinks := len(links)
@ -420,11 +350,7 @@ func indexPoliciesFromACLToken(raw interface{}) ([][]byte, error) {
return vals, nil return vals, nil
} }
func indexRolesFromACLToken(raw interface{}) ([][]byte, error) { func indexRolesFromACLToken(token *structs.ACLToken) ([][]byte, error) {
token, ok := raw.(*structs.ACLToken)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
}
links := token.Roles links := token.Roles
numLinks := len(links) numLinks := len(links)
@ -445,63 +371,45 @@ func indexRolesFromACLToken(raw interface{}) ([][]byte, error) {
return vals, nil return vals, nil
} }
func indexFromBoolQuery(raw interface{}) ([]byte, error) { func indexFromBoolQuery(q BoolQuery) ([]byte, error) {
q, ok := raw.(BoolQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for BoolQuery index", raw)
}
var b indexBuilder var b indexBuilder
b.Bool(q.Value) b.Bool(q.Value)
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexLocalFromACLToken(raw interface{}) ([]byte, error) { func indexLocalFromACLToken(token *structs.ACLToken) ([]byte, error) {
p, ok := raw.(*structs.ACLToken)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLPolicy index", raw)
}
var b indexBuilder var b indexBuilder
b.Bool(p.Local) b.Bool(token.Local)
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromTimeQuery(arg interface{}) ([]byte, error) { func indexFromTimeQuery(q *TimeQuery) ([]byte, error) {
p, ok := arg.(*TimeQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for TimeQuery index", arg)
}
var b indexBuilder var b indexBuilder
b.Time(p.Value) b.Time(q.Value)
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexExpiresLocalFromACLToken(raw interface{}) ([]byte, error) { func indexExpiresLocalFromACLToken(token *structs.ACLToken) ([]byte, error) {
return indexExpiresFromACLToken(raw, true) return indexExpiresFromACLToken(token, true)
} }
func indexExpiresGlobalFromACLToken(raw interface{}) ([]byte, error) { func indexExpiresGlobalFromACLToken(token *structs.ACLToken) ([]byte, error) {
return indexExpiresFromACLToken(raw, false) return indexExpiresFromACLToken(token, false)
} }
func indexExpiresFromACLToken(raw interface{}, local bool) ([]byte, error) { func indexExpiresFromACLToken(t *structs.ACLToken, local bool) ([]byte, error) {
p, ok := raw.(*structs.ACLToken) if t.Local != local {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLToken index", raw)
}
if p.Local != local {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
if !p.HasExpirationTime() { if !t.HasExpirationTime() {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
if p.ExpirationTime.Unix() < 0 { if t.ExpirationTime.Unix() < 0 {
return nil, fmt.Errorf("token expiration time cannot be before the unix epoch: %s", p.ExpirationTime) return nil, fmt.Errorf("token expiration time cannot be before the unix epoch: %s", t.ExpirationTime)
} }
var b indexBuilder var b indexBuilder
b.Time(*p.ExpirationTime) b.Time(*t.ExpirationTime)
return b.Bytes(), nil return b.Bytes(), nil
} }
@ -513,7 +421,7 @@ func authMethodsTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.ACLAuthMethod]{
readIndex: indexFromQuery, readIndex: indexFromQuery,
writeIndex: indexNameFromACLAuthMethod, writeIndex: indexNameFromACLAuthMethod,
}, },
@ -522,17 +430,12 @@ func authMethodsTableSchema() *memdb.TableSchema {
} }
} }
func indexNameFromACLAuthMethod(raw interface{}) ([]byte, error) { func indexNameFromACLAuthMethod(m *structs.ACLAuthMethod) ([]byte, error) {
p, ok := raw.(*structs.ACLAuthMethod) if m.Name == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ACLAuthMethod index", raw)
}
if p.Name == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(p.Name)) b.String(strings.ToLower(m.Name))
return b.Bytes(), nil return b.Bytes(), nil
} }

View File

@ -7,14 +7,14 @@ import (
"testing" "testing"
"time" "time"
memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid" "github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib"
pbacl "github.com/hashicorp/consul/proto/pbacl" "github.com/hashicorp/consul/proto/pbacl"
) )
const ( const (
@ -3702,18 +3702,18 @@ func TestTokenPoliciesIndex(t *testing.T) {
Name: "global", Name: "global",
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[*TimeQuery, *structs.ACLToken]{
readIndex: readIndex(indexFromTimeQuery), readIndex: indexFromTimeQuery,
writeIndex: writeIndex(indexExpiresGlobalFromACLToken), writeIndex: indexExpiresGlobalFromACLToken,
}, },
} }
localIndex := &memdb.IndexSchema{ localIndex := &memdb.IndexSchema{
Name: "local", Name: "local",
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[*TimeQuery, *structs.ACLToken]{
readIndex: readIndex(indexFromTimeQuery), readIndex: indexFromTimeQuery,
writeIndex: writeIndex(indexExpiresLocalFromACLToken), writeIndex: indexExpiresLocalFromACLToken,
}, },
} }
schema := &memdb.DBSchema{ schema := &memdb.DBSchema{

View File

@ -7,7 +7,7 @@ import (
"reflect" "reflect"
"strings" "strings"
memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/mitchellh/copystructure" "github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
@ -17,9 +17,15 @@ import (
"github.com/hashicorp/consul/types" "github.com/hashicorp/consul/types"
) )
// indexServiceExtinction keeps track of the last raft index when the last instance const (
// of any service was unregistered. This is used by blocking queries on missing services. // indexServiceExtinction keeps track of the last raft index when the last instance
const indexServiceExtinction = "service_last_extinction" // of any service was unregistered. This is used by blocking queries on missing services.
indexServiceExtinction = "service_last_extinction"
// indexNodeExtinction keeps track of the last raft index when the last instance
// of any node was unregistered. This is used by blocking queries on missing nodes.
indexNodeExtinction = "node_last_extinction"
)
const ( const (
// minUUIDLookupLen is used as a minimum length of a node name required before // minUUIDLookupLen is used as a minimum length of a node name required before
@ -414,8 +420,8 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
// We are actually renaming a node, remove its reference first // We are actually renaming a node, remove its reference first
err := s.deleteNodeTxn(tx, idx, n.Node, n.GetEnterpriseMeta(), n.PeerName) err := s.deleteNodeTxn(tx, idx, n.Node, n.GetEnterpriseMeta(), n.PeerName)
if err != nil { if err != nil {
return fmt.Errorf("Error while renaming Node ID: %q (%s) from %s to %s", return fmt.Errorf("Error while renaming Node ID: %q (%s) from %s to %s: %w",
node.ID, node.Address, n.Node, node.Node) node.ID, node.Address, n.Node, node.Node, err)
} }
} }
} else { } else {
@ -764,6 +770,15 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta
return fmt.Errorf("failed updating index: %s", err) return fmt.Errorf("failed updating index: %s", err)
} }
// Clean up node entry from index table
if err := tx.Delete(tableIndex, &IndexEntry{Key: nodeIndexName(nodeName, entMeta, node.PeerName)}); err != nil {
return fmt.Errorf("failed deleting nodeIndex %q: %w", nodeIndexName(nodeName, entMeta, node.PeerName), err)
}
if err := catalogUpdateNodeExtinctionIndex(tx, idx, entMeta, node.PeerName); err != nil {
return err
}
if peerName == "" { if peerName == "" {
// Invalidate any sessions for this node. // Invalidate any sessions for this node.
toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault()) toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault())
@ -857,9 +872,10 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
return fmt.Errorf("failed updating gateway mapping: %s", err) return fmt.Errorf("failed updating gateway mapping: %s", err)
} }
} }
} // Only upsert KindServiceName if service is local
if err := upsertKindServiceName(tx, idx, svc.Kind, svc.CompoundServiceName()); err != nil { if err := upsertKindServiceName(tx, idx, svc.Kind, svc.CompoundServiceName()); err != nil {
return fmt.Errorf("failed to persist service name: %v", err) return fmt.Errorf("failed to persist service name: %v", err)
}
} }
// Update upstream/downstream mappings if it's a connect service // Update upstream/downstream mappings if it's a connect service
@ -881,7 +897,8 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
} }
sn := structs.ServiceName{Name: service, EnterpriseMeta: svc.EnterpriseMeta} sn := structs.ServiceName{Name: service, EnterpriseMeta: svc.EnterpriseMeta}
vip, err := assignServiceVirtualIP(tx, sn) psn := structs.PeeredServiceName{Peer: svc.PeerName, ServiceName: sn}
vip, err := assignServiceVirtualIP(tx, psn)
if err != nil { if err != nil {
return fmt.Errorf("failed updating virtual IP: %s", err) return fmt.Errorf("failed updating virtual IP: %s", err)
} }
@ -961,9 +978,8 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
// assignServiceVirtualIP assigns a virtual IP to the target service and updates // assignServiceVirtualIP assigns a virtual IP to the target service and updates
// the global virtual IP counter if necessary. // the global virtual IP counter if necessary.
func assignServiceVirtualIP(tx WriteTxn, sn structs.ServiceName) (string, error) { func assignServiceVirtualIP(tx WriteTxn, psn structs.PeeredServiceName) (string, error) {
// TODO(peering): support VIPs serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, psn)
serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn)
if err != nil { if err != nil {
return "", fmt.Errorf("failed service virtual IP lookup: %s", err) return "", fmt.Errorf("failed service virtual IP lookup: %s", err)
} }
@ -1034,7 +1050,7 @@ func assignServiceVirtualIP(tx WriteTxn, sn structs.ServiceName) (string, error)
} }
assignedVIP := ServiceVirtualIP{ assignedVIP := ServiceVirtualIP{
Service: sn, Service: psn,
IP: newEntry.IP, IP: newEntry.IP,
} }
if err := tx.Insert(tableServiceVirtualIPs, assignedVIP); err != nil { if err := tx.Insert(tableServiceVirtualIPs, assignedVIP); err != nil {
@ -1683,9 +1699,6 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac
entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() entMeta = structs.DefaultEnterpriseMetaInDefaultPartition()
} }
// Get the table index.
idx := catalogMaxIndex(tx, entMeta, peerName, false)
// Query the node by node name // Query the node by node name
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{
Value: nodeNameOrID, Value: nodeNameOrID,
@ -1712,16 +1725,16 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac
}) })
if err != nil { if err != nil {
ws.Add(watchCh) ws.Add(watchCh)
// TODO(sean@): We could/should log an error re: the uuid_prefix lookup idx := catalogNodeLastExtinctionIndex(tx, entMeta, peerName)
// failing once a logger has been introduced to the catalog. return true, idx, nil, nil, nil
return true, 0, nil, nil, nil
} }
n = iter.Next() n = iter.Next()
if n == nil { if n == nil {
// No nodes matched, even with the Node ID: add a watch on the node name. // No nodes matched, even with the Node ID: add a watch on the node name.
ws.Add(watchCh) ws.Add(watchCh)
return true, 0, nil, nil, nil idx := catalogNodeLastExtinctionIndex(tx, entMeta, peerName)
return true, idx, nil, nil, nil
} }
idWatchCh := iter.WatchCh() idWatchCh := iter.WatchCh()
@ -1745,6 +1758,9 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac
} }
ws.Add(services.WatchCh()) ws.Add(services.WatchCh())
// Get the table index.
idx := catalogNodeMaxIndex(tx, nodeName, entMeta, peerName)
return false, idx, node, services, nil return false, idx, node, services, nil
} }
@ -1862,10 +1878,6 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
return nil return nil
} }
// TODO: accept a non-pointer value for EnterpriseMeta
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMetaInDefaultPartition()
}
// Delete any checks associated with the service. This will invalidate // Delete any checks associated with the service. This will invalidate
// sessions as necessary. // sessions as necessary.
nsq := NodeServiceQuery{ nsq := NodeServiceQuery{
@ -1902,10 +1914,17 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
svc := service.(*structs.ServiceNode) svc := service.(*structs.ServiceNode)
if err := catalogUpdateServicesIndexes(tx, idx, entMeta, svc.PeerName); err != nil { if err := catalogUpdateServicesIndexes(tx, idx, entMeta, svc.PeerName); err != nil {
return err return fmt.Errorf("failed updating services indexes: %w", err)
} }
if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil {
return err return fmt.Errorf("failed updating service-kind indexes: %w", err)
}
// Update the node indexes as the service information is included in node catalog queries.
if err := catalogUpdateNodesIndexes(tx, idx, entMeta, peerName); err != nil {
return fmt.Errorf("failed updating nodes indexes: %w", err)
}
if err := catalogUpdateNodeIndexes(tx, idx, nodeName, entMeta, peerName); err != nil {
return fmt.Errorf("failed updating node indexes: %w", err)
} }
name := svc.CompoundServiceName() name := svc.CompoundServiceName()
@ -1930,7 +1949,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
_, serviceIndex, err := catalogServiceMaxIndex(tx, svc.ServiceName, entMeta, svc.PeerName) _, serviceIndex, err := catalogServiceMaxIndex(tx, svc.ServiceName, entMeta, svc.PeerName)
if err == nil && serviceIndex != nil { if err == nil && serviceIndex != nil {
// we found service.<serviceName> index, garbage collect it // we found service.<serviceName> index, garbage collect it
if errW := tx.Delete(tableIndex, serviceIndex); errW != nil { if err := tx.Delete(tableIndex, serviceIndex); err != nil {
return fmt.Errorf("[FAILED] deleting serviceIndex %s: %s", svc.ServiceName, err) return fmt.Errorf("[FAILED] deleting serviceIndex %s: %s", svc.ServiceName, err)
} }
} }
@ -1943,7 +1962,8 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err) return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err)
} }
} }
if err := freeServiceVirtualIP(tx, svc.ServiceName, nil, entMeta); err != nil { psn := structs.PeeredServiceName{Peer: svc.PeerName, ServiceName: name}
if err := freeServiceVirtualIP(tx, psn, nil); err != nil {
return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err) return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err)
} }
if err := cleanupKindServiceName(tx, idx, svc.CompoundServiceName(), svc.ServiceKind); err != nil { if err := cleanupKindServiceName(tx, idx, svc.CompoundServiceName(), svc.ServiceKind); err != nil {
@ -1959,7 +1979,11 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
// freeServiceVirtualIP is used to free a virtual IP for a service after the last instance // freeServiceVirtualIP is used to free a virtual IP for a service after the last instance
// is removed. // is removed.
func freeServiceVirtualIP(tx WriteTxn, svc string, excludeGateway *structs.ServiceName, entMeta *acl.EnterpriseMeta) error { func freeServiceVirtualIP(
tx WriteTxn,
psn structs.PeeredServiceName,
excludeGateway *structs.ServiceName,
) error {
supported, err := virtualIPsSupported(tx, nil) supported, err := virtualIPsSupported(tx, nil)
if err != nil { if err != nil {
return err return err
@ -1969,15 +1993,14 @@ func freeServiceVirtualIP(tx WriteTxn, svc string, excludeGateway *structs.Servi
} }
// Don't deregister the virtual IP if at least one terminating gateway still references this service. // Don't deregister the virtual IP if at least one terminating gateway still references this service.
sn := structs.NewServiceName(svc, entMeta)
termGatewaySupported, err := terminatingGatewayVirtualIPsSupported(tx, nil) termGatewaySupported, err := terminatingGatewayVirtualIPsSupported(tx, nil)
if err != nil { if err != nil {
return err return err
} }
if termGatewaySupported { if termGatewaySupported {
svcGateways, err := tx.Get(tableGatewayServices, indexService, sn) svcGateways, err := tx.Get(tableGatewayServices, indexService, psn.ServiceName)
if err != nil { if err != nil {
return fmt.Errorf("failed gateway lookup for %q: %s", sn.Name, err) return fmt.Errorf("failed gateway lookup for %q: %s", psn.ServiceName.Name, err)
} }
for service := svcGateways.Next(); service != nil; service = svcGateways.Next() { for service := svcGateways.Next(); service != nil; service = svcGateways.Next() {
@ -1990,7 +2013,7 @@ func freeServiceVirtualIP(tx WriteTxn, svc string, excludeGateway *structs.Servi
} }
} }
serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn) serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, psn)
if err != nil { if err != nil {
return fmt.Errorf("failed service virtual IP lookup: %s", err) return fmt.Errorf("failed service virtual IP lookup: %s", err)
} }
@ -2857,11 +2880,11 @@ func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.
return lib.MaxUint64(maxIdx, idx), results, nil return lib.MaxUint64(maxIdx, idx), results, nil
} }
func (s *Store) VirtualIPForService(sn structs.ServiceName) (string, error) { func (s *Store) VirtualIPForService(psn structs.PeeredServiceName) (string, error) {
tx := s.db.Txn(false) tx := s.db.Txn(false)
defer tx.Abort() defer tx.Abort()
vip, err := tx.First(tableServiceVirtualIPs, indexID, sn) vip, err := tx.First(tableServiceVirtualIPs, indexID, psn)
if err != nil { if err != nil {
return "", fmt.Errorf("failed service virtual IP lookup: %s", err) return "", fmt.Errorf("failed service virtual IP lookup: %s", err)
} }
@ -3314,7 +3337,9 @@ func getTermGatewayVirtualIPs(tx WriteTxn, services []structs.LinkedService, ent
addrs := make(map[string]structs.ServiceAddress, len(services)) addrs := make(map[string]structs.ServiceAddress, len(services))
for _, s := range services { for _, s := range services {
sn := structs.ServiceName{Name: s.Name, EnterpriseMeta: *entMeta} sn := structs.ServiceName{Name: s.Name, EnterpriseMeta: *entMeta}
vip, err := assignServiceVirtualIP(tx, sn) // Terminating Gateways cannot route to services in peered clusters
psn := structs.PeeredServiceName{ServiceName: sn, Peer: structs.DefaultPeerKeyword}
vip, err := assignServiceVirtualIP(tx, psn)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -3391,7 +3416,8 @@ func updateTerminatingGatewayVirtualIPs(tx WriteTxn, idx uint64, conf *structs.T
return err return err
} }
if len(nodes) == 0 { if len(nodes) == 0 {
if err := freeServiceVirtualIP(tx, sn.Name, &gatewayName, &sn.EnterpriseMeta); err != nil { psn := structs.PeeredServiceName{Peer: structs.DefaultPeerKeyword, ServiceName: sn}
if err := freeServiceVirtualIP(tx, psn, &gatewayName); err != nil {
return err return err
} }
} }

View File

@ -7,7 +7,7 @@ import (
"fmt" "fmt"
"strings" "strings"
memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
@ -24,8 +24,12 @@ func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerN
return peeredIndexEntryName(base, peerName) return peeredIndexEntryName(base, peerName)
} }
func nodeIndexName(name string, _ *acl.EnterpriseMeta, peerName string) string {
return peeredIndexEntryName(fmt.Sprintf("node.%s", name), peerName)
}
func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
// overall nodes index // overall nodes index for snapshot and ListNodes RPC
if err := indexUpdateMaxTxn(tx, idx, tableNodes); err != nil { if err := indexUpdateMaxTxn(tx, idx, tableNodes); err != nil {
return fmt.Errorf("failed updating index: %s", err) return fmt.Errorf("failed updating index: %s", err)
} }
@ -38,12 +42,22 @@ func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, p
return nil return nil
} }
// catalogUpdateNodeIndexes upserts the max index for a single node
func catalogUpdateNodeIndexes(tx WriteTxn, idx uint64, nodeName string, _ *acl.EnterpriseMeta, peerName string) error {
// per-node index
if err := indexUpdateMaxTxn(tx, idx, nodeIndexName(nodeName, nil, peerName)); err != nil {
return fmt.Errorf("failed updating node index: %w", err)
}
return nil
}
// catalogUpdateServicesIndexes upserts the max index for the entire services table with varying levels // catalogUpdateServicesIndexes upserts the max index for the entire services table with varying levels
// of granularity (no-op if `idx` is lower than what exists for that index key): // of granularity (no-op if `idx` is lower than what exists for that index key):
// - all services // - all services
// - all services in a specified peer (including internal) // - all services in a specified peer (including internal)
func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
// overall services index // overall services index for snapshot
if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil { if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil {
return fmt.Errorf("failed updating index for services table: %w", err) return fmt.Errorf("failed updating index for services table: %w", err)
} }
@ -84,14 +98,16 @@ func catalogUpdateServiceIndexes(tx WriteTxn, idx uint64, serviceName string, _
} }
func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
if err := indexUpdateMaxTxn(tx, idx, indexServiceExtinction); err != nil {
return fmt.Errorf("failed updating missing service extinction index: %w", err)
}
// update the peer index
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(indexServiceExtinction, peerName)); err != nil { if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(indexServiceExtinction, peerName)); err != nil {
return fmt.Errorf("failed updating missing service extinction peered index: %w", err) return fmt.Errorf("failed updating missing service extinction peered index: %w", err)
} }
return nil
}
func catalogUpdateNodeExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(indexNodeExtinction, peerName)); err != nil {
return fmt.Errorf("failed updating missing node extinction peered index: %w", err)
}
return nil return nil
} }
@ -105,7 +121,10 @@ func catalogInsertNode(tx WriteTxn, node *structs.Node) error {
} }
if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta(), node.PeerName); err != nil { if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta(), node.PeerName); err != nil {
return err return fmt.Errorf("failed updating nodes indexes: %w", err)
}
if err := catalogUpdateNodeIndexes(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta(), node.PeerName); err != nil {
return fmt.Errorf("failed updating node indexes: %w", err)
} }
// Update the node's service indexes as the node information is included // Update the node's service indexes as the node information is included
@ -125,15 +144,23 @@ func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error {
} }
if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta, svc.PeerName); err != nil { if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta, svc.PeerName); err != nil {
return err return fmt.Errorf("failed updating services indexes: %w", err)
} }
if err := catalogUpdateServiceIndexes(tx, svc.ModifyIndex, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { if err := catalogUpdateServiceIndexes(tx, svc.ModifyIndex, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil {
return err return fmt.Errorf("failed updating service indexes: %w", err)
} }
if err := catalogUpdateServiceKindIndexes(tx, svc.ModifyIndex, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { if err := catalogUpdateServiceKindIndexes(tx, svc.ModifyIndex, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil {
return err return fmt.Errorf("failed updating service-kind indexes: %w", err)
}
// Update the node indexes as the service information is included in node catalog queries.
if err := catalogUpdateNodesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta, svc.PeerName); err != nil {
return fmt.Errorf("failed updating nodes indexes: %w", err)
}
if err := catalogUpdateNodeIndexes(tx, svc.ModifyIndex, svc.Node, &svc.EnterpriseMeta, svc.PeerName); err != nil {
return fmt.Errorf("failed updating node indexes: %w", err)
} }
return nil return nil
@ -143,6 +170,14 @@ func catalogNodesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) ui
return maxIndexTxn(tx, peeredIndexEntryName(tableNodes, peerName)) return maxIndexTxn(tx, peeredIndexEntryName(tableNodes, peerName))
} }
func catalogNodeMaxIndex(tx ReadTxn, nodeName string, _ *acl.EnterpriseMeta, peerName string) uint64 {
return maxIndexTxn(tx, nodeIndexName(nodeName, nil, peerName))
}
func catalogNodeLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 {
return maxIndexTxn(tx, peeredIndexEntryName(indexNodeExtinction, peerName))
}
func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 {
return maxIndexTxn(tx, peeredIndexEntryName(tableServices, peerName)) return maxIndexTxn(tx, peeredIndexEntryName(tableServices, peerName))
} }
@ -185,7 +220,6 @@ func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string, checks
} }
func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 { func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 {
// TODO(peering_indexes): pipe peerName here
if checks { if checks {
return maxIndexWatchTxn(tx, ws, return maxIndexWatchTxn(tx, ws,
peeredIndexEntryName(tableChecks, peerName), peeredIndexEntryName(tableChecks, peerName),
@ -200,7 +234,7 @@ func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta,
} }
func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
// update the universal index entry // update the overall index entry for snapshot
if err := indexUpdateMaxTxn(tx, idx, tableChecks); err != nil { if err := indexUpdateMaxTxn(tx, idx, tableChecks); err != nil {
return fmt.Errorf("failed updating index: %s", err) return fmt.Errorf("failed updating index: %s", err)
} }
@ -265,3 +299,15 @@ func updateKindServiceNamesIndex(tx WriteTxn, idx uint64, kind structs.ServiceKi
} }
return nil return nil
} }
func indexFromPeeredServiceName(psn structs.PeeredServiceName) ([]byte, error) {
peer := structs.LocalPeerKeyword
if psn.Peer != "" {
peer = psn.Peer
}
var b indexBuilder
b.String(strings.ToLower(peer))
b.String(strings.ToLower(psn.ServiceName.Name))
return b.Bytes(), nil
}

View File

@ -669,8 +669,19 @@ func testIndexerTableServices() map[string]indexerTestCase {
func testIndexerTableServiceVirtualIPs() map[string]indexerTestCase { func testIndexerTableServiceVirtualIPs() map[string]indexerTestCase {
obj := ServiceVirtualIP{ obj := ServiceVirtualIP{
Service: structs.ServiceName{ Service: structs.PeeredServiceName{
Name: "foo", ServiceName: structs.ServiceName{
Name: "foo",
},
},
IP: net.ParseIP("127.0.0.1"),
}
peeredObj := ServiceVirtualIP{
Service: structs.PeeredServiceName{
ServiceName: structs.ServiceName{
Name: "foo",
},
Peer: "Billing",
}, },
IP: net.ParseIP("127.0.0.1"), IP: net.ParseIP("127.0.0.1"),
} }
@ -678,14 +689,33 @@ func testIndexerTableServiceVirtualIPs() map[string]indexerTestCase {
return map[string]indexerTestCase{ return map[string]indexerTestCase{
indexID: { indexID: {
read: indexValue{ read: indexValue{
source: structs.ServiceName{ source: structs.PeeredServiceName{
Name: "foo", ServiceName: structs.ServiceName{
Name: "foo",
},
}, },
expected: []byte("foo\x00"), expected: []byte("internal\x00foo\x00"),
}, },
write: indexValue{ write: indexValue{
source: obj, source: obj,
expected: []byte("foo\x00"), expected: []byte("internal\x00foo\x00"),
},
extra: []indexerTestCase{
{
read: indexValue{
source: structs.PeeredServiceName{
ServiceName: structs.ServiceName{
Name: "foo",
},
Peer: "Billing",
},
expected: []byte("billing\x00foo\x00"),
},
write: indexValue{
source: peeredObj,
expected: []byte("billing\x00foo\x00"),
},
},
}, },
}, },
} }

View File

@ -47,7 +47,7 @@ func nodesTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[Query, *structs.Node, any]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexFromNode), writeIndex: indexWithPeerName(indexFromNode),
prefixIndex: prefixIndexFromQueryWithPeer, prefixIndex: prefixIndexFromQueryWithPeer,
@ -57,7 +57,7 @@ func nodesTableSchema() *memdb.TableSchema {
Name: indexUUID, Name: indexUUID,
AllowMissing: true, AllowMissing: true,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[Query, *structs.Node, Query]{
readIndex: indexWithPeerName(indexFromUUIDQuery), readIndex: indexWithPeerName(indexFromUUIDQuery),
writeIndex: indexWithPeerName(indexIDFromNode), writeIndex: indexWithPeerName(indexIDFromNode),
prefixIndex: prefixIndexFromUUIDWithPeerQuery, prefixIndex: prefixIndexFromUUIDWithPeerQuery,
@ -67,7 +67,7 @@ func nodesTableSchema() *memdb.TableSchema {
Name: indexMeta, Name: indexMeta,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerMulti{ Indexer: indexerMulti[KeyValueQuery, *structs.Node]{
readIndex: indexWithPeerName(indexFromKeyValueQuery), readIndex: indexWithPeerName(indexFromKeyValueQuery),
writeIndexMulti: multiIndexWithPeerName(indexMetaFromNode), writeIndexMulti: multiIndexWithPeerName(indexMetaFromNode),
}, },
@ -76,12 +76,7 @@ func nodesTableSchema() *memdb.TableSchema {
} }
} }
func indexFromNode(raw interface{}) ([]byte, error) { func indexFromNode(n *structs.Node) ([]byte, error) {
n, ok := raw.(*structs.Node)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
}
if n.Node == "" { if n.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -91,12 +86,7 @@ func indexFromNode(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexIDFromNode(raw interface{}) ([]byte, error) { func indexIDFromNode(n *structs.Node) ([]byte, error) {
n, ok := raw.(*structs.Node)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
}
if n.ID == "" { if n.ID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -109,12 +99,7 @@ func indexIDFromNode(raw interface{}) ([]byte, error) {
return v, nil return v, nil
} }
func indexMetaFromNode(raw interface{}) ([][]byte, error) { func indexMetaFromNode(n *structs.Node) ([][]byte, error) {
n, ok := raw.(*structs.Node)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
}
// NOTE: this is case-sensitive! // NOTE: this is case-sensitive!
vals := make([][]byte, 0, len(n.Meta)) vals := make([][]byte, 0, len(n.Meta))
@ -145,7 +130,7 @@ func servicesTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[NodeServiceQuery, *structs.ServiceNode, any]{
readIndex: indexWithPeerName(indexFromNodeServiceQuery), readIndex: indexWithPeerName(indexFromNodeServiceQuery),
writeIndex: indexWithPeerName(indexFromServiceNode), writeIndex: indexWithPeerName(indexFromServiceNode),
prefixIndex: prefixIndexFromQueryWithPeer, prefixIndex: prefixIndexFromQueryWithPeer,
@ -155,7 +140,7 @@ func servicesTableSchema() *memdb.TableSchema {
Name: indexNode, Name: indexNode,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, nodeIdentifier]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexFromNodeIdentity), writeIndex: indexWithPeerName(indexFromNodeIdentity),
}, },
@ -164,7 +149,7 @@ func servicesTableSchema() *memdb.TableSchema {
Name: indexService, Name: indexService,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.ServiceNode]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexServiceNameFromServiceNode), writeIndex: indexWithPeerName(indexServiceNameFromServiceNode),
}, },
@ -173,7 +158,7 @@ func servicesTableSchema() *memdb.TableSchema {
Name: indexConnect, Name: indexConnect,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.ServiceNode]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexConnectNameFromServiceNode), writeIndex: indexWithPeerName(indexConnectNameFromServiceNode),
}, },
@ -182,7 +167,7 @@ func servicesTableSchema() *memdb.TableSchema {
Name: indexKind, Name: indexKind,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.ServiceNode]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexKindFromServiceNode), writeIndex: indexWithPeerName(indexKindFromServiceNode),
}, },
@ -191,24 +176,14 @@ func servicesTableSchema() *memdb.TableSchema {
} }
} }
func indexFromNodeServiceQuery(arg interface{}) ([]byte, error) { func indexFromNodeServiceQuery(q NodeServiceQuery) ([]byte, error) {
q, ok := arg.(NodeServiceQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for NodeServiceQuery index", arg)
}
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(q.Node)) b.String(strings.ToLower(q.Node))
b.String(strings.ToLower(q.Service)) b.String(strings.ToLower(q.Service))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromServiceNode(raw interface{}) ([]byte, error) { func indexFromServiceNode(n *structs.ServiceNode) ([]byte, error) {
n, ok := raw.(*structs.ServiceNode)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
}
if n.Node == "" { if n.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -219,14 +194,17 @@ func indexFromServiceNode(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromNodeIdentity(raw interface{}) ([]byte, error) { type nodeIdentifier interface {
n, ok := raw.(interface { partitionIndexable
NodeIdentity() structs.Identity peerIndexable
})
if !ok {
return nil, fmt.Errorf("unexpected type %T for index, type must provide NodeIdentity()", raw)
}
NodeIdentity() structs.Identity
}
var _ nodeIdentifier = (*structs.HealthCheck)(nil)
var _ nodeIdentifier = (*structs.ServiceNode)(nil)
func indexFromNodeIdentity(n nodeIdentifier) ([]byte, error) {
id := n.NodeIdentity() id := n.NodeIdentity()
if id.ID == "" { if id.ID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
@ -237,12 +215,7 @@ func indexFromNodeIdentity(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexServiceNameFromServiceNode(raw interface{}) ([]byte, error) { func indexServiceNameFromServiceNode(n *structs.ServiceNode) ([]byte, error) {
n, ok := raw.(*structs.ServiceNode)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
}
if n.Node == "" { if n.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -252,12 +225,7 @@ func indexServiceNameFromServiceNode(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexConnectNameFromServiceNode(raw interface{}) ([]byte, error) { func indexConnectNameFromServiceNode(n *structs.ServiceNode) ([]byte, error) {
n, ok := raw.(*structs.ServiceNode)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
}
name, ok := connectNameFromServiceNode(n) name, ok := connectNameFromServiceNode(n)
if !ok { if !ok {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
@ -284,33 +252,23 @@ func connectNameFromServiceNode(sn *structs.ServiceNode) (string, bool) {
} }
} }
func indexKindFromServiceNode(raw interface{}) ([]byte, error) { func indexKindFromServiceNode(n *structs.ServiceNode) ([]byte, error) {
n, ok := raw.(*structs.ServiceNode)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
}
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(string(n.ServiceKind))) b.String(strings.ToLower(string(n.ServiceKind)))
return b.Bytes(), nil return b.Bytes(), nil
} }
// indexWithPeerName adds peer name to the index. // indexWithPeerName adds peer name to the index.
func indexWithPeerName( func indexWithPeerName[T peerIndexable](
fn func(interface{}) ([]byte, error), fn func(T) ([]byte, error),
) func(interface{}) ([]byte, error) { ) func(T) ([]byte, error) {
return func(raw interface{}) ([]byte, error) { return func(e T) ([]byte, error) {
v, err := fn(raw) v, err := fn(e)
if err != nil { if err != nil {
return nil, err return nil, err
} }
n, ok := raw.(peerIndexable) peername := e.PeerOrEmpty()
if !ok {
return nil, fmt.Errorf("type must be peerIndexable: %T", raw)
}
peername := n.PeerOrEmpty()
if peername == "" { if peername == "" {
peername = structs.LocalPeerKeyword peername = structs.LocalPeerKeyword
} }
@ -322,20 +280,20 @@ func indexWithPeerName(
} }
// multiIndexWithPeerName adds peer name to multiple indices, and returns multiple indices. // multiIndexWithPeerName adds peer name to multiple indices, and returns multiple indices.
func multiIndexWithPeerName( func multiIndexWithPeerName[T any](
fn func(interface{}) ([][]byte, error), fn func(T) ([][]byte, error),
) func(interface{}) ([][]byte, error) { ) func(T) ([][]byte, error) {
return func(raw interface{}) ([][]byte, error) { return func(raw T) ([][]byte, error) {
n, ok := any(raw).(peerIndexable)
if !ok {
return nil, fmt.Errorf("type must be peerIndexable: %T", raw)
}
results, err := fn(raw) results, err := fn(raw)
if err != nil { if err != nil {
return nil, err return nil, err
} }
n, ok := raw.(peerIndexable)
if !ok {
return nil, fmt.Errorf("type must be peerIndexable: %T", raw)
}
peername := n.PeerOrEmpty() peername := n.PeerOrEmpty()
if peername == "" { if peername == "" {
peername = structs.LocalPeerKeyword peername = structs.LocalPeerKeyword
@ -361,7 +319,7 @@ func checksTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[NodeCheckQuery, *structs.HealthCheck, any]{
readIndex: indexWithPeerName(indexFromNodeCheckQuery), readIndex: indexWithPeerName(indexFromNodeCheckQuery),
writeIndex: indexWithPeerName(indexFromHealthCheck), writeIndex: indexWithPeerName(indexFromHealthCheck),
prefixIndex: prefixIndexFromQueryWithPeer, prefixIndex: prefixIndexFromQueryWithPeer,
@ -371,7 +329,7 @@ func checksTableSchema() *memdb.TableSchema {
Name: indexStatus, Name: indexStatus,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.HealthCheck]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexStatusFromHealthCheck), writeIndex: indexWithPeerName(indexStatusFromHealthCheck),
}, },
@ -380,7 +338,7 @@ func checksTableSchema() *memdb.TableSchema {
Name: indexService, Name: indexService,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.HealthCheck]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexServiceNameFromHealthCheck), writeIndex: indexWithPeerName(indexServiceNameFromHealthCheck),
}, },
@ -389,7 +347,7 @@ func checksTableSchema() *memdb.TableSchema {
Name: indexNode, Name: indexNode,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, nodeIdentifier]{
readIndex: indexWithPeerName(indexFromQuery), readIndex: indexWithPeerName(indexFromQuery),
writeIndex: indexWithPeerName(indexFromNodeIdentity), writeIndex: indexWithPeerName(indexFromNodeIdentity),
}, },
@ -398,7 +356,7 @@ func checksTableSchema() *memdb.TableSchema {
Name: indexNodeService, Name: indexNodeService,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[NodeServiceQuery, *structs.HealthCheck]{
readIndex: indexWithPeerName(indexFromNodeServiceQuery), readIndex: indexWithPeerName(indexFromNodeServiceQuery),
writeIndex: indexWithPeerName(indexNodeServiceFromHealthCheck), writeIndex: indexWithPeerName(indexNodeServiceFromHealthCheck),
}, },
@ -407,28 +365,18 @@ func checksTableSchema() *memdb.TableSchema {
} }
} }
func indexFromNodeCheckQuery(raw interface{}) ([]byte, error) { func indexFromNodeCheckQuery(q NodeCheckQuery) ([]byte, error) {
hc, ok := raw.(NodeCheckQuery) if q.Node == "" || q.CheckID == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for NodeCheckQuery index", raw)
}
if hc.Node == "" || hc.CheckID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(hc.Node)) b.String(strings.ToLower(q.Node))
b.String(strings.ToLower(hc.CheckID)) b.String(strings.ToLower(q.CheckID))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromHealthCheck(raw interface{}) ([]byte, error) { func indexFromHealthCheck(hc *structs.HealthCheck) ([]byte, error) {
hc, ok := raw.(*structs.HealthCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
}
if hc.Node == "" || hc.CheckID == "" { if hc.Node == "" || hc.CheckID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -439,12 +387,7 @@ func indexFromHealthCheck(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexNodeServiceFromHealthCheck(raw interface{}) ([]byte, error) { func indexNodeServiceFromHealthCheck(hc *structs.HealthCheck) ([]byte, error) {
hc, ok := raw.(*structs.HealthCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
}
if hc.Node == "" { if hc.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -455,12 +398,7 @@ func indexNodeServiceFromHealthCheck(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexStatusFromHealthCheck(raw interface{}) ([]byte, error) { func indexStatusFromHealthCheck(hc *structs.HealthCheck) ([]byte, error) {
hc, ok := raw.(*structs.HealthCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
}
if hc.Status == "" { if hc.Status == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -470,12 +408,7 @@ func indexStatusFromHealthCheck(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexServiceNameFromHealthCheck(raw interface{}) ([]byte, error) { func indexServiceNameFromHealthCheck(hc *structs.HealthCheck) ([]byte, error) {
hc, ok := raw.(*structs.HealthCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
}
if hc.ServiceName == "" { if hc.ServiceName == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -672,7 +605,7 @@ func (q NodeCheckQuery) PartitionOrDefault() string {
// ServiceVirtualIP is used to store a virtual IP associated with a service. // ServiceVirtualIP is used to store a virtual IP associated with a service.
// It is also used to store assigned virtual IPs when a snapshot is created. // It is also used to store assigned virtual IPs when a snapshot is created.
type ServiceVirtualIP struct { type ServiceVirtualIP struct {
Service structs.ServiceName Service structs.PeeredServiceName
IP net.IP IP net.IP
} }
@ -698,14 +631,22 @@ func serviceVirtualIPTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: &ServiceNameIndex{ Indexer: indexerSingle[structs.PeeredServiceName, ServiceVirtualIP]{
Field: "Service", readIndex: indexFromPeeredServiceName,
writeIndex: indexFromServiceVirtualIP,
}, },
}, },
}, },
} }
} }
func indexFromServiceVirtualIP(vip ServiceVirtualIP) ([]byte, error) {
if vip.Service.ServiceName.Name == "" {
return nil, errMissingValueForIndex
}
return indexFromPeeredServiceName(vip.Service)
}
func freeVirtualIPTableSchema() *memdb.TableSchema { func freeVirtualIPTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{ return &memdb.TableSchema{
Name: tableFreeVirtualIPs, Name: tableFreeVirtualIPs,
@ -761,7 +702,7 @@ func kindServiceNameTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[any, any]{
readIndex: indexFromKindServiceName, readIndex: indexFromKindServiceName,
writeIndex: indexFromKindServiceName, writeIndex: indexFromKindServiceName,
}, },
@ -770,7 +711,7 @@ func kindServiceNameTableSchema() *memdb.TableSchema {
Name: indexKind, Name: indexKind,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[enterpriseIndexable, enterpriseIndexable]{
readIndex: indexFromKindServiceNameKindOnly, readIndex: indexFromKindServiceNameKindOnly,
writeIndex: indexFromKindServiceNameKindOnly, writeIndex: indexFromKindServiceNameKindOnly,
}, },
@ -798,7 +739,7 @@ func (q KindServiceNameQuery) PartitionOrDefault() string {
return q.EnterpriseMeta.PartitionOrDefault() return q.EnterpriseMeta.PartitionOrDefault()
} }
func indexFromKindServiceNameKindOnly(raw interface{}) ([]byte, error) { func indexFromKindServiceNameKindOnly(raw enterpriseIndexable) ([]byte, error) {
switch x := raw.(type) { switch x := raw.(type) {
case *KindServiceName: case *KindServiceName:
var b indexBuilder var b indexBuilder

View File

@ -11,7 +11,7 @@ import (
"time" "time"
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -555,7 +555,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
) )
run := func(t *testing.T, peerName string) { run := func(t *testing.T, peerName string) {
verifyNode := func(t *testing.T, s *Store, nodeLookup string) { verifyNode := func(t *testing.T, s *Store, nodeLookup string, expectIdx uint64) {
idx, out, err := s.GetNode(nodeLookup, nil, peerName) idx, out, err := s.GetNode(nodeLookup, nil, peerName)
require.NoError(t, err) require.NoError(t, err)
byID := false byID := false
@ -566,7 +566,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
} }
require.NotNil(t, out) require.NotNil(t, out)
require.Equal(t, uint64(1), idx) require.Equal(t, expectIdx, idx)
require.Equal(t, "1.2.3.4", out.Address) require.Equal(t, "1.2.3.4", out.Address)
if byID { if byID {
@ -661,8 +661,8 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
require.NoError(t, restore.Commit()) require.NoError(t, restore.Commit())
// Retrieve the node and verify its contents. // Retrieve the node and verify its contents.
verifyNode(t, s, nodeID) verifyNode(t, s, nodeID, 1)
verifyNode(t, s, nodeName) verifyNode(t, s, nodeName, 1)
}) })
// Add in a service definition. // Add in a service definition.
@ -686,8 +686,8 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
require.NoError(t, restore.Commit()) require.NoError(t, restore.Commit())
// Verify that the service got registered. // Verify that the service got registered.
verifyNode(t, s, nodeID) verifyNode(t, s, nodeID, 2)
verifyNode(t, s, nodeName) verifyNode(t, s, nodeName, 2)
verifyService(t, s, nodeID) verifyService(t, s, nodeID)
verifyService(t, s, nodeName) verifyService(t, s, nodeName)
}) })
@ -726,8 +726,8 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
require.NoError(t, restore.Commit()) require.NoError(t, restore.Commit())
// Verify that the check got registered. // Verify that the check got registered.
verifyNode(t, s, nodeID) verifyNode(t, s, nodeID, 2)
verifyNode(t, s, nodeName) verifyNode(t, s, nodeName, 2)
verifyService(t, s, nodeID) verifyService(t, s, nodeID)
verifyService(t, s, nodeName) verifyService(t, s, nodeName)
verifyCheck(t, s) verifyCheck(t, s)
@ -776,8 +776,8 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
require.NoError(t, restore.Commit()) require.NoError(t, restore.Commit())
// Verify that the additional check got registered. // Verify that the additional check got registered.
verifyNode(t, s, nodeID) verifyNode(t, s, nodeID, 2)
verifyNode(t, s, nodeName) verifyNode(t, s, nodeName, 2)
verifyService(t, s, nodeID) verifyService(t, s, nodeID)
verifyService(t, s, nodeName) verifyService(t, s, nodeName)
verifyChecks(t, s) verifyChecks(t, s)
@ -976,7 +976,7 @@ func TestNodeRenamingNodes(t *testing.T) {
Address: "1.1.1.2", Address: "1.1.1.2",
} }
if err := s.EnsureNode(10, in2Modify); err != nil { if err := s.EnsureNode(10, in2Modify); err != nil {
t.Fatalf("Renaming node2 into node1 should fail") t.Fatalf("Renaming node2 into node1 should not fail: " + err.Error())
} }
// Retrieve the node again // Retrieve the node again
@ -1550,20 +1550,16 @@ func TestStateStore_DeleteNode(t *testing.T) {
} }
// Indexes were updated. // Indexes were updated.
for _, tbl := range []string{tableNodes, tableServices, tableChecks} { assert.Equal(t, uint64(3), catalogChecksMaxIndex(tx, nil, ""))
if idx := s.maxIndex(tbl); idx != 3 { assert.Equal(t, uint64(3), catalogServicesMaxIndex(tx, nil, ""))
t.Fatalf("bad index: %d (%s)", idx, tbl) assert.Equal(t, uint64(3), catalogNodesMaxIndex(tx, nil, ""))
}
}
// Deleting a nonexistent node should be idempotent and not return // Deleting a nonexistent node should be idempotent and not return
// an error // an error
if err := s.DeleteNode(4, "node1", nil, ""); err != nil { if err := s.DeleteNode(4, "node1", nil, ""); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
if idx := s.maxIndex(tableNodes); idx != 3 { assert.Equal(t, uint64(3), catalogNodesMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index: %d", idx)
}
} }
func TestStateStore_Node_Snapshot(t *testing.T) { func TestStateStore_Node_Snapshot(t *testing.T) {
@ -1690,7 +1686,8 @@ func TestStateStore_EnsureService(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
if idx != 30 { // expect node1's max idx
if idx != 20 {
t.Fatalf("bad index: %d", idx) t.Fatalf("bad index: %d", idx)
} }
@ -1713,9 +1710,7 @@ func TestStateStore_EnsureService(t *testing.T) {
} }
// Index tables were updated. // Index tables were updated.
if idx := s.maxIndex(tableServices); idx != 30 { assert.Equal(t, uint64(30), catalogServicesMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index: %d", idx)
}
// Update a service registration. // Update a service registration.
ns1.Address = "1.1.1.2" ns1.Address = "1.1.1.2"
@ -1744,9 +1739,7 @@ func TestStateStore_EnsureService(t *testing.T) {
} }
// Index tables were updated. // Index tables were updated.
if idx := s.maxIndex(tableServices); idx != 40 { assert.Equal(t, uint64(40), catalogServicesMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index: %d", idx)
}
} }
func TestStateStore_EnsureService_connectProxy(t *testing.T) { func TestStateStore_EnsureService_connectProxy(t *testing.T) {
@ -1806,7 +1799,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
require.NoError(t, s.EnsureService(10, "node1", ns1)) require.NoError(t, s.EnsureService(10, "node1", ns1))
// Make sure there's a virtual IP for the foo service. // Make sure there's a virtual IP for the foo service.
vip, err := s.VirtualIPForService(structs.ServiceName{Name: "foo"}) vip, err := s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "foo"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.1", vip) assert.Equal(t, "240.0.0.1", vip)
@ -1837,7 +1830,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
require.NoError(t, s.EnsureService(11, "node1", ns2)) require.NoError(t, s.EnsureService(11, "node1", ns2))
// Make sure the virtual IP has been incremented for the redis service. // Make sure the virtual IP has been incremented for the redis service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "redis"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.2", vip) assert.Equal(t, "240.0.0.2", vip)
@ -1853,7 +1846,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
// Delete the first service and make sure it no longer has a virtual IP assigned. // Delete the first service and make sure it no longer has a virtual IP assigned.
require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta, "")) require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta, ""))
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "connect-proxy"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "connect-proxy"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "", vip) assert.Equal(t, "", vip)
@ -1874,7 +1867,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
require.NoError(t, s.EnsureService(13, "node1", ns3)) require.NoError(t, s.EnsureService(13, "node1", ns3))
// Make sure the virtual IP is unchanged for the redis service. // Make sure the virtual IP is unchanged for the redis service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "redis"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.2", vip) assert.Equal(t, "240.0.0.2", vip)
@ -1902,7 +1895,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
require.NoError(t, s.EnsureService(14, "node1", ns4)) require.NoError(t, s.EnsureService(14, "node1", ns4))
// Make sure the virtual IP has allocated from the previously freed service. // Make sure the virtual IP has allocated from the previously freed service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "web"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "web"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.1", vip) assert.Equal(t, "240.0.0.1", vip)
@ -1912,6 +1905,41 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP] taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(t, vip, taggedAddress.Address) assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns4.Port, taggedAddress.Port) assert.Equal(t, ns4.Port, taggedAddress.Port)
// Register a node1 in another peer (technically this node would be imported
// and stored through the peering stream handlers).
testRegisterNodeOpts(t, s, 15, "node1", func(node *structs.Node) error {
node.PeerName = "billing"
return nil
})
// Register an identical service but imported from a peer
ns5 := &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: "web-proxy",
Service: "web-proxy",
Address: "4.4.4.4",
Port: 4444,
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
Proxy: structs.ConnectProxyConfig{DestinationServiceName: "web"},
EnterpriseMeta: *entMeta,
PeerName: "billing",
}
require.NoError(t, s.EnsureService(15, "node1", ns5))
// Make sure the virtual IP is different from the identically named local service.
vip, err = s.VirtualIPForService(structs.PeeredServiceName{Peer: "billing", ServiceName: structs.ServiceName{Name: "web"}})
require.NoError(t, err)
assert.Equal(t, "240.0.0.3", vip)
// Retrieve and verify
_, out, err = s.NodeServices(nil, "node1", nil, "billing")
require.NoError(t, err)
taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns5.Port, taggedAddress.Port)
} }
func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
@ -1938,7 +1966,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
require.NoError(t, s.EnsureService(10, "node1", ns1)) require.NoError(t, s.EnsureService(10, "node1", ns1))
// Make sure there's a virtual IP for the foo service. // Make sure there's a virtual IP for the foo service.
vip, err := s.VirtualIPForService(structs.ServiceName{Name: "foo"}) vip, err := s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "foo"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.1", vip) assert.Equal(t, "240.0.0.1", vip)
@ -1968,7 +1996,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
require.NoError(t, s.EnsureService(11, "node1", ns2)) require.NoError(t, s.EnsureService(11, "node1", ns2))
// Make sure the virtual IP has been incremented for the redis service. // Make sure the virtual IP has been incremented for the redis service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "redis"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.2", vip) assert.Equal(t, "240.0.0.2", vip)
@ -1983,7 +2011,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
// Delete the last service and make sure it no longer has a virtual IP assigned. // Delete the last service and make sure it no longer has a virtual IP assigned.
require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta, "")) require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta, ""))
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "redis"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "", vip) assert.Equal(t, "", vip)
@ -2003,7 +2031,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
} }
require.NoError(t, s.EnsureService(13, "node1", ns3)) require.NoError(t, s.EnsureService(13, "node1", ns3))
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "backend"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "backend"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.2", vip) assert.Equal(t, "240.0.0.2", vip)
@ -2033,7 +2061,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
require.NoError(t, s.EnsureService(14, "node1", ns4)) require.NoError(t, s.EnsureService(14, "node1", ns4))
// Make sure the virtual IP has been incremented for the frontend service. // Make sure the virtual IP has been incremented for the frontend service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "frontend"}) vip, err = s.VirtualIPForService(structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "frontend"}})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "240.0.0.3", vip) assert.Equal(t, "240.0.0.3", vip)
@ -2571,21 +2599,15 @@ func TestStateStore_DeleteService(t *testing.T) {
} }
// Index tables were updated. // Index tables were updated.
if idx := s.maxIndex(tableServices); idx != 4 { assert.Equal(t, uint64(4), catalogChecksMaxIndex(tx, nil, ""))
t.Fatalf("bad index: %d", idx) assert.Equal(t, uint64(4), catalogServicesMaxIndex(tx, nil, ""))
}
if idx := s.maxIndex(tableChecks); idx != 4 {
t.Fatalf("bad index: %d", idx)
}
// Deleting a nonexistent service should be idempotent and not return an // Deleting a nonexistent service should be idempotent and not return an
// error, nor fire a watch. // error, nor fire a watch.
if err := s.DeleteService(5, "node1", "service1", nil, ""); err != nil { if err := s.DeleteService(5, "node1", "service1", nil, ""); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
if idx := s.maxIndex(tableServices); idx != 4 { assert.Equal(t, uint64(4), catalogServicesMaxIndex(tx, nil, ""))
t.Fatalf("bad index: %d", idx)
}
if watchFired(ws) { if watchFired(ws) {
t.Fatalf("bad") t.Fatalf("bad")
} }
@ -2906,9 +2928,7 @@ func TestStateStore_EnsureCheck(t *testing.T) {
testCheckOutput(t, 5, 5, "bbbmodified") testCheckOutput(t, 5, 5, "bbbmodified")
// Index tables were updated // Index tables were updated
if idx := s.maxIndex(tableChecks); idx != 5 { assert.Equal(t, uint64(5), catalogChecksMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index: %d", idx)
}
} }
func TestStateStore_EnsureCheck_defaultStatus(t *testing.T) { func TestStateStore_EnsureCheck_defaultStatus(t *testing.T) {
@ -3387,9 +3407,7 @@ func TestStateStore_DeleteCheck(t *testing.T) {
if idx, check, err := s.NodeCheck("node1", "check1", nil, ""); idx != 3 || err != nil || check != nil { if idx, check, err := s.NodeCheck("node1", "check1", nil, ""); idx != 3 || err != nil || check != nil {
t.Fatalf("Node check should have been deleted idx=%d, node=%v, err=%s", idx, check, err) t.Fatalf("Node check should have been deleted idx=%d, node=%v, err=%s", idx, check, err)
} }
if idx := s.maxIndex(tableChecks); idx != 3 { assert.Equal(t, uint64(3), catalogChecksMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index for checks: %d", idx)
}
if !watchFired(ws) { if !watchFired(ws) {
t.Fatalf("bad") t.Fatalf("bad")
} }
@ -3407,18 +3425,14 @@ func TestStateStore_DeleteCheck(t *testing.T) {
} }
// Index tables were updated. // Index tables were updated.
if idx := s.maxIndex(tableChecks); idx != 3 { assert.Equal(t, uint64(3), catalogChecksMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index: %d", idx)
}
// Deleting a nonexistent check should be idempotent and not return an // Deleting a nonexistent check should be idempotent and not return an
// error. // error.
if err := s.DeleteCheck(4, "node1", "check1", nil, ""); err != nil { if err := s.DeleteCheck(4, "node1", "check1", nil, ""); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
if idx := s.maxIndex(tableChecks); idx != 3 { assert.Equal(t, uint64(3), catalogChecksMaxIndex(s.db.ReadTxn(), nil, ""))
t.Fatalf("bad index: %d", idx)
}
if watchFired(ws) { if watchFired(ws) {
t.Fatalf("bad") t.Fatalf("bad")
} }

View File

@ -1,7 +1,6 @@
package state package state
import ( import (
"fmt"
"strings" "strings"
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
@ -27,7 +26,7 @@ func configTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[any, structs.ConfigEntry, any]{
readIndex: indexFromConfigEntryKindName, readIndex: indexFromConfigEntryKindName,
writeIndex: indexFromConfigEntry, writeIndex: indexFromConfigEntry,
prefixIndex: indexFromConfigEntryKindName, prefixIndex: indexFromConfigEntryKindName,
@ -55,12 +54,30 @@ func configTableSchema() *memdb.TableSchema {
} }
} }
func indexFromConfigEntry(raw interface{}) ([]byte, error) { // configEntryIndexable is required because while structs.ConfigEntry
c, ok := raw.(structs.ConfigEntry) // has a GetEnterpriseMeta method, it does not directly expose the
if !ok { // required NamespaceOrDefault and PartitionOrDefault methods of
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw) // enterpriseIndexable.
} //
// Config entries that embed *acl.EnterpriseMeta will automatically
// implement this interface.
type configEntryIndexable interface {
structs.ConfigEntry
enterpriseIndexable
}
var _ configEntryIndexable = (*structs.ExportedServicesConfigEntry)(nil)
var _ configEntryIndexable = (*structs.IngressGatewayConfigEntry)(nil)
var _ configEntryIndexable = (*structs.MeshConfigEntry)(nil)
var _ configEntryIndexable = (*structs.ProxyConfigEntry)(nil)
var _ configEntryIndexable = (*structs.ServiceConfigEntry)(nil)
var _ configEntryIndexable = (*structs.ServiceIntentionsConfigEntry)(nil)
var _ configEntryIndexable = (*structs.ServiceResolverConfigEntry)(nil)
var _ configEntryIndexable = (*structs.ServiceRouterConfigEntry)(nil)
var _ configEntryIndexable = (*structs.ServiceSplitterConfigEntry)(nil)
var _ configEntryIndexable = (*structs.TerminatingGatewayConfigEntry)(nil)
func indexFromConfigEntry(c structs.ConfigEntry) ([]byte, error) {
if c.GetName() == "" || c.GetKind() == "" { if c.GetName() == "" || c.GetKind() == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -73,12 +90,7 @@ func indexFromConfigEntry(raw interface{}) ([]byte, error) {
// indexKindFromConfigEntry indexes kinds without a namespace for any config // indexKindFromConfigEntry indexes kinds without a namespace for any config
// entries that span all namespaces. // entries that span all namespaces.
func indexKindFromConfigEntry(raw interface{}) ([]byte, error) { func indexKindFromConfigEntry(c configEntryIndexable) ([]byte, error) {
c, ok := raw.(structs.ConfigEntry)
if !ok {
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
}
if c.GetKind() == "" { if c.GetKind() == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }

View File

@ -13,12 +13,7 @@ import (
const tableCoordinates = "coordinates" const tableCoordinates = "coordinates"
func indexFromCoordinate(raw interface{}) ([]byte, error) { func indexFromCoordinate(c *structs.Coordinate) ([]byte, error) {
c, ok := raw.(*structs.Coordinate)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Coordinate index", raw)
}
if c.Node == "" { if c.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -29,12 +24,7 @@ func indexFromCoordinate(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexNodeFromCoordinate(raw interface{}) ([]byte, error) { func indexNodeFromCoordinate(c *structs.Coordinate) ([]byte, error) {
c, ok := raw.(*structs.Coordinate)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Coordinate index", raw)
}
if c.Node == "" { if c.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -44,12 +34,7 @@ func indexNodeFromCoordinate(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromCoordinateQuery(raw interface{}) ([]byte, error) { func indexFromCoordinateQuery(q CoordinateQuery) ([]byte, error) {
q, ok := raw.(CoordinateQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for CoordinateQuery index", raw)
}
if q.Node == "" { if q.Node == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -80,7 +65,7 @@ func coordinatesTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[CoordinateQuery, *structs.Coordinate, any]{
readIndex: indexFromCoordinateQuery, readIndex: indexFromCoordinateQuery,
writeIndex: indexFromCoordinate, writeIndex: indexFromCoordinate,
prefixIndex: prefixIndexFromQueryNoNamespace, prefixIndex: prefixIndexFromQueryNoNamespace,
@ -90,7 +75,7 @@ func coordinatesTableSchema() *memdb.TableSchema {
Name: indexNode, Name: indexNode,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[Query, *structs.Coordinate]{
readIndex: indexFromQuery, readIndex: indexFromQuery,
writeIndex: indexNodeFromCoordinate, writeIndex: indexNodeFromCoordinate,
}, },

View File

@ -15,32 +15,42 @@ import (
// indexerSingle implements both memdb.Indexer and memdb.SingleIndexer. It may // indexerSingle implements both memdb.Indexer and memdb.SingleIndexer. It may
// be used in a memdb.IndexSchema to specify functions that generate the index // be used in a memdb.IndexSchema to specify functions that generate the index
// value for memdb.Txn operations. // value for memdb.Txn operations.
type indexerSingle struct { //
// R represents the type used to generate the read index.
// W represents the type used to generate the write index.
type indexerSingle[R, W any] struct {
// readIndex is used by memdb for Txn.Get, Txn.First, and other operations // readIndex is used by memdb for Txn.Get, Txn.First, and other operations
// that read data. // that read data.
readIndex readIndex[R]
// writeIndex is used by memdb for Txn.Insert, Txn.Delete, for operations // writeIndex is used by memdb for Txn.Insert, Txn.Delete, for operations
// that write data to the index. // that write data to the index.
writeIndex writeIndex[W]
} }
// indexerMulti implements both memdb.Indexer and memdb.MultiIndexer. It may // indexerMulti implements both memdb.Indexer and memdb.MultiIndexer. It may
// be used in a memdb.IndexSchema to specify functions that generate the index // be used in a memdb.IndexSchema to specify functions that generate the index
// value for memdb.Txn operations. // value for memdb.Txn operations.
type indexerMulti struct { //
// R represents the type used to generate the read index.
// W represents the type used to generate the write index.
type indexerMulti[R, W any] struct {
// readIndex is used by memdb for Txn.Get, Txn.First, and other operations // readIndex is used by memdb for Txn.Get, Txn.First, and other operations
// that read data. // that read data.
readIndex readIndex[R]
// writeIndexMulti is used by memdb for Txn.Insert, Txn.Delete, for operations // writeIndexMulti is used by memdb for Txn.Insert, Txn.Delete, for operations
// that write data to the index. // that write data to the index.
writeIndexMulti writeIndexMulti[W]
} }
// indexerSingleWithPrefix is a indexerSingle which also supports prefix queries. // indexerSingleWithPrefix is a indexerSingle which also supports prefix queries.
type indexerSingleWithPrefix struct { //
readIndex // R represents the type used to generate the read index.
writeIndex // W represents the type used to generate the write index.
prefixIndex // P represents the type used to generate the prefix index.
type indexerSingleWithPrefix[R, W, P any] struct {
readIndex[R]
writeIndex[W]
prefixIndex[P]
} }
// readIndex implements memdb.Indexer. It exists so that a function can be used // readIndex implements memdb.Indexer. It exists so that a function can be used
@ -48,13 +58,18 @@ type indexerSingleWithPrefix struct {
// //
// Unlike memdb.Indexer, a readIndex function accepts only a single argument. To // Unlike memdb.Indexer, a readIndex function accepts only a single argument. To
// generate an index from multiple values, use a struct type with multiple fields. // generate an index from multiple values, use a struct type with multiple fields.
type readIndex func(arg interface{}) ([]byte, error) type readIndex[R any] func(arg R) ([]byte, error)
func (f readIndex) FromArgs(args ...interface{}) ([]byte, error) { func (f readIndex[R]) FromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 { if len(args) != 1 {
return nil, fmt.Errorf("index supports only a single arg") return nil, fmt.Errorf("index supports only a single arg")
} }
return f(args[0]) arg, ok := args[0].(R)
if !ok {
var typ R
return nil, fmt.Errorf("unexpected type %T, does not implement %T", args[0], typ)
}
return f(arg)
} }
var errMissingValueForIndex = fmt.Errorf("object is missing a value for this index") var errMissingValueForIndex = fmt.Errorf("object is missing a value for this index")
@ -65,10 +80,15 @@ var errMissingValueForIndex = fmt.Errorf("object is missing a value for this ind
// Instead of a bool return value, writeIndex expects errMissingValueForIndex to // Instead of a bool return value, writeIndex expects errMissingValueForIndex to
// indicate that an index could not be build for the object. It will translate // indicate that an index could not be build for the object. It will translate
// this error into a false value to satisfy the memdb.SingleIndexer interface. // this error into a false value to satisfy the memdb.SingleIndexer interface.
type writeIndex func(raw interface{}) ([]byte, error) type writeIndex[W any] func(raw W) ([]byte, error)
func (f writeIndex) FromObject(raw interface{}) (bool, []byte, error) { func (f writeIndex[W]) FromObject(raw interface{}) (bool, []byte, error) {
v, err := f(raw) obj, ok := raw.(W)
if !ok {
var typ W
return false, nil, fmt.Errorf("unexpected type %T, does not implement %T", raw, typ)
}
v, err := f(obj)
if errors.Is(err, errMissingValueForIndex) { if errors.Is(err, errMissingValueForIndex) {
return false, nil, nil return false, nil, nil
} }
@ -81,10 +101,15 @@ func (f writeIndex) FromObject(raw interface{}) (bool, []byte, error) {
// Instead of a bool return value, writeIndexMulti expects errMissingValueForIndex to // Instead of a bool return value, writeIndexMulti expects errMissingValueForIndex to
// indicate that an index could not be build for the object. It will translate // indicate that an index could not be build for the object. It will translate
// this error into a false value to satisfy the memdb.MultiIndexer interface. // this error into a false value to satisfy the memdb.MultiIndexer interface.
type writeIndexMulti func(raw interface{}) ([][]byte, error) type writeIndexMulti[W any] func(raw W) ([][]byte, error)
func (f writeIndexMulti) FromObject(raw interface{}) (bool, [][]byte, error) { func (f writeIndexMulti[W]) FromObject(raw interface{}) (bool, [][]byte, error) {
v, err := f(raw) obj, ok := raw.(W)
if !ok {
var typ W
return false, nil, fmt.Errorf("unexpected type %T, does not implement %T", raw, typ)
}
v, err := f(obj)
if errors.Is(err, errMissingValueForIndex) { if errors.Is(err, errMissingValueForIndex) {
return false, nil, nil return false, nil, nil
} }
@ -93,13 +118,18 @@ func (f writeIndexMulti) FromObject(raw interface{}) (bool, [][]byte, error) {
// prefixIndex implements memdb.PrefixIndexer. It exists so that a function // prefixIndex implements memdb.PrefixIndexer. It exists so that a function
// can be used to provide this interface. // can be used to provide this interface.
type prefixIndex func(args interface{}) ([]byte, error) type prefixIndex[P any] func(args P) ([]byte, error)
func (f prefixIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { func (f prefixIndex[P]) PrefixFromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 { if len(args) != 1 {
return nil, fmt.Errorf("index supports only a single arg") return nil, fmt.Errorf("index supports only a single arg")
} }
return f(args[0]) arg, ok := args[0].(P)
if !ok {
var typ P
return nil, fmt.Errorf("unexpected type %T, does not implement %T", args[0], typ)
}
return f(arg)
} }
const null = "\x00" const null = "\x00"
@ -159,12 +189,7 @@ var _ singleValueID = (*Query)(nil)
var _ singleValueID = (*structs.Session)(nil) var _ singleValueID = (*structs.Session)(nil)
// indexFromIDValue creates an index key from any struct that implements singleValueID // indexFromIDValue creates an index key from any struct that implements singleValueID
func indexFromIDValueLowerCase(raw interface{}) ([]byte, error) { func indexFromIDValueLowerCase(e singleValueID) ([]byte, error) {
e, ok := raw.(singleValueID)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement singleValueID", raw)
}
v := strings.ToLower(e.IDValue()) v := strings.ToLower(e.IDValue())
if v == "" { if v == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
@ -176,11 +201,7 @@ func indexFromIDValueLowerCase(raw interface{}) ([]byte, error) {
} }
// indexFromIDValue creates an index key from any struct that implements singleValueID // indexFromIDValue creates an index key from any struct that implements singleValueID
func indexFromMultiValueID(raw interface{}) ([]byte, error) { func indexFromMultiValueID(e multiValueID) ([]byte, error) {
e, ok := raw.(multiValueID)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement multiValueID", raw)
}
var b indexBuilder var b indexBuilder
for _, v := range e.IDValue() { for _, v := range e.IDValue() {
if v == "" { if v == "" {

View File

@ -41,12 +41,7 @@ func kvsTableSchema() *memdb.TableSchema {
} }
// indexFromIDValue creates an index key from any struct that implements singleValueID // indexFromIDValue creates an index key from any struct that implements singleValueID
func indexFromIDValue(raw interface{}) ([]byte, error) { func indexFromIDValue(e singleValueID) ([]byte, error) {
e, ok := raw.(singleValueID)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement singleValueID", raw)
}
v := e.IDValue() v := e.IDValue()
if v == "" { if v == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex

View File

@ -13,11 +13,11 @@ import (
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
) )
func kvsIndexer() indexerSingleWithPrefix { func kvsIndexer() indexerSingleWithPrefix[singleValueID, singleValueID, any] {
return indexerSingleWithPrefix{ return indexerSingleWithPrefix[singleValueID, singleValueID, any]{
readIndex: readIndex(indexFromIDValue), readIndex: indexFromIDValue,
writeIndex: writeIndex(indexFromIDValue), writeIndex: indexFromIDValue,
prefixIndex: prefixIndex(prefixIndexForIDValue), prefixIndex: prefixIndexForIDValue,
} }
} }

View File

@ -1,12 +1,12 @@
package state package state
import ( import (
"errors"
"fmt" "fmt"
"strings" "strings"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
@ -27,16 +27,16 @@ func peeringTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[string, *pbpeering.Peering]{
readIndex: readIndex(indexFromUUIDString), readIndex: indexFromUUIDString,
writeIndex: writeIndex(indexIDFromPeering), writeIndex: indexIDFromPeering,
}, },
}, },
indexName: { indexName: {
Name: indexName, Name: indexName,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[Query, *pbpeering.Peering, any]{
readIndex: indexPeeringFromQuery, readIndex: indexPeeringFromQuery,
writeIndex: indexFromPeering, writeIndex: indexFromPeering,
prefixIndex: prefixIndexFromQueryNoNamespace, prefixIndex: prefixIndexFromQueryNoNamespace,
@ -46,7 +46,7 @@ func peeringTableSchema() *memdb.TableSchema {
Name: indexDeleted, Name: indexDeleted,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: indexerSingle{ Indexer: indexerSingle[BoolQuery, *pbpeering.Peering]{
readIndex: indexDeletedFromBoolQuery, readIndex: indexDeletedFromBoolQuery,
writeIndex: indexDeletedFromPeering, writeIndex: indexDeletedFromPeering,
}, },
@ -63,7 +63,7 @@ func peeringTrustBundlesTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingleWithPrefix{ Indexer: indexerSingleWithPrefix[Query, *pbpeering.PeeringTrustBundle, any]{
readIndex: indexPeeringFromQuery, // same as peering table since we'll use the query.Value readIndex: indexPeeringFromQuery, // same as peering table since we'll use the query.Value
writeIndex: indexFromPeeringTrustBundle, writeIndex: indexFromPeeringTrustBundle,
prefixIndex: prefixIndexFromQueryNoNamespace, prefixIndex: prefixIndexFromQueryNoNamespace,
@ -73,12 +73,7 @@ func peeringTrustBundlesTableSchema() *memdb.TableSchema {
} }
} }
func indexIDFromPeering(raw interface{}) ([]byte, error) { func indexIDFromPeering(p *pbpeering.Peering) ([]byte, error) {
p, ok := raw.(*pbpeering.Peering)
if !ok {
return nil, fmt.Errorf("unexpected type %T for pbpeering.Peering index", raw)
}
if p.ID == "" { if p.ID == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -92,12 +87,7 @@ func indexIDFromPeering(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexDeletedFromPeering(raw interface{}) ([]byte, error) { func indexDeletedFromPeering(p *pbpeering.Peering) ([]byte, error) {
p, ok := raw.(*pbpeering.Peering)
if !ok {
return nil, fmt.Errorf("unexpected type %T for *pbpeering.Peering index", raw)
}
var b indexBuilder var b indexBuilder
b.Bool(!p.IsActive()) b.Bool(!p.IsActive())
return b.Bytes(), nil return b.Bytes(), nil
@ -191,50 +181,47 @@ func (s *Store) peeringListTxn(ws memdb.WatchSet, tx ReadTxn, entMeta acl.Enterp
return idx, result, nil return idx, result, nil
} }
func generatePeeringUUID(tx ReadTxn) (string, error) {
for {
uuid, err := uuid.GenerateUUID()
if err != nil {
return "", fmt.Errorf("failed to generate UUID: %w", err)
}
existing, err := peeringReadByIDTxn(tx, nil, uuid)
if err != nil {
return "", fmt.Errorf("failed to read peering: %w", err)
}
if existing == nil {
return uuid, nil
}
}
}
func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error { func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error {
tx := s.db.WriteTxn(idx) tx := s.db.WriteTxn(idx)
defer tx.Abort() defer tx.Abort()
q := Query{ // Check that the ID and Name are set.
Value: p.Name, if p.ID == "" {
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(p.Partition), return errors.New("Missing Peering ID")
} }
existingRaw, err := tx.First(tablePeering, indexName, q) if p.Name == "" {
if err != nil { return errors.New("Missing Peering Name")
return fmt.Errorf("failed peering lookup: %w", err)
} }
existing, ok := existingRaw.(*pbpeering.Peering) // ensure the name is unique (cannot conflict with another peering with a different ID)
if existingRaw != nil && !ok { _, existing, err := peeringReadTxn(tx, nil, Query{
return fmt.Errorf("invalid type %T", existingRaw) Value: p.Name,
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(p.Partition),
})
if err != nil {
return err
} }
if existing != nil { if existing != nil {
if p.ID != existing.ID {
return fmt.Errorf("A peering already exists with the name %q and a different ID %q", p.Name, existing.ID)
}
// Prevent modifications to Peering marked for deletion // Prevent modifications to Peering marked for deletion
if !existing.IsActive() { if !existing.IsActive() {
return fmt.Errorf("cannot write to peering that is marked for deletion") return fmt.Errorf("cannot write to peering that is marked for deletion")
} }
p.CreateIndex = existing.CreateIndex p.CreateIndex = existing.CreateIndex
p.ID = existing.ID p.ModifyIndex = idx
} else { } else {
idMatch, err := peeringReadByIDTxn(tx, nil, p.ID)
if err != nil {
return err
}
if idMatch != nil {
return fmt.Errorf("A peering already exists with the ID %q and a different name %q", p.Name, existing.ID)
}
if !p.IsActive() { if !p.IsActive() {
return fmt.Errorf("cannot create a new peering marked for deletion") return fmt.Errorf("cannot create a new peering marked for deletion")
} }
@ -242,13 +229,8 @@ func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error {
// TODO(peering): consider keeping PeeringState enum elsewhere? // TODO(peering): consider keeping PeeringState enum elsewhere?
p.State = pbpeering.PeeringState_INITIAL p.State = pbpeering.PeeringState_INITIAL
p.CreateIndex = idx p.CreateIndex = idx
p.ModifyIndex = idx
p.ID, err = generatePeeringUUID(tx)
if err != nil {
return fmt.Errorf("failed to generate peering id: %w", err)
}
} }
p.ModifyIndex = idx
if err := tx.Insert(tablePeering, p); err != nil { if err := tx.Insert(tablePeering, p); err != nil {
return fmt.Errorf("failed inserting peering: %w", err) return fmt.Errorf("failed inserting peering: %w", err)

View File

@ -10,23 +10,13 @@ import (
"github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbpeering"
) )
func indexPeeringFromQuery(raw interface{}) ([]byte, error) { func indexPeeringFromQuery(q Query) ([]byte, error) {
q, ok := raw.(Query)
if !ok {
return nil, fmt.Errorf("unexpected type %T for Query index", raw)
}
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(q.Value)) b.String(strings.ToLower(q.Value))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromPeering(raw interface{}) ([]byte, error) { func indexFromPeering(p *pbpeering.Peering) ([]byte, error) {
p, ok := raw.(*pbpeering.Peering)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Peering index", raw)
}
if p.Name == "" { if p.Name == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
@ -36,12 +26,7 @@ func indexFromPeering(raw interface{}) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromPeeringTrustBundle(raw interface{}) ([]byte, error) { func indexFromPeeringTrustBundle(ptb *pbpeering.PeeringTrustBundle) ([]byte, error) {
ptb, ok := raw.(*pbpeering.PeeringTrustBundle)
if !ok {
return nil, fmt.Errorf("unexpected type %T for pbpeering.PeeringTrustBundle index", raw)
}
if ptb.PeerName == "" { if ptb.PeerName == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }

View File

@ -1,13 +1,10 @@
package state package state
import ( import (
"fmt"
"math/rand"
"testing" "testing"
"time" "time"
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
@ -17,6 +14,12 @@ import (
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
) )
const (
testFooPeerID = "9e650110-ac74-4c5a-a6a8-9348b2bed4e9"
testBarPeerID = "5ebcff30-5509-4858-8142-a8e580f1863f"
testBazPeerID = "432feb2f-5476-4ae2-b33c-e43640ca0e86"
)
func insertTestPeerings(t *testing.T, s *Store) { func insertTestPeerings(t *testing.T, s *Store) {
t.Helper() t.Helper()
@ -26,7 +29,7 @@ func insertTestPeerings(t *testing.T, s *Store) {
err := tx.Insert(tablePeering, &pbpeering.Peering{ err := tx.Insert(tablePeering, &pbpeering.Peering{
Name: "foo", Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", ID: testFooPeerID,
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
CreateIndex: 1, CreateIndex: 1,
ModifyIndex: 1, ModifyIndex: 1,
@ -36,7 +39,7 @@ func insertTestPeerings(t *testing.T, s *Store) {
err = tx.Insert(tablePeering, &pbpeering.Peering{ err = tx.Insert(tablePeering, &pbpeering.Peering{
Name: "bar", Name: "bar",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "5ebcff30-5509-4858-8142-a8e580f1863f", ID: testBarPeerID,
State: pbpeering.PeeringState_FAILING, State: pbpeering.PeeringState_FAILING,
CreateIndex: 2, CreateIndex: 2,
ModifyIndex: 2, ModifyIndex: 2,
@ -97,16 +100,16 @@ func TestStateStore_PeeringReadByID(t *testing.T) {
run := func(t *testing.T, tc testcase) { run := func(t *testing.T, tc testcase) {
_, peering, err := s.PeeringReadByID(nil, tc.id) _, peering, err := s.PeeringReadByID(nil, tc.id)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expect, peering) prototest.AssertDeepEqual(t, tc.expect, peering)
} }
tcs := []testcase{ tcs := []testcase{
{ {
name: "get foo", name: "get foo",
id: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", id: testFooPeerID,
expect: &pbpeering.Peering{ expect: &pbpeering.Peering{
Name: "foo", Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", ID: testFooPeerID,
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
CreateIndex: 1, CreateIndex: 1,
ModifyIndex: 1, ModifyIndex: 1,
@ -114,11 +117,11 @@ func TestStateStore_PeeringReadByID(t *testing.T) {
}, },
{ {
name: "get bar", name: "get bar",
id: "5ebcff30-5509-4858-8142-a8e580f1863f", id: testBarPeerID,
expect: &pbpeering.Peering{ expect: &pbpeering.Peering{
Name: "bar", Name: "bar",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "5ebcff30-5509-4858-8142-a8e580f1863f", ID: testBarPeerID,
State: pbpeering.PeeringState_FAILING, State: pbpeering.PeeringState_FAILING,
CreateIndex: 2, CreateIndex: 2,
ModifyIndex: 2, ModifyIndex: 2,
@ -149,7 +152,7 @@ func TestStateStore_PeeringRead(t *testing.T) {
run := func(t *testing.T, tc testcase) { run := func(t *testing.T, tc testcase) {
_, peering, err := s.PeeringRead(nil, tc.query) _, peering, err := s.PeeringRead(nil, tc.query)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expect, peering) prototest.AssertDeepEqual(t, tc.expect, peering)
} }
tcs := []testcase{ tcs := []testcase{
{ {
@ -160,7 +163,7 @@ func TestStateStore_PeeringRead(t *testing.T) {
expect: &pbpeering.Peering{ expect: &pbpeering.Peering{
Name: "foo", Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", ID: testFooPeerID,
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
CreateIndex: 1, CreateIndex: 1,
ModifyIndex: 1, ModifyIndex: 1,
@ -189,6 +192,7 @@ func TestStore_Peering_Watch(t *testing.T) {
// set up initial write // set up initial write
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo", Name: "foo",
}) })
require.NoError(t, err) require.NoError(t, err)
@ -210,6 +214,7 @@ func TestStore_Peering_Watch(t *testing.T) {
lastIdx++ lastIdx++
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testBarPeerID,
Name: "bar", Name: "bar",
}) })
require.NoError(t, err) require.NoError(t, err)
@ -229,6 +234,7 @@ func TestStore_Peering_Watch(t *testing.T) {
// unrelated write shouldn't fire watch // unrelated write shouldn't fire watch
lastIdx++ lastIdx++
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testBarPeerID,
Name: "bar", Name: "bar",
}) })
require.NoError(t, err) require.NoError(t, err)
@ -237,6 +243,7 @@ func TestStore_Peering_Watch(t *testing.T) {
// foo write should fire watch // foo write should fire watch
lastIdx++ lastIdx++
err = s.PeeringWrite(lastIdx, &pbpeering.Peering{ err = s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo", Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
}) })
@ -261,6 +268,7 @@ func TestStore_Peering_Watch(t *testing.T) {
// mark for deletion before actually deleting // mark for deletion before actually deleting
lastIdx++ lastIdx++
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testBarPeerID,
Name: "bar", Name: "bar",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
}) })
@ -293,7 +301,7 @@ func TestStore_PeeringList(t *testing.T) {
{ {
Name: "foo", Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", ID: testFooPeerID,
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
CreateIndex: 1, CreateIndex: 1,
ModifyIndex: 1, ModifyIndex: 1,
@ -301,7 +309,7 @@ func TestStore_PeeringList(t *testing.T) {
{ {
Name: "bar", Name: "bar",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: "5ebcff30-5509-4858-8142-a8e580f1863f", ID: testBarPeerID,
State: pbpeering.PeeringState_FAILING, State: pbpeering.PeeringState_FAILING,
CreateIndex: 2, CreateIndex: 2,
ModifyIndex: 2, ModifyIndex: 2,
@ -336,6 +344,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
lastIdx++ lastIdx++
// insert a peering // insert a peering
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo", Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
}) })
@ -357,6 +366,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
// update peering // update peering
lastIdx++ lastIdx++
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo", Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
@ -422,6 +432,7 @@ func TestStore_PeeringWrite(t *testing.T) {
{ {
name: "create baz", name: "create baz",
input: &pbpeering.Peering{ input: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz", Name: "baz",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
}, },
@ -429,6 +440,7 @@ func TestStore_PeeringWrite(t *testing.T) {
{ {
name: "update baz", name: "update baz",
input: &pbpeering.Peering{ input: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz", Name: "baz",
State: pbpeering.PeeringState_FAILING, State: pbpeering.PeeringState_FAILING,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
@ -437,6 +449,7 @@ func TestStore_PeeringWrite(t *testing.T) {
{ {
name: "mark baz for deletion", name: "mark baz for deletion",
input: &pbpeering.Peering{ input: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz", Name: "baz",
State: pbpeering.PeeringState_TERMINATED, State: pbpeering.PeeringState_TERMINATED,
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
@ -446,6 +459,7 @@ func TestStore_PeeringWrite(t *testing.T) {
{ {
name: "cannot update peering marked for deletion", name: "cannot update peering marked for deletion",
input: &pbpeering.Peering{ input: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz", Name: "baz",
// Attempt to add metadata // Attempt to add metadata
Meta: map[string]string{ Meta: map[string]string{
@ -458,6 +472,7 @@ func TestStore_PeeringWrite(t *testing.T) {
{ {
name: "cannot create peering marked for deletion", name: "cannot create peering marked for deletion",
input: &pbpeering.Peering{ input: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo", Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
@ -472,54 +487,6 @@ func TestStore_PeeringWrite(t *testing.T) {
} }
} }
func TestStore_PeeringWrite_GenerateUUID(t *testing.T) {
rand.Seed(1)
s := NewStateStore(nil)
entMeta := structs.NodeEnterpriseMetaInDefaultPartition()
partition := entMeta.PartitionOrDefault()
for i := 1; i < 11; i++ {
require.NoError(t, s.PeeringWrite(uint64(i), &pbpeering.Peering{
Name: fmt.Sprintf("peering-%d", i),
Partition: partition,
}))
}
idx, peerings, err := s.PeeringList(nil, *entMeta)
require.NoError(t, err)
require.Equal(t, uint64(10), idx)
require.Len(t, peerings, 10)
// Ensure that all assigned UUIDs are unique.
uniq := make(map[string]struct{})
for _, p := range peerings {
uniq[p.ID] = struct{}{}
}
require.Len(t, uniq, 10)
// Ensure that the ID of an existing peering cannot be overwritten.
updated := &pbpeering.Peering{
Name: peerings[0].Name,
Partition: peerings[0].Partition,
}
// Attempt to overwrite ID.
updated.ID, err = uuid.GenerateUUID()
require.NoError(t, err)
require.NoError(t, s.PeeringWrite(11, updated))
q := Query{
Value: updated.Name,
EnterpriseMeta: *entMeta,
}
idx, got, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.Equal(t, uint64(11), idx)
require.Equal(t, peerings[0].ID, got.ID)
}
func TestStore_PeeringDelete(t *testing.T) { func TestStore_PeeringDelete(t *testing.T) {
s := NewStateStore(nil) s := NewStateStore(nil)
insertTestPeerings(t, s) insertTestPeerings(t, s)
@ -532,6 +499,7 @@ func TestStore_PeeringDelete(t *testing.T) {
testutil.RunStep(t, "can delete after marking for deletion", func(t *testing.T) { testutil.RunStep(t, "can delete after marking for deletion", func(t *testing.T) {
require.NoError(t, s.PeeringWrite(11, &pbpeering.Peering{ require.NoError(t, s.PeeringWrite(11, &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo", Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
})) }))
@ -550,7 +518,7 @@ func TestStore_PeeringTerminateByID(t *testing.T) {
insertTestPeerings(t, s) insertTestPeerings(t, s)
// id corresponding to default/foo // id corresponding to default/foo
id := "9e650110-ac74-4c5a-a6a8-9348b2bed4e9" const id = testFooPeerID
require.NoError(t, s.PeeringTerminateByID(10, id)) require.NoError(t, s.PeeringTerminateByID(10, id))
@ -607,7 +575,7 @@ func TestStateStore_PeeringTrustBundleRead(t *testing.T) {
run := func(t *testing.T, tc testcase) { run := func(t *testing.T, tc testcase) {
_, ptb, err := s.PeeringTrustBundleRead(nil, tc.query) _, ptb, err := s.PeeringTrustBundleRead(nil, tc.query)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expect, ptb) prototest.AssertDeepEqual(t, tc.expect, ptb)
} }
entMeta := structs.NodeEnterpriseMetaInDefaultPartition() entMeta := structs.NodeEnterpriseMetaInDefaultPartition()
@ -708,6 +676,7 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
lastIdx++ lastIdx++
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testUUID(),
Name: "my-peering", Name: "my-peering",
})) }))
@ -1000,6 +969,9 @@ func TestStateStore_PeeringsForService(t *testing.T) {
var lastIdx uint64 var lastIdx uint64
// Create peerings // Create peerings
for _, tp := range tc.peerings { for _, tp := range tc.peerings {
if tp.peering.ID == "" {
tp.peering.ID = testUUID()
}
lastIdx++ lastIdx++
require.NoError(t, s.PeeringWrite(lastIdx, tp.peering)) require.NoError(t, s.PeeringWrite(lastIdx, tp.peering))
@ -1009,6 +981,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
lastIdx++ lastIdx++
copied := pbpeering.Peering{ copied := pbpeering.Peering{
ID: tp.peering.ID,
Name: tp.peering.Name, Name: tp.peering.Name,
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
} }
@ -1247,6 +1220,11 @@ func TestStore_TrustBundleListByService(t *testing.T) {
var lastIdx uint64 var lastIdx uint64
ws := memdb.NewWatchSet() ws := memdb.NewWatchSet()
var (
peerID1 = testUUID()
peerID2 = testUUID()
)
testutil.RunStep(t, "no results on initial setup", func(t *testing.T) { testutil.RunStep(t, "no results on initial setup", func(t *testing.T) {
idx, resp, err := store.TrustBundleListByService(ws, "foo", entMeta) idx, resp, err := store.TrustBundleListByService(ws, "foo", entMeta)
require.NoError(t, err) require.NoError(t, err)
@ -1279,6 +1257,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
testutil.RunStep(t, "creating peering does not yield trust bundles", func(t *testing.T) { testutil.RunStep(t, "creating peering does not yield trust bundles", func(t *testing.T) {
lastIdx++ lastIdx++
require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: peerID1,
Name: "peer1", Name: "peer1",
})) }))
@ -1377,6 +1356,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
testutil.RunStep(t, "bundles for other peers are ignored", func(t *testing.T) { testutil.RunStep(t, "bundles for other peers are ignored", func(t *testing.T) {
lastIdx++ lastIdx++
require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: peerID2,
Name: "peer2", Name: "peer2",
})) }))
@ -1431,6 +1411,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
testutil.RunStep(t, "deleting the peering excludes its trust bundle", func(t *testing.T) { testutil.RunStep(t, "deleting the peering excludes its trust bundle", func(t *testing.T) {
lastIdx++ lastIdx++
require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{
ID: peerID1,
Name: "peer1", Name: "peer1",
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
})) }))
@ -1470,7 +1451,7 @@ func TestStateStore_Peering_ListDeleted(t *testing.T) {
err := tx.Insert(tablePeering, &pbpeering.Peering{ err := tx.Insert(tablePeering, &pbpeering.Peering{
Name: "foo", Name: "foo",
Partition: acl.DefaultPartitionName, Partition: acl.DefaultPartitionName,
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", ID: testFooPeerID,
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
CreateIndex: 1, CreateIndex: 1,
ModifyIndex: 1, ModifyIndex: 1,
@ -1480,7 +1461,7 @@ func TestStateStore_Peering_ListDeleted(t *testing.T) {
err = tx.Insert(tablePeering, &pbpeering.Peering{ err = tx.Insert(tablePeering, &pbpeering.Peering{
Name: "bar", Name: "bar",
Partition: acl.DefaultPartitionName, Partition: acl.DefaultPartitionName,
ID: "5ebcff30-5509-4858-8142-a8e580f1863f", ID: testBarPeerID,
CreateIndex: 2, CreateIndex: 2,
ModifyIndex: 2, ModifyIndex: 2,
}) })
@ -1489,7 +1470,7 @@ func TestStateStore_Peering_ListDeleted(t *testing.T) {
err = tx.Insert(tablePeering, &pbpeering.Peering{ err = tx.Insert(tablePeering, &pbpeering.Peering{
Name: "baz", Name: "baz",
Partition: acl.DefaultPartitionName, Partition: acl.DefaultPartitionName,
ID: "432feb2f-5476-4ae2-b33c-e43640ca0e86", ID: testBazPeerID,
DeletedAt: structs.TimeToProto(time.Now()), DeletedAt: structs.TimeToProto(time.Now()),
CreateIndex: 3, CreateIndex: 3,
ModifyIndex: 3, ModifyIndex: 3,

View File

@ -60,12 +60,7 @@ func (q MultiQuery) PartitionOrDefault() string {
// indexFromQuery builds an index key where Query.Value is lowercase, and is // indexFromQuery builds an index key where Query.Value is lowercase, and is
// a required value. // a required value.
func indexFromQuery(arg interface{}) ([]byte, error) { func indexFromQuery(q Query) ([]byte, error) {
q, ok := arg.(Query)
if !ok {
return nil, fmt.Errorf("unexpected type %T for Query index", arg)
}
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(q.Value)) b.String(strings.ToLower(q.Value))
return b.Bytes(), nil return b.Bytes(), nil
@ -164,12 +159,8 @@ func (q KeyValueQuery) PartitionOrDefault() string {
return q.EnterpriseMeta.PartitionOrDefault() return q.EnterpriseMeta.PartitionOrDefault()
} }
func indexFromKeyValueQuery(arg interface{}) ([]byte, error) { func indexFromKeyValueQuery(q KeyValueQuery) ([]byte, error) {
// NOTE: this is case-sensitive! // NOTE: this is case-sensitive!
q, ok := arg.(KeyValueQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for Query index", arg)
}
var b indexBuilder var b indexBuilder
b.String(q.Key) b.String(q.Key)

View File

@ -11,7 +11,7 @@ import (
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
) )
func prefixIndexFromQuery(arg interface{}) ([]byte, error) { func prefixIndexFromQuery(arg any) ([]byte, error) {
var b indexBuilder var b indexBuilder
switch v := arg.(type) { switch v := arg.(type) {
case *acl.EnterpriseMeta: case *acl.EnterpriseMeta:
@ -29,7 +29,7 @@ func prefixIndexFromQuery(arg interface{}) ([]byte, error) {
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
} }
func prefixIndexFromQueryWithPeer(arg interface{}) ([]byte, error) { func prefixIndexFromQueryWithPeer(arg any) ([]byte, error) {
var b indexBuilder var b indexBuilder
switch v := arg.(type) { switch v := arg.(type) {
case *acl.EnterpriseMeta: case *acl.EnterpriseMeta:
@ -58,12 +58,7 @@ func prefixIndexFromQueryNoNamespace(arg interface{}) ([]byte, error) {
// indexFromAuthMethodQuery builds an index key where Query.Value is lowercase, and is // indexFromAuthMethodQuery builds an index key where Query.Value is lowercase, and is
// a required value. // a required value.
func indexFromAuthMethodQuery(arg interface{}) ([]byte, error) { func indexFromAuthMethodQuery(q AuthMethodQuery) ([]byte, error) {
q, ok := arg.(AuthMethodQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for Query index", arg)
}
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(q.Value)) b.String(strings.ToLower(q.Value))
return b.Bytes(), nil return b.Bytes(), nil

View File

@ -84,7 +84,7 @@ func indexTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: indexerSingle{ Indexer: indexerSingle[string, *IndexEntry]{
readIndex: indexFromString, readIndex: indexFromString,
writeIndex: indexNameFromIndexEntry, writeIndex: indexNameFromIndexEntry,
}, },
@ -93,39 +93,37 @@ func indexTableSchema() *memdb.TableSchema {
} }
} }
func indexNameFromIndexEntry(raw interface{}) ([]byte, error) { func indexNameFromIndexEntry(e *IndexEntry) ([]byte, error) {
p, ok := raw.(*IndexEntry) if e.Key == "" {
if !ok {
return nil, fmt.Errorf("unexpected type %T for IndexEntry index", raw)
}
if p.Key == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
} }
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(p.Key)) b.String(strings.ToLower(e.Key))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexFromString(raw interface{}) ([]byte, error) { func indexFromString(s string) ([]byte, error) {
q, ok := raw.(string)
if !ok {
return nil, fmt.Errorf("unexpected type %T for string prefix query", raw)
}
var b indexBuilder var b indexBuilder
b.String(strings.ToLower(q)) b.String(strings.ToLower(s))
return b.Bytes(), nil return b.Bytes(), nil
} }
func indexDeletedFromBoolQuery(raw interface{}) ([]byte, error) { func indexDeletedFromBoolQuery(q BoolQuery) ([]byte, error) {
q, ok := raw.(BoolQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for BoolQuery index", raw)
}
var b indexBuilder var b indexBuilder
b.Bool(q.Value) b.Bool(q.Value)
return b.Bytes(), nil return b.Bytes(), nil
} }
type enterpriseIndexable interface {
partitionIndexable
namespaceIndexable
}
type partitionIndexable interface {
PartitionOrDefault() string
}
type namespaceIndexable interface {
NamespaceOrDefault() string
}

View File

@ -19,12 +19,7 @@ const (
indexNodeCheck = "node_check" indexNodeCheck = "node_check"
) )
func indexFromSession(raw interface{}) ([]byte, error) { func indexFromSession(e *structs.Session) ([]byte, error) {
e, ok := raw.(*structs.Session)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement *structs.Session", raw)
}
v := strings.ToLower(e.ID) v := strings.ToLower(e.ID)
if v == "" { if v == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
@ -86,12 +81,7 @@ func sessionChecksTableSchema() *memdb.TableSchema {
} }
// indexNodeFromSession creates an index key from *structs.Session // indexNodeFromSession creates an index key from *structs.Session
func indexNodeFromSession(raw interface{}) ([]byte, error) { func indexNodeFromSession(e *structs.Session) ([]byte, error) {
e, ok := raw.(*structs.Session)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement *structs.Session", raw)
}
v := strings.ToLower(e.Node) v := strings.ToLower(e.Node)
if v == "" { if v == "" {
return nil, errMissingValueForIndex return nil, errMissingValueForIndex
@ -103,12 +93,7 @@ func indexNodeFromSession(raw interface{}) ([]byte, error) {
} }
// indexFromNodeCheckIDSession creates an index key from sessionCheck // indexFromNodeCheckIDSession creates an index key from sessionCheck
func indexFromNodeCheckIDSession(raw interface{}) ([]byte, error) { func indexFromNodeCheckIDSession(e *sessionCheck) ([]byte, error) {
e, ok := raw.(*sessionCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement sessionCheck", raw)
}
var b indexBuilder var b indexBuilder
v := strings.ToLower(e.Node) v := strings.ToLower(e.Node)
if v == "" { if v == "" {
@ -132,12 +117,7 @@ func indexFromNodeCheckIDSession(raw interface{}) ([]byte, error) {
} }
// indexSessionCheckFromSession creates an index key from sessionCheck // indexSessionCheckFromSession creates an index key from sessionCheck
func indexSessionCheckFromSession(raw interface{}) ([]byte, error) { func indexSessionCheckFromSession(e *sessionCheck) ([]byte, error) {
e, ok := raw.(*sessionCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement *sessionCheck", raw)
}
var b indexBuilder var b indexBuilder
v := strings.ToLower(e.Session) v := strings.ToLower(e.Session)
if v == "" { if v == "" {

View File

@ -14,48 +14,44 @@ import (
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
) )
func sessionIndexer() indexerSingleWithPrefix { func sessionIndexer() indexerSingleWithPrefix[Query, *structs.Session, any] {
return indexerSingleWithPrefix{ return indexerSingleWithPrefix[Query, *structs.Session, any]{
readIndex: readIndex(indexFromQuery), readIndex: indexFromQuery,
writeIndex: writeIndex(indexFromSession), writeIndex: indexFromSession,
prefixIndex: prefixIndex(prefixIndexFromQuery), prefixIndex: prefixIndexFromQuery,
} }
} }
func nodeSessionsIndexer() indexerSingle { func nodeSessionsIndexer() indexerSingle[singleValueID, *structs.Session] {
return indexerSingle{ return indexerSingle[singleValueID, *structs.Session]{
readIndex: readIndex(indexFromIDValueLowerCase), readIndex: indexFromIDValueLowerCase,
writeIndex: writeIndex(indexNodeFromSession), writeIndex: indexNodeFromSession,
} }
} }
func idCheckIndexer() indexerSingle { func idCheckIndexer() indexerSingle[*sessionCheck, *sessionCheck] {
return indexerSingle{ return indexerSingle[*sessionCheck, *sessionCheck]{
readIndex: indexFromNodeCheckIDSession, readIndex: indexFromNodeCheckIDSession,
writeIndex: indexFromNodeCheckIDSession, writeIndex: indexFromNodeCheckIDSession,
} }
} }
func sessionCheckIndexer() indexerSingle { func sessionCheckIndexer() indexerSingle[Query, *sessionCheck] {
return indexerSingle{ return indexerSingle[Query, *sessionCheck]{
readIndex: indexFromQuery, readIndex: indexFromQuery,
writeIndex: indexSessionCheckFromSession, writeIndex: indexSessionCheckFromSession,
} }
} }
func nodeChecksIndexer() indexerSingle { func nodeChecksIndexer() indexerSingle[multiValueID, *sessionCheck] {
return indexerSingle{ return indexerSingle[multiValueID, *sessionCheck]{
readIndex: indexFromMultiValueID, readIndex: indexFromMultiValueID,
writeIndex: indexFromNodeCheckID, writeIndex: indexFromNodeCheckID,
} }
} }
// indexFromNodeCheckID creates an index key from a sessionCheck structure // indexFromNodeCheckID creates an index key from a sessionCheck structure
func indexFromNodeCheckID(raw interface{}) ([]byte, error) { func indexFromNodeCheckID(e *sessionCheck) ([]byte, error) {
e, ok := raw.(*sessionCheck)
if !ok {
return nil, fmt.Errorf("unexpected type %T, does not implement *structs.Session", raw)
}
var b indexBuilder var b indexBuilder
v := strings.ToLower(e.Node) v := strings.ToLower(e.Node)
if v == "" { if v == "" {

View File

@ -263,25 +263,25 @@ func (s *Store) Abandon() {
} }
// maxIndex is a helper used to retrieve the highest known index // maxIndex is a helper used to retrieve the highest known index
// amongst a set of tables in the db. // amongst a set of index keys (e.g. table names) in the db.
func (s *Store) maxIndex(tables ...string) uint64 { func (s *Store) maxIndex(keys ...string) uint64 {
tx := s.db.Txn(false) tx := s.db.Txn(false)
defer tx.Abort() defer tx.Abort()
return maxIndexTxn(tx, tables...) return maxIndexTxn(tx, keys...)
} }
// maxIndexTxn is a helper used to retrieve the highest known index // maxIndexTxn is a helper used to retrieve the highest known index
// amongst a set of tables in the db. // amongst a set of index keys (e.g. table names) in the db.
func maxIndexTxn(tx ReadTxn, tables ...string) uint64 { func maxIndexTxn(tx ReadTxn, keys ...string) uint64 {
return maxIndexWatchTxn(tx, nil, tables...) return maxIndexWatchTxn(tx, nil, keys...)
} }
func maxIndexWatchTxn(tx ReadTxn, ws memdb.WatchSet, tables ...string) uint64 { func maxIndexWatchTxn(tx ReadTxn, ws memdb.WatchSet, keys ...string) uint64 {
var lindex uint64 var lindex uint64
for _, table := range tables { for _, key := range keys {
ch, ti, err := tx.FirstWatch(tableIndex, "id", table) ch, ti, err := tx.FirstWatch(tableIndex, "id", key)
if err != nil { if err != nil {
panic(fmt.Sprintf("unknown index: %s err: %s", table, err)) panic(fmt.Sprintf("unknown index: %s err: %s", key, err))
} }
if idx, ok := ti.(*IndexEntry); ok && idx.Value > lindex { if idx, ok := ti.(*IndexEntry); ok && idx.Value > lindex {
lindex = idx.Value lindex = idx.Value

View File

@ -0,0 +1,40 @@
// Code generated by mockery v2.12.2. DO NOT EDIT.
package watch
import (
testing "testing"
mock "github.com/stretchr/testify/mock"
)
// MockStateStore is an autogenerated mock type for the StateStore type
type MockStateStore struct {
	mock.Mock
}

// AbandonCh provides a mock function with given fields:
func (_m *MockStateStore) AbandonCh() <-chan struct{} {
	ret := _m.Called()

	var r0 <-chan struct{}
	// The configured return value may be either a factory function or the
	// channel itself; a nil configured value yields the zero value.
	// NOTE(review): this file is mockery-generated (see header) — regenerate
	// with `go generate` rather than hand-editing.
	if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(<-chan struct{})
		}
	}

	return r0
}

// NewMockStateStore creates a new instance of MockStateStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockStateStore(t testing.TB) *MockStateStore {
	mock := &MockStateStore{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

View File

@ -0,0 +1,342 @@
package watch
import (
"context"
"errors"
"fmt"
"time"
"github.com/hashicorp/consul/lib/retry"
"github.com/hashicorp/go-memdb"
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
)
var (
	// ErrorNotFound may be returned by a query function to indicate that the
	// requested data does not (yet) exist.
	ErrorNotFound = errors.New("no data found for query")

	// ErrorNotChanged may be returned by a query function to indicate that
	// the data exists but has not changed since the last query.
	ErrorNotChanged = errors.New("data did not change for query")

	// Parameter-validation errors returned by ServerLocalNotify.
	errNilContext  = errors.New("cannot call ServerLocalNotify with a nil context")
	errNilGetStore = errors.New("cannot call ServerLocalNotify without a callback to get a StateStore")
	errNilQuery    = errors.New("cannot call ServerLocalNotify without a callback to perform the query")
	errNilNotify   = errors.New("cannot call ServerLocalNotify without a callback to send notifications")
)

//go:generate mockery --name StateStore --inpackage --testonly

// StateStore is the minimal state-store surface this package depends on: a
// channel that unblocks when the store is abandoned (e.g. snapshot restore).
type StateStore interface {
	AbandonCh() <-chan struct{}
}

// Defaults for the error-backoff waiter used by ServerLocalNotify.
const (
	defaultWaiterMinFailures uint = 1
	defaultWaiterMinWait          = time.Second
	defaultWaiterMaxWait          = 60 * time.Second
	defaultWaiterFactor           = 2 * time.Second
)

var (
	defaultWaiterJitter = retry.NewJitter(100)
)

// defaultWaiter builds the retry.Waiter used to back off after query errors
// in the notification routine.
func defaultWaiter() *retry.Waiter {
	return &retry.Waiter{
		MinFailures: defaultWaiterMinFailures,
		MinWait:     defaultWaiterMinWait,
		MaxWait:     defaultWaiterMaxWait,
		Jitter:      defaultWaiterJitter,
		Factor:      defaultWaiterFactor,
	}
}

// noopDone can be passed to serverLocalNotify as the done callback when the
// caller does not need to observe when the notification go routine exits.
func noopDone() {}
// ServerLocalBlockingQuery performs a blocking query similar to the pre-existing blockingQuery
// method on the agent/consul.Server type. There are a few key differences.
//
// 1. This function makes use of Go 1.18 generics. The function is parameterized with two
// types. The first is the ResultType which can be anything. Having this be parameterized
// instead of using interface{} allows us to simplify the call sites so that no type
// coercion from interface{} to the real type is necessary. The second parameterized type
// is something that VERY loosely resembles a agent/consul/state.Store type. The StateStore
// interface in this package has a single method to get the stores abandon channel so we
// know when a snapshot restore is occurring and can act accordingly. We could have not
// parameterized this type and used a real *state.Store instead but then we would have
// concrete dependencies on the state package and it would make it a little harder to
// test this function.
//
// We could have also avoided the need to use a ResultType parameter by taking the route
// the original blockingQuery method did and to just assume all callers close around
// a pointer to their results and can modify it as necessary. That way of doing things
// feels a little gross so I have taken this one a different direction. The old way
// also gets especially gross with how we have to push concerns of spurious wakeup
// suppression down into every call site.
//
// 2. This method has no internal timeout and can potentially run forever until a state
// change is observed. If there is a desire to have a timeout, that should be built into
// the context.Context passed as the first argument.
//
// 3. This method bakes in some newer functionality around hashing of results to prevent sending
// back data when nothing has actually changed. With the old blockingQuery method this has to
// be done within the closure passed to the method which means the same bit of code is duplicated
// in many places. As this functionality isn't necessary in many scenarios whether to opt-in to
// that behavior is a argument to this function.
//
// Similar to the older method:
//
// 1. Errors returned from the query will be propagated back to the caller.
//
// The query function must follow these rules:
//
// 1. To access data it must use the passed in StoreType (which will be a state.Store when
// everything gets stiched together outside of unit tests).
// 2. It must return an index greater than the minIndex if the results returned by the query
// have changed.
// 3. Any channels added to the memdb.WatchSet must unblock when the results
// returned by the query have changed.
//
// To ensure optimal performance of the query, the query function should make a
// best-effort attempt to follow these guidelines:
//
// 1. Only return an index greater than the minIndex.
// 2. Any channels added to the memdb.WatchSet should only unblock when the
// results returned by the query have changed. This might be difficult
// to do when blocking on non-existent data.
//
func ServerLocalBlockingQuery[ResultType any, StoreType StateStore](
	ctx context.Context,
	getStore func() StoreType,
	minIndex uint64,
	suppressSpuriousWakeup bool,
	query func(memdb.WatchSet, StoreType) (uint64, ResultType, error),
) (uint64, ResultType, error) {
	var (
		// notFound records whether the previous iteration returned
		// ErrorNotFound; used to advance minIndex so watches rooted on
		// non-existent data do not repeatedly return to the caller.
		notFound bool
		// ranOnce/priorHash implement spurious-wakeup suppression by
		// remembering the hash of the previously observed result.
		ranOnce   bool
		priorHash uint64
	)

	var zeroResult ResultType
	if getStore == nil {
		return 0, zeroResult, fmt.Errorf("no getStore function was provided to ServerLocalBlockingQuery")
	}
	if query == nil {
		return 0, zeroResult, fmt.Errorf("no query function was provided to ServerLocalBlockingQuery")
	}

	for {
		state := getStore()

		ws := memdb.NewWatchSet()

		// Adding the AbandonCh to the WatchSet allows us to detect when
		// a snapshot restore happens that would otherwise not modify anything
		// within the individual state store. If we didn't do this then we
		// could end up blocking indefinitely.
		ws.Add(state.AbandonCh())

		index, result, err := query(ws, state)

		// Always set a non-zero index. Generally we expect the index
		// to be set to Raft index which can never be 0. If the query
		// returned no results we expect it to be set to the max index of the table,
		// however we can't guarantee this always happens.
		// To prevent a client from accidentally performing many non-blocking queries
		// (which causes lots of unnecessary load), we always set a default value of 1.
		// This is sufficient to prevent the unnecessary load in most cases.
		if index < 1 {
			index = 1
		}

		switch {
		case errors.Is(err, ErrorNotFound):
			// if minIndex is 0 then we should never block but we
			// also should not propagate the error
			if minIndex == 0 {
				return index, result, nil
			}

			// update the min index if the previous result was not found. This
			// is an attempt to not return data unnecessarily when we end up
			// watching the root of a memdb Radix tree because the data being
			// watched doesn't exist yet.
			if notFound {
				minIndex = index
			}

			notFound = true
		case err != nil:
			return index, result, err
		}

		// when enabled we can prevent sending back data that hasn't changed.
		if suppressSpuriousWakeup {
			newHash, err := hashstructure_v2.Hash(result, hashstructure_v2.FormatV2, nil)
			if err != nil {
				return index, result, fmt.Errorf("error hashing data for spurious wakeup suppression: %w", err)
			}

			// set minIndex to the returned index to prevent sending back identical data
			if ranOnce && priorHash == newHash {
				minIndex = index
			}
			ranOnce = true
			priorHash = newHash
		}

		// one final check if we should be considered unblocked and
		// return the value. Some conditions in the switch above
		// alter the minIndex and prevent this return if it would
		// be desirable. One such case is when the actual data has
		// not changed since the last round through the query and
		// we would rather not do any further processing for unchanged
		// data. This mostly protects against watches for data that
		// doesn't exist from returning the non-existent value constantly.
		if index > minIndex {
			return index, result, nil
		}

		// Block until something changes. Because we have added the state
		// stores AbandonCh to this watch set, a snapshot restore will
		// cause things to unblock in addition to changes to the actual
		// queried data.
		if err := ws.WatchCtx(ctx); err != nil {
			// exit if the context was cancelled; the cancellation error is
			// deliberately not propagated to the caller.
			return index, result, nil
		}

		select {
		case <-state.AbandonCh():
			// snapshot restore: return the last observed result immediately.
			return index, result, nil
		default:
		}
	}
}
// ServerLocalNotify watches the State Store with the provided query function
// and invokes the notify callback each time the query's results change. If
// parameter validation fails an error is returned; otherwise the background
// go routine that processes notifications is spawned and nil is returned.
// Like ServerLocalBlockingQuery this relies on Go generics, for the reasons
// outlined in that function's documentation.
func ServerLocalNotify[ResultType any, StoreType StateStore](
	ctx context.Context,
	correlationID string,
	getStore func() StoreType,
	query func(memdb.WatchSet, StoreType) (uint64, ResultType, error),
	notify func(ctx context.Context, correlationID string, result ResultType, err error),
) error {
	// Public callers always get the default error-backoff configuration and
	// have no need to observe when the internal go routine finishes. Both
	// knobs are only exposed on the internal serverLocalNotify so that unit
	// tests can run quickly and deterministically without arbitrary sleeps.
	onDone := noopDone
	waiter := defaultWaiter()
	return serverLocalNotify(ctx, correlationID, getStore, query, notify, onDone, waiter)
}
// serverLocalNotify backs ServerLocalNotify. It takes two extra arguments —
// the backoff waiter to use and a callback invoked when the notification go
// routine has finished — both of which exist so in-package tests can override
// the defaults.
func serverLocalNotify[ResultType any, StoreType StateStore](
	ctx context.Context,
	correlationID string,
	getStore func() StoreType,
	query func(memdb.WatchSet, StoreType) (uint64, ResultType, error),
	notify func(ctx context.Context, correlationID string, result ResultType, err error),
	done func(),
	waiter *retry.Waiter,
) error {
	// Reject nil callbacks/context up front; the routine assumes all are set.
	switch {
	case ctx == nil:
		return errNilContext
	case getStore == nil:
		return errNilGetStore
	case query == nil:
		return errNilQuery
	case notify == nil:
		return errNilNotify
	}

	go serverLocalNotifyRoutine(ctx, correlationID, getStore, query, notify, done, waiter)
	return nil
}
// serverLocalNotifyRoutine is the function intended to be run within a new
// go routine to process the updates. It will not check to ensure callbacks
// are non-nil nor perform other parameter validation. It is assumed that
// the in-package caller of this method will have already done that. It also
// takes the backoff waiter in as an argument so that unit tests within this
// package can override the default values that the exported ServerLocalNotify
// function would have set up.
func serverLocalNotifyRoutine[ResultType any, StoreType StateStore](
	ctx context.Context,
	correlationID string,
	getStore func() StoreType,
	query func(memdb.WatchSet, StoreType) (uint64, ResultType, error),
	notify func(ctx context.Context, correlationID string, result ResultType, err error),
	done func(),
	waiter *retry.Waiter,
) {
	defer done()

	// minIndex tracks the highest index already delivered to notify; it is
	// also the blocking floor for the next query.
	var minIndex uint64

	for {
		// Check if the context has been cancelled. Do not issue
		// more queries if it has been cancelled.
		if ctx.Err() != nil {
			return
		}

		// Perform the blocking query (spurious-wakeup suppression enabled).
		index, result, err := ServerLocalBlockingQuery(ctx, getStore, minIndex, true, query)

		// Check if the context has been cancelled. If it has we should not send more
		// notifications.
		if ctx.Err() != nil {
			return
		}

		// Check the index to see if we should call notify. Errors are
		// delivered too (err is passed through to the callback).
		if minIndex == 0 || minIndex < index {
			notify(ctx, correlationID, result, err)
			minIndex = index
		}

		// Handle errors with backoff. Badly behaved blocking calls that returned
		// a zero index are considered as failures since we need to not get stuck
		// in a busy loop.
		if err == nil && index > 0 {
			waiter.Reset()
		} else {
			if waiter.Wait(ctx) != nil {
				return
			}
		}

		// ensure we don't use zero indexes
		if err == nil && minIndex < 1 {
			minIndex = 1
		}
	}
}

View File

@ -0,0 +1,454 @@
package watch
import (
"context"
"fmt"
"testing"
"time"
"github.com/hashicorp/consul/lib/retry"
"github.com/hashicorp/go-memdb"
mock "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// mockStoreProvider supplies mocked getStore/query/notify callbacks for
// exercising ServerLocalBlockingQuery and ServerLocalNotify.
type mockStoreProvider struct {
	mock.Mock
}

// newMockStoreProvider builds a provider whose expectations are asserted at
// test cleanup.
func newMockStoreProvider(t *testing.T) *mockStoreProvider {
	t.Helper()
	provider := &mockStoreProvider{}
	t.Cleanup(func() {
		provider.AssertExpectations(t)
	})
	return provider
}

// getStore satisfies the getStore callback shape used by the watch functions.
func (m *mockStoreProvider) getStore() *MockStateStore {
	return m.Called().Get(0).(*MockStateStore)
}

// testResult is the ResultType used by these tests.
type testResult struct {
	value string
}

// query satisfies the query callback shape; return values come from the
// mock expectations configured by each test.
func (m *mockStoreProvider) query(ws memdb.WatchSet, store *MockStateStore) (uint64, *testResult, error) {
	ret := m.Called(ws, store)

	index := ret.Get(0).(uint64)
	result := ret.Get(1).(*testResult)
	err := ret.Error(2)

	return index, result, err
}

// notify satisfies the notify callback shape; calls are recorded on the mock.
func (m *mockStoreProvider) notify(ctx context.Context, correlationID string, result *testResult, err error) {
	m.Called(ctx, correlationID, result, err)
}
// A nil getStore callback must be rejected before any query runs.
func TestServerLocalBlockingQuery_getStoreNotProvided(t *testing.T) {
	_, _, err := ServerLocalBlockingQuery(
		context.Background(),
		nil,
		0,
		true,
		func(memdb.WatchSet, *MockStateStore) (uint64, struct{}, error) {
			return 0, struct{}{}, nil
		},
	)
	require.Error(t, err)
	require.Contains(t, err.Error(), "no getStore function was provided")
}

// A nil query callback must be rejected before the store is consulted.
func TestServerLocalBlockingQuery_queryNotProvided(t *testing.T) {
	var query func(memdb.WatchSet, *MockStateStore) (uint64, struct{}, error)
	_, _, err := ServerLocalBlockingQuery(
		context.Background(),
		func() *MockStateStore { return nil },
		0,
		true,
		query,
	)
	require.Error(t, err)
	require.Contains(t, err.Error(), "no query function was provided")
}

// With minIndex 0 the query runs exactly once and returns immediately.
func TestServerLocalBlockingQuery_NonBlocking(t *testing.T) {
	abandonCh := make(chan struct{})
	t.Cleanup(func() { close(abandonCh) })

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Once()

	provider := newMockStoreProvider(t)
	provider.On("getStore").Return(store).Once()
	provider.On("query", mock.Anything, store).
		Return(uint64(1), &testResult{value: "foo"}, nil).
		Once()

	idx, result, err := ServerLocalBlockingQuery(
		context.Background(),
		provider.getStore,
		0,
		true,
		provider.query,
	)
	require.NoError(t, err)
	require.EqualValues(t, 1, idx)
	require.Equal(t, &testResult{value: "foo"}, result)
}

// A query returning index 0 must be reported as index 1 to callers.
func TestServerLocalBlockingQuery_Index0(t *testing.T) {
	abandonCh := make(chan struct{})
	t.Cleanup(func() { close(abandonCh) })

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Once()

	provider := newMockStoreProvider(t)
	provider.On("getStore").Return(store).Once()
	provider.On("query", mock.Anything, store).
		// the index 0 returned here should get translated to 1 by ServerLocalBlockingQuery
		Return(uint64(0), &testResult{value: "foo"}, nil).
		Once()

	idx, result, err := ServerLocalBlockingQuery(
		context.Background(),
		provider.getStore,
		0,
		true,
		provider.query,
	)
	require.NoError(t, err)
	require.EqualValues(t, 1, idx)
	require.Equal(t, &testResult{value: "foo"}, result)
}

// With minIndex 0, ErrorNotFound is swallowed and the nil result returned.
func TestServerLocalBlockingQuery_NotFound(t *testing.T) {
	abandonCh := make(chan struct{})
	t.Cleanup(func() { close(abandonCh) })

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Once()

	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Once()

	var nilResult *testResult
	provider.On("query", mock.Anything, store).
		Return(uint64(1), nilResult, ErrorNotFound).
		Once()

	idx, result, err := ServerLocalBlockingQuery(
		context.Background(),
		provider.getStore,
		0,
		true,
		provider.query,
	)
	require.NoError(t, err)
	require.EqualValues(t, 1, idx)
	require.Nil(t, result)
}

// Successive ErrorNotFound results must keep blocking (advancing minIndex)
// until real data appears.
func TestServerLocalBlockingQuery_NotFoundBlocks(t *testing.T) {
	abandonCh := make(chan struct{})
	t.Cleanup(func() { close(abandonCh) })

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Times(5)

	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Times(3)

	var nilResult *testResult
	// Initial data returned is not found and has an index less than the original
	// blocking index. This should not return data to the caller.
	provider.On("query", mock.Anything, store).
		Return(uint64(4), nilResult, ErrorNotFound).
		Run(addReadyWatchSet).
		Once()
	// There is an update to the data but the value still doesn't exist. Therefore
	// we should not return data to the caller.
	provider.On("query", mock.Anything, store).
		Return(uint64(6), nilResult, ErrorNotFound).
		Run(addReadyWatchSet).
		Once()
	// Finally we have some real data and can return it to the caller.
	provider.On("query", mock.Anything, store).
		Return(uint64(7), &testResult{value: "foo"}, nil).
		Once()

	idx, result, err := ServerLocalBlockingQuery(
		context.Background(),
		provider.getStore,
		5,
		true,
		provider.query,
	)
	require.NoError(t, err)
	require.EqualValues(t, 7, idx)
	require.Equal(t, &testResult{value: "foo"}, result)
}
// Non-ErrorNotFound query errors propagate to the caller along with the index.
func TestServerLocalBlockingQuery_Error(t *testing.T) {
	abandonCh := make(chan struct{})
	t.Cleanup(func() { close(abandonCh) })

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Once()

	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Once()

	var nilResult *testResult
	provider.On("query", mock.Anything, store).
		Return(uint64(10), nilResult, fmt.Errorf("synthetic error")).
		Once()

	idx, result, err := ServerLocalBlockingQuery(
		context.Background(),
		provider.getStore,
		4,
		true,
		provider.query,
	)
	require.Error(t, err)
	require.Contains(t, err.Error(), "synthetic error")
	require.EqualValues(t, 10, idx)
	require.Nil(t, result)
}

// Context cancellation unblocks the query and returns the last result with
// no error.
func TestServerLocalBlockingQuery_ContextCancellation(t *testing.T) {
	abandonCh := make(chan struct{})
	t.Cleanup(func() { close(abandonCh) })

	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Once()

	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Once()
	provider.On("query", mock.Anything, store).
		// Return an index that should not cause the blocking query to return.
		Return(uint64(4), &testResult{value: "foo"}, nil).
		Once().
		Run(func(_ mock.Arguments) {
			// Cancel the context so that the memdb WatchCtx call will error.
			cancel()
		})

	idx, result, err := ServerLocalBlockingQuery(
		ctx,
		provider.getStore,
		8,
		true,
		provider.query,
	)
	// The internal cancellation error should not be propagated.
	require.NoError(t, err)
	require.EqualValues(t, 4, idx)
	require.Equal(t, &testResult{value: "foo"}, result)
}

// Abandoning the state store (snapshot restore) unblocks the query and
// returns the last result with no error.
func TestServerLocalBlockingQuery_StateAbandoned(t *testing.T) {
	abandonCh := make(chan struct{})

	store := NewMockStateStore(t)
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Twice()

	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Once()
	provider.On("query", mock.Anything, store).
		// Return an index that should not cause the blocking query to return.
		Return(uint64(4), &testResult{value: "foo"}, nil).
		Once().
		Run(func(_ mock.Arguments) {
			// Close the abandon channel so the query unblocks as if a
			// snapshot restore had occurred.
			close(abandonCh)
		})

	idx, result, err := ServerLocalBlockingQuery(
		context.Background(),
		provider.getStore,
		8,
		true,
		provider.query,
	)
	// Abandonment should unblock the query without propagating an error.
	require.NoError(t, err)
	require.EqualValues(t, 4, idx)
	require.Equal(t, &testResult{value: "foo"}, result)
}
func TestServerLocalNotify_Validations(t *testing.T) {
provider := newMockStoreProvider(t)
type testCase struct {
ctx context.Context
getStore func() *MockStateStore
query func(memdb.WatchSet, *MockStateStore) (uint64, *testResult, error)
notify func(context.Context, string, *testResult, error)
err error
}
cases := map[string]testCase{
"nil-context": {
getStore: provider.getStore,
query: provider.query,
notify: provider.notify,
err: errNilContext,
},
"nil-getStore": {
ctx: context.Background(),
query: provider.query,
notify: provider.notify,
err: errNilGetStore,
},
"nil-query": {
ctx: context.Background(),
getStore: provider.getStore,
notify: provider.notify,
err: errNilQuery,
},
"nil-notify": {
ctx: context.Background(),
getStore: provider.getStore,
query: provider.query,
err: errNilNotify,
},
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
err := ServerLocalNotify(tcase.ctx, "test", tcase.getStore, tcase.query, tcase.notify)
require.ErrorIs(t, err, tcase.err)
})
}
}
// TestServerLocalNotify exercises the exported ServerLocalNotify entry point
// end to end against a mocked store provider: two successful query results
// ("foo" at index 4, then "bar" at index 6) must each produce exactly one
// notify call, and a third query returning context.Canceled (triggered by
// cancelling notifyCtx from inside the mock) must terminate the background
// routine without a further notification.
func TestServerLocalNotify(t *testing.T) {
	// Cancelling notifyCtx is what ultimately stops the backgrounded routine;
	// the cancellation is fired from the third mocked "query" call below.
	notifyCtx, notifyCancel := context.WithCancel(context.Background())
	t.Cleanup(notifyCancel)
	abandonCh := make(chan struct{})
	store := NewMockStateStore(t)
	// AbandonCh is consulted once per query iteration; the channel is never
	// closed here, so store abandonment never interrupts the test.
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Times(3)
	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Times(3)
	// First query: index 4, value "foo" -> expect one notify with "foo".
	provider.On("query", mock.Anything, store).
		Return(uint64(4), &testResult{value: "foo"}, nil).
		Once()
	provider.On("notify", notifyCtx, t.Name(), &testResult{value: "foo"}, nil).Once()
	// Second query: index advances to 6, value "bar" -> one more notify.
	provider.On("query", mock.Anything, store).
		Return(uint64(6), &testResult{value: "bar"}, nil).
		Once()
	provider.On("notify", notifyCtx, t.Name(), &testResult{value: "bar"}, nil).Once()
	// Third query: returns context.Canceled and simultaneously cancels
	// notifyCtx via the Run hook, which should end the routine with no
	// additional notify call (the mock would fail on an unexpected one).
	provider.On("query", mock.Anything, store).
		Return(uint64(7), &testResult{value: "baz"}, context.Canceled).
		Run(func(mock.Arguments) {
			notifyCancel()
		})
	// routineDone is handed to the internal helper as the completion callback;
	// doneCtx.Done() therefore signals that the background routine exited.
	doneCtx, routineDone := context.WithCancel(context.Background())
	err := serverLocalNotify(notifyCtx, t.Name(), provider.getStore, provider.query, provider.notify, routineDone, defaultWaiter())
	require.NoError(t, err)
	// Wait for the context cancellation which will happen when the "query" func is run the third time. The doneCtx gets "cancelled"
	// by the backgrounded go routine when it is actually finished. We need to wait for this to ensure that all mocked calls have been
	// made and that no extra calls get made.
	<-doneCtx.Done()
}
// TestServerLocalNotify_internal drives the unexported serverLocalNotifyRoutine
// directly (bypassing ServerLocalNotify's validation) to verify its
// error/retry behavior: repeated query errors notify only once because the
// index does not advance past the minimum, and a later successful result is
// delivered before the routine is shut down via context cancellation.
func TestServerLocalNotify_internal(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	abandonCh := make(chan struct{})
	store := NewMockStateStore(t)
	// One AbandonCh check per query attempt (3 failures + 1 success = 4).
	store.On("AbandonCh").
		Return(closeChan(abandonCh)).
		Times(4)
	// The routine passes a typed-nil *testResult through on error; the mock
	// expectation must match that exact typed nil, not an untyped nil.
	var nilResult *testResult
	provider := newMockStoreProvider(t)
	provider.On("getStore").
		Return(store).
		Times(4)
	// Three consecutive query failures at index 0.
	provider.On("query", mock.Anything, store).
		Return(uint64(0), nilResult, fmt.Errorf("injected error")).
		Times(3)
	// we should only notify the first time as the index of 1 wont exceed the min index
	// after the second two queries.
	provider.On("notify", ctx, "test", nilResult, fmt.Errorf("injected error")).
		Once()
	// Fourth query succeeds at index 7 -> one notify with the real result,
	// whose Run hook cancels ctx to stop the routine.
	provider.On("query", mock.Anything, store).
		Return(uint64(7), &testResult{value: "foo"}, nil).
		Once()
	provider.On("notify", ctx, "test", &testResult{value: "foo"}, nil).
		Once().
		Run(func(mock.Arguments) {
			cancel()
		})
	// Fast backoff so the three error retries complete quickly; MinFailures=1
	// means the first failure does not wait at all.
	waiter := retry.Waiter{
		MinFailures: 1,
		MinWait:     time.Millisecond,
		MaxWait:     50 * time.Millisecond,
		Jitter:      retry.NewJitter(100),
		Factor:      2 * time.Millisecond,
	}
	// all the mock expectations should ensure things are working properly
	serverLocalNotifyRoutine(ctx, "test", provider.getStore, provider.query, provider.notify, noopDone, &waiter)
}
// addReadyWatchSet is a testify Run hook that pulls the memdb.WatchSet out of
// the first mock argument and adds an already-closed channel to it, so that
// any subsequent wait on the watch set fires immediately.
func addReadyWatchSet(args mock.Arguments) {
	watchSet := args.Get(0).(memdb.WatchSet)
	fired := make(chan struct{})
	close(fired)
	watchSet.Add(fired)
}
// closeChan converts a bidirectional struct{} channel into its receive-only
// form. It exists purely for readability: the inline conversion syntax
// (<-chan struct{})(ch) is hard to scan in mock return expressions.
func closeChan(ch chan struct{}) <-chan struct{} {
	var recvOnly <-chan struct{} = ch
	return recvOnly
}

View File

@ -145,15 +145,15 @@ func (s *HTTPHandlers) IntentionMatch(resp http.ResponseWriter, req *http.Reques
// order of the returned responses. // order of the returned responses.
args.Match.Entries = make([]structs.IntentionMatchEntry, len(names)) args.Match.Entries = make([]structs.IntentionMatchEntry, len(names))
for i, n := range names { for i, n := range names {
ap, ns, name, err := parseIntentionStringComponent(n, &entMeta) parsed, err := parseIntentionStringComponent(n, &entMeta, false)
if err != nil { if err != nil {
return nil, fmt.Errorf("name %q is invalid: %s", n, err) return nil, fmt.Errorf("name %q is invalid: %s", n, err)
} }
args.Match.Entries[i] = structs.IntentionMatchEntry{ args.Match.Entries[i] = structs.IntentionMatchEntry{
Partition: ap, Partition: parsed.ap,
Namespace: ns, Namespace: parsed.ns,
Name: name, Name: parsed.name,
} }
} }
@ -235,23 +235,23 @@ func (s *HTTPHandlers) IntentionCheck(resp http.ResponseWriter, req *http.Reques
// We parse them the same way as matches to extract partition/namespace/name // We parse them the same way as matches to extract partition/namespace/name
args.Check.SourceName = source[0] args.Check.SourceName = source[0]
if args.Check.SourceType == structs.IntentionSourceConsul { if args.Check.SourceType == structs.IntentionSourceConsul {
ap, ns, name, err := parseIntentionStringComponent(source[0], &entMeta) parsed, err := parseIntentionStringComponent(source[0], &entMeta, false)
if err != nil { if err != nil {
return nil, fmt.Errorf("source %q is invalid: %s", source[0], err) return nil, fmt.Errorf("source %q is invalid: %s", source[0], err)
} }
args.Check.SourcePartition = ap args.Check.SourcePartition = parsed.ap
args.Check.SourceNS = ns args.Check.SourceNS = parsed.ns
args.Check.SourceName = name args.Check.SourceName = parsed.name
} }
// The destination is always in the Consul format // The destination is always in the Consul format
ap, ns, name, err := parseIntentionStringComponent(destination[0], &entMeta) parsed, err := parseIntentionStringComponent(destination[0], &entMeta, false)
if err != nil { if err != nil {
return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err) return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err)
} }
args.Check.DestinationPartition = ap args.Check.DestinationPartition = parsed.ap
args.Check.DestinationNS = ns args.Check.DestinationNS = parsed.ns
args.Check.DestinationName = name args.Check.DestinationName = parsed.name
var reply structs.IntentionQueryCheckResponse var reply structs.IntentionQueryCheckResponse
if err := s.agent.RPC("Intention.Check", args, &reply); err != nil { if err := s.agent.RPC("Intention.Check", args, &reply); err != nil {
@ -302,23 +302,25 @@ func (s *HTTPHandlers) IntentionGetExact(resp http.ResponseWriter, req *http.Req
} }
{ {
ap, ns, name, err := parseIntentionStringComponent(source[0], &entMeta) parsed, err := parseIntentionStringComponent(source[0], &entMeta, true)
if err != nil { if err != nil {
return nil, fmt.Errorf("source %q is invalid: %s", source[0], err) return nil, fmt.Errorf("source %q is invalid: %s", source[0], err)
} }
args.Exact.SourcePartition = ap
args.Exact.SourceNS = ns args.Exact.SourcePeer = parsed.peer
args.Exact.SourceName = name args.Exact.SourcePartition = parsed.ap
args.Exact.SourceNS = parsed.ns
args.Exact.SourceName = parsed.name
} }
{ {
ap, ns, name, err := parseIntentionStringComponent(destination[0], &entMeta) parsed, err := parseIntentionStringComponent(destination[0], &entMeta, false)
if err != nil { if err != nil {
return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err) return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err)
} }
args.Exact.DestinationPartition = ap args.Exact.DestinationPartition = parsed.ap
args.Exact.DestinationNS = ns args.Exact.DestinationNS = parsed.ns
args.Exact.DestinationName = name args.Exact.DestinationName = parsed.name
} }
var reply structs.IndexedIntentions var reply structs.IndexedIntentions
@ -444,42 +446,67 @@ func parseIntentionQueryExact(req *http.Request, entMeta *acl.EnterpriseMeta) (*
var exact structs.IntentionQueryExact var exact structs.IntentionQueryExact
{ {
ap, ns, name, err := parseIntentionStringComponent(source[0], entMeta) parsed, err := parseIntentionStringComponent(source[0], entMeta, false)
if err != nil { if err != nil {
return nil, fmt.Errorf("source %q is invalid: %s", source[0], err) return nil, fmt.Errorf("source %q is invalid: %s", source[0], err)
} }
exact.SourcePartition = ap exact.SourcePartition = parsed.ap
exact.SourceNS = ns exact.SourceNS = parsed.ns
exact.SourceName = name exact.SourceName = parsed.name
} }
{ {
ap, ns, name, err := parseIntentionStringComponent(destination[0], entMeta) parsed, err := parseIntentionStringComponent(destination[0], entMeta, false)
if err != nil { if err != nil {
return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err) return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err)
} }
exact.DestinationPartition = ap exact.DestinationPartition = parsed.ap
exact.DestinationNS = ns exact.DestinationNS = parsed.ns
exact.DestinationName = name exact.DestinationName = parsed.name
} }
return &exact, nil return &exact, nil
} }
func parseIntentionStringComponent(input string, entMeta *acl.EnterpriseMeta) (string, string, string, error) { type parsedIntentionInput struct {
peer, ap, ns, name string
}
func parseIntentionStringComponent(input string, entMeta *acl.EnterpriseMeta, allowPeerKeyword bool) (*parsedIntentionInput, error) {
if strings.HasPrefix(input, "peer:") && !allowPeerKeyword {
return nil, fmt.Errorf("cannot specify a peer here")
}
ss := strings.Split(input, "/") ss := strings.Split(input, "/")
switch len(ss) { switch len(ss) {
case 1: // Name only case 1: // Name only
// need to specify at least the service name too
if strings.HasPrefix(ss[0], "peer:") {
return nil, fmt.Errorf("need to specify the service name as well")
}
ns := entMeta.NamespaceOrEmpty() ns := entMeta.NamespaceOrEmpty()
ap := entMeta.PartitionOrEmpty() ap := entMeta.PartitionOrEmpty()
return ap, ns, ss[0], nil return &parsedIntentionInput{ap: ap, ns: ns, name: ss[0]}, nil
case 2: // namespace/name case 2: // peer:peer/name OR namespace/name
if strings.HasPrefix(ss[0], "peer:") {
peerName := strings.TrimPrefix(ss[0], "peer:")
ns := entMeta.NamespaceOrEmpty()
return &parsedIntentionInput{peer: peerName, ns: ns, name: ss[1]}, nil
}
ap := entMeta.PartitionOrEmpty() ap := entMeta.PartitionOrEmpty()
return ap, ss[0], ss[1], nil return &parsedIntentionInput{ap: ap, ns: ss[0], name: ss[1]}, nil
case 3: // partition/namespace/name case 3: // peer:peer/namespace/name OR partition/namespace/name
return ss[0], ss[1], ss[2], nil if strings.HasPrefix(ss[0], "peer:") {
peerName := strings.TrimPrefix(ss[0], "peer:")
return &parsedIntentionInput{peer: peerName, ns: ss[1], name: ss[2]}, nil
} else {
return &parsedIntentionInput{ap: ss[0], ns: ss[1], name: ss[2]}, nil
}
default: default:
return "", "", "", fmt.Errorf("input can contain at most two '/'") return nil, fmt.Errorf("input can contain at most two '/'")
} }
} }

View File

@ -349,6 +349,57 @@ func TestIntentionCheck(t *testing.T) {
}) })
} }
func TestIntentionGetExact_PeerIntentions(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
testutil.RunStep(t, "create a peer intentions", func(t *testing.T) {
configEntryIntention := structs.ServiceIntentionsConfigEntry{
Kind: structs.ServiceIntentions,
Name: "bar",
Sources: []*structs.SourceIntention{
{
Name: "foo",
Peer: "peer1",
Action: structs.IntentionActionAllow,
},
},
}
req, err := http.NewRequest("PUT", "/v1/config", jsonReader(configEntryIntention))
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.ConfigApply(resp, req)
require.NoError(t, err)
applied, ok := obj.(bool)
require.True(t, ok)
require.True(t, applied)
})
t.Run("get peer intention", func(t *testing.T) {
req, err := http.NewRequest("GET", "/v1/connect/intentions/exact?source=peer:peer1/foo&destination=bar", nil)
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.IntentionExact(resp, req)
require.NoError(t, err)
require.NotNil(t, obj)
value, ok := obj.(*structs.Intention)
require.True(t, ok)
require.Equal(t, "peer1", value.SourcePeer)
require.Equal(t, "foo", value.SourceName)
require.Equal(t, "bar", value.DestinationName)
})
}
func TestIntentionGetExact(t *testing.T) { func TestIntentionGetExact(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")
@ -828,6 +879,8 @@ func TestParseIntentionStringComponent(t *testing.T) {
cases := []struct { cases := []struct {
TestName string TestName string
Input string Input string
AllowsPeers bool
ExpectedPeer string
ExpectedAP string ExpectedAP string
ExpectedNS string ExpectedNS string
ExpectedName string ExpectedName string
@ -866,20 +919,47 @@ func TestParseIntentionStringComponent(t *testing.T) {
Input: "uhoh/blah/foo/bar", Input: "uhoh/blah/foo/bar",
Err: true, Err: true,
}, },
{
TestName: "peered without namespace",
Input: "peer:peer1/service_name",
AllowsPeers: true,
ExpectedPeer: "peer1",
ExpectedAP: "",
ExpectedNS: "",
ExpectedName: "service_name",
},
{
TestName: "need to specify at least a service",
Input: "peer:peer1",
Err: true,
},
{
TestName: "peered not allowed error",
Input: "peer:peer1/service_name",
AllowsPeers: false,
Err: true,
},
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.TestName, func(t *testing.T) { t.Run(tc.TestName, func(t *testing.T) {
var entMeta acl.EnterpriseMeta var entMeta acl.EnterpriseMeta
ap, ns, name, err := parseIntentionStringComponent(tc.Input, &entMeta) parsed, err := parseIntentionStringComponent(tc.Input, &entMeta, tc.AllowsPeers)
if tc.Err { if tc.Err {
require.Error(t, err) require.Error(t, err)
} else { } else {
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tc.ExpectedAP, ap) if tc.AllowsPeers {
assert.Equal(t, tc.ExpectedNS, ns) assert.Equal(t, tc.ExpectedPeer, parsed.peer)
assert.Equal(t, tc.ExpectedName, name) assert.Equal(t, "", parsed.ap)
} else {
assert.Equal(t, tc.ExpectedAP, parsed.ap)
assert.Equal(t, "", parsed.peer)
}
assert.Equal(t, tc.ExpectedNS, parsed.ns)
assert.Equal(t, tc.ExpectedName, parsed.name)
} }
}) })
} }

View File

@ -22,8 +22,8 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
snap.ConnectProxy.WatchedDiscoveryChains = make(map[UpstreamID]context.CancelFunc) snap.ConnectProxy.WatchedDiscoveryChains = make(map[UpstreamID]context.CancelFunc)
snap.ConnectProxy.WatchedUpstreams = make(map[UpstreamID]map[string]context.CancelFunc) snap.ConnectProxy.WatchedUpstreams = make(map[UpstreamID]map[string]context.CancelFunc)
snap.ConnectProxy.WatchedUpstreamEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes) snap.ConnectProxy.WatchedUpstreamEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
snap.ConnectProxy.WatchedPeerTrustBundles = make(map[string]context.CancelFunc) snap.ConnectProxy.WatchedUpstreamPeerTrustBundles = make(map[string]context.CancelFunc)
snap.ConnectProxy.PeerTrustBundles = make(map[string]*pbpeering.PeeringTrustBundle) snap.ConnectProxy.UpstreamPeerTrustBundles = make(map[string]*pbpeering.PeeringTrustBundle)
snap.ConnectProxy.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc) snap.ConnectProxy.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
snap.ConnectProxy.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes) snap.ConnectProxy.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType) snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType)
@ -212,7 +212,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
} }
// Check whether a watch for this peer exists to avoid duplicates. // Check whether a watch for this peer exists to avoid duplicates.
if _, ok := snap.ConnectProxy.WatchedPeerTrustBundles[uid.Peer]; !ok { if _, ok := snap.ConnectProxy.WatchedUpstreamPeerTrustBundles[uid.Peer]; !ok {
peerCtx, cancel := context.WithCancel(ctx) peerCtx, cancel := context.WithCancel(ctx)
if err := s.dataSources.TrustBundle.Notify(peerCtx, &pbpeering.TrustBundleReadRequest{ if err := s.dataSources.TrustBundle.Notify(peerCtx, &pbpeering.TrustBundleReadRequest{
Name: uid.Peer, Name: uid.Peer,
@ -222,7 +222,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
return snap, fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err) return snap, fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
} }
snap.ConnectProxy.WatchedPeerTrustBundles[uid.Peer] = cancel snap.ConnectProxy.WatchedUpstreamPeerTrustBundles[uid.Peer] = cancel
} }
continue continue
} }
@ -270,7 +270,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
} }
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix) peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
if resp.Bundle != nil { if resp.Bundle != nil {
snap.ConnectProxy.PeerTrustBundles[peer] = resp.Bundle snap.ConnectProxy.UpstreamPeerTrustBundles[peer] = resp.Bundle
} }
case u.CorrelationID == peeringTrustBundlesWatchID: case u.CorrelationID == peeringTrustBundlesWatchID:
@ -279,9 +279,9 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
return fmt.Errorf("invalid type for response: %T", u.Result) return fmt.Errorf("invalid type for response: %T", u.Result)
} }
if len(resp.Bundles) > 0 { if len(resp.Bundles) > 0 {
snap.ConnectProxy.PeeringTrustBundles = resp.Bundles snap.ConnectProxy.InboundPeerTrustBundles = resp.Bundles
} }
snap.ConnectProxy.PeeringTrustBundlesSet = true snap.ConnectProxy.InboundPeerTrustBundlesSet = true
case u.CorrelationID == intentionsWatchID: case u.CorrelationID == intentionsWatchID:
resp, ok := u.Result.(*structs.IndexedIntentionMatches) resp, ok := u.Result.(*structs.IndexedIntentionMatches)

View File

@ -238,7 +238,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
}, },
PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{}, PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{},
PassthroughIndices: map[string]indexedTarget{}, PassthroughIndices: map[string]indexedTarget{},
PeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{}, UpstreamPeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{},
PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{}, PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{}, PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{},
}, },
@ -299,7 +299,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
}, },
PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{}, PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{},
PassthroughIndices: map[string]indexedTarget{}, PassthroughIndices: map[string]indexedTarget{},
PeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{}, UpstreamPeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{},
PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{}, PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{}, PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{},
}, },

View File

@ -44,13 +44,13 @@ type ConfigSnapshotUpstreams struct {
// endpoints of an upstream. // endpoints of an upstream.
WatchedUpstreamEndpoints map[UpstreamID]map[string]structs.CheckServiceNodes WatchedUpstreamEndpoints map[UpstreamID]map[string]structs.CheckServiceNodes
// WatchedPeerTrustBundles is a map of (PeerName -> CancelFunc) in order to cancel // WatchedUpstreamPeerTrustBundles is a map of (PeerName -> CancelFunc) in order to cancel
// watches for peer trust bundles any time the list of upstream peers changes. // watches for peer trust bundles any time the list of upstream peers changes.
WatchedPeerTrustBundles map[string]context.CancelFunc WatchedUpstreamPeerTrustBundles map[string]context.CancelFunc
// PeerTrustBundles is a map of (PeerName -> PeeringTrustBundle). // UpstreamPeerTrustBundles is a map of (PeerName -> PeeringTrustBundle).
// It is used to store trust bundles for upstream TLS transport sockets. // It is used to store trust bundles for upstream TLS transport sockets.
PeerTrustBundles map[string]*pbpeering.PeeringTrustBundle UpstreamPeerTrustBundles map[string]*pbpeering.PeeringTrustBundle
// WatchedGateways is a map of UpstreamID -> (map of GatewayKey.String() -> // WatchedGateways is a map of UpstreamID -> (map of GatewayKey.String() ->
// CancelFunc) in order to cancel watches for mesh gateways // CancelFunc) in order to cancel watches for mesh gateways
@ -128,8 +128,8 @@ func gatewayKeyFromString(s string) GatewayKey {
type configSnapshotConnectProxy struct { type configSnapshotConnectProxy struct {
ConfigSnapshotUpstreams ConfigSnapshotUpstreams
PeeringTrustBundlesSet bool InboundPeerTrustBundlesSet bool
PeeringTrustBundles []*pbpeering.PeeringTrustBundle InboundPeerTrustBundles []*pbpeering.PeeringTrustBundle
WatchedServiceChecks map[structs.ServiceID][]structs.CheckType // TODO: missing garbage collection WatchedServiceChecks map[structs.ServiceID][]structs.CheckType // TODO: missing garbage collection
PreparedQueryEndpoints map[UpstreamID]structs.CheckServiceNodes // DEPRECATED:see:WatchedUpstreamEndpoints PreparedQueryEndpoints map[UpstreamID]structs.CheckServiceNodes // DEPRECATED:see:WatchedUpstreamEndpoints
@ -152,8 +152,8 @@ func (c *configSnapshotConnectProxy) isEmpty() bool {
len(c.WatchedDiscoveryChains) == 0 && len(c.WatchedDiscoveryChains) == 0 &&
len(c.WatchedUpstreams) == 0 && len(c.WatchedUpstreams) == 0 &&
len(c.WatchedUpstreamEndpoints) == 0 && len(c.WatchedUpstreamEndpoints) == 0 &&
len(c.WatchedPeerTrustBundles) == 0 && len(c.WatchedUpstreamPeerTrustBundles) == 0 &&
len(c.PeerTrustBundles) == 0 && len(c.UpstreamPeerTrustBundles) == 0 &&
len(c.WatchedGateways) == 0 && len(c.WatchedGateways) == 0 &&
len(c.WatchedGatewayEndpoints) == 0 && len(c.WatchedGatewayEndpoints) == 0 &&
len(c.WatchedServiceChecks) == 0 && len(c.WatchedServiceChecks) == 0 &&
@ -161,7 +161,7 @@ func (c *configSnapshotConnectProxy) isEmpty() bool {
len(c.UpstreamConfig) == 0 && len(c.UpstreamConfig) == 0 &&
len(c.PassthroughUpstreams) == 0 && len(c.PassthroughUpstreams) == 0 &&
len(c.IntentionUpstreams) == 0 && len(c.IntentionUpstreams) == 0 &&
!c.PeeringTrustBundlesSet && !c.InboundPeerTrustBundlesSet &&
!c.MeshConfigSet && !c.MeshConfigSet &&
len(c.PeerUpstreamEndpoints) == 0 && len(c.PeerUpstreamEndpoints) == 0 &&
len(c.PeerUpstreamEndpointsUseHostnames) == 0 len(c.PeerUpstreamEndpointsUseHostnames) == 0
@ -637,7 +637,7 @@ func (s *ConfigSnapshot) Clone() (*ConfigSnapshot, error) {
snap.ConnectProxy.WatchedUpstreams = nil snap.ConnectProxy.WatchedUpstreams = nil
snap.ConnectProxy.WatchedGateways = nil snap.ConnectProxy.WatchedGateways = nil
snap.ConnectProxy.WatchedDiscoveryChains = nil snap.ConnectProxy.WatchedDiscoveryChains = nil
snap.ConnectProxy.WatchedPeerTrustBundles = nil snap.ConnectProxy.WatchedUpstreamPeerTrustBundles = nil
case structs.ServiceKindTerminatingGateway: case structs.ServiceKindTerminatingGateway:
snap.TerminatingGateway.WatchedServices = nil snap.TerminatingGateway.WatchedServices = nil
snap.TerminatingGateway.WatchedIntentions = nil snap.TerminatingGateway.WatchedIntentions = nil
@ -652,7 +652,7 @@ func (s *ConfigSnapshot) Clone() (*ConfigSnapshot, error) {
snap.IngressGateway.WatchedUpstreams = nil snap.IngressGateway.WatchedUpstreams = nil
snap.IngressGateway.WatchedGateways = nil snap.IngressGateway.WatchedGateways = nil
snap.IngressGateway.WatchedDiscoveryChains = nil snap.IngressGateway.WatchedDiscoveryChains = nil
snap.IngressGateway.WatchedPeerTrustBundles = nil snap.IngressGateway.WatchedUpstreamPeerTrustBundles = nil
// only ingress-gateway // only ingress-gateway
snap.IngressGateway.LeafCertWatchCancel = nil snap.IngressGateway.LeafCertWatchCancel = nil
} }
@ -676,7 +676,7 @@ func (s *ConfigSnapshot) Leaf() *structs.IssuedCert {
func (s *ConfigSnapshot) PeeringTrustBundles() []*pbpeering.PeeringTrustBundle { func (s *ConfigSnapshot) PeeringTrustBundles() []*pbpeering.PeeringTrustBundle {
switch s.Kind { switch s.Kind {
case structs.ServiceKindConnectProxy: case structs.ServiceKindConnectProxy:
return s.ConnectProxy.PeeringTrustBundles return s.ConnectProxy.InboundPeerTrustBundles
case structs.ServiceKindMeshGateway: case structs.ServiceKindMeshGateway:
return s.MeshGateway.PeeringTrustBundles return s.MeshGateway.PeeringTrustBundles
default: default:
@ -755,7 +755,7 @@ func (u *ConfigSnapshotUpstreams) PeeredUpstreamIDs() []UpstreamID {
continue continue
} }
if _, ok := u.PeerTrustBundles[uid.Peer]; uid.Peer != "" && !ok { if _, ok := u.UpstreamPeerTrustBundles[uid.Peer]; uid.Peer != "" && !ok {
// The trust bundle for this upstream is not available yet, skip for now. // The trust bundle for this upstream is not available yet, skip for now.
continue continue
} }

View File

@ -2572,15 +2572,15 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Len(t, snap.ConnectProxy.WatchedGateways, 0, "%+v", snap.ConnectProxy.WatchedGateways) require.Len(t, snap.ConnectProxy.WatchedGateways, 0, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 0, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints) require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 0, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Contains(t, snap.ConnectProxy.WatchedPeerTrustBundles, "peer-a", "%+v", snap.ConnectProxy.WatchedPeerTrustBundles) require.Contains(t, snap.ConnectProxy.WatchedUpstreamPeerTrustBundles, "peer-a", "%+v", snap.ConnectProxy.WatchedUpstreamPeerTrustBundles)
require.Len(t, snap.ConnectProxy.PeerTrustBundles, 0, "%+v", snap.ConnectProxy.PeerTrustBundles) require.Len(t, snap.ConnectProxy.UpstreamPeerTrustBundles, 0, "%+v", snap.ConnectProxy.UpstreamPeerTrustBundles)
require.Len(t, snap.ConnectProxy.PeerUpstreamEndpoints, 0, "%+v", snap.ConnectProxy.PeerUpstreamEndpoints) require.Len(t, snap.ConnectProxy.PeerUpstreamEndpoints, 0, "%+v", snap.ConnectProxy.PeerUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
require.Len(t, snap.ConnectProxy.PeeringTrustBundles, 0, "%+v", snap.ConnectProxy.PeeringTrustBundles) require.Len(t, snap.ConnectProxy.InboundPeerTrustBundles, 0, "%+v", snap.ConnectProxy.InboundPeerTrustBundles)
require.False(t, snap.ConnectProxy.PeeringTrustBundlesSet) require.False(t, snap.ConnectProxy.InboundPeerTrustBundlesSet)
}, },
}, },
{ {
@ -2655,7 +2655,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, indexedRoots, snap.Roots)
require.Equal(t, issuedCert, snap.ConnectProxy.Leaf) require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
prototest.AssertDeepEqual(t, peerTrustBundles.Bundles, snap.ConnectProxy.PeeringTrustBundles) prototest.AssertDeepEqual(t, peerTrustBundles.Bundles, snap.ConnectProxy.InboundPeerTrustBundles)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 1, "%+v", snap.ConnectProxy.DiscoveryChain) require.Len(t, snap.ConnectProxy.DiscoveryChain, 1, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 1, "%+v", snap.ConnectProxy.WatchedUpstreams) require.Len(t, snap.ConnectProxy.WatchedUpstreams, 1, "%+v", snap.ConnectProxy.WatchedUpstreams)
@ -2663,8 +2663,8 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Len(t, snap.ConnectProxy.WatchedGateways, 1, "%+v", snap.ConnectProxy.WatchedGateways) require.Len(t, snap.ConnectProxy.WatchedGateways, 1, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 1, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints) require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 1, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Contains(t, snap.ConnectProxy.WatchedPeerTrustBundles, "peer-a", "%+v", snap.ConnectProxy.WatchedPeerTrustBundles) require.Contains(t, snap.ConnectProxy.WatchedUpstreamPeerTrustBundles, "peer-a", "%+v", snap.ConnectProxy.WatchedUpstreamPeerTrustBundles)
require.Equal(t, peerTrustBundles.Bundles[0], snap.ConnectProxy.PeerTrustBundles["peer-a"], "%+v", snap.ConnectProxy.WatchedPeerTrustBundles) require.Equal(t, peerTrustBundles.Bundles[0], snap.ConnectProxy.UpstreamPeerTrustBundles["peer-a"], "%+v", snap.ConnectProxy.WatchedUpstreamPeerTrustBundles)
require.Len(t, snap.ConnectProxy.PeerUpstreamEndpoints, 1, "%+v", snap.ConnectProxy.PeerUpstreamEndpoints) require.Len(t, snap.ConnectProxy.PeerUpstreamEndpoints, 1, "%+v", snap.ConnectProxy.PeerUpstreamEndpoints)
require.NotNil(t, snap.ConnectProxy.PeerUpstreamEndpoints[extApiUID]) require.NotNil(t, snap.ConnectProxy.PeerUpstreamEndpoints[extApiUID])

View File

@ -24,6 +24,7 @@ import (
"github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/agent/dns"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbpeering"
) )
@ -140,6 +141,7 @@ type Store interface {
// Apply provides a write-only interface for persisting Peering data. // Apply provides a write-only interface for persisting Peering data.
type Apply interface { type Apply interface {
CheckPeeringUUID(id string) (bool, error)
PeeringWrite(req *pbpeering.PeeringWriteRequest) error PeeringWrite(req *pbpeering.PeeringWriteRequest) error
PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
@ -189,8 +191,16 @@ func (s *Service) GenerateToken(
return nil, err return nil, err
} }
canRetry := true
RETRY_ONCE:
id, err := s.getExistingOrCreateNewPeerID(req.PeerName, req.Partition)
if err != nil {
return nil, err
}
writeReq := pbpeering.PeeringWriteRequest{ writeReq := pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{ Peering: &pbpeering.Peering{
ID: id,
Name: req.PeerName, Name: req.PeerName,
// TODO(peering): Normalize from ACL token once this endpoint is guarded by ACLs. // TODO(peering): Normalize from ACL token once this endpoint is guarded by ACLs.
Partition: req.PartitionOrDefault(), Partition: req.PartitionOrDefault(),
@ -198,6 +208,15 @@ func (s *Service) GenerateToken(
}, },
} }
if err := s.Backend.Apply().PeeringWrite(&writeReq); err != nil { if err := s.Backend.Apply().PeeringWrite(&writeReq); err != nil {
// There's a possible race where two servers call Generate Token at the
// same time with the same peer name for the first time. They both
// generate an ID and try to insert and only one wins. This detects the
// collision and forces the loser to discard its generated ID and use
// the one from the other server.
if canRetry && strings.Contains(err.Error(), "A peering already exists with the name") {
canRetry = false
goto RETRY_ONCE
}
return nil, fmt.Errorf("failed to write peering: %w", err) return nil, fmt.Errorf("failed to write peering: %w", err)
} }
@ -270,6 +289,11 @@ func (s *Service) Establish(
serverAddrs[i] = addr serverAddrs[i] = addr
} }
id, err := s.getExistingOrCreateNewPeerID(req.PeerName, req.Partition)
if err != nil {
return nil, err
}
// as soon as a peering is written with a list of ServerAddresses that is // as soon as a peering is written with a list of ServerAddresses that is
// non-empty, the leader routine will see the peering and attempt to // non-empty, the leader routine will see the peering and attempt to
// establish a connection with the remote peer. // establish a connection with the remote peer.
@ -278,6 +302,7 @@ func (s *Service) Establish(
// RemotePeerID(PeerID) but at this point the other peer does not. // RemotePeerID(PeerID) but at this point the other peer does not.
writeReq := &pbpeering.PeeringWriteRequest{ writeReq := &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{ Peering: &pbpeering.Peering{
ID: id,
Name: req.PeerName, Name: req.PeerName,
PeerCAPems: tok.CA, PeerCAPems: tok.CA,
PeerServerAddresses: serverAddrs, PeerServerAddresses: serverAddrs,
@ -368,6 +393,16 @@ func (s *Service) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteR
defer metrics.MeasureSince([]string{"peering", "write"}, time.Now()) defer metrics.MeasureSince([]string{"peering", "write"}, time.Now())
// TODO(peering): ACL check request token // TODO(peering): ACL check request token
if req.Peering == nil {
return nil, fmt.Errorf("missing required peering body")
}
id, err := s.getExistingOrCreateNewPeerID(req.Peering.Name, req.Peering.Partition)
if err != nil {
return nil, err
}
req.Peering.ID = id
// TODO(peering): handle blocking queries // TODO(peering): handle blocking queries
err = s.Backend.Apply().PeeringWrite(req) err = s.Backend.Apply().PeeringWrite(req)
if err != nil { if err != nil {
@ -418,6 +453,7 @@ func (s *Service) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelet
// We only need to include the name and partition for the peering to be identified. // We only need to include the name and partition for the peering to be identified.
// All other data associated with the peering can be discarded because once marked // All other data associated with the peering can be discarded because once marked
// for deletion the peering is effectively gone. // for deletion the peering is effectively gone.
ID: existing.ID,
Name: req.Name, Name: req.Name,
Partition: req.Partition, Partition: req.Partition,
DeletedAt: structs.TimeToProto(time.Now().UTC()), DeletedAt: structs.TimeToProto(time.Now().UTC()),
@ -837,6 +873,26 @@ func getTrustDomain(store Store, logger hclog.Logger) (string, error) {
return connect.SpiffeIDSigningForCluster(cfg.ClusterID).Host(), nil return connect.SpiffeIDSigningForCluster(cfg.ClusterID).Host(), nil
} }
func (s *Service) getExistingOrCreateNewPeerID(peerName, partition string) (string, error) {
q := state.Query{
Value: strings.ToLower(peerName),
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(partition),
}
_, peering, err := s.Backend.Store().PeeringRead(nil, q)
if err != nil {
return "", err
}
if peering != nil {
return peering.ID, nil
}
id, err := lib.GenerateUUID(s.Backend.Apply().CheckPeeringUUID)
if err != nil {
return "", err
}
return id, nil
}
func (s *Service) StreamStatus(peer string) (resp StreamStatus, found bool) { func (s *Service) StreamStatus(peer string) (resp StreamStatus, found bool) {
return s.streams.streamStatus(peer) return s.streams.streamStatus(peer)
} }

View File

@ -30,6 +30,7 @@ import (
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbservice"
"github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/proto/prototest"
@ -224,6 +225,7 @@ func TestPeeringService_Read(t *testing.T) {
// insert peering directly to state store // insert peering directly to state store
p := &pbpeering.Peering{ p := &pbpeering.Peering{
ID: testUUID(t),
Name: "foo", Name: "foo",
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
PeerCAPems: nil, PeerCAPems: nil,
@ -279,6 +281,7 @@ func TestPeeringService_Delete(t *testing.T) {
s := newTestServer(t, nil) s := newTestServer(t, nil)
p := &pbpeering.Peering{ p := &pbpeering.Peering{
ID: testUUID(t),
Name: "foo", Name: "foo",
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
PeerCAPems: nil, PeerCAPems: nil,
@ -316,6 +319,7 @@ func TestPeeringService_List(t *testing.T) {
// Note that the state store holds reference to the underlying // Note that the state store holds reference to the underlying
// variables; do not modify them after writing. // variables; do not modify them after writing.
foo := &pbpeering.Peering{ foo := &pbpeering.Peering{
ID: testUUID(t),
Name: "foo", Name: "foo",
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
PeerCAPems: nil, PeerCAPems: nil,
@ -324,6 +328,7 @@ func TestPeeringService_List(t *testing.T) {
} }
require.NoError(t, s.Server.FSM().State().PeeringWrite(10, foo)) require.NoError(t, s.Server.FSM().State().PeeringWrite(10, foo))
bar := &pbpeering.Peering{ bar := &pbpeering.Peering{
ID: testUUID(t),
Name: "bar", Name: "bar",
State: pbpeering.PeeringState_ACTIVE, State: pbpeering.PeeringState_ACTIVE,
PeerCAPems: nil, PeerCAPems: nil,
@ -405,6 +410,7 @@ func TestPeeringService_TrustBundleListByService(t *testing.T) {
lastIdx++ lastIdx++
require.NoError(t, s.Server.FSM().State().PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, s.Server.FSM().State().PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testUUID(t),
Name: "foo", Name: "foo",
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
PeerServerName: "test", PeerServerName: "test",
@ -413,6 +419,7 @@ func TestPeeringService_TrustBundleListByService(t *testing.T) {
lastIdx++ lastIdx++
require.NoError(t, s.Server.FSM().State().PeeringWrite(lastIdx, &pbpeering.Peering{ require.NoError(t, s.Server.FSM().State().PeeringWrite(lastIdx, &pbpeering.Peering{
ID: testUUID(t),
Name: "bar", Name: "bar",
State: pbpeering.PeeringState_INITIAL, State: pbpeering.PeeringState_INITIAL,
PeerServerName: "test-bar", PeerServerName: "test-bar",
@ -513,6 +520,7 @@ func Test_StreamHandler_UpsertServices(t *testing.T) {
) )
require.NoError(t, s.Server.FSM().State().PeeringWrite(0, &pbpeering.Peering{ require.NoError(t, s.Server.FSM().State().PeeringWrite(0, &pbpeering.Peering{
ID: testUUID(t),
Name: "my-peer", Name: "my-peer",
})) }))
@ -998,7 +1006,9 @@ func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps {
} }
func setupTestPeering(t *testing.T, store *state.Store, name string, index uint64) string { func setupTestPeering(t *testing.T, store *state.Store, name string, index uint64) string {
t.Helper()
err := store.PeeringWrite(index, &pbpeering.Peering{ err := store.PeeringWrite(index, &pbpeering.Peering{
ID: testUUID(t),
Name: name, Name: name,
}) })
require.NoError(t, err) require.NoError(t, err)
@ -1009,3 +1019,9 @@ func setupTestPeering(t *testing.T, store *state.Store, name string, index uint6
return p.ID return p.ID
} }
func testUUID(t *testing.T) string {
v, err := lib.GenerateUUID(nil)
require.NoError(t, err)
return v
}

View File

@ -23,6 +23,7 @@ import (
"github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbcommon"
"github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbservice"
@ -1030,6 +1031,10 @@ type testApplier struct {
store *state.Store store *state.Store
} }
func (a *testApplier) CheckPeeringUUID(id string) (bool, error) {
panic("not implemented")
}
func (a *testApplier) PeeringWrite(req *pbpeering.PeeringWriteRequest) error { func (a *testApplier) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
panic("not implemented") panic("not implemented")
} }
@ -1216,6 +1221,7 @@ func writeEstablishedPeering(t *testing.T, store *state.Store, idx uint64, peerN
require.NoError(t, err) require.NoError(t, err)
peering := pbpeering.Peering{ peering := pbpeering.Peering{
ID: testUUID(t),
Name: peerName, Name: peerName,
PeerID: remotePeerID, PeerID: remotePeerID,
} }
@ -2169,5 +2175,10 @@ func requireEqualInstances(t *testing.T, expect, got structs.CheckServiceNodes)
require.Equal(t, expect[i].Checks[j].PartitionOrDefault(), got[i].Checks[j].PartitionOrDefault(), "partition mismatch") require.Equal(t, expect[i].Checks[j].PartitionOrDefault(), got[i].Checks[j].PartitionOrDefault(), "partition mismatch")
} }
} }
}
func testUUID(t *testing.T) string {
v, err := lib.GenerateUUID(nil)
require.NoError(t, err)
return v
} }

View File

@ -589,6 +589,7 @@ func (b *testSubscriptionBackend) ensureCARoots(t *testing.T, roots ...*structs.
func setupTestPeering(t *testing.T, store *state.Store, name string, index uint64) string { func setupTestPeering(t *testing.T, store *state.Store, name string, index uint64) string {
err := store.PeeringWrite(index, &pbpeering.Peering{ err := store.PeeringWrite(index, &pbpeering.Peering{
ID: testUUID(t),
Name: name, Name: name,
}) })
require.NoError(t, err) require.NoError(t, err)

View File

@ -2239,8 +2239,9 @@ type IndexedCheckServiceNodes struct {
} }
type IndexedNodesWithGateways struct { type IndexedNodesWithGateways struct {
Nodes CheckServiceNodes ImportedNodes CheckServiceNodes
Gateways GatewayServices Nodes CheckServiceNodes
Gateways GatewayServices
QueryMeta QueryMeta
} }
@ -2250,7 +2251,8 @@ type DatacenterIndexedCheckServiceNodes struct {
} }
type IndexedNodeDump struct { type IndexedNodeDump struct {
Dump NodeDump ImportedDump NodeDump
Dump NodeDump
QueryMeta QueryMeta
} }

View File

@ -37,6 +37,8 @@ type ServiceSummary struct {
transparentProxySet bool transparentProxySet bool
ConnectNative bool ConnectNative bool
PeerName string `json:",omitempty"`
acl.EnterpriseMeta acl.EnterpriseMeta
} }
@ -117,7 +119,18 @@ RPC:
if out.Dump == nil { if out.Dump == nil {
out.Dump = make(structs.NodeDump, 0) out.Dump = make(structs.NodeDump, 0)
} }
return out.Dump, nil
// Use empty list instead of nil
for _, info := range out.ImportedDump {
if info.Services == nil {
info.Services = make([]*structs.NodeService, 0)
}
if info.Checks == nil {
info.Checks = make([]*structs.HealthCheck, 0)
}
}
return append(out.Dump, out.ImportedDump...), nil
} }
// UINodeInfo is used to get info on a single node in a given datacenter. We return a // UINodeInfo is used to get info on a single node in a given datacenter. We return a
@ -139,6 +152,10 @@ func (s *HTTPHandlers) UINodeInfo(resp http.ResponseWriter, req *http.Request) (
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing node name"} return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing node name"}
} }
if peer := req.URL.Query().Get("peer"); peer != "" {
args.PeerName = peer
}
// Make the RPC request // Make the RPC request
var out structs.IndexedNodeDump var out structs.IndexedNodeDump
defer setMeta(resp, &out.QueryMeta) defer setMeta(resp, &out.QueryMeta)
@ -216,15 +233,17 @@ RPC:
// Store the names of the gateways associated with each service // Store the names of the gateways associated with each service
var ( var (
serviceGateways = make(map[structs.ServiceName][]structs.ServiceName) serviceGateways = make(map[structs.PeeredServiceName][]structs.PeeredServiceName)
numLinkedServices = make(map[structs.ServiceName]int) numLinkedServices = make(map[structs.PeeredServiceName]int)
) )
for _, gs := range out.Gateways { for _, gs := range out.Gateways {
serviceGateways[gs.Service] = append(serviceGateways[gs.Service], gs.Gateway) psn := structs.PeeredServiceName{Peer: structs.DefaultPeerKeyword, ServiceName: gs.Service}
numLinkedServices[gs.Gateway] += 1 gpsn := structs.PeeredServiceName{Peer: structs.DefaultPeerKeyword, ServiceName: gs.Gateway}
serviceGateways[psn] = append(serviceGateways[psn], gpsn)
numLinkedServices[gpsn] += 1
} }
summaries, hasProxy := summarizeServices(out.Nodes.ToServiceDump(), nil, "") summaries, hasProxy := summarizeServices(append(out.Nodes, out.ImportedNodes...).ToServiceDump(), nil, "")
sorted := prepSummaryOutput(summaries, false) sorted := prepSummaryOutput(summaries, false)
// Ensure at least a zero length slice // Ensure at least a zero length slice
@ -233,17 +252,18 @@ RPC:
sum := ServiceListingSummary{ServiceSummary: *svc} sum := ServiceListingSummary{ServiceSummary: *svc}
sn := structs.NewServiceName(svc.Name, &svc.EnterpriseMeta) sn := structs.NewServiceName(svc.Name, &svc.EnterpriseMeta)
if hasProxy[sn] { psn := structs.PeeredServiceName{Peer: svc.PeerName, ServiceName: sn}
if hasProxy[psn] {
sum.ConnectedWithProxy = true sum.ConnectedWithProxy = true
} }
// Verify that at least one of the gateways linked by config entry has an instance registered in the catalog // Verify that at least one of the gateways linked by config entry has an instance registered in the catalog
for _, gw := range serviceGateways[sn] { for _, gw := range serviceGateways[psn] {
if s := summaries[gw]; s != nil && sum.InstanceCount > 0 { if s := summaries[gw]; s != nil && sum.InstanceCount > 0 {
sum.ConnectedWithGateway = true sum.ConnectedWithGateway = true
} }
} }
sum.GatewayConfig.AssociatedServiceCount = numLinkedServices[sn] sum.GatewayConfig.AssociatedServiceCount = numLinkedServices[psn]
result = append(result, &sum) result = append(result, &sum)
} }
@ -389,31 +409,43 @@ RPC:
return topo, nil return topo, nil
} }
func summarizeServices(dump structs.ServiceDump, cfg *config.RuntimeConfig, dc string) (map[structs.ServiceName]*ServiceSummary, map[structs.ServiceName]bool) { func summarizeServices(dump structs.ServiceDump, cfg *config.RuntimeConfig, dc string) (map[structs.PeeredServiceName]*ServiceSummary, map[structs.PeeredServiceName]bool) {
var ( var (
summary = make(map[structs.ServiceName]*ServiceSummary) summary = make(map[structs.PeeredServiceName]*ServiceSummary)
hasProxy = make(map[structs.ServiceName]bool) hasProxy = make(map[structs.PeeredServiceName]bool)
) )
getService := func(service structs.ServiceName) *ServiceSummary { getService := func(psn structs.PeeredServiceName) *ServiceSummary {
serv, ok := summary[service] serv, ok := summary[psn]
if !ok { if !ok {
serv = &ServiceSummary{ serv = &ServiceSummary{
Name: service.Name, Name: psn.ServiceName.Name,
EnterpriseMeta: service.EnterpriseMeta, EnterpriseMeta: psn.ServiceName.EnterpriseMeta,
// the other code will increment this unconditionally so we // the other code will increment this unconditionally so we
// shouldn't initialize it to 1 // shouldn't initialize it to 1
InstanceCount: 0, InstanceCount: 0,
PeerName: psn.Peer,
} }
summary[service] = serv summary[psn] = serv
} }
return serv return serv
} }
for _, csn := range dump { for _, csn := range dump {
var peerName string
// all entities will have the same peer name so it is safe to use the node's peer name
if csn.Node == nil {
// this can happen for gateway dumps that call this summarize func
peerName = structs.DefaultPeerKeyword
} else {
peerName = csn.Node.PeerName
}
if cfg != nil && csn.GatewayService != nil { if cfg != nil && csn.GatewayService != nil {
gwsvc := csn.GatewayService gwsvc := csn.GatewayService
sum := getService(gwsvc.Service)
psn := structs.PeeredServiceName{Peer: peerName, ServiceName: gwsvc.Service}
sum := getService(psn)
modifySummaryForGatewayService(cfg, dc, sum, gwsvc) modifySummaryForGatewayService(cfg, dc, sum, gwsvc)
} }
@ -421,8 +453,10 @@ func summarizeServices(dump structs.ServiceDump, cfg *config.RuntimeConfig, dc s
if csn.Service == nil { if csn.Service == nil {
continue continue
} }
sn := structs.NewServiceName(csn.Service.Service, &csn.Service.EnterpriseMeta) sn := structs.NewServiceName(csn.Service.Service, &csn.Service.EnterpriseMeta)
sum := getService(sn) psn := structs.PeeredServiceName{Peer: peerName, ServiceName: sn}
sum := getService(psn)
svc := csn.Service svc := csn.Service
sum.Nodes = append(sum.Nodes, csn.Node.Node) sum.Nodes = append(sum.Nodes, csn.Node.Node)
@ -432,9 +466,10 @@ func summarizeServices(dump structs.ServiceDump, cfg *config.RuntimeConfig, dc s
sum.ConnectNative = svc.Connect.Native sum.ConnectNative = svc.Connect.Native
if svc.Kind == structs.ServiceKindConnectProxy { if svc.Kind == structs.ServiceKindConnectProxy {
sn := structs.NewServiceName(svc.Proxy.DestinationServiceName, &svc.EnterpriseMeta) sn := structs.NewServiceName(svc.Proxy.DestinationServiceName, &svc.EnterpriseMeta)
hasProxy[sn] = true psn := structs.PeeredServiceName{Peer: peerName, ServiceName: sn}
hasProxy[psn] = true
destination := getService(sn) destination := getService(psn)
for _, check := range csn.Checks { for _, check := range csn.Checks {
cid := structs.NewCheckID(check.CheckID, &check.EnterpriseMeta) cid := structs.NewCheckID(check.CheckID, &check.EnterpriseMeta)
uid := structs.UniqueID(csn.Node.Node, cid.String()) uid := structs.UniqueID(csn.Node.Node, cid.String())
@ -496,7 +531,7 @@ func summarizeServices(dump structs.ServiceDump, cfg *config.RuntimeConfig, dc s
return summary, hasProxy return summary, hasProxy
} }
func prepSummaryOutput(summaries map[structs.ServiceName]*ServiceSummary, excludeSidecars bool) []*ServiceSummary { func prepSummaryOutput(summaries map[structs.PeeredServiceName]*ServiceSummary, excludeSidecars bool) []*ServiceSummary {
var resp []*ServiceSummary var resp []*ServiceSummary
// Ensure at least a zero length slice // Ensure at least a zero length slice
resp = make([]*ServiceSummary, 0) resp = make([]*ServiceSummary, 0)

View File

@ -2,6 +2,7 @@ package agent
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -11,6 +12,7 @@ import (
"path/filepath" "path/filepath"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp" cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -19,12 +21,14 @@ import (
"github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
) )
func TestUiIndex(t *testing.T) { func TestUIIndex(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")
} }
@ -74,7 +78,7 @@ func TestUiIndex(t *testing.T) {
} }
} }
func TestUiNodes(t *testing.T) { func TestUINodes(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")
} }
@ -84,15 +88,42 @@ func TestUiNodes(t *testing.T) {
defer a.Shutdown() defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1") testrpc.WaitForTestAgent(t, a.RPC, "dc1")
args := &structs.RegisterRequest{ args := []*structs.RegisterRequest{
Datacenter: "dc1", {
Node: "test", Datacenter: "dc1",
Address: "127.0.0.1", Node: "test",
Address: "127.0.0.1",
},
{
Datacenter: "dc1",
Node: "foo-peer",
Address: "127.0.0.3",
PeerName: "peer1",
},
} }
var out struct{} for _, reg := range args {
if err := a.RPC("Catalog.Register", args, &out); err != nil { var out struct{}
t.Fatalf("err: %v", err) err := a.RPC("Catalog.Register", reg, &out)
require.NoError(t, err)
}
// establish "peer1"
{
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
peerOne := &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
Name: "peer1",
State: pbpeering.PeeringState_INITIAL,
PeerCAPems: nil,
PeerServerName: "fooservername",
PeerServerAddresses: []string{"addr1"},
},
}
_, err := a.rpcClientPeering.PeeringWrite(ctx, peerOne)
require.NoError(t, err)
} }
req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1", nil) req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1", nil)
@ -103,20 +134,32 @@ func TestUiNodes(t *testing.T) {
} }
assertIndex(t, resp) assertIndex(t, resp)
// Should be 2 nodes, and all the empty lists should be non-nil // Should be 3 nodes, and all the empty lists should be non-nil
nodes := obj.(structs.NodeDump) nodes := obj.(structs.NodeDump)
if len(nodes) != 2 || require.Len(t, nodes, 3)
nodes[0].Node != a.Config.NodeName ||
nodes[0].Services == nil || len(nodes[0].Services) != 1 || // check local nodes, services and checks
nodes[0].Checks == nil || len(nodes[0].Checks) != 1 || require.Equal(t, a.Config.NodeName, nodes[0].Node)
nodes[1].Node != "test" || require.NotNil(t, nodes[0].Services)
nodes[1].Services == nil || len(nodes[1].Services) != 0 || require.Len(t, nodes[0].Services, 1)
nodes[1].Checks == nil || len(nodes[1].Checks) != 0 { require.NotNil(t, nodes[0].Checks)
t.Fatalf("bad: %v", obj) require.Len(t, nodes[0].Checks, 1)
} require.Equal(t, "test", nodes[1].Node)
require.NotNil(t, nodes[1].Services)
require.Len(t, nodes[1].Services, 0)
require.NotNil(t, nodes[1].Checks)
require.Len(t, nodes[1].Checks, 0)
// peered node
require.Equal(t, "foo-peer", nodes[2].Node)
require.Equal(t, "peer1", nodes[2].PeerName)
require.NotNil(t, nodes[2].Services)
require.Len(t, nodes[2].Services, 0)
require.NotNil(t, nodes[1].Checks)
require.Len(t, nodes[2].Services, 0)
} }
func TestUiNodes_Filter(t *testing.T) { func TestUINodes_Filter(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")
} }
@ -162,7 +205,7 @@ func TestUiNodes_Filter(t *testing.T) {
require.Empty(t, nodes[0].Checks) require.Empty(t, nodes[0].Checks)
} }
func TestUiNodeInfo(t *testing.T) { func TestUINodeInfo(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")
} }
@ -214,7 +257,7 @@ func TestUiNodeInfo(t *testing.T) {
} }
} }
func TestUiServices(t *testing.T) { func TestUIServices(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")
} }
@ -318,6 +361,30 @@ func TestUiServices(t *testing.T) {
Tags: []string{}, Tags: []string{},
}, },
}, },
// register peer node foo with peer service
{
Datacenter: "dc1",
Node: "foo",
ID: types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"),
Address: "127.0.0.2",
TaggedAddresses: map[string]string{
"lan": "127.0.0.2",
"wan": "198.18.0.2",
},
NodeMeta: map[string]string{
"env": "production",
"os": "linux",
},
PeerName: "peer1",
Service: &structs.NodeService{
Kind: structs.ServiceKindTypical,
ID: "serviceID",
Service: "service",
Port: 1235,
Address: "198.18.1.2",
PeerName: "peer1",
},
},
} }
for _, args := range requests { for _, args := range requests {
@ -325,6 +392,24 @@ func TestUiServices(t *testing.T) {
require.NoError(t, a.RPC("Catalog.Register", args, &out)) require.NoError(t, a.RPC("Catalog.Register", args, &out))
} }
// establish "peer1"
{
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
peerOne := &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
Name: "peer1",
State: pbpeering.PeeringState_INITIAL,
PeerCAPems: nil,
PeerServerName: "fooservername",
PeerServerAddresses: []string{"addr1"},
},
}
_, err := a.rpcClientPeering.PeeringWrite(ctx, peerOne)
require.NoError(t, err)
}
// Register a terminating gateway associated with api and cache // Register a terminating gateway associated with api and cache
{ {
arg := structs.RegisterRequest{ arg := structs.RegisterRequest{
@ -393,7 +478,7 @@ func TestUiServices(t *testing.T) {
// Should be 2 nodes, and all the empty lists should be non-nil // Should be 2 nodes, and all the empty lists should be non-nil
summary := obj.([]*ServiceListingSummary) summary := obj.([]*ServiceListingSummary)
require.Len(t, summary, 6) require.Len(t, summary, 7)
// internal accounting that users don't see can be blown away // internal accounting that users don't see can be blown away
for _, sum := range summary { for _, sum := range summary {
@ -493,6 +578,21 @@ func TestUiServices(t *testing.T) {
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}, },
}, },
{
ServiceSummary: ServiceSummary{
Kind: structs.ServiceKindTypical,
Name: "service",
Datacenter: "dc1",
Tags: nil,
Nodes: []string{"foo"},
InstanceCount: 1,
ChecksPassing: 0,
ChecksWarning: 0,
ChecksCritical: 0,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
PeerName: "peer1",
},
},
} }
require.ElementsMatch(t, expected, summary) require.ElementsMatch(t, expected, summary)
}) })

View File

@ -465,9 +465,9 @@ func (s *ResourceGenerator) makeDestinationClusters(cfgSnap *proxycfg.ConfigSnap
cluster := s.makeDynamicForwardProxyCluster(cfgSnap, opts) cluster := s.makeDynamicForwardProxyCluster(cfgSnap, opts)
// TODO (dans): might be relevant later for TLS addons like CA validation // TODO (dans): might be relevant later for TLS addons like CA validation
//if err := s.injectGatewayServiceAddons(cfgSnap, cluster, svc, loadBalancer); err != nil { // if err := s.injectGatewayServiceAddons(cfgSnap, cluster, svc, loadBalancer); err != nil {
// return nil, err // return nil, err
//} // }
clusters = append(clusters, cluster) clusters = append(clusters, cluster)
} }
return clusters, nil return clusters, nil
@ -695,7 +695,7 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
rootPEMs := cfgSnap.RootPEMs() rootPEMs := cfgSnap.RootPEMs()
if uid.Peer != "" { if uid.Peer != "" {
rootPEMs = cfgSnap.ConnectProxy.PeerTrustBundles[uid.Peer].ConcatenatedRootPEMs() rootPEMs = cfgSnap.ConnectProxy.UpstreamPeerTrustBundles[uid.Peer].ConcatenatedRootPEMs()
} }
// Enable TLS upstream with the configured client certificate. // Enable TLS upstream with the configured client certificate.
@ -999,7 +999,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
rootPEMs := cfgSnap.RootPEMs() rootPEMs := cfgSnap.RootPEMs()
if uid.Peer != "" { if uid.Peer != "" {
rootPEMs = cfgSnap.ConnectProxy.PeerTrustBundles[uid.Peer].ConcatenatedRootPEMs() rootPEMs = cfgSnap.ConnectProxy.UpstreamPeerTrustBundles[uid.Peer].ConcatenatedRootPEMs()
} }
commonTLSContext := makeCommonTLSContext( commonTLSContext := makeCommonTLSContext(
cfgSnap.Leaf(), cfgSnap.Leaf(),

View File

@ -697,7 +697,8 @@ func (s *ResourceGenerator) injectConnectFilters(cfgSnap *proxycfg.ConfigSnapsho
authzFilter, err := makeRBACNetworkFilter( authzFilter, err := makeRBACNetworkFilter(
cfgSnap.ConnectProxy.Intentions, cfgSnap.ConnectProxy.Intentions,
cfgSnap.IntentionDefaultAllow, cfgSnap.IntentionDefaultAllow,
cfgSnap.ConnectProxy.PeerTrustBundles, cfgSnap.Roots.TrustDomain,
cfgSnap.ConnectProxy.InboundPeerTrustBundles,
) )
if err != nil { if err != nil {
return err return err
@ -952,7 +953,8 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
httpAuthzFilter, err := makeRBACHTTPFilter( httpAuthzFilter, err := makeRBACHTTPFilter(
cfgSnap.ConnectProxy.Intentions, cfgSnap.ConnectProxy.Intentions,
cfgSnap.IntentionDefaultAllow, cfgSnap.IntentionDefaultAllow,
cfgSnap.ConnectProxy.PeerTrustBundles, cfgSnap.Roots.TrustDomain,
cfgSnap.ConnectProxy.InboundPeerTrustBundles,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -1009,7 +1011,8 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter( filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter(
cfgSnap.ConnectProxy.Intentions, cfgSnap.ConnectProxy.Intentions,
cfgSnap.IntentionDefaultAllow, cfgSnap.IntentionDefaultAllow,
cfgSnap.ConnectProxy.PeerTrustBundles, cfgSnap.Roots.TrustDomain,
cfgSnap.ConnectProxy.InboundPeerTrustBundles,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -1307,6 +1310,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
authFilter, err := makeRBACNetworkFilter( authFilter, err := makeRBACNetworkFilter(
intentions, intentions,
cfgSnap.IntentionDefaultAllow, cfgSnap.IntentionDefaultAllow,
cfgSnap.Roots.TrustDomain,
nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway
) )
if err != nil { if err != nil {
@ -1344,6 +1348,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
opts.httpAuthzFilter, err = makeRBACHTTPFilter( opts.httpAuthzFilter, err = makeRBACHTTPFilter(
intentions, intentions,
cfgSnap.IntentionDefaultAllow, cfgSnap.IntentionDefaultAllow,
cfgSnap.Roots.TrustDomain,
nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway
) )
if err != nil { if err != nil {

View File

@ -21,9 +21,10 @@ import (
func makeRBACNetworkFilter( func makeRBACNetworkFilter(
intentions structs.Intentions, intentions structs.Intentions,
intentionDefaultAllow bool, intentionDefaultAllow bool,
peerTrustBundles map[string]*pbpeering.PeeringTrustBundle, trustDomain string,
peerTrustBundles []*pbpeering.PeeringTrustBundle,
) (*envoy_listener_v3.Filter, error) { ) (*envoy_listener_v3.Filter, error) {
rules, err := makeRBACRules(intentions, intentionDefaultAllow, false, peerTrustBundles) rules, err := makeRBACRules(intentions, intentionDefaultAllow, trustDomain, false, peerTrustBundles)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -38,9 +39,10 @@ func makeRBACNetworkFilter(
func makeRBACHTTPFilter( func makeRBACHTTPFilter(
intentions structs.Intentions, intentions structs.Intentions,
intentionDefaultAllow bool, intentionDefaultAllow bool,
peerTrustBundles map[string]*pbpeering.PeeringTrustBundle, trustDomain string,
peerTrustBundles []*pbpeering.PeeringTrustBundle,
) (*envoy_http_v3.HttpFilter, error) { ) (*envoy_http_v3.HttpFilter, error) {
rules, err := makeRBACRules(intentions, intentionDefaultAllow, true, peerTrustBundles) rules, err := makeRBACRules(intentions, intentionDefaultAllow, trustDomain, true, peerTrustBundles)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -53,6 +55,7 @@ func makeRBACHTTPFilter(
func intentionListToIntermediateRBACForm( func intentionListToIntermediateRBACForm(
intentions structs.Intentions, intentions structs.Intentions,
trustDomain string,
isHTTP bool, isHTTP bool,
trustBundlesByPeer map[string]*pbpeering.PeeringTrustBundle, trustBundlesByPeer map[string]*pbpeering.PeeringTrustBundle,
) []*rbacIntention { ) []*rbacIntention {
@ -72,7 +75,7 @@ func intentionListToIntermediateRBACForm(
continue continue
} }
rixn := intentionToIntermediateRBACForm(ixn, isHTTP, trustBundle) rixn := intentionToIntermediateRBACForm(ixn, trustDomain, isHTTP, trustBundle)
rbacIxns = append(rbacIxns, rixn) rbacIxns = append(rbacIxns, rixn)
} }
return rbacIxns return rbacIxns
@ -210,11 +213,12 @@ func removePermissionPrecedence(perms []*rbacPermission, intentionDefaultAction
return out return out
} }
func intentionToIntermediateRBACForm(ixn *structs.Intention, isHTTP bool, bundle *pbpeering.PeeringTrustBundle) *rbacIntention { func intentionToIntermediateRBACForm(ixn *structs.Intention, trustDomain string, isHTTP bool, bundle *pbpeering.PeeringTrustBundle) *rbacIntention {
rixn := &rbacIntention{ rixn := &rbacIntention{
Source: rbacService{ Source: rbacService{
ServiceName: ixn.SourceServiceName(), ServiceName: ixn.SourceServiceName(),
Peer: ixn.SourcePeer, Peer: ixn.SourcePeer,
TrustDomain: trustDomain,
}, },
Precedence: ixn.Precedence, Precedence: ixn.Precedence,
} }
@ -426,25 +430,21 @@ func simplifyNotSourceSlice(notSources []rbacService) []rbacService {
func makeRBACRules( func makeRBACRules(
intentions structs.Intentions, intentions structs.Intentions,
intentionDefaultAllow bool, intentionDefaultAllow bool,
trustDomain string,
isHTTP bool, isHTTP bool,
peerTrustBundles map[string]*pbpeering.PeeringTrustBundle, peerTrustBundles []*pbpeering.PeeringTrustBundle,
) (*envoy_rbac_v3.RBAC, error) { ) (*envoy_rbac_v3.RBAC, error) {
// Note that we DON'T explicitly validate the trust-domain matches ours.
//
// For now we don't validate the trust domain of the _destination_ at all.
// The RBAC policies below ignore the trust domain and it's implicit that
// the request is for the correct cluster. We might want to reconsider this
// later but plumbing in additional machinery to check the clusterID here
// is not really necessary for now unless the Envoys are badly configured.
// Our threat model _requires_ correctly configured and well behaved
// proxies given that they have ACLs to fetch certs and so can do whatever
// they want including not authorizing traffic at all or routing it do a
// different service than they auth'd against.
// TODO(banks,rb): Implement revocation list checking? // TODO(banks,rb): Implement revocation list checking?
// TODO(peering): mkeeler asked that these maps come from proxycfg instead of
// being constructed in xds to save memory allocation and gc pressure. Low priority.
trustBundlesByPeer := make(map[string]*pbpeering.PeeringTrustBundle, len(peerTrustBundles))
for _, ptb := range peerTrustBundles {
trustBundlesByPeer[ptb.PeerName] = ptb
}
// First build up just the basic principal matches. // First build up just the basic principal matches.
rbacIxns := intentionListToIntermediateRBACForm(intentions, isHTTP, peerTrustBundles) rbacIxns := intentionListToIntermediateRBACForm(intentions, trustDomain, isHTTP, trustBundlesByPeer)
// Normalize: if we are in default-deny then all intentions must be allows and vice versa // Normalize: if we are in default-deny then all intentions must be allows and vice versa
intentionDefaultAction := intentionActionFromBool(intentionDefaultAllow) intentionDefaultAction := intentionActionFromBool(intentionDefaultAllow)
@ -641,7 +641,7 @@ const anyPath = `[^/]+`
func makeSpiffePattern(src rbacService) string { func makeSpiffePattern(src rbacService) string {
var ( var (
host = anyPath // TODO(peering): We match trust domain on any value but should be defaulting to the local trust domain host = src.TrustDomain
ap = src.PartitionOrDefault() ap = src.PartitionOrDefault()
ns = src.NamespaceOrDefault() ns = src.NamespaceOrDefault()
svc = src.Name svc = src.Name

View File

@ -58,10 +58,13 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
ExportedPartition: "part1", ExportedPartition: "part1",
}, },
} }
testTrustDomain := "test.consul"
var ( var (
nameWild = rbacService{ServiceName: structs.NewServiceName("*", nil)} nameWild = rbacService{ServiceName: structs.NewServiceName("*", nil),
nameWeb = rbacService{ServiceName: structs.NewServiceName("web", nil)} TrustDomain: testTrustDomain}
nameWeb = rbacService{ServiceName: structs.NewServiceName("web", nil),
TrustDomain: testTrustDomain}
nameWildPeered = rbacService{ServiceName: structs.NewServiceName("*", nil), nameWildPeered = rbacService{ServiceName: structs.NewServiceName("*", nil),
Peer: "peer1", TrustDomain: "peer1.domain", ExportedPartition: "part1"} Peer: "peer1", TrustDomain: "peer1.domain", ExportedPartition: "part1"}
nameWebPeered = rbacService{ServiceName: structs.NewServiceName("web", nil), nameWebPeered = rbacService{ServiceName: structs.NewServiceName("web", nil),
@ -439,7 +442,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
for name, tt := range tests { for name, tt := range tests {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
rbacIxns := intentionListToIntermediateRBACForm(tt.intentions, tt.http, testPeerTrustBundle) rbacIxns := intentionListToIntermediateRBACForm(tt.intentions, testTrustDomain, tt.http, testPeerTrustBundle)
intentionDefaultAction := intentionActionFromBool(tt.intentionDefaultAllow) intentionDefaultAction := intentionActionFromBool(tt.intentionDefaultAllow)
rbacIxns = removeIntentionPrecedence(rbacIxns, intentionDefaultAction) rbacIxns = removeIntentionPrecedence(rbacIxns, intentionDefaultAction)
@ -472,13 +475,14 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
ixn.Permissions = perms ixn.Permissions = perms
return ixn return ixn
} }
testPeerTrustBundle := map[string]*pbpeering.PeeringTrustBundle{ testPeerTrustBundle := []*pbpeering.PeeringTrustBundle{
"peer1": { {
PeerName: "peer1", PeerName: "peer1",
TrustDomain: "peer1.domain", TrustDomain: "peer1.domain",
ExportedPartition: "part1", ExportedPartition: "part1",
}, },
} }
testTrustDomain := "test.consul"
sorted := func(ixns ...*structs.Intention) structs.Intentions { sorted := func(ixns ...*structs.Intention) structs.Intentions {
sort.SliceStable(ixns, func(i, j int) bool { sort.SliceStable(ixns, func(i, j int) bool {
return ixns[j].Precedence < ixns[i].Precedence return ixns[j].Precedence < ixns[i].Precedence
@ -797,7 +801,7 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
tt := tt tt := tt
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
t.Run("network filter", func(t *testing.T) { t.Run("network filter", func(t *testing.T) {
filter, err := makeRBACNetworkFilter(tt.intentions, tt.intentionDefaultAllow, testPeerTrustBundle) filter, err := makeRBACNetworkFilter(tt.intentions, tt.intentionDefaultAllow, testTrustDomain, testPeerTrustBundle)
require.NoError(t, err) require.NoError(t, err)
t.Run("current", func(t *testing.T) { t.Run("current", func(t *testing.T) {
@ -807,7 +811,7 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
}) })
}) })
t.Run("http filter", func(t *testing.T) { t.Run("http filter", func(t *testing.T) {
filter, err := makeRBACHTTPFilter(tt.intentions, tt.intentionDefaultAllow, testPeerTrustBundle) filter, err := makeRBACHTTPFilter(tt.intentions, tt.intentionDefaultAllow, testTrustDomain, testPeerTrustBundle)
require.NoError(t, err) require.NoError(t, err)
t.Run("current", func(t *testing.T) { t.Run("current", func(t *testing.T) {

View File

@ -22,7 +22,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -35,7 +35,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -34,7 +34,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -47,7 +47,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -22,7 +22,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -35,7 +35,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -64,7 +64,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -34,7 +34,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -47,7 +47,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }
@ -31,7 +31,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -46,7 +46,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -59,7 +59,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -73,7 +73,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/unsafe$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/unsafe$"
} }
} }
} }
@ -87,7 +87,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }
@ -31,7 +31,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -46,7 +46,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -59,7 +59,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -73,7 +73,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/unsafe$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/unsafe$"
} }
} }
} }
@ -87,7 +87,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -23,7 +23,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }

View File

@ -227,7 +227,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -45,7 +45,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -19,7 +19,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -21,7 +21,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -34,7 +34,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -21,7 +21,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -34,7 +34,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -22,7 +22,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }
@ -30,7 +30,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -45,7 +45,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -58,7 +58,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -72,7 +72,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/unsafe$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/unsafe$"
} }
} }
} }
@ -86,7 +86,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }
@ -30,7 +30,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -45,7 +45,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }
@ -58,7 +58,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }
@ -72,7 +72,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/unsafe$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/unsafe$"
} }
} }
} }
@ -86,7 +86,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/cron$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/cron$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -22,7 +22,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }

View File

@ -18,7 +18,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/[^/]+$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/[^/]+$"
} }
} }
} }

View File

@ -226,7 +226,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -46,7 +46,7 @@
"googleRe2": { "googleRe2": {
}, },
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$" "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$"
} }
} }
} }

View File

@ -637,6 +637,31 @@ func (c *BootstrapConfig) generateListenerConfig(args *BootstrapTplArgs, bindAdd
] ]
} }
}` }`
// Enable TLS on the prometheus listener if cert/private key are provided.
var tlsConfig string
if args.PrometheusCertFile != "" {
tlsConfig = `,
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
"commonTlsContext": {
"tlsCertificateSdsSecretConfigs": [
{
"name": "prometheus_cert"
}
],
"validationContextSdsSecretConfig": {
"trustedCa": {
"name": "prometheus_validation_context"
}
}
}
}
}`
}
listenerJSON := `{ listenerJSON := `{
"name": "` + name + `_listener", "name": "` + name + `_listener",
"address": { "address": {
@ -694,11 +719,43 @@ func (c *BootstrapConfig) generateListenerConfig(args *BootstrapTplArgs, bindAdd
] ]
} }
} }
] ]` + tlsConfig + `
} }
] ]
}` }`
secretsTemplate := `{
"name": "prometheus_cert",
"tlsCertificate": {
"certificateChain": {
"filename": "%s"
},
"privateKey": {
"filename": "%s"
}
}
},
{
"name": "prometheus_validation_context",
"validationContext": {
%s
}
}`
var validationContext string
if args.PrometheusCAPath != "" {
validationContext = fmt.Sprintf(`"watchedDirectory": {
"path": "%s"
}`, args.PrometheusCAPath)
} else {
validationContext = fmt.Sprintf(`"trustedCa": {
"filename": "%s"
}`, args.PrometheusCAFile)
}
var secretsJSON string
if args.PrometheusCertFile != "" {
secretsJSON = fmt.Sprintf(secretsTemplate, args.PrometheusCertFile, args.PrometheusKeyFile, validationContext)
}
// Make sure we do not append the same cluster multiple times, as that will // Make sure we do not append the same cluster multiple times, as that will
// cause envoy startup to fail. // cause envoy startup to fail.
selfAdminClusterExists, err := containsSelfAdminCluster(args.StaticClustersJSON) selfAdminClusterExists, err := containsSelfAdminCluster(args.StaticClustersJSON)
@ -716,6 +773,12 @@ func (c *BootstrapConfig) generateListenerConfig(args *BootstrapTplArgs, bindAdd
listenerJSON = ",\n" + listenerJSON listenerJSON = ",\n" + listenerJSON
} }
args.StaticListenersJSON += listenerJSON args.StaticListenersJSON += listenerJSON
if args.StaticSecretsJSON != "" {
secretsJSON = ",\n" + secretsJSON
}
args.StaticSecretsJSON += secretsJSON
return nil return nil
} }

View File

@ -273,6 +273,126 @@ const (
} }
] ]
}` }`
expectedPromListenerWithBackendAndTLS = `{
"name": "envoy_prometheus_metrics_listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9000
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy_prometheus_metrics",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
"virtual_hosts": [
{
"name": "self_admin",
"domains": [
"*"
],
"routes": [
{
"match": {
"path": "/metrics"
},
"route": {
"cluster": "prometheus_backend",
"prefix_rewrite": "/stats/prometheus"
}
},
{
"match": {
"prefix": "/"
},
"direct_response": {
"status": 404
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
]
}
}
],
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
"commonTlsContext": {
"tlsCertificateSdsSecretConfigs": [
{
"name": "prometheus_cert"
}
],
"validationContextSdsSecretConfig": {
"trustedCa": {
"name": "prometheus_validation_context"
}
}
}
}
}
}
]
}`
expectedPromSecretsWithBackendAndTLS = `{
"name": "prometheus_cert",
"tlsCertificate": {
"certificateChain": {
"filename": "test-cert-file"
},
"privateKey": {
"filename": "test-key-file"
}
}
},
{
"name": "prometheus_validation_context",
"validationContext": {
"trustedCa": {
"filename": "test-ca-file"
}
}
}`
expectedPromSecretsWithBackendAndTLSCAPath = `{
"name": "prometheus_cert",
"tlsCertificate": {
"certificateChain": {
"filename": "test-cert-file"
},
"privateKey": {
"filename": "test-key-file"
}
}
},
{
"name": "prometheus_validation_context",
"validationContext": {
"watchedDirectory": {
"path": "test-ca-directory"
}
}
}`
expectedStatsListener = `{ expectedStatsListener = `{
"name": "envoy_metrics_listener", "name": "envoy_metrics_listener",
"address": { "address": {
@ -760,6 +880,68 @@ func TestBootstrapConfig_ConfigureArgs(t *testing.T) {
}, },
wantErr: false, wantErr: false,
}, },
{
name: "prometheus-bind-addr-with-backend-and-tls",
input: BootstrapConfig{
PrometheusBindAddr: "0.0.0.0:9000",
},
baseArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
PrometheusBackendPort: "20100",
PrometheusScrapePath: "/metrics",
PrometheusCAFile: "test-ca-file",
PrometheusCertFile: "test-cert-file",
PrometheusKeyFile: "test-key-file",
},
wantArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
// Should use the "prometheus_backend" cluster instead, which
// uses the PrometheusBackendPort rather than Envoy admin port
StaticClustersJSON: expectedPrometheusBackendCluster,
StaticListenersJSON: expectedPromListenerWithBackendAndTLS,
StaticSecretsJSON: expectedPromSecretsWithBackendAndTLS,
StatsConfigJSON: defaultStatsConfigJSON,
PrometheusBackendPort: "20100",
PrometheusScrapePath: "/metrics",
PrometheusCAFile: "test-ca-file",
PrometheusCertFile: "test-cert-file",
PrometheusKeyFile: "test-key-file",
},
wantErr: false,
},
{
name: "prometheus-bind-addr-with-backend-and-tls-ca-path",
input: BootstrapConfig{
PrometheusBindAddr: "0.0.0.0:9000",
},
baseArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
PrometheusBackendPort: "20100",
PrometheusScrapePath: "/metrics",
PrometheusCAPath: "test-ca-directory",
PrometheusCertFile: "test-cert-file",
PrometheusKeyFile: "test-key-file",
},
wantArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
// Should use the "prometheus_backend" cluster instead, which
// uses the PrometheusBackendPort rather than Envoy admin port
StaticClustersJSON: expectedPrometheusBackendCluster,
StaticListenersJSON: expectedPromListenerWithBackendAndTLS,
StaticSecretsJSON: expectedPromSecretsWithBackendAndTLSCAPath,
StatsConfigJSON: defaultStatsConfigJSON,
PrometheusBackendPort: "20100",
PrometheusScrapePath: "/metrics",
PrometheusCAPath: "test-ca-directory",
PrometheusCertFile: "test-cert-file",
PrometheusKeyFile: "test-key-file",
},
wantErr: false,
},
{ {
name: "stats-bind-addr", name: "stats-bind-addr",
input: BootstrapConfig{ input: BootstrapConfig{

View File

@ -76,6 +76,10 @@ type BootstrapTplArgs struct {
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#envoy-api-msg-config-metrics-v2-statsconfig. // https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#envoy-api-msg-config-metrics-v2-statsconfig.
StatsConfigJSON string StatsConfigJSON string
// StaticSecretsJSON is a JSON string containing zero or more Secret definitions.
// See https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/secret.proto#envoy-v3-api-msg-extensions-transport-sockets-tls-v3-secret
StaticSecretsJSON string
// StatsFlushInterval is the time duration between Envoy stats flushes. It is // StatsFlushInterval is the time duration between Envoy stats flushes. It is
// in proto3 "duration" string format for example "1.12s" See // in proto3 "duration" string format for example "1.12s" See
// https://developers.google.com/protocol-buffers/docs/proto3#json and // https://developers.google.com/protocol-buffers/docs/proto3#json and
@ -106,6 +110,11 @@ type BootstrapTplArgs struct {
// PrometheusScrapePath will configure the path where metrics are exposed on // PrometheusScrapePath will configure the path where metrics are exposed on
// the envoy_prometheus_bind_addr listener. // the envoy_prometheus_bind_addr listener.
PrometheusScrapePath string PrometheusScrapePath string
PrometheusCAFile string
PrometheusCAPath string
PrometheusCertFile string
PrometheusKeyFile string
} }
// GRPC settings used in the bootstrap template. // GRPC settings used in the bootstrap template.
@ -209,6 +218,12 @@ const bootstrapTemplate = `{
{{ .StaticListenersJSON }} {{ .StaticListenersJSON }}
] ]
{{- end }} {{- end }}
{{- if .StaticSecretsJSON -}}
,
"secrets": [
{{ .StaticSecretsJSON }}
]
{{- end }}
}, },
{{- if .StatsSinksJSON }} {{- if .StatsSinksJSON }}
"stats_sinks": {{ .StatsSinksJSON }}, "stats_sinks": {{ .StatsSinksJSON }},

View File

@ -52,6 +52,10 @@ type cmd struct {
envoyVersion string envoyVersion string
prometheusBackendPort string prometheusBackendPort string
prometheusScrapePath string prometheusScrapePath string
prometheusCAFile string
prometheusCAPath string
prometheusCertFile string
prometheusKeyFile string
// mesh gateway registration information // mesh gateway registration information
register bool register bool
@ -174,6 +178,19 @@ func (c *cmd) init() {
"0.0.0.0:20200/scrape-metrics. "+ "0.0.0.0:20200/scrape-metrics. "+
"Only applicable when envoy_prometheus_bind_addr is set in proxy config.") "Only applicable when envoy_prometheus_bind_addr is set in proxy config.")
c.flags.StringVar(&c.prometheusCAFile, "prometheus-ca-file", "",
"Path to a CA file for Envoy to use when serving TLS on the Prometheus metrics endpoint. "+
"Only applicable when envoy_prometheus_bind_addr is set in proxy config.")
c.flags.StringVar(&c.prometheusCAPath, "prometheus-ca-path", "",
"Path to a directory of CA certificates for Envoy to use when serving the Prometheus metrics endpoint. "+
"Only applicable when envoy_prometheus_bind_addr is set in proxy config.")
c.flags.StringVar(&c.prometheusCertFile, "prometheus-cert-file", "",
"Path to a certificate file for Envoy to use when serving TLS on the Prometheus metrics endpoint. "+
"Only applicable when envoy_prometheus_bind_addr is set in proxy config.")
c.flags.StringVar(&c.prometheusKeyFile, "prometheus-key-file", "",
"Path to a private key file for Envoy to use when serving TLS on the Prometheus metrics endpoint. "+
"Only applicable when envoy_prometheus_bind_addr is set in proxy config.")
c.http = &flags.HTTPFlags{} c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.MultiTenancyFlags()) flags.Merge(c.flags, c.http.MultiTenancyFlags())
@ -306,6 +323,15 @@ func (c *cmd) run(args []string) int {
return 1 return 1
} }
// If any of CA/Cert/Key are specified, make sure they are all present.
if c.prometheusKeyFile != "" || c.prometheusCertFile != "" || (c.prometheusCAFile != "" || c.prometheusCAPath != "") {
if c.prometheusKeyFile == "" || c.prometheusCertFile == "" || (c.prometheusCAFile == "" && c.prometheusCAPath == "") {
c.UI.Error("Must provide a CA (-prometheus-ca-file or -prometheus-ca-path) as well as " +
"-prometheus-cert-file and -prometheus-key-file to enable TLS for prometheus metrics")
return 1
}
}
if c.register { if c.register {
if c.nodeName != "" { if c.nodeName != "" {
c.UI.Error("'-register' cannot be used with '-node-name'") c.UI.Error("'-register' cannot be used with '-node-name'")
@ -505,6 +531,10 @@ func (c *cmd) templateArgs() (*BootstrapTplArgs, error) {
Datacenter: httpCfg.Datacenter, Datacenter: httpCfg.Datacenter,
PrometheusBackendPort: c.prometheusBackendPort, PrometheusBackendPort: c.prometheusBackendPort,
PrometheusScrapePath: c.prometheusScrapePath, PrometheusScrapePath: c.prometheusScrapePath,
PrometheusCAFile: c.prometheusCAFile,
PrometheusCAPath: c.prometheusCAPath,
PrometheusCertFile: c.prometheusCertFile,
PrometheusKeyFile: c.prometheusKeyFile,
}, nil }, nil
} }

View File

@ -211,6 +211,72 @@ func TestGenerateConfig(t *testing.T) {
PrometheusScrapePath: "/scrape-path", PrometheusScrapePath: "/scrape-path",
}, },
}, },
{
Name: "prometheus-metrics-tls-ca-file",
Flags: []string{"-proxy-id", "test-proxy",
"-prometheus-backend-port", "20100", "-prometheus-scrape-path", "/scrape-path",
"-prometheus-ca-file", "../../../test/key/ourdomain.cer", "-prometheus-cert-file", "../../../test/key/ourdomain_server.cer",
"-prometheus-key-file", "../../../test/key/ourdomain_server.key"},
ProxyConfig: map[string]interface{}{
// When envoy_prometheus_bind_addr is set, if
// PrometheusBackendPort is set, there will be a
// "prometheus_backend" cluster in the Envoy configuration.
"envoy_prometheus_bind_addr": "0.0.0.0:9000",
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
// We don't know this til after the lookup so it will be empty in the
// initial args call we are testing here.
ProxySourceService: "",
GRPC: GRPC{
AgentAddress: "127.0.0.1",
AgentPort: "8502", // Note this is the gRPC port
},
AdminAccessLogPath: "/dev/null",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
PrometheusBackendPort: "20100",
PrometheusScrapePath: "/scrape-path",
PrometheusCAFile: "../../../test/key/ourdomain.cer",
PrometheusCertFile: "../../../test/key/ourdomain_server.cer",
PrometheusKeyFile: "../../../test/key/ourdomain_server.key",
},
},
{
Name: "prometheus-metrics-tls-ca-path",
Flags: []string{"-proxy-id", "test-proxy",
"-prometheus-backend-port", "20100", "-prometheus-scrape-path", "/scrape-path",
"-prometheus-ca-path", "../../../test/ca_path", "-prometheus-cert-file", "../../../test/key/ourdomain_server.cer",
"-prometheus-key-file", "../../../test/key/ourdomain_server.key"},
ProxyConfig: map[string]interface{}{
// When envoy_prometheus_bind_addr is set, if
// PrometheusBackendPort is set, there will be a
// "prometheus_backend" cluster in the Envoy configuration.
"envoy_prometheus_bind_addr": "0.0.0.0:9000",
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
// We don't know this til after the lookup so it will be empty in the
// initial args call we are testing here.
ProxySourceService: "",
GRPC: GRPC{
AgentAddress: "127.0.0.1",
AgentPort: "8502", // Note this is the gRPC port
},
AdminAccessLogPath: "/dev/null",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
PrometheusBackendPort: "20100",
PrometheusScrapePath: "/scrape-path",
PrometheusCAPath: "../../../test/ca_path",
PrometheusCertFile: "../../../test/key/ourdomain_server.cer",
PrometheusKeyFile: "../../../test/key/ourdomain_server.key",
},
},
{ {
Name: "token-arg", Name: "token-arg",
Flags: []string{"-proxy-id", "test-proxy", Flags: []string{"-proxy-id", "test-proxy",

View File

@ -0,0 +1,320 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test",
"id": "test-proxy",
"metadata": {
"namespace": "default",
"partition": "default"
}
},
"static_resources": {
"clusters": [
{
"name": "local_agent",
"ignore_health_on_host_removal": false,
"connect_timeout": "1s",
"type": "STATIC",
"http2_protocol_options": {},
"loadAssignment": {
"clusterName": "local_agent",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 8502
}
}
}
}
]
}
]
}
},
{
"name": "prometheus_backend",
"ignore_health_on_host_removal": false,
"connect_timeout": "5s",
"type": "STATIC",
"http_protocol_options": {},
"loadAssignment": {
"clusterName": "prometheus_backend",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 20100
}
}
}
}
]
}
]
}
}
],
"listeners": [
{
"name": "envoy_prometheus_metrics_listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9000
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy_prometheus_metrics",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
"virtual_hosts": [
{
"name": "self_admin",
"domains": [
"*"
],
"routes": [
{
"match": {
"path": "/scrape-path"
},
"route": {
"cluster": "prometheus_backend",
"prefix_rewrite": "/stats/prometheus"
}
},
{
"match": {
"prefix": "/"
},
"direct_response": {
"status": 404
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
]
}
}
],
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
"commonTlsContext": {
"tlsCertificateSdsSecretConfigs": [
{
"name": "prometheus_cert"
}
],
"validationContextSdsSecretConfig": {
"trustedCa": {
"name": "prometheus_validation_context"
}
}
}
}
}
}
]
}
],
"secrets": [
{
"name": "prometheus_cert",
"tlsCertificate": {
"certificateChain": {
"filename": "../../../test/key/ourdomain_server.cer"
},
"privateKey": {
"filename": "../../../test/key/ourdomain_server.key"
}
}
},
{
"name": "prometheus_validation_context",
"validationContext": {
"trustedCa": {
"filename": "../../../test/key/ourdomain.cer"
}
}
}
]
},
"stats_config": {
"stats_tags": [
{
"regex": "^cluster\\.(?:passthrough~)?((?:([^.]+)~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.custom_hash"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.service_subset"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.service"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.namespace"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.partition"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.datacenter"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.routing_type"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.([^.]+)\\.consul\\.)",
"tag_name": "consul.destination.trust_domain"
},
{
"regex": "^cluster\\.(?:passthrough~)?(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.target"
},
{
"regex": "^cluster\\.(?:passthrough~)?(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+)\\.consul\\.)",
"tag_name": "consul.destination.full_target"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.(([^.]+)(?:\\.[^.]+)?(?:\\.[^.]+)?\\.[^.]+\\.)",
"tag_name": "consul.upstream.service"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.([^.]+(?:\\.[^.]+)?(?:\\.[^.]+)?\\.([^.]+)\\.)",
"tag_name": "consul.upstream.datacenter"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.([^.]+(?:\\.([^.]+))?(?:\\.[^.]+)?\\.[^.]+\\.)",
"tag_name": "consul.upstream.namespace"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.([^.]+(?:\\.[^.]+)?(?:\\.([^.]+))?\\.[^.]+\\.)",
"tag_name": "consul.upstream.partition"
},
{
"regex": "^cluster\\.((?:([^.]+)~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.custom_hash"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.service_subset"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.service"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.namespace"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.datacenter"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.[^.]+\\.consul\\.)",
"tag_name": "consul.routing_type"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.([^.]+)\\.consul\\.)",
"tag_name": "consul.trust_domain"
},
{
"regex": "^cluster\\.(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.target"
},
{
"regex": "^cluster\\.(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+)\\.consul\\.)",
"tag_name": "consul.full_target"
},
{
"tag_name": "local_cluster",
"fixed_value": "test"
},
{
"tag_name": "consul.source.service",
"fixed_value": "test"
},
{
"tag_name": "consul.source.namespace",
"fixed_value": "default"
},
{
"tag_name": "consul.source.partition",
"fixed_value": "default"
},
{
"tag_name": "consul.source.datacenter",
"fixed_value": "dc1"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": {
"ads": {},
"resource_api_version": "V3"
},
"cds_config": {
"ads": {},
"resource_api_version": "V3"
},
"ads_config": {
"api_type": "DELTA_GRPC",
"transport_api_version": "V3",
"grpc_services": {
"initial_metadata": [
{
"key": "x-consul-token",
"value": ""
}
],
"envoy_grpc": {
"cluster_name": "local_agent"
}
}
}
}
}

View File

@ -0,0 +1,320 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test",
"id": "test-proxy",
"metadata": {
"namespace": "default",
"partition": "default"
}
},
"static_resources": {
"clusters": [
{
"name": "local_agent",
"ignore_health_on_host_removal": false,
"connect_timeout": "1s",
"type": "STATIC",
"http2_protocol_options": {},
"loadAssignment": {
"clusterName": "local_agent",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 8502
}
}
}
}
]
}
]
}
},
{
"name": "prometheus_backend",
"ignore_health_on_host_removal": false,
"connect_timeout": "5s",
"type": "STATIC",
"http_protocol_options": {},
"loadAssignment": {
"clusterName": "prometheus_backend",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 20100
}
}
}
}
]
}
]
}
}
],
"listeners": [
{
"name": "envoy_prometheus_metrics_listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9000
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy_prometheus_metrics",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
"virtual_hosts": [
{
"name": "self_admin",
"domains": [
"*"
],
"routes": [
{
"match": {
"path": "/scrape-path"
},
"route": {
"cluster": "prometheus_backend",
"prefix_rewrite": "/stats/prometheus"
}
},
{
"match": {
"prefix": "/"
},
"direct_response": {
"status": 404
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
]
}
}
],
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
"commonTlsContext": {
"tlsCertificateSdsSecretConfigs": [
{
"name": "prometheus_cert"
}
],
"validationContextSdsSecretConfig": {
"trustedCa": {
"name": "prometheus_validation_context"
}
}
}
}
}
}
]
}
],
"secrets": [
{
"name": "prometheus_cert",
"tlsCertificate": {
"certificateChain": {
"filename": "../../../test/key/ourdomain_server.cer"
},
"privateKey": {
"filename": "../../../test/key/ourdomain_server.key"
}
}
},
{
"name": "prometheus_validation_context",
"validationContext": {
"watchedDirectory": {
"path": "../../../test/ca_path"
}
}
}
]
},
"stats_config": {
"stats_tags": [
{
"regex": "^cluster\\.(?:passthrough~)?((?:([^.]+)~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.custom_hash"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.service_subset"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.service"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.namespace"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.partition"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.datacenter"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.routing_type"
},
{
"regex": "^cluster\\.(?:passthrough~)?((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.([^.]+)\\.consul\\.)",
"tag_name": "consul.destination.trust_domain"
},
{
"regex": "^cluster\\.(?:passthrough~)?(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.destination.target"
},
{
"regex": "^cluster\\.(?:passthrough~)?(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+)\\.consul\\.)",
"tag_name": "consul.destination.full_target"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.(([^.]+)(?:\\.[^.]+)?(?:\\.[^.]+)?\\.[^.]+\\.)",
"tag_name": "consul.upstream.service"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.([^.]+(?:\\.[^.]+)?(?:\\.[^.]+)?\\.([^.]+)\\.)",
"tag_name": "consul.upstream.datacenter"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.([^.]+(?:\\.([^.]+))?(?:\\.[^.]+)?\\.[^.]+\\.)",
"tag_name": "consul.upstream.namespace"
},
{
"regex": "^(?:tcp|http)\\.upstream\\.([^.]+(?:\\.[^.]+)?(?:\\.([^.]+))?\\.[^.]+\\.)",
"tag_name": "consul.upstream.partition"
},
{
"regex": "^cluster\\.((?:([^.]+)~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.custom_hash"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.service_subset"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.service"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.namespace"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.datacenter"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.[^.]+\\.consul\\.)",
"tag_name": "consul.routing_type"
},
{
"regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.([^.]+)\\.consul\\.)",
"tag_name": "consul.trust_domain"
},
{
"regex": "^cluster\\.(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)",
"tag_name": "consul.target"
},
{
"regex": "^cluster\\.(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+)\\.consul\\.)",
"tag_name": "consul.full_target"
},
{
"tag_name": "local_cluster",
"fixed_value": "test"
},
{
"tag_name": "consul.source.service",
"fixed_value": "test"
},
{
"tag_name": "consul.source.namespace",
"fixed_value": "default"
},
{
"tag_name": "consul.source.partition",
"fixed_value": "default"
},
{
"tag_name": "consul.source.datacenter",
"fixed_value": "dc1"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": {
"ads": {},
"resource_api_version": "V3"
},
"cds_config": {
"ads": {},
"resource_api_version": "V3"
},
"ads_config": {
"api_type": "DELTA_GRPC",
"transport_api_version": "V3",
"grpc_services": {
"initial_metadata": [
{
"key": "x-consul-token",
"value": ""
}
],
"envoy_grpc": {
"cluster_name": "local_agent"
}
}
}
}
}

View File

@ -7,6 +7,7 @@ require (
github.com/hashicorp/consul/api v1.11.0 github.com/hashicorp/consul/api v1.11.0
github.com/hashicorp/consul/sdk v0.8.0 github.com/hashicorp/consul/sdk v0.8.0
github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/hcl v1.0.0
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/testcontainers/testcontainers-go v0.13.0 github.com/testcontainers/testcontainers-go v0.13.0
) )
@ -14,11 +15,11 @@ require (
require ( require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/Microsoft/hcsshim v0.8.23 // indirect github.com/Microsoft/hcsshim v0.8.24 // indirect
github.com/armon/go-metrics v0.3.10 // indirect github.com/armon/go-metrics v0.3.10 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/containerd v1.5.9 // indirect github.com/containerd/containerd v1.5.13 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-connections v0.4.0 // indirect

View File

@ -55,8 +55,9 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim v0.8.24 h1:jP+GMeRXIR1sH1kG4lJr9ShmSjVrua5jmFZDtfYGkn4=
github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@ -130,8 +131,9 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@ -150,8 +152,9 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4=
github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE=
github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@ -424,6 +427,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
@ -699,6 +703,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
@ -715,6 +720,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -761,6 +767,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -797,6 +804,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM=
@ -886,6 +894,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -946,6 +955,7 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -9,6 +9,7 @@ import (
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/hcl"
"github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait" "github.com/testcontainers/testcontainers-go/wait"
@ -48,7 +49,6 @@ func newConsulContainerWithReq(ctx context.Context, req testcontainers.Container
// NewConsulContainer starts a Consul node in a container with the given config. // NewConsulContainer starts a Consul node in a container with the given config.
func NewConsulContainer(ctx context.Context, config Config) (Node, error) { func NewConsulContainer(ctx context.Context, config Config) (Node, error) {
license, err := readLicense() license, err := readLicense()
if err != nil { if err != nil {
return nil, err return nil, err
@ -64,30 +64,29 @@ func NewConsulContainer(ctx context.Context, config Config) (Node, error) {
return nil, err return nil, err
} }
pc, err := readSomeConfigFileFields(config.HCL)
if err != nil {
return nil, err
}
configFile, err := createConfigFile(config.HCL) configFile, err := createConfigFile(config.HCL)
if err != nil { if err != nil {
return nil, err return nil, err
} }
skipReaper := isRYUKDisabled()
req := testcontainers.ContainerRequest{ req := newContainerRequest(config, name, configFile, tmpDirData, license)
Image: consulImage + ":" + config.Version,
ExposedPorts: []string{"8500/tcp"},
WaitingFor: wait.ForLog(bootLogLine).WithStartupTimeout(10 * time.Second),
AutoRemove: false,
Name: name,
Mounts: testcontainers.ContainerMounts{
testcontainers.ContainerMount{Source: testcontainers.DockerBindMountSource{HostPath: configFile}, Target: "/consul/config/config.hcl"},
testcontainers.ContainerMount{Source: testcontainers.DockerBindMountSource{HostPath: tmpDirData}, Target: "/consul/data"},
},
Cmd: config.Cmd,
SkipReaper: skipReaper,
Env: map[string]string{"CONSUL_LICENSE": license},
}
container, err := newConsulContainerWithReq(ctx, req) container, err := newConsulContainerWithReq(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := container.StartLogProducer(ctx); err != nil {
return nil, err
}
container.FollowOutput(&NodeLogConsumer{
Prefix: pc.NodeName,
})
localIP, err := container.Host(ctx) localIP, err := container.Host(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
@ -104,21 +103,42 @@ func NewConsulContainer(ctx context.Context, config Config) (Node, error) {
} }
uri := fmt.Sprintf("http://%s:%s", localIP, mappedPort.Port()) uri := fmt.Sprintf("http://%s:%s", localIP, mappedPort.Port())
c := new(consulContainerNode)
c.config = config
c.container = container
c.ip = ip
c.port = mappedPort.Int()
apiConfig := api.DefaultConfig() apiConfig := api.DefaultConfig()
apiConfig.Address = uri apiConfig.Address = uri
c.client, err = api.NewClient(apiConfig) apiClient, err := api.NewClient(apiConfig)
c.ctx = ctx
c.req = req
c.dataDir = tmpDirData
if err != nil { if err != nil {
return nil, err return nil, err
} }
return c, nil
return &consulContainerNode{
config: config,
container: container,
ip: ip,
port: mappedPort.Int(),
client: apiClient,
ctx: ctx,
req: req,
dataDir: tmpDirData,
}, nil
}
func newContainerRequest(config Config, name, configFile, dataDir, license string) testcontainers.ContainerRequest {
skipReaper := isRYUKDisabled()
return testcontainers.ContainerRequest{
Image: consulImage + ":" + config.Version,
ExposedPorts: []string{"8500/tcp"},
WaitingFor: wait.ForLog(bootLogLine).WithStartupTimeout(10 * time.Second),
AutoRemove: false,
Name: name,
Mounts: []testcontainers.ContainerMount{
{Source: testcontainers.DockerBindMountSource{HostPath: configFile}, Target: "/consul/config/config.hcl"},
{Source: testcontainers.DockerBindMountSource{HostPath: dataDir}, Target: "/consul/data"},
},
Cmd: config.Cmd,
SkipReaper: skipReaper,
Env: map[string]string{"CONSUL_LICENSE": license},
}
} }
// GetClient returns an API client that can be used to communicate with the Node. // GetClient returns an API client that can be used to communicate with the Node.
@ -132,25 +152,44 @@ func (c *consulContainerNode) GetAddr() (string, int) {
} }
func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error { func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error {
pc, err := readSomeConfigFileFields(config.HCL)
if err != nil {
return err
}
file, err := createConfigFile(config.HCL) file, err := createConfigFile(config.HCL)
if err != nil { if err != nil {
return err return err
} }
c.req.Cmd = config.Cmd
c.req.Mounts = testcontainers.ContainerMounts{ req2 := newContainerRequest(
testcontainers.ContainerMount{Source: testcontainers.DockerBindMountSource{HostPath: file}, Target: "/consul/config/config.hcl"}, config,
testcontainers.ContainerMount{Source: testcontainers.DockerBindMountSource{HostPath: c.dataDir}, Target: "/consul/data"}, c.req.Name,
} file,
c.req.Image = consulImage + ":" + config.Version c.dataDir,
err = c.container.Terminate(ctx) "",
if err != nil { )
req2.Env = c.req.Env // copy license
_ = c.container.StopLogProducer()
if err := c.container.Terminate(ctx); err != nil {
return err return err
} }
c.req = req2
container, err := newConsulContainerWithReq(ctx, c.req) container, err := newConsulContainerWithReq(ctx, c.req)
if err != nil { if err != nil {
return err return err
} }
if err := container.StartLogProducer(ctx); err != nil {
return err
}
container.FollowOutput(&NodeLogConsumer{
Prefix: pc.NodeName,
})
c.container = container c.container = container
localIP, err := container.Host(ctx) localIP, err := container.Host(ctx)
@ -185,7 +224,19 @@ func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error
// Terminate attempts to terminate the container. On failure, an error will be // Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup. // returned and the reaper process (RYUK) will handle cleanup.
func (c *consulContainerNode) Terminate() error { func (c *consulContainerNode) Terminate() error {
return c.container.Terminate(c.ctx) if c.container == nil {
return nil
}
err := c.container.StopLogProducer()
if err1 := c.container.Terminate(c.ctx); err == nil {
err = err1
}
c.container = nil
return err
} }
// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled // isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
@ -236,3 +287,15 @@ func createConfigFile(HCL string) (string, error) {
} }
return configFile, nil return configFile, nil
} }
type parsedConfig struct {
NodeName string `hcl:"node_name"`
}
func readSomeConfigFileFields(HCL string) (parsedConfig, error) {
var pc parsedConfig
if err := hcl.Decode(&pc, HCL); err != nil {
return pc, fmt.Errorf("Failed to parse config file: %w", err)
}
return pc, nil
}

View File

@ -0,0 +1,23 @@
package node
import (
"fmt"
"os"
"github.com/testcontainers/testcontainers-go"
)
type NodeLogConsumer struct {
Prefix string
}
var _ testcontainers.LogConsumer = (*NodeLogConsumer)(nil)
func (c *NodeLogConsumer) Accept(log testcontainers.Log) {
switch log.LogType {
case "STDOUT":
fmt.Fprint(os.Stdout, c.Prefix+" ~~ "+string(log.Content))
case "STDERR":
fmt.Fprint(os.Stderr, c.Prefix+" ~~ "+string(log.Content))
}
}

View File

@ -2,19 +2,18 @@ package node
import ( import (
"context" "context"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
) )
type ( // Node represent a Consul node abstraction
// Node represent a Consul node abstraction type Node interface {
Node interface { Terminate() error
Terminate() error GetClient() *api.Client
GetClient() *api.Client GetAddr() (string, int)
GetAddr() (string, int) GetConfig() Config
GetConfig() Config Upgrade(ctx context.Context, config Config) error
Upgrade(ctx context.Context, config Config) error }
}
)
// Config is a set of configurations required to create a Node // Config is a set of configurations required to create a Node
type Config struct { type Config struct {

Some files were not shown because too many files have changed in this diff Show More