2023-03-28 22:48:58 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2017-04-19 23:00:11 +00:00
|
|
|
package testrpc
|
|
|
|
|
|
|
|
import (
|
2022-12-14 15:24:22 +00:00
|
|
|
"context"
|
2017-04-19 23:00:11 +00:00
|
|
|
"testing"
|
|
|
|
|
2021-12-08 17:35:36 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2017-04-19 23:00:11 +00:00
|
|
|
)
|
|
|
|
|
2022-12-14 15:24:22 +00:00
|
|
|
// rpcFn is the signature of an agent RPC dispatch function (e.g. Agent.RPC):
// it takes a context, an RPC method name, the request args, and a pointer to
// the reply value, returning any transport or application error.
type rpcFn func(context.Context, string, interface{}, interface{}) error
|
2017-04-19 23:00:11 +00:00
|
|
|
|
2022-08-31 16:58:41 +00:00
|
|
|
// WaitForLeader ensures we have a leader and a node registration. It
|
|
|
|
// does not wait for the Consul (node) service to be ready. Use `WaitForTestAgent`
|
|
|
|
// to make sure the Consul service is ready.
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
//
|
|
|
|
// Most uses of this would be better served in the agent/consul package by
|
|
|
|
// using waitForLeaderEstablishment() instead.
|
2020-05-29 21:16:03 +00:00
|
|
|
func WaitForLeader(t *testing.T, rpc rpcFn, dc string, options ...waitOption) {
|
2019-12-06 19:01:34 +00:00
|
|
|
t.Helper()
|
|
|
|
|
2020-05-29 21:16:03 +00:00
|
|
|
flat := flattenOptions(options)
|
|
|
|
if flat.WaitForAntiEntropySync {
|
|
|
|
t.Fatalf("WaitForLeader doesn't accept the WaitForAntiEntropySync option")
|
|
|
|
}
|
|
|
|
|
2017-04-19 23:00:11 +00:00
|
|
|
var out structs.IndexedNodes
|
2017-05-05 11:48:34 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2020-05-29 21:16:03 +00:00
|
|
|
args := &structs.DCSpecificRequest{
|
|
|
|
Datacenter: dc,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: flat.Token},
|
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := rpc(context.Background(), "Catalog.ListNodes", args, &out); err != nil {
|
2017-05-05 11:48:34 +00:00
|
|
|
r.Fatalf("Catalog.ListNodes failed: %v", err)
|
2017-04-19 23:00:11 +00:00
|
|
|
}
|
|
|
|
if !out.QueryMeta.KnownLeader {
|
2017-05-05 11:48:34 +00:00
|
|
|
r.Fatalf("No leader")
|
2017-04-19 23:00:11 +00:00
|
|
|
}
|
2018-08-06 23:46:09 +00:00
|
|
|
if out.Index < 2 {
|
2020-01-17 22:27:13 +00:00
|
|
|
r.Fatalf("Consul index should be at least 2 in %s", dc)
|
2017-04-19 23:00:11 +00:00
|
|
|
}
|
2017-05-05 11:48:34 +00:00
|
|
|
})
|
2017-04-19 23:00:11 +00:00
|
|
|
}
|
2018-08-10 19:04:07 +00:00
|
|
|
|
2018-08-23 16:06:39 +00:00
|
|
|
// WaitUntilNoLeader ensures no leader is present, useful for testing lost leadership.
|
2020-05-29 21:16:03 +00:00
|
|
|
func WaitUntilNoLeader(t *testing.T, rpc rpcFn, dc string, options ...waitOption) {
|
2019-12-06 19:01:34 +00:00
|
|
|
t.Helper()
|
|
|
|
|
2020-05-29 21:16:03 +00:00
|
|
|
flat := flattenOptions(options)
|
|
|
|
if flat.WaitForAntiEntropySync {
|
|
|
|
t.Fatalf("WaitUntilNoLeader doesn't accept the WaitForAntiEntropySync option")
|
|
|
|
}
|
|
|
|
|
2018-08-23 16:06:39 +00:00
|
|
|
var out structs.IndexedNodes
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2020-05-29 21:16:03 +00:00
|
|
|
args := &structs.DCSpecificRequest{
|
|
|
|
Datacenter: dc,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: flat.Token},
|
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := rpc(context.Background(), "Catalog.ListNodes", args, &out); err == nil {
|
2018-08-28 16:37:34 +00:00
|
|
|
r.Fatalf("It still has a leader: %#v", out)
|
2018-08-23 16:06:39 +00:00
|
|
|
}
|
|
|
|
if out.QueryMeta.KnownLeader {
|
|
|
|
r.Fatalf("Has still a leader")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-01-25 16:01:21 +00:00
|
|
|
// waitOption configures the WaitFor* helpers in this package. Construct values
// via WithToken and WaitForAntiEntropySync rather than building them directly.
type waitOption struct {
	// Token is the ACL token attached to the helpers' RPC queries.
	Token string

	// WaitForAntiEntropySync, when set, makes WaitForTestAgent also wait until
	// anti-entropy has populated the node's tagged addresses. WaitForLeader and
	// WaitUntilNoLeader reject this option.
	WaitForAntiEntropySync bool
}
|
|
|
|
|
|
|
|
// WithToken returns a waitOption that sets the ACL token used for the
// wait helpers' underlying RPC queries.
func WithToken(token string) waitOption {
	return waitOption{Token: token}
}
|
|
|
|
|
2019-10-04 18:37:34 +00:00
|
|
|
// WaitForAntiEntropySync returns a waitOption that makes WaitForTestAgent also
// wait until the agent's anti-entropy sync has run (observed via the node's
// tagged addresses being populated).
func WaitForAntiEntropySync() waitOption {
	return waitOption{WaitForAntiEntropySync: true}
}
|
|
|
|
|
2020-05-29 21:16:03 +00:00
|
|
|
func flattenOptions(options []waitOption) waitOption {
|
|
|
|
var flat waitOption
|
2019-01-25 16:01:21 +00:00
|
|
|
for _, opt := range options {
|
|
|
|
if opt.Token != "" {
|
2020-05-29 21:16:03 +00:00
|
|
|
flat.Token = opt.Token
|
2019-01-25 16:01:21 +00:00
|
|
|
}
|
2019-10-04 18:37:34 +00:00
|
|
|
if opt.WaitForAntiEntropySync {
|
2020-05-29 21:16:03 +00:00
|
|
|
flat.WaitForAntiEntropySync = true
|
2019-10-04 18:37:34 +00:00
|
|
|
}
|
2019-01-25 16:01:21 +00:00
|
|
|
}
|
2020-05-29 21:16:03 +00:00
|
|
|
return flat
|
|
|
|
}
|
|
|
|
|
2022-08-31 16:58:41 +00:00
|
|
|
// WaitForTestAgent ensures we have a node with serfHealth check registered.
|
|
|
|
// You'll want to use this if you expect the Consul (node) service to be ready.
|
2020-05-29 21:16:03 +00:00
|
|
|
func WaitForTestAgent(t *testing.T, rpc rpcFn, dc string, options ...waitOption) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
flat := flattenOptions(options)
|
|
|
|
|
|
|
|
var nodes structs.IndexedNodes
|
|
|
|
var checks structs.IndexedHealthChecks
|
2019-01-25 16:01:21 +00:00
|
|
|
|
2018-08-10 19:04:07 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-01-25 16:01:21 +00:00
|
|
|
dcReq := &structs.DCSpecificRequest{
|
|
|
|
Datacenter: dc,
|
2020-05-29 21:16:03 +00:00
|
|
|
QueryOptions: structs.QueryOptions{Token: flat.Token},
|
2019-01-25 16:01:21 +00:00
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := rpc(context.Background(), "Catalog.ListNodes", dcReq, &nodes); err != nil {
|
2018-08-10 19:04:07 +00:00
|
|
|
r.Fatalf("Catalog.ListNodes failed: %v", err)
|
|
|
|
}
|
|
|
|
if len(nodes.Nodes) == 0 {
|
|
|
|
r.Fatalf("No registered nodes")
|
|
|
|
}
|
|
|
|
|
2020-05-29 21:16:03 +00:00
|
|
|
if flat.WaitForAntiEntropySync {
|
2019-10-04 18:37:34 +00:00
|
|
|
if len(nodes.Nodes[0].TaggedAddresses) == 0 {
|
|
|
|
r.Fatalf("Not synced via anti entropy yet")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-10 19:04:07 +00:00
|
|
|
// This assumes that there is a single agent per dc, typically a TestAgent
|
2019-01-25 16:01:21 +00:00
|
|
|
nodeReq := &structs.NodeSpecificRequest{
|
|
|
|
Datacenter: dc,
|
|
|
|
Node: nodes.Nodes[0].Node,
|
2020-05-29 21:16:03 +00:00
|
|
|
QueryOptions: structs.QueryOptions{Token: flat.Token},
|
2019-01-25 16:01:21 +00:00
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := rpc(context.Background(), "Health.NodeChecks", nodeReq, &checks); err != nil {
|
2018-08-10 19:04:07 +00:00
|
|
|
r.Fatalf("Health.NodeChecks failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var found bool
|
|
|
|
for _, check := range checks.HealthChecks {
|
|
|
|
if check.CheckID == "serfHealth" {
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !found {
|
|
|
|
r.Fatalf("serfHealth check not found")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
2019-11-11 20:57:16 +00:00
|
|
|
|
|
|
|
// WaitForActiveCARoot polls until the server returns an active Connect root CA
|
|
|
|
// with the same ID field as expect. If expect is nil, it just waits until _any_
|
|
|
|
// active root is returned. This is useful because initializing CA happens after
|
|
|
|
// raft leadership is gained so WaitForLeader isn't sufficient to be sure that
|
|
|
|
// the CA is fully initialized.
|
|
|
|
func WaitForActiveCARoot(t *testing.T, rpc rpcFn, dc string, expect *structs.CARoot) {
|
2021-11-25 21:33:48 +00:00
|
|
|
t.Helper()
|
2019-11-11 20:57:16 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
args := &structs.DCSpecificRequest{
|
|
|
|
Datacenter: dc,
|
|
|
|
}
|
|
|
|
var reply structs.IndexedCARoots
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := rpc(context.Background(), "ConnectCA.Roots", args, &reply); err != nil {
|
2019-11-11 20:57:16 +00:00
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2021-12-08 17:35:36 +00:00
|
|
|
root := reply.Active()
|
2019-11-11 20:57:16 +00:00
|
|
|
if root == nil {
|
|
|
|
r.Fatal("no active root")
|
|
|
|
}
|
|
|
|
if expect != nil && root.ID != expect.ID {
|
|
|
|
r.Fatalf("current active root is %s; waiting for %s", root.ID, expect.ID)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
2019-12-06 14:25:26 +00:00
|
|
|
|
2020-10-07 19:20:25 +00:00
|
|
|
// WaitForServiceIntentions waits until the server can accept config entry
|
|
|
|
// kinds of service-intentions meaning any migration bootstrapping from pre-1.9
|
|
|
|
// intentions has completed.
|
|
|
|
func WaitForServiceIntentions(t *testing.T, rpc rpcFn, dc string) {
|
|
|
|
const fakeConfigName = "Sa4ohw5raith4si0Ohwuqu3lowiethoh"
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
args := &structs.ConfigEntryRequest{
|
|
|
|
Op: structs.ConfigEntryDelete,
|
|
|
|
Datacenter: dc,
|
|
|
|
Entry: &structs.ServiceIntentionsConfigEntry{
|
|
|
|
Kind: structs.ServiceIntentions,
|
|
|
|
Name: fakeConfigName,
|
|
|
|
},
|
|
|
|
}
|
2021-11-01 16:42:01 +00:00
|
|
|
var ignored structs.ConfigEntryDeleteResponse
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := rpc(context.Background(), "ConfigEntry.Delete", args, &ignored); err != nil {
|
2020-10-07 19:20:25 +00:00
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-12-06 14:25:26 +00:00
|
|
|
func WaitForACLReplication(t *testing.T, rpc rpcFn, dc string, expectedReplicationType structs.ACLReplicationType, minPolicyIndex, minTokenIndex, minRoleIndex uint64) {
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
args := structs.DCSpecificRequest{
|
|
|
|
Datacenter: dc,
|
|
|
|
}
|
|
|
|
var reply structs.ACLReplicationStatus
|
|
|
|
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(r, rpc(context.Background(), "ACL.ReplicationStatus", &args, &reply))
|
2019-12-06 14:25:26 +00:00
|
|
|
|
|
|
|
require.Equal(r, expectedReplicationType, reply.ReplicationType)
|
|
|
|
require.True(r, reply.Running, "Server not running new replicator yet")
|
|
|
|
require.True(r, reply.ReplicatedIndex >= minPolicyIndex, "Server hasn't replicated enough policies")
|
|
|
|
require.True(r, reply.ReplicatedTokenIndex >= minTokenIndex, "Server hasn't replicated enough tokens")
|
|
|
|
require.True(r, reply.ReplicatedRoleIndex >= minRoleIndex, "Server hasn't replicated enough roles")
|
|
|
|
})
|
|
|
|
}
|