// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package health

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-uuid"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/submatview"
	"github.com/hashicorp/consul/proto/private/pbcommon"
	"github.com/hashicorp/consul/proto/private/pbservice"
	"github.com/hashicorp/consul/proto/private/pbsubscribe"
	"github.com/hashicorp/consul/proto/private/prototest"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/types"
)

func TestSortCheckServiceNodes_OrderIsConsistentWithRPCResponse(t *testing.T) {
	index := uint64(42)
	buildTestNode := func(nodeName string, serviceID string) structs.CheckServiceNode {
		newID, err := uuid.GenerateUUID()
		require.NoError(t, err)
		return structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         types.NodeID(strings.ToUpper(newID)),
				Node:       nodeName,
				Address:    nodeName,
				Datacenter: "dc1",
				RaftIndex: structs.RaftIndex{
					CreateIndex: index,
					ModifyIndex: index,
				},
			},
			Service: &structs.NodeService{
				ID:      serviceID,
				Service: "testService",
				Port:    8080,
				Weights: &structs.Weights{
					Passing: 1,
					Warning: 1,
				},
				RaftIndex: structs.RaftIndex{
					CreateIndex: index,
					ModifyIndex: index,
				},
				EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
			},
			Checks: []*structs.HealthCheck{},
		}
	}
	zero := buildTestNode("a-zero-node", "testService:1")
	one := buildTestNode("node1", "testService:1")
	two := buildTestNode("node1", "testService:2")
	three := buildTestNode("node2", "testService")
	result := structs.IndexedCheckServiceNodes{
		Nodes:     structs.CheckServiceNodes{three, two, zero, one},
		QueryMeta: structs.QueryMeta{Index: index},
	}
	sortCheckServiceNodes(&result)
	expected := structs.CheckServiceNodes{zero, one, two, three}
	require.Equal(t, expected, result.Nodes)
}

func TestHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("local data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_WithEmptySnapshot(t, structs.DefaultPeerKeyword)
	})

	t.Run("peered data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_WithEmptySnapshot(t, "my-peer")
	})
}

func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerName string) {
	namespace := getNamespace(pbcommon.DefaultEnterpriseMeta.Namespace)
	streamClient := newStreamClient(validateNamespace(namespace))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := submatview.NewStore(hclog.New(nil))
	go store.Run(ctx)

	// Initially there are no services registered. Server should send an
	// EndOfSnapshot message immediately with index of 1.
	streamClient.QueueEvents(newEndOfSnapshotEvent(1))

	req := serviceRequestStub{
		serviceRequest: serviceRequest{
			ServiceSpecificRequest: structs.ServiceSpecificRequest{
				PeerName:       peerName,
				Datacenter:     "dc1",
				ServiceName:    "web",
				EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(namespace),
				QueryOptions:   structs.QueryOptions{MaxQueryTime: time.Second},
			},
		},
		streamClient: streamClient,
	}

	empty := &structs.IndexedCheckServiceNodes{
		Nodes: structs.CheckServiceNodes{},
		QueryMeta: structs.QueryMeta{
			Index:   1,
			Backend: structs.QueryBackendStreaming,
		},
	}

	testutil.RunStep(t, "empty snapshot returned", func(t *testing.T) {
		result, err := store.Get(ctx, req)
		require.NoError(t, err)

		require.Equal(t, uint64(1), result.Index)
		require.Equal(t, empty, result.Value)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "blocks for timeout", func(t *testing.T) {
		// Subsequent fetch should block for the timeout
		start := time.Now()
		req.QueryOptions.MaxQueryTime = 200 * time.Millisecond
		result, err := store.Get(ctx, req)
		require.NoError(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed >= 200*time.Millisecond,
			"Fetch should have blocked until timeout")

		require.Equal(t, req.QueryOptions.MinQueryIndex, result.Index, "result index should not have changed")
		require.Equal(t, empty, result.Value, "result value should not have changed")

		req.QueryOptions.MinQueryIndex = result.Index
	})

	var lastResultValue structs.CheckServiceNodes

	testutil.RunStep(t, "blocks until update", func(t *testing.T) {
		// Make another blocking query with a longer timeout and trigger an update
		// event part way through.
		start := time.Now()
		go func() {
			time.Sleep(200 * time.Millisecond)
			streamClient.QueueEvents(newEventServiceHealthRegister(4, 1, "web", peerName))
		}()

		req.QueryOptions.MaxQueryTime = time.Second
		result, err := store.Get(ctx, req)
		require.NoError(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed >= 200*time.Millisecond,
			"Fetch should have blocked until the event was delivered")
		require.True(t, elapsed < time.Second,
			"Fetch should have returned before the timeout")

		require.Equal(t, uint64(4), result.Index, "result index should match the update event index")
		lastResultValue = result.Value.(*structs.IndexedCheckServiceNodes).Nodes
		require.Len(t, lastResultValue, 1,
			"result value should contain the new registration")

		require.Equal(t, peerName, lastResultValue[0].Node.PeerName)
		require.Equal(t, peerName, lastResultValue[0].Service.PeerName)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "reconnects and resumes after temporary error", func(t *testing.T) {
		streamClient.QueueErr(tempError("broken pipe"))

		// Next fetch will continue to block until timeout and receive the same
		// result.
		start := time.Now()
		req.QueryOptions.MaxQueryTime = 200 * time.Millisecond
		result, err := store.Get(ctx, req)
		require.NoError(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed >= 200*time.Millisecond,
			"Fetch should have blocked until timeout")

		require.Equal(t, req.QueryOptions.MinQueryIndex, result.Index,
			"result index should not have changed")
		require.Equal(t, lastResultValue, result.Value.(*structs.IndexedCheckServiceNodes).Nodes,
			"result value should not have changed")

		req.QueryOptions.MinQueryIndex = result.Index

		// But an update should still be noticed due to reconnection
		streamClient.QueueEvents(newEventServiceHealthRegister(10, 2, "web", peerName))

		start = time.Now()
		req.QueryOptions.MaxQueryTime = time.Second
		result, err = store.Get(ctx, req)
		require.NoError(t, err)
		elapsed = time.Since(start)
		require.True(t, elapsed < time.Second,
			"Fetch should have returned before the timeout")

		require.Equal(t, uint64(10), result.Index, "result index should match the update event index")
		lastResultValue = result.Value.(*structs.IndexedCheckServiceNodes).Nodes
		require.Len(t, lastResultValue, 2,
			"result value should contain the new registration")

		require.Equal(t, peerName, lastResultValue[0].Node.PeerName)
		require.Equal(t, peerName, lastResultValue[0].Service.PeerName)
		require.Equal(t, peerName, lastResultValue[1].Node.PeerName)
		require.Equal(t, peerName, lastResultValue[1].Service.PeerName)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "returns non-temporary error to watchers", func(t *testing.T) {
		// Wait and send the error while fetcher is waiting
		go func() {
			time.Sleep(200 * time.Millisecond)
			streamClient.QueueErr(errors.New("invalid request"))
		}()

		// Next fetch should return the error
		start := time.Now()
		req.QueryOptions.MaxQueryTime = time.Second
		result, err := store.Get(ctx, req)
		require.Error(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed >= 200*time.Millisecond,
			"Fetch should have blocked until error was sent")
		require.True(t, elapsed < time.Second,
			"Fetch should have returned before the timeout")

		require.Equal(t, req.QueryOptions.MinQueryIndex, result.Index, "result index should not have changed")
		require.Equal(t, lastResultValue, result.Value.(*structs.IndexedCheckServiceNodes).Nodes)

		req.QueryOptions.MinQueryIndex = result.Index

		// But an update should still be noticed due to reconnection
		streamClient.QueueEvents(newEventServiceHealthRegister(req.QueryOptions.MinQueryIndex+5, 3, "web", peerName))

		req.QueryOptions.MaxQueryTime = time.Second
		result, err = store.Get(ctx, req)
		require.NoError(t, err)
		elapsed = time.Since(start)
		require.True(t, elapsed < time.Second, "Fetch should have returned before the timeout")

		require.Equal(t, req.QueryOptions.MinQueryIndex+5, result.Index, "result index should match the update event index")
		lastResultValue = result.Value.(*structs.IndexedCheckServiceNodes).Nodes
		require.Len(t, lastResultValue, 3,
			"result value should contain the new registration")

		require.Equal(t, peerName, lastResultValue[0].Node.PeerName)
		require.Equal(t, peerName, lastResultValue[0].Service.PeerName)
		require.Equal(t, peerName, lastResultValue[1].Node.PeerName)
		require.Equal(t, peerName, lastResultValue[1].Service.PeerName)
		require.Equal(t, peerName, lastResultValue[2].Node.PeerName)
		require.Equal(t, peerName, lastResultValue[2].Service.PeerName)

		req.QueryOptions.MinQueryIndex = result.Index
	})
}
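
// tempError is an error whose Temporary method reports true. It simulates a
// transient stream failure that the materializer is expected to retry, as
// exercised by the "reconnects and resumes after temporary error" step above.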
type tempError string

func (e tempError) Error() string {
	return string(e)
}

func (e tempError) Temporary() bool {
	return true
}

func TestHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("local data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_WithFullSnapshot(t, structs.DefaultPeerKeyword)
	})

	t.Run("peered data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_WithFullSnapshot(t, "my-peer")
	})
}

func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName string) {
	namespace := getNamespace("ns2")
	client := newStreamClient(validateNamespace(namespace))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := submatview.NewStore(hclog.New(nil))

	// Create an initial snapshot of 3 instances on different nodes
	registerServiceWeb := func(index uint64, nodeNum int) *pbsubscribe.Event {
		return newEventServiceHealthRegister(index, nodeNum, "web", peerName)
	}
	client.QueueEvents(
		registerServiceWeb(5, 1),
		registerServiceWeb(5, 2),
		registerServiceWeb(5, 3),
		newEndOfSnapshotEvent(5))

	req := serviceRequestStub{
		serviceRequest: serviceRequest{
			ServiceSpecificRequest: structs.ServiceSpecificRequest{
				PeerName:       peerName,
				Datacenter:     "dc1",
				ServiceName:    "web",
				EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(namespace),
				QueryOptions:   structs.QueryOptions{MaxQueryTime: time.Second},
			},
		},
		streamClient: client,
	}

	testutil.RunStep(t, "full snapshot returned", func(t *testing.T) {
		result, err := store.Get(ctx, req)
		require.NoError(t, err)

		require.Equal(t, uint64(5), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node1", "node2", "node3")
		expected.Index = 5
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "blocks until deregistration", func(t *testing.T) {
		// Make another blocking query with a longer timeout and trigger an update
		// event part way through.
		start := time.Now()
		go func() {
			time.Sleep(200 * time.Millisecond)

			// Deregister instance on node1
			client.QueueEvents(newEventServiceHealthDeregister(20, 1, "web", peerName))
		}()

		req.QueryOptions.MaxQueryTime = time.Second
		result, err := store.Get(ctx, req)
		require.NoError(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed >= 200*time.Millisecond,
			"Fetch should have blocked until the event was delivered")
		require.True(t, elapsed < time.Second,
			"Fetch should have returned before the timeout")

		require.Equal(t, uint64(20), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node2", "node3")
		expected.Index = 20
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "server reload is respected", func(t *testing.T) {
		// Simulates the server noticing the request's ACL token privs changing. To
		// detect this we'll queue up the new snapshot as a different set of nodes
		// to the first.
		client.QueueErr(status.Error(codes.Aborted, "reset by server"))

		client.QueueEvents(
			registerServiceWeb(50, 3), // overlap existing node
			registerServiceWeb(50, 4),
			registerServiceWeb(50, 5),
			newEndOfSnapshotEvent(50))

		// Make another blocking query with THE SAME index. It should immediately
		// return the new snapshot.
		start := time.Now()
		req.QueryOptions.MaxQueryTime = time.Second
		result, err := store.Get(ctx, req)
		require.NoError(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed < time.Second,
			"Fetch should have returned before the timeout")

		require.Equal(t, uint64(50), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node3", "node4", "node5")
		expected.Index = 50
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "reconnects and receives new snapshot when server state has changed", func(t *testing.T) {
		client.QueueErr(tempError("temporary connection error"))

		client.QueueEvents(
			newNewSnapshotToFollowEvent(),
			registerServiceWeb(50, 3), // overlap existing node
			registerServiceWeb(50, 4),
			registerServiceWeb(50, 5),
			newEndOfSnapshotEvent(50))

		start := time.Now()
		req.QueryOptions.MinQueryIndex = 49
		req.QueryOptions.MaxQueryTime = time.Second
		result, err := store.Get(ctx, req)
		require.NoError(t, err)
		elapsed := time.Since(start)
		require.True(t, elapsed < time.Second,
			"Fetch should have returned before the timeout")

		require.Equal(t, uint64(50), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node3", "node4", "node5")
		expected.Index = 50
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)
	})
}
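
// newExpectedNodesInPeer builds the minimal IndexedCheckServiceNodes the tests
// expect from the streaming backend. Only the node name and peer name are
// populated because cmpCheckServiceNodeNames compares nodes by name alone.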
func newExpectedNodesInPeer(peerName string, nodes ...string) *structs.IndexedCheckServiceNodes {
	result := &structs.IndexedCheckServiceNodes{}
	result.QueryMeta.Backend = structs.QueryBackendStreaming
	for _, node := range nodes {
		result.Nodes = append(result.Nodes, structs.CheckServiceNode{
			Node: &structs.Node{
				Node:     node,
				PeerName: peerName,
			},
		})
	}
	return result
}

// cmpCheckServiceNodeNames does a shallow comparison of structs.CheckServiceNode
// by Node name.
var cmpCheckServiceNodeNames = cmp.Options{
	cmp.Comparer(func(x, y structs.CheckServiceNode) bool {
		return x.Node.Node == y.Node.Node
	}),
}

func TestHealthView_IntegrationWithStore_EventBatches(t *testing.T) {
	t.Run("local data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_EventBatches(t, structs.DefaultPeerKeyword)
	})

	t.Run("peered data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_EventBatches(t, "my-peer")
	})
}

func testHealthView_IntegrationWithStore_EventBatches(t *testing.T, peerName string) {
	namespace := getNamespace("ns3")
	client := newStreamClient(validateNamespace(namespace))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := submatview.NewStore(hclog.New(nil))

	// Create an initial snapshot of 3 instances but in a single event batch
	batchEv := newEventBatchWithEvents(
		newEventServiceHealthRegister(5, 1, "web", peerName),
		newEventServiceHealthRegister(5, 2, "web", peerName),
		newEventServiceHealthRegister(5, 3, "web", peerName))
	client.QueueEvents(
		batchEv,
		newEndOfSnapshotEvent(5))

	req := serviceRequestStub{
		serviceRequest: serviceRequest{
			ServiceSpecificRequest: structs.ServiceSpecificRequest{
				PeerName:       peerName,
				Datacenter:     "dc1",
				ServiceName:    "web",
				EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(namespace),
				QueryOptions:   structs.QueryOptions{MaxQueryTime: time.Second},
			},
		},
		streamClient: client,
	}

	testutil.RunStep(t, "full snapshot returned", func(t *testing.T) {
		result, err := store.Get(ctx, req)
		require.NoError(t, err)

		require.Equal(t, uint64(5), result.Index)

		expected := newExpectedNodesInPeer(peerName, "node1", "node2", "node3")
		expected.Index = 5
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)
		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "batched updates work too", func(t *testing.T) {
		// Simulate multiple registrations happening in one Txn (so all have same
		// index)
		batchEv := newEventBatchWithEvents(
			// Deregister an existing node
			newEventServiceHealthDeregister(20, 1, "web", peerName),
			// Register another
			newEventServiceHealthRegister(20, 4, "web", peerName),
		)
		client.QueueEvents(batchEv)
		req.QueryOptions.MaxQueryTime = time.Second
		result, err := store.Get(ctx, req)
		require.NoError(t, err)

		require.Equal(t, uint64(20), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node2", "node3", "node4")
		expected.Index = 20
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)

		req.QueryOptions.MinQueryIndex = result.Index
	})
}

func TestHealthView_IntegrationWithStore_Filtering(t *testing.T) {
	t.Run("local data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_Filtering(t, structs.DefaultPeerKeyword)
	})

	t.Run("peered data", func(t *testing.T) {
		testHealthView_IntegrationWithStore_Filtering(t, "my-peer")
	})
}

func testHealthView_IntegrationWithStore_Filtering(t *testing.T, peerName string) {
	namespace := getNamespace("ns3")
	streamClient := newStreamClient(validateNamespace(namespace))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := submatview.NewStore(hclog.New(nil))
	go store.Run(ctx)

	req := serviceRequestStub{
		serviceRequest: serviceRequest{
			ServiceSpecificRequest: structs.ServiceSpecificRequest{
				PeerName:       peerName,
				Datacenter:     "dc1",
				ServiceName:    "web",
				EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(namespace),
				QueryOptions: structs.QueryOptions{
					Filter:       `Node.Node == "node2"`,
					MaxQueryTime: time.Second,
				},
			},
		},
		streamClient: streamClient,
	}

	// Create an initial snapshot of 3 instances but in a single event batch
	batchEv := newEventBatchWithEvents(
		newEventServiceHealthRegister(5, 1, "web", peerName),
		newEventServiceHealthRegister(5, 2, "web", peerName),
		newEventServiceHealthRegister(5, 3, "web", peerName))
	streamClient.QueueEvents(
		batchEv,
		newEndOfSnapshotEvent(5))

	testutil.RunStep(t, "filtered snapshot returned", func(t *testing.T) {
		result, err := store.Get(ctx, req)
		require.NoError(t, err)

		require.Equal(t, uint64(5), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node2")
		expected.Index = 5
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)

		req.QueryOptions.MinQueryIndex = result.Index
	})

	testutil.RunStep(t, "filtered updates work too", func(t *testing.T) {
		// Simulate multiple registrations happening in one Txn (all have same index)
		batchEv := newEventBatchWithEvents(
			// Deregister an existing node
			newEventServiceHealthDeregister(20, 1, "web", peerName),
			// Register another
			newEventServiceHealthRegister(20, 4, "web", peerName),
		)
		streamClient.QueueEvents(batchEv)
		result, err := store.Get(ctx, req)
		require.NoError(t, err)

		require.Equal(t, uint64(20), result.Index)
		expected := newExpectedNodesInPeer(peerName, "node2")
		expected.Index = 20
		prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames)
	})
}

// serviceRequestStub overrides NewMaterializer so that tests can use a fake
// StreamClient.
type serviceRequestStub struct {
	serviceRequest
	streamClient submatview.StreamClient
}
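
// NewMaterializer overrides serviceRequest.NewMaterializer so that the store
// builds its materializer from the stubbed stream client, letting the tests
// drive the health view with scripted events instead of a live gRPC subscription.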
func (r serviceRequestStub) NewMaterializer() (submatview.Materializer, error) {
	view, err := NewHealthView(r.ServiceSpecificRequest)
	if err != nil {
		return nil, err
	}
	deps := submatview.Deps{
		View:    view,
		Logger:  hclog.New(nil),
		Request: NewMaterializerRequest(r.ServiceSpecificRequest),
	}
	return submatview.NewRPCMaterializer(r.streamClient, deps), nil
}
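
// newEventServiceHealthRegister returns a stream event that registers service
// svc on node<nodeNum> in the given peer, with all Raft indexes set to index.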
func newEventServiceHealthRegister(index uint64, nodeNum int, svc string, peerName string) *pbsubscribe.Event {
	node := fmt.Sprintf("node%d", nodeNum)
	nodeID := types.NodeID(fmt.Sprintf("11111111-2222-3333-4444-%012d", nodeNum))
	addr := fmt.Sprintf("10.10.%d.%d", nodeNum/256, nodeNum%256)

	return &pbsubscribe.Event{
		Index: index,
		Payload: &pbsubscribe.Event_ServiceHealth{
			ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
				Op: pbsubscribe.CatalogOp_Register,
				CheckServiceNode: &pbservice.CheckServiceNode{
					Node: &pbservice.Node{
						ID:         string(nodeID),
						Node:       node,
						Address:    addr,
						Datacenter: "dc1",
						PeerName:   peerName,
						RaftIndex: &pbcommon.RaftIndex{
							CreateIndex: index,
							ModifyIndex: index,
						},
					},
					Service: &pbservice.NodeService{
						ID:       svc,
						Service:  svc,
						PeerName: peerName,
						Port:     8080,
						RaftIndex: &pbcommon.RaftIndex{
							CreateIndex: index,
							ModifyIndex: index,
						},
					},
				},
			},
		},
	}
}
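
// newEventServiceHealthDeregister returns a stream event that deregisters
// service svc from node<nodeNum> in the given peer.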
func newEventServiceHealthDeregister(index uint64, nodeNum int, svc string, peerName string) *pbsubscribe.Event {
	node := fmt.Sprintf("node%d", nodeNum)

	return &pbsubscribe.Event{
		Index: index,
		Payload: &pbsubscribe.Event_ServiceHealth{
			ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
				Op: pbsubscribe.CatalogOp_Deregister,
				CheckServiceNode: &pbservice.CheckServiceNode{
					Node: &pbservice.Node{
						Node:     node,
						PeerName: peerName,
					},
					Service: &pbservice.NodeService{
						ID:       svc,
						Service:  svc,
						PeerName: peerName,
						Port:     8080,
						Weights: &pbservice.Weights{
							Passing: 1,
							Warning: 1,
						},
						RaftIndex: &pbcommon.RaftIndex{
							// The original insertion index since a delete doesn't update
							// this. This magic value came from state store tests where we
							// setup at index 10 and then mutate at index 100. It can be
							// modified by the caller later and makes it easier than having
							// yet another argument in the common case.
							CreateIndex: 10,
							ModifyIndex: 10,
						},
					},
				},
			},
		},
	}
}
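
// newEventBatchWithEvents wraps one or more events in a single EventBatch
// event, using the first event's index as the batch index.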
func newEventBatchWithEvents(first *pbsubscribe.Event, evs ...*pbsubscribe.Event) *pbsubscribe.Event {
	events := make([]*pbsubscribe.Event, len(evs)+1)
	events[0] = first
	for i := range evs {
		events[i+1] = evs[i]
	}
	return &pbsubscribe.Event{
		Index: first.Index,
		Payload: &pbsubscribe.Event_EventBatch{
			EventBatch: &pbsubscribe.EventBatch{Events: events},
		},
	}
}
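
// newEndOfSnapshotEvent returns the event a server sends once the initial
// snapshot has been delivered, stamped with the given index.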
func newEndOfSnapshotEvent(index uint64) *pbsubscribe.Event {
	return &pbsubscribe.Event{
		Index:   index,
		Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true},
	}
}
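
// newNewSnapshotToFollowEvent returns the event that tells a subscriber to
// discard its current view because a fresh snapshot follows.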
func newNewSnapshotToFollowEvent() *pbsubscribe.Event {
	return &pbsubscribe.Event{
		Payload: &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true},
	}
}

// getNamespace returns a namespace if namespace support exists, otherwise
// returns the empty string. It allows the same tests to work in both ce and ent
// without duplicating the tests.
func getNamespace(ns string) string {
	meta := structs.NewEnterpriseMetaInDefaultPartition(ns)
	return meta.NamespaceOrEmpty()
}
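
// validateNamespace returns the request validator used by the fake stream
// client: it rejects any SubscribeRequest whose namespace does not match ns.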
func validateNamespace(ns string) func(request *pbsubscribe.SubscribeRequest) error {
	return func(request *pbsubscribe.SubscribeRequest) error {
		if got := request.GetNamedSubject().GetNamespace(); got != ns {
			return fmt.Errorf("expected request.NamedSubject.Namespace %v, got %v", ns, got)
		}
		return nil
	}
}

func TestNewFilterEvaluator(t *testing.T) {
	type testCase struct {
		name     string
		req      structs.ServiceSpecificRequest
		data     structs.CheckServiceNode
		expected bool
	}

	fn := func(t *testing.T, tc testCase) {
		e, err := newFilterEvaluator(tc.req)
		require.NoError(t, err)
		actual, err := e.Evaluate(tc.data)
		require.NoError(t, err)
		require.Equal(t, tc.expected, actual)
	}

	var testCases = []testCase{
		{
			name: "single ServiceTags match",
			req: structs.ServiceSpecificRequest{
				ServiceTags: []string{"match"},
				TagFilter:   true,
			},
			data: structs.CheckServiceNode{
				Service: &structs.NodeService{
					Tags: []string{"extra", "match"},
				},
			},
			expected: true,
		},
		{
			name: "single deprecated ServiceTag match",
			req: structs.ServiceSpecificRequest{
				ServiceTag: "match",
				TagFilter:  true,
			},
			data: structs.CheckServiceNode{
				Service: &structs.NodeService{
					Tags: []string{"extra", "match"},
				},
			},
			expected: true,
		},
		{
			name: "single ServiceTags mismatch",
			req: structs.ServiceSpecificRequest{
				ServiceTags: []string{"other"},
				TagFilter:   true,
			},
			data: structs.CheckServiceNode{
				Service: &structs.NodeService{
					Tags: []string{"extra", "match"},
				},
			},
			expected: false,
		},
		{
			name: "multiple ServiceTags match",
			req: structs.ServiceSpecificRequest{
				ServiceTags: []string{"match", "second"},
				TagFilter:   true,
			},
			data: structs.CheckServiceNode{
				Service: &structs.NodeService{
					Tags: []string{"extra", "match", "second"},
				},
			},
			expected: true,
		},
		{
			name: "multiple ServiceTags mismatch",
			req: structs.ServiceSpecificRequest{
				ServiceTags: []string{"match", "not"},
				TagFilter:   true,
			},
			data: structs.CheckServiceNode{
				Service: &structs.NodeService{
					Tags: []string{"extra", "match"},
				},
			},
			expected: false,
		},
		{
			name: "single NodeMetaFilter match",
			req: structs.ServiceSpecificRequest{
				NodeMetaFilters: map[string]string{"meta1": "match"},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{
					Meta: map[string]string{
						"meta1": "match",
						"extra": "some",
					},
				},
			},
			expected: true,
		},
		{
			name: "single NodeMetaFilter mismatch",
			req: structs.ServiceSpecificRequest{
				NodeMetaFilters: map[string]string{
					"meta1": "match",
				},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{
					Meta: map[string]string{
						"meta1": "other",
						"extra": "some",
					},
				},
			},
			expected: false,
		},
		{
			name: "multiple NodeMetaFilter match",
			req: structs.ServiceSpecificRequest{
				NodeMetaFilters: map[string]string{"meta1": "match", "meta2": "a"},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{
					Meta: map[string]string{
						"meta1": "match",
						"meta2": "a",
						"extra": "some",
					},
				},
			},
			expected: true,
		},
		{
			name: "multiple NodeMetaFilter mismatch",
			req: structs.ServiceSpecificRequest{
				NodeMetaFilters: map[string]string{
					"meta1": "match",
					"meta2": "beta",
				},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{
					Meta: map[string]string{
						"meta1": "other",
						"meta2": "gamma",
					},
				},
			},
			expected: false,
		},
		{
			name: "QueryOptions.Filter match",
			req: structs.ServiceSpecificRequest{
				QueryOptions: structs.QueryOptions{
					Filter: `Node.Node == "node3"`,
				},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{Node: "node3"},
			},
			expected: true,
		},
		{
			name: "QueryOptions.Filter mismatch",
			req: structs.ServiceSpecificRequest{
				QueryOptions: structs.QueryOptions{
					Filter: `Node.Node == "node2"`,
				},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{Node: "node3"},
			},
			expected: false,
		},
		{
			name: "all match",
			req: structs.ServiceSpecificRequest{
				QueryOptions: structs.QueryOptions{
					Filter: `Node.Node == "node3"`,
				},
				ServiceTags: []string{"tag1", "tag2"},
				NodeMetaFilters: map[string]string{
					"meta1": "match1",
					"meta2": "match2",
				},
			},
			data: structs.CheckServiceNode{
				Node: &structs.Node{
					Node: "node3",
					Meta: map[string]string{
						"meta1": "match1",
						"meta2": "match2",
						"extra": "other",
					},
				},
				Service: &structs.NodeService{
					Tags: []string{"tag1", "tag2", "extra"},
				},
			},
			expected: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fn(t, tc)
		})
	}
}

func TestHealthView_SkipFilteringTerminatingGateways(t *testing.T) {
	view, err := NewHealthView(structs.ServiceSpecificRequest{
		ServiceName: "name",
		Connect:     true,
		QueryOptions: structs.QueryOptions{
			Filter: "Service.Meta.version == \"v1\"",
		},
	})
	require.NoError(t, err)

	err = view.Update([]*pbsubscribe.Event{{
		Index: 1,
		Payload: &pbsubscribe.Event_ServiceHealth{
			ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
				Op: pbsubscribe.CatalogOp_Register,
				CheckServiceNode: &pbservice.CheckServiceNode{
					Service: &pbservice.NodeService{
						Kind:    structs.TerminatingGateway,
						Service: "name",
						Address: "127.0.0.1",
						Port:    8443,
					},
				},
			},
		},
	}})
	require.NoError(t, err)

	node, ok := (view.Result(1)).(*structs.IndexedCheckServiceNodes)
	require.True(t, ok)

	require.Len(t, node.Nodes, 1)
	require.Equal(t, "127.0.0.1", node.Nodes[0].Service.Address)
	require.Equal(t, 8443, node.Nodes[0].Service.Port)
}
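
// TestConfigEntryListView_Reset verifies that HealthView.Reset clears any
// previously cached state so a new snapshot can repopulate the view.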
func TestConfigEntryListView_Reset(t *testing.T) {
	emptyMap := make(map[string]structs.CheckServiceNode)
	view := &HealthView{state: map[string]structs.CheckServiceNode{
		"test": {},
	}}
	view.Reset()
	require.Equal(t, emptyMap, view.state)
}