test: fix flaky TestHealthServiceNodes_NodeMetaFilter by waiting until the streaming subsystem has a valid grpc connection (#15019)

Also potentially unflakes TestHealthIngressServiceNodes for similar
reasons.
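
In outline, the change gates the test's HTTP reads on the streaming backend actually having a ready gRPC connection. The snippets below are only a sketch that mirrors the helper and method added in the diff further down (identifiers taken from that diff):

    // Test-side helper: retry until the agent's health RPC client reports
    // that its streaming (gRPC) connection is ready.
    func waitForStreamingToBeReady(t *testing.T, a *TestAgent) {
        retry.Run(t, func(r *retry.R) {
            require.True(r, a.rpcClientHealth.IsReadyForStreaming())
        })
    }

    // Client-side check in the health rpc client package: a nil or
    // not-yet-ready connection reports false, so the retry keeps polling.
    func (c *Client) IsReadyForStreaming() bool {
        conn := c.MaterializerDeps.Conn
        if conn == nil {
            return false
        }
        return conn.GetState() == connectivity.Ready
    }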
R.B. Boyer 2022-10-24 13:09:53 -05:00 committed by GitHub
parent 87432a8dd4
commit bf05547080
2 changed files with 83 additions and 56 deletions

@@ -1128,29 +1128,35 @@ func TestHealthServiceNodes_NodeMetaFilter(t *testing.T) {
 	}
 	for _, tst := range tests {
 		t.Run(tst.name, func(t *testing.T) {
 			a := NewTestAgent(t, tst.config)
 			defer a.Shutdown()
-			testrpc.WaitForLeader(t, a.RPC, "dc1")
+			testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+			waitForStreamingToBeReady(t, a)
-			req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1&node-meta=somekey:somevalue", nil)
+			encodedMeta := url.QueryEscape("somekey:somevalue")
+			var lastIndex uint64
+			testutil.RunStep(t, "do initial read", func(t *testing.T) {
+				u := fmt.Sprintf("/v1/health/service/test?dc=dc1&node-meta=%s", encodedMeta)
+				req, err := http.NewRequest("GET", u, nil)
+				require.NoError(t, err)
 				resp := httptest.NewRecorder()
 				obj, err := a.srv.HealthServiceNodes(resp, req)
-			if err != nil {
-				t.Fatalf("err: %v", err)
-			}
 				assertIndex(t, resp)
-			cIndex, err := strconv.ParseUint(resp.Header().Get("X-Consul-Index"), 10, 64)
 				require.NoError(t, err)
+				lastIndex = getIndex(t, resp)
+				require.True(t, lastIndex > 0)
 				// Should be a non-nil empty list
 				nodes := obj.(structs.CheckServiceNodes)
-			if nodes == nil || len(nodes) != 0 {
-				t.Fatalf("bad: %v", obj)
-			}
+				require.NotNil(t, nodes)
+				require.Empty(t, nodes)
+			})
+			require.True(t, lastIndex > 0, "lastindex = %d", lastIndex)
+			testutil.RunStep(t, "register item 1", func(t *testing.T) {
 				args := &structs.RegisterRequest{
 					Datacenter: "dc1",
 					Node: "bar",
@@ -1162,12 +1168,12 @@ func TestHealthServiceNodes_NodeMetaFilter(t *testing.T) {
 					},
 				}
-			var out struct{}
-			if err := a.RPC("Catalog.Register", args, &out); err != nil {
-				t.Fatalf("err: %v", err)
-			}
+				var ignored struct{}
+				require.NoError(t, a.RPC("Catalog.Register", args, &ignored))
+			})
-			args = &structs.RegisterRequest{
+			testutil.RunStep(t, "register item 2", func(t *testing.T) {
+				args := &structs.RegisterRequest{
 					Datacenter: "dc1",
 					Node: "bar2",
 					Address: "127.0.0.1",
@@ -1177,28 +1183,30 @@ func TestHealthServiceNodes_NodeMetaFilter(t *testing.T) {
 						Service: "test",
 					},
 				}
+				var ignored struct{}
+				require.NoError(t, a.RPC("Catalog.Register", args, &ignored))
+			})
-			if err := a.RPC("Catalog.Register", args, &out); err != nil {
-				t.Fatalf("err: %v", err)
-			}
+			testutil.RunStep(t, "do blocking read", func(t *testing.T) {
+				u := fmt.Sprintf("/v1/health/service/test?dc=dc1&node-meta=%s&index=%d&wait=100ms&cached", encodedMeta, lastIndex)
-			req, _ = http.NewRequest("GET", fmt.Sprintf("/v1/health/service/test?dc=dc1&node-meta=somekey:somevalue&index=%d&wait=10ms", cIndex), nil)
-			resp = httptest.NewRecorder()
-			obj, err = a.srv.HealthServiceNodes(resp, req)
-			if err != nil {
-				t.Fatalf("err: %v", err)
-			}
+				req, err := http.NewRequest("GET", u, nil)
+				require.NoError(t, err)
+				resp := httptest.NewRecorder()
+				obj, err := a.srv.HealthServiceNodes(resp, req)
+				require.NoError(t, err)
 				assertIndex(t, resp)
 				// Should be a non-nil empty list for checks
-			nodes = obj.(structs.CheckServiceNodes)
-			if len(nodes) != 1 || nodes[0].Checks == nil || len(nodes[0].Checks) != 0 {
-				t.Fatalf("bad: %v", obj)
-			}
+				nodes := obj.(structs.CheckServiceNodes)
+				require.Len(t, nodes, 1)
+				require.NotNil(t, nodes[0].Checks)
+				require.Empty(t, nodes[0].Checks)
 				require.Equal(t, tst.queryBackend, resp.Header().Get("X-Consul-Query-Backend"))
+			})
 		})
 	}
 }
@@ -1637,6 +1645,7 @@ func testHealthIngressServiceNodes(t *testing.T, agentHCL string) {
 	a := NewTestAgent(t, agentHCL)
 	defer a.Shutdown()
 	testrpc.WaitForLeader(t, a.RPC, "dc1")
+	waitForStreamingToBeReady(t, a)
 	// Register gateway
 	gatewayArgs := structs.TestRegisterIngressGateway(t)
@@ -2038,3 +2047,9 @@ func peerQuerySuffix(peerName string) string {
 	}
 	return "&peer=" + peerName
 }
+
+func waitForStreamingToBeReady(t *testing.T, a *TestAgent) {
+	retry.Run(t, func(r *retry.R) {
+		require.True(r, a.rpcClientHealth.IsReadyForStreaming())
+	})
+}

@@ -3,6 +3,8 @@ package health
 import (
 	"context"
+	"google.golang.org/grpc/connectivity"
+
 	"github.com/hashicorp/consul/agent/cache"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/agent/submatview"
@@ -34,6 +36,16 @@ type MaterializedViewStore interface {
 	NotifyCallback(ctx context.Context, req submatview.Request, cID string, cb cache.Callback) error
 }
+
+// IsReadyForStreaming will indicate if the underlying gRPC connection is ready.
+func (c *Client) IsReadyForStreaming() bool {
+	conn := c.MaterializerDeps.Conn
+	if conn == nil {
+		return false
+	}
+	return conn.GetState() == connectivity.Ready
+}
+
 func (c *Client) ServiceNodes(
 	ctx context.Context,
 	req structs.ServiceSpecificRequest,