Fix issue where terminating gateway service resolvers weren't properly cleaned up (#16498)

* Fix issue where terminating gateway service resolvers weren't properly cleaned up

* Add integration test for cleaning up resolvers

* Add changelog entry

* Use state test and drop integration test
Andrew Stucki 2023-03-03 09:56:57 -05:00 committed by GitHub
parent 2916821b55
commit 6ca1c9f15c
4 changed files with 47 additions and 3 deletions

.changelog/16498.txt (new file)

@@ -0,0 +1,3 @@
+```release-note:bug
+proxycfg: fix a bug where terminating gateways were not cleaning up deleted service resolvers for their referenced services
+```
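Context for the fix: the terminating gateway handler in proxycfg watches the `service-resolver` config entry for each service the gateway references. When one of those resolvers is deleted, the watch fires with a nil `Entry`; previously the handler only ever wrote resolvers into the snapshot, so the stale entry lingered in `TerminatingGateway.ServiceResolvers` indefinitely.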


@@ -2093,6 +2093,28 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 				require.Equal(t, dbResolver.Entry, snap.TerminatingGateway.ServiceResolvers[db])
 			},
 		},
+		{
+			requiredWatches: map[string]verifyWatchRequest{
+				"service-resolver:" + db.String(): genVerifyResolverWatch("db", "dc1", structs.ServiceResolver),
+			},
+			events: []UpdateEvent{
+				{
+					CorrelationID: "service-resolver:" + db.String(),
+					Result: &structs.ConfigEntryResponse{
+						Entry: nil,
+					},
+					Err: nil,
+				},
+			},
+			verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
+				require.True(t, snap.Valid(), "gateway with service list is valid")
+				// Finally ensure we cleaned up the resolver
+				require.Equal(t, []structs.ServiceName{db}, snap.TerminatingGateway.ValidServices())
+				require.False(t, snap.TerminatingGateway.ServiceResolversSet[db])
+				require.Nil(t, snap.TerminatingGateway.ServiceResolvers[db])
+			},
+		},
 		{
 			events: []UpdateEvent{
 				{


@@ -354,8 +354,13 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u UpdateEvent
 		// There should only ever be one entry for a service resolver within a namespace
 		if resolver, ok := resp.Entry.(*structs.ServiceResolverConfigEntry); ok {
 			snap.TerminatingGateway.ServiceResolvers[sn] = resolver
+			snap.TerminatingGateway.ServiceResolversSet[sn] = true
+		} else {
+			// A deleted service resolver arrives as a nil entry, so the type
+			// assertion fails; clear the stale resolver out of the snapshot.
+			delete(snap.TerminatingGateway.ServiceResolvers, sn)
+			snap.TerminatingGateway.ServiceResolversSet[sn] = false
 		}
-		snap.TerminatingGateway.ServiceResolversSet[sn] = true
 	case strings.HasPrefix(u.CorrelationID, serviceIntentionsIDPrefix):
 		resp, ok := u.Result.(structs.Intentions)
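The crux of the fix is that a comma-ok type assertion on a nil interface value fails, which the handler now uses to distinguish an updated resolver from a deleted one. A minimal runnable sketch of the pattern, using illustrative stand-in types rather than the real `structs` package:

```go
package main

import "fmt"

// ConfigEntry and ServiceResolverConfigEntry are stand-ins for the real
// Consul types; the names and shapes here are illustrative only.
type ConfigEntry interface{ Kind() string }

type ServiceResolverConfigEntry struct{ Name string }

func (s *ServiceResolverConfigEntry) Kind() string { return "service-resolver" }

// handleResolverUpdate mirrors the fixed handler logic: store live
// resolvers, and drop entries whose watch delivered a nil ConfigEntry.
func handleResolverUpdate(
	resolvers map[string]*ServiceResolverConfigEntry,
	set map[string]bool,
	sn string,
	entry ConfigEntry,
) {
	if resolver, ok := entry.(*ServiceResolverConfigEntry); ok {
		resolvers[sn] = resolver
		set[sn] = true
	} else {
		// The assertion fails for a nil interface, signalling deletion.
		delete(resolvers, sn)
		set[sn] = false
	}
}

func main() {
	resolvers := map[string]*ServiceResolverConfigEntry{}
	set := map[string]bool{}

	handleResolverUpdate(resolvers, set, "db", &ServiceResolverConfigEntry{Name: "db"})
	fmt.Println(len(resolvers), set["db"]) // 1 true

	// The watch fires again once the resolver is deleted: Entry is nil.
	handleResolverUpdate(resolvers, set, "db", nil)
	fmt.Println(len(resolvers), set["db"]) // 0 false
}
```

Before the fix, only the first branch existed, so the snapshot kept the last-seen resolver forever.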


@@ -383,15 +383,27 @@ function assert_upstream_has_endpoints_in_status_once {
   GOT_COUNT=$(get_upstream_endpoint_in_status_count $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS)
+  echo "GOT: $GOT_COUNT"
   [ "$GOT_COUNT" -eq $EXPECT_COUNT ]
 }
 
+function assert_upstream_missing_once {
+  local HOSTPORT=$1
+  local CLUSTER_NAME=$2
+
+  run get_upstream_endpoint $HOSTPORT $CLUSTER_NAME
+  [ "$status" -eq 0 ]
+  echo "$output"
+  [ "" == "$output" ]
+}
+
 function assert_upstream_missing {
   local HOSTPORT=$1
   local CLUSTER_NAME=$2
-  run retry_default get_upstream_endpoint $HOSTPORT $CLUSTER_NAME
+  run retry_long assert_upstream_missing_once $HOSTPORT $CLUSTER_NAME
   echo "OUTPUT: $output $status"
-  [ "" == "$output" ]
+  [ "$status" -eq 0 ]
 }

@@ -400,6 +412,8 @@ function assert_upstream_has_endpoints_in_status {
   local HEALTH_STATUS=$3
   local EXPECT_COUNT=$4
   run retry_long assert_upstream_has_endpoints_in_status_once $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS $EXPECT_COUNT
+  echo "$output"
   [ "$status" -eq 0 ]
 }
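Design note: factoring the emptiness check into `assert_upstream_missing_once` means `retry_long` now retries the entire fetch-and-verify sequence rather than only the endpoint fetch, so an upstream that is still draining gets re-polled instead of immediately failing the assertion.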