agent: allow the /v1/connect/intentions/match endpoint to use the agent cache (#8875)

This is the recommended proxy integration API for listing intentions.
After the initial cache fill, it should not require an active connection
to the servers to resolve matches.
R.B. Boyer 2020-10-08 14:51:53 -05:00 committed by GitHub
parent 143bfb7462
commit 69af49441a
3 changed files with 74 additions and 5 deletions
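The point of the change is that a proxy integration can ask its local agent for intention matches with the `?cached` query parameter and be served from the agent cache once it has been primed. The sketch below is a rough client-side illustration, not part of the commit: the agent address (127.0.0.1:8500) and the destination name `web` are assumptions, while the endpoint path, the `by`/`name`/`cached` parameters, and the `X-Cache` response header come from the handler and test further down.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask the local agent which intentions match the destination service "web",
	// and allow the agent cache to answer (?cached). Address and name are assumed.
	url := "http://127.0.0.1:8500/v1/connect/intentions/match?by=destination&name=web&cached"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler reports MISS on the first (priming) request and HIT afterwards.
	fmt.Println("X-Cache:", resp.Header.Get("X-Cache"))

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```

Once the entry is cached, later calls report HIT, which is what lets the proxy avoid a hard dependency on a live server connection after the initial fill.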

.changelog/8875.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
agent: allow the /v1/connect/intentions/match endpoint to use the agent cache
```

agent/intentions_endpoint.go

@@ -5,6 +5,7 @@ import (
     "net/http"
     "strings"
 
+    cachetype "github.com/hashicorp/consul/agent/cache-types"
     "github.com/hashicorp/consul/agent/consul"
     "github.com/hashicorp/consul/agent/structs"
 )
@@ -135,19 +136,44 @@ func (s *HTTPHandlers) IntentionMatch(resp http.ResponseWriter, req *http.Request
         }
     }
 
-    var reply structs.IndexedIntentionMatches
-    if err := s.agent.RPC("Intention.Match", args, &reply); err != nil {
-        return nil, err
+    // Make the RPC request
+    var out structs.IndexedIntentionMatches
+    defer setMeta(resp, &out.QueryMeta)
+
+    if s.agent.config.HTTPUseCache && args.QueryOptions.UseCache {
+        raw, m, err := s.agent.cache.Get(req.Context(), cachetype.IntentionMatchName, args)
+        if err != nil {
+            return nil, err
+        }
+        defer setCacheMeta(resp, &m)
+
+        reply, ok := raw.(*structs.IndexedIntentionMatches)
+        if !ok {
+            // This should never happen, but we want to protect against panics
+            return nil, fmt.Errorf("internal error: response type not correct")
+        }
+        out = *reply
+    } else {
+    RETRY_ONCE:
+        if err := s.agent.RPC("Intention.Match", args, &out); err != nil {
+            return nil, err
+        }
+        if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact {
+            args.AllowStale = false
+            args.MaxStaleDuration = 0
+            goto RETRY_ONCE
+        }
     }
+    out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel()
 
     // We must have an identical count of matches
-    if len(reply.Matches) != len(names) {
+    if len(out.Matches) != len(names) {
         return nil, fmt.Errorf("internal error: match response count didn't match input count")
     }
 
     // Use empty list instead of nil.
     response := make(map[string]structs.Intentions)
-    for i, ixns := range reply.Matches {
+    for i, ixns := range out.Matches {
         response[names[i]] = ixns
     }
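The non-cached branch also picks up a retry-once guard: a stale read whose LastContact exceeds the caller's MaxStaleDuration is reissued without staleness (the RETRY_ONCE label and goto). The snippet below is a standalone sketch of that pattern only; queryOptions, queryMeta, and fetchWithRetryOnce are illustrative stand-ins, not Consul types.

```go
package main

import (
	"fmt"
	"time"
)

// queryOptions and queryMeta stand in for the request/response metadata
// carried on the RPC; they are assumptions for this sketch.
type queryOptions struct {
	AllowStale       bool
	MaxStaleDuration time.Duration
}

type queryMeta struct {
	LastContact time.Duration // how stale the answering server's data was
}

// fetchWithRetryOnce issues a possibly-stale query and, if the answer is
// staler than the caller tolerates, retries exactly once with strong
// consistency.
func fetchWithRetryOnce(opts queryOptions, rpc func(queryOptions) (queryMeta, error)) (queryMeta, error) {
	meta, err := rpc(opts)
	if err != nil {
		return queryMeta{}, err
	}
	if opts.AllowStale && opts.MaxStaleDuration > 0 && opts.MaxStaleDuration < meta.LastContact {
		// Too stale: drop the stale options and ask again, consistently.
		opts.AllowStale = false
		opts.MaxStaleDuration = 0
		return rpc(opts)
	}
	return meta, nil
}

func main() {
	// Fake RPC that pretends the first (stale) answer is 10s old.
	calls := 0
	rpc := func(o queryOptions) (queryMeta, error) {
		calls++
		if o.AllowStale {
			return queryMeta{LastContact: 10 * time.Second}, nil
		}
		return queryMeta{}, nil
	}

	meta, err := fetchWithRetryOnce(queryOptions{AllowStale: true, MaxStaleDuration: 5 * time.Second}, rpc)
	fmt.Println(calls, meta.LastContact, err) // prints: 2 0s <nil>
}
```

Bounding the retry to a single consistent re-query keeps the worst case at two RPCs while still letting most reads be served stale.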

agent/intentions_endpoint_test.go

@@ -178,6 +178,46 @@ func TestIntentionMatch(t *testing.T) {
         require.Equal(t, expected, actual)
     })
+
+    t.Run("success with cache", func(t *testing.T) {
+        // First request is a MISS, but it primes the cache for the second attempt
+        for i := 0; i < 2; i++ {
+            req, err := http.NewRequest("GET", "/v1/connect/intentions/match?by=destination&name=bar&cached", nil)
+            require.NoError(t, err)
+            resp := httptest.NewRecorder()
+            obj, err := a.srv.IntentionMatch(resp, req)
+            require.NoError(t, err)
+
+            if i == 0 {
+                // Should be a cache miss
+                require.Equal(t, "MISS", resp.Header().Get("X-Cache"))
+            } else {
+                // Should be a cache HIT now!
+                require.Equal(t, "HIT", resp.Header().Get("X-Cache"))
+            }
+
+            value := obj.(map[string]structs.Intentions)
+            require.Len(t, value, 1)
+
+            var actual [][]string
+            expected := [][]string{
+                {"default", "*", "default", "bar"},
+                {"default", "*", "default", "*"},
+            }
+            for _, ixn := range value["bar"] {
+                actual = append(actual, []string{
+                    ixn.SourceNS,
+                    ixn.SourceName,
+                    ixn.DestinationNS,
+                    ixn.DestinationName,
+                })
+            }
+            require.Equal(t, expected, actual)
+        }
+    })
 }
 
 func TestIntentionCheck(t *testing.T) {