ea93c7b29c
## Backport This PR is auto-generated from #17754 to be assessed for backporting due to the inclusion of the label backport/1.16. 🚨 >**Warning** automatic cherry-pick of commits failed. If the first commit failed, you will see a blank no-op commit below. If at least one commit succeeded, you will see the cherry-picked commits up to, _not including_, the commit where the merge conflict occurred. The person who merged in the original PR is: @WenInCode This person should manually cherry-pick the original PR into a new backport PR, and close this one when the manual backport PR is merged in. > merge conflict error: unable to process merge commit: "1c757b8a2c1160ad53421b7b8bd7f74b205c4b89", automatic backport requires rebase workflow The below text is copied from the body of the original PR. --- fixes #17097 Consul version of each nodes in UI nodes section @jkirschner-hashicorp @huikang @team @Maintainers Updated consul version in the request to register consul. Added this as Node MetaData. Fetching this new metadata in UI <img width="1512" alt="Screenshot 2023-06-15 at 4 21 33 PM" src="https://github.com/hashicorp/consul/assets/3139634/94f7cf6b-701f-4230-b9f7-d8c4342d0737"> Also made this backward compatible and tested. Backward compatible in this context means - If consul binary with above PR changes is deployed to one of node, and if UI is run from this node, then the version of not only current (upgraded) node is displayed in UI , but also of older nodes given that they are consul servers only. For older (non-server or client) nodes the version is not added in NodeMeta Data and hence the version will not be displayed for them. If a old node is consul server, the version will be displayed. As the endpoint - "v1/internal/ui/nodes?dc=dc1" was already returning version in service meta. This is made use of in current UI changes. 
<img width="1480" alt="Screenshot 2023-06-16 at 6 58 32 PM" src="https://github.com/hashicorp/consul/assets/3139634/257942f4-fbed-437d-a492-37849d2bec4c"> --- <details> <summary> Overview of commits </summary> - 931fdfc7ecdc26bb7cc20b698c5e14c1b65fcc6e - b3e2ec1ccaca3832a088ffcac54257fa6653c6c1 - 8d0e9a54907039c09330c6cd7b9e761566af6856 - 04e5d88cca37821f6667be381c16aaa5958b5c92 - 28286a2e98f8cd66ef8593c2e2893b4db6080417 - 43e50ad38207952a9c4d04d45d08b6b8f71b31fe - 0cf1b7077cdf255596254d9dc1624a269c42b94d - 27f34ce1c2973591f75b1e38a81ccbe7cee6cee3 - 2ac76d62b8cbae76b1a903021aebb9b865e29d6e - 3d618df9ef1d10dd5056c8b1ed865839c553a0e0 - 1c757b8a2c1160ad53421b7b8bd7f74b205c4b89 - 23ce82b4cee8f74dd634dbe145313e9a56c0077d - 4dc1c9b4c5aafdb8883ef977dfa9b39da138b6cb - 85a12a92528bfa267a039a9bb258170be914abf7 - 25d30a3fa980d130a30d445d26d47ef2356cb553 - 7f1d6192dce3352e92307175848b89f91e728c24 - 5174cbff84b0795d4cb36eb8980d0d5336091ac9 </details> --------- Co-authored-by: Vijay Srinivas <vijayraghav22@gmail.com> Co-authored-by: John Murret <john.murret@hashicorp.com> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com>
1241 lines
29 KiB
Go
1241 lines
29 KiB
Go
// Copyright (c) HashiCorp, Inc.
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
package api
|
|
|
|
import (
|
|
"reflect"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
|
)
|
|
|
|
func TestAPI_CatalogDatacenters(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
retry.Run(t, func(r *retry.R) {
|
|
datacenters, err := catalog.Datacenters()
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
if len(datacenters) < 1 {
|
|
r.Fatal("got 0 datacenters want at least one")
|
|
}
|
|
})
|
|
}
|
|
|
|
func TestAPI_CatalogNodes(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
s.WaitForSerfCheck(t)
|
|
catalog := c.Catalog()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
nodes, meta, err := catalog.Nodes(nil)
|
|
require.NoError(r, err)
|
|
require.Len(r, nodes, 1)
|
|
require.True(r, meta.LastIndex >= 1, "Last index must be greater than 1")
|
|
|
|
// The raft indexes are not relevant for this test.
|
|
got := nodes[0]
|
|
got.CreateIndex = 0
|
|
got.ModifyIndex = 0
|
|
|
|
want := &Node{
|
|
ID: s.Config.NodeID,
|
|
Node: s.Config.NodeName,
|
|
Partition: defaultPartition,
|
|
Address: "127.0.0.1",
|
|
Datacenter: "dc1",
|
|
TaggedAddresses: map[string]string{
|
|
"lan": "127.0.0.1",
|
|
"lan_ipv4": "127.0.0.1",
|
|
"wan": "127.0.0.1",
|
|
"wan_ipv4": "127.0.0.1",
|
|
},
|
|
Meta: map[string]string{
|
|
"consul-network-segment": "",
|
|
"consul-version": s.Config.Version,
|
|
},
|
|
}
|
|
require.Equal(r, want, got)
|
|
})
|
|
}
|
|
|
|
func TestAPI_CatalogNodes_MetaFilter(t *testing.T) {
|
|
t.Parallel()
|
|
meta := map[string]string{"somekey": "somevalue"}
|
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
|
conf.NodeMeta = meta
|
|
})
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
// Make sure we get the node back when filtering by its metadata
|
|
retry.Run(t, func(r *retry.R) {
|
|
nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: meta})
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if meta.LastIndex == 0 {
|
|
r.Fatalf("Bad: %v", meta)
|
|
}
|
|
|
|
if len(nodes) == 0 {
|
|
r.Fatalf("Bad: %v", nodes)
|
|
}
|
|
|
|
if _, ok := nodes[0].TaggedAddresses["wan"]; !ok {
|
|
r.Fatalf("Bad: %v", nodes[0])
|
|
}
|
|
|
|
if v, ok := nodes[0].Meta["somekey"]; !ok || v != "somevalue" {
|
|
r.Fatalf("Bad: %v", nodes[0].Meta)
|
|
}
|
|
|
|
if nodes[0].Datacenter != "dc1" {
|
|
r.Fatalf("Bad datacenter: %v", nodes[0])
|
|
}
|
|
})
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
// Get nothing back when we use an invalid filter
|
|
nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}})
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if meta.LastIndex == 0 {
|
|
r.Fatalf("Bad: %v", meta)
|
|
}
|
|
|
|
if len(nodes) != 0 {
|
|
r.Fatalf("Bad: %v", nodes)
|
|
}
|
|
})
|
|
}
|
|
|
|
func TestAPI_CatalogNodes_Filter(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
// this sets up the catalog entries with things we can filter on
|
|
testNodeServiceCheckRegistrations(t, c, "dc1")
|
|
|
|
catalog := c.Catalog()
|
|
nodes, _, err := catalog.Nodes(nil)
|
|
require.NoError(t, err)
|
|
// 3 nodes inserted by the setup func above plus the agent itself
|
|
require.Len(t, nodes, 4)
|
|
|
|
// now filter down to just a couple nodes with a specific meta entry
|
|
nodes, _, err = catalog.Nodes(&QueryOptions{Filter: "Meta.env == production"})
|
|
require.NoError(t, err)
|
|
require.Len(t, nodes, 2)
|
|
|
|
// filter out everything that isn't bar or baz
|
|
nodes, _, err = catalog.Nodes(&QueryOptions{Filter: "Node == bar or Node == baz"})
|
|
require.NoError(t, err)
|
|
require.Len(t, nodes, 2)
|
|
|
|
// check for non-existent ip for the node addr
|
|
nodes, _, err = catalog.Nodes(&QueryOptions{Filter: "Address == `10.0.0.1`"})
|
|
require.NoError(t, err)
|
|
require.Empty(t, nodes)
|
|
}
|
|
|
|
func TestAPI_CatalogServices(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
retry.Run(t, func(r *retry.R) {
|
|
services, meta, err := catalog.Services(nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if meta.LastIndex == 0 {
|
|
r.Fatalf("Bad: %v", meta)
|
|
}
|
|
|
|
if len(services) == 0 {
|
|
r.Fatalf("Bad: %v", services)
|
|
}
|
|
})
|
|
}
|
|
|
|
// TestAPI_CatalogServices_NodeMetaFilter verifies that the service
// listing can be narrowed by node metadata: a matching NodeMeta filter
// returns the agent's services, while a filter matching no node
// returns an empty set.
func TestAPI_CatalogServices_NodeMetaFilter(t *testing.T) {
	t.Parallel()
	// Start the agent with a known node-meta pair so we can filter on it.
	meta := map[string]string{"somekey": "somevalue"}
	c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
		conf.NodeMeta = meta
	})
	defer s.Stop()

	catalog := c.Catalog()
	// Make sure we get the service back when filtering by the node's metadata
	retry.Run(t, func(r *retry.R) {
		// Note: the returned query meta shadows the outer node-meta map here.
		services, meta, err := catalog.Services(&QueryOptions{NodeMeta: meta})
		if err != nil {
			r.Fatal(err)
		}

		// A zero index would mean the query never hit the state store.
		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		if len(services) == 0 {
			r.Fatalf("Bad: %v", services)
		}
	})

	retry.Run(t, func(r *retry.R) {
		// Get nothing back when using an invalid filter
		services, meta, err := catalog.Services(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}})
		if err != nil {
			r.Fatal(err)
		}

		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		if len(services) != 0 {
			r.Fatalf("Bad: %v", services)
		}
	})
}
|
|
|
|
func TestAPI_CatalogService(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
services, meta, err := catalog.Service("consul", "", nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if meta.LastIndex == 0 {
|
|
r.Fatalf("Bad: %v", meta)
|
|
}
|
|
|
|
if len(services) == 0 {
|
|
r.Fatalf("Bad: %v", services)
|
|
}
|
|
|
|
if services[0].Datacenter != "dc1" {
|
|
r.Fatalf("Bad datacenter: %v", services[0])
|
|
}
|
|
})
|
|
}
|
|
|
|
// TestAPI_CatalogServiceUnmanagedProxy registers an unmanaged connect
// proxy through the catalog API and verifies the service query returns
// the full proxy configuration intact.
func TestAPI_CatalogServiceUnmanagedProxy(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	proxyReg := testUnmanagedProxyRegistration(t)

	retry.Run(t, func(r *retry.R) {
		// Registration is retried together with the read, in case the
		// server is not yet ready to accept writes.
		_, err := catalog.Register(proxyReg, nil)
		r.Check(err)

		services, meta, err := catalog.Service("web-proxy", "", nil)
		if err != nil {
			r.Fatal(err)
		}

		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		if len(services) == 0 {
			r.Fatalf("Bad: %v", services)
		}

		if services[0].Datacenter != "dc1" {
			r.Fatalf("Bad datacenter: %v", services[0])
		}

		// The proxy config must round-trip through register/query unchanged.
		if !reflect.DeepEqual(services[0].ServiceProxy, proxyReg.Service.Proxy) {
			r.Fatalf("bad proxy.\nwant: %v\n got: %v", proxyReg.Service.Proxy,
				services[0].ServiceProxy)
		}
	})
}
|
|
|
|
// TestAPI_CatalogServiceCached issues a catalog query with UseCache
// set, waits for a successful (populated) response, and then verifies
// the immediate follow-up request is served from the agent cache with
// zero cache age.
func TestAPI_CatalogServiceCached(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	q := &QueryOptions{
		UseCache: true,
	}

	// Retry until the first cached query succeeds and is populated;
	// this primes the agent cache for the assertion below.
	retry.Run(t, func(r *retry.R) {
		services, meta, err := catalog.Service("consul", "", q)
		if err != nil {
			r.Fatal(err)
		}

		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		if len(services) == 0 {
			r.Fatalf("Bad: %v", services)
		}

		if services[0].Datacenter != "dc1" {
			r.Fatalf("Bad datacenter: %v", services[0])
		}
	})

	// Got success, next hit must be cache hit
	_, meta, err := catalog.Service("consul", "", q)
	require.NoError(t, err)
	require.True(t, meta.CacheHit)
	require.Equal(t, time.Duration(0), meta.CacheAge)
}
|
|
|
|
func TestAPI_CatalogService_SingleTag(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
|
conf.NodeName = "node123"
|
|
})
|
|
defer s.Stop()
|
|
|
|
agent := c.Agent()
|
|
catalog := c.Catalog()
|
|
locality := &Locality{Region: "us-west-1", Zone: "us-west-1a"}
|
|
reg := &AgentServiceRegistration{
|
|
Name: "foo",
|
|
ID: "foo1",
|
|
Tags: []string{"bar"},
|
|
Locality: locality,
|
|
}
|
|
require.NoError(t, agent.ServiceRegister(reg))
|
|
defer agent.ServiceDeregister("foo1")
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
services, meta, err := catalog.Service("foo", "bar", nil)
|
|
require.NoError(r, err)
|
|
require.NotEqual(r, meta.LastIndex, 0)
|
|
require.Len(r, services, 1)
|
|
require.Equal(r, services[0].ServiceID, "foo1")
|
|
require.Equal(r, locality, services[0].ServiceLocality)
|
|
})
|
|
}
|
|
|
|
// TestAPI_CatalogService_MultipleTags registers two instances of the
// same service with overlapping tag sets and verifies that
// ServiceMultipleTags requires ALL supplied tags to match: the shared
// "bar" tag matches both instances, while adding "v2" narrows the
// result to the single instance that carries both tags.
func TestAPI_CatalogService_MultipleTags(t *testing.T) {
	t.Parallel()
	c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
		conf.NodeName = "node123"
	})
	defer s.Stop()

	agent := c.Agent()
	catalog := c.Catalog()

	// Make two services with a check
	reg := &AgentServiceRegistration{
		Name: "foo",
		ID:   "foo1",
		Tags: []string{"bar"},
	}
	require.NoError(t, agent.ServiceRegister(reg))
	defer agent.ServiceDeregister("foo1")

	reg2 := &AgentServiceRegistration{
		Name: "foo",
		ID:   "foo2",
		Tags: []string{"bar", "v2"},
	}
	require.NoError(t, agent.ServiceRegister(reg2))
	defer agent.ServiceDeregister("foo2")

	// Test searching with one tag (two results)
	retry.Run(t, func(r *retry.R) {
		services, meta, err := catalog.ServiceMultipleTags("foo", []string{"bar"}, nil)

		require.NoError(r, err)
		require.NotEqual(r, meta.LastIndex, 0)

		// Should be 2 services with the `bar` tag
		require.Len(r, services, 2)
	})

	// Test searching with two tags (one result)
	retry.Run(t, func(r *retry.R) {
		services, meta, err := catalog.ServiceMultipleTags("foo", []string{"bar", "v2"}, nil)

		require.NoError(r, err)
		require.NotEqual(r, meta.LastIndex, 0)

		// Should be exactly 1 service, named "foo2"
		require.Len(r, services, 1)
		require.Equal(r, services[0].ServiceID, "foo2")
	})
}
|
|
|
|
func TestAPI_CatalogService_NodeMetaFilter(t *testing.T) {
|
|
t.Parallel()
|
|
meta := map[string]string{"somekey": "somevalue"}
|
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
|
conf.NodeMeta = meta
|
|
})
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
retry.Run(t, func(r *retry.R) {
|
|
services, meta, err := catalog.Service("consul", "", &QueryOptions{NodeMeta: meta})
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if meta.LastIndex == 0 {
|
|
r.Fatalf("Bad: %v", meta)
|
|
}
|
|
|
|
if len(services) == 0 {
|
|
r.Fatalf("Bad: %v", services)
|
|
}
|
|
|
|
if services[0].Datacenter != "dc1" {
|
|
r.Fatalf("Bad datacenter: %v", services[0])
|
|
}
|
|
})
|
|
}
|
|
|
|
func TestAPI_CatalogService_Filter(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
// this sets up the catalog entries with things we can filter on
|
|
testNodeServiceCheckRegistrations(t, c, "dc1")
|
|
|
|
catalog := c.Catalog()
|
|
|
|
services, _, err := catalog.Service("redis", "", &QueryOptions{Filter: "ServiceMeta.version == 1"})
|
|
require.NoError(t, err)
|
|
// finds it on both foo and bar nodes
|
|
require.Len(t, services, 2)
|
|
|
|
require.Condition(t, func() bool {
|
|
return (services[0].Node == "foo" && services[1].Node == "bar") ||
|
|
(services[0].Node == "bar" && services[1].Node == "foo")
|
|
})
|
|
|
|
services, _, err = catalog.Service("redis", "", &QueryOptions{Filter: "NodeMeta.os != windows"})
|
|
require.NoError(t, err)
|
|
// finds both service instances on foo
|
|
require.Len(t, services, 2)
|
|
require.Equal(t, "foo", services[0].Node)
|
|
require.Equal(t, "foo", services[1].Node)
|
|
|
|
services, _, err = catalog.Service("redis", "", &QueryOptions{Filter: "Address == `10.0.0.1`"})
|
|
require.NoError(t, err)
|
|
require.Empty(t, services)
|
|
|
|
}
|
|
|
|
func testUpstreams(t *testing.T) []Upstream {
|
|
return []Upstream{
|
|
{
|
|
DestinationName: "db",
|
|
LocalBindPort: 9191,
|
|
Config: map[string]interface{}{
|
|
"connect_timeout_ms": float64(1000),
|
|
},
|
|
},
|
|
{
|
|
DestinationType: UpstreamDestTypePreparedQuery,
|
|
DestinationName: "geo-cache",
|
|
LocalBindPort: 8181,
|
|
},
|
|
}
|
|
}
|
|
|
|
func testExpectUpstreamsWithDefaults(t *testing.T, upstreams []Upstream) []Upstream {
|
|
ups := make([]Upstream, len(upstreams))
|
|
for i := range upstreams {
|
|
ups[i] = upstreams[i]
|
|
// Fill in default fields we expect to have back explicitly in a response
|
|
if ups[i].DestinationType == "" {
|
|
ups[i].DestinationType = UpstreamDestTypeService
|
|
}
|
|
}
|
|
return ups
|
|
}
|
|
|
|
// testUnmanagedProxy returns a fully configured external proxy service suitable
// for checking that all the config fields make it back in a response intact.
// Every optional proxy field (upstreams, transparent mode and its listener
// port) is populated so round-trip tests can detect dropped fields.
func testUnmanagedProxy(t *testing.T) *AgentService {
	return &AgentService{
		Kind: ServiceKindConnectProxy,
		Proxy: &AgentServiceConnectProxyConfig{
			DestinationServiceName: "web",
			DestinationServiceID:   "web1",
			LocalServiceAddress:    "127.0.0.2",
			LocalServicePort:       8080,
			Upstreams:              testUpstreams(t),
			Mode:                   ProxyModeTransparent,
			TransparentProxy: &TransparentProxyConfig{
				OutboundListenerPort: 808,
			},
		},
		ID:      "web-proxy1",
		Service: "web-proxy",
		Port:    8001,
	}
}
|
|
|
|
// testUnmanagedProxyRegistration returns a *CatalogRegistration for a fully
// configured external proxy. Tests typically override Node/SkipNodeUpdate
// before registering it against the agent's own node.
func testUnmanagedProxyRegistration(t *testing.T) *CatalogRegistration {
	return &CatalogRegistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		Service:    testUnmanagedProxy(t),
	}
}
|
|
|
|
// TestAPI_CatalogConnect registers a destination service plus an
// unmanaged proxy for it, then queries the Connect endpoint and
// verifies the result is the proxy instance (its port and config),
// not the destination service itself.
func TestAPI_CatalogConnect(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	// Register service and proxy instances to test against.
	proxyReg := testUnmanagedProxyRegistration(t)

	proxy := proxyReg.Service

	// The destination service the proxy fronts, derived from the
	// proxy's own destination fields so the two stay linked.
	service := &AgentService{
		ID:      proxyReg.Service.Proxy.DestinationServiceID,
		Service: proxyReg.Service.Proxy.DestinationServiceName,
		Port:    8000,
	}
	check := &AgentCheck{
		Node:      "foobar",
		CheckID:   "service:" + service.ID,
		Name:      "Redis health check",
		Notes:     "Script based health check",
		Status:    HealthPassing,
		ServiceID: service.ID,
	}

	reg := &CatalogRegistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		Service:    service,
		Check:      check,
	}

	retry.Run(t, func(r *retry.R) {
		if _, err := catalog.Register(reg, nil); err != nil {
			r.Fatal(err)
		}
		if _, err := catalog.Register(proxyReg, nil); err != nil {
			r.Fatal(err)
		}

		// Connect queries are keyed by the destination service name.
		services, meta, err := catalog.Connect(proxyReg.Service.Proxy.DestinationServiceName, "", nil)
		if err != nil {
			r.Fatal(err)
		}

		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		if len(services) == 0 {
			r.Fatalf("Bad: %v", services)
		}

		if services[0].Datacenter != "dc1" {
			r.Fatalf("Bad datacenter: %v", services[0])
		}

		// The proxy's port (not the destination service's) must be returned.
		if services[0].ServicePort != proxy.Port {
			r.Fatalf("Returned port should be for proxy: %v", services[0])
		}

		if !reflect.DeepEqual(services[0].ServiceProxy, proxy.Proxy) {
			r.Fatalf("Returned proxy config should match:\nWant: %v\n Got: %v",
				proxy.Proxy, services[0].ServiceProxy)
		}
	})
}
|
|
|
|
// TestAPI_CatalogConnectNative registers a connect-native service (no
// sidecar proxy) and verifies the Connect query returns the service
// itself, including its own port.
func TestAPI_CatalogConnectNative(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	// Register service and proxy instances to test against.
	service := &AgentService{
		ID:      "redis1",
		Service: "redis",
		Port:    8000,
		// Native means the service speaks Connect itself; no proxy needed.
		Connect: &AgentServiceConnect{Native: true},
	}
	check := &AgentCheck{
		Node:      "foobar",
		CheckID:   "service:redis1",
		Name:      "Redis health check",
		Notes:     "Script based health check",
		Status:    HealthPassing,
		ServiceID: "redis1",
	}

	reg := &CatalogRegistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		Service:    service,
		Check:      check,
	}

	retry.Run(t, func(r *retry.R) {
		if _, err := catalog.Register(reg, nil); err != nil {
			r.Fatal(err)
		}

		services, meta, err := catalog.Connect("redis", "", nil)
		if err != nil {
			r.Fatal(err)
		}

		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		if len(services) == 0 {
			r.Fatalf("Bad: %v", services)
		}

		if services[0].Datacenter != "dc1" {
			r.Fatalf("Bad datacenter: %v", services[0])
		}

		// For a native service the service's own port comes back.
		if services[0].ServicePort != service.Port {
			r.Fatalf("Returned port should be for proxy: %v", services[0])
		}
	})
}
|
|
|
|
func TestAPI_CatalogConnect_Filter(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
// this sets up the catalog entries with things we can filter on
|
|
testNodeServiceCheckRegistrations(t, c, "dc1")
|
|
|
|
catalog := c.Catalog()
|
|
|
|
services, _, err := catalog.Connect("web", "", &QueryOptions{Filter: "ServicePort == 443"})
|
|
require.NoError(t, err)
|
|
require.Len(t, services, 2)
|
|
require.Condition(t, func() bool {
|
|
return (services[0].Node == "bar" && services[1].Node == "baz") ||
|
|
(services[0].Node == "baz" && services[1].Node == "bar")
|
|
})
|
|
|
|
// All the web-connect services are native
|
|
services, _, err = catalog.Connect("web", "", &QueryOptions{Filter: "ServiceConnect.Native != true"})
|
|
require.NoError(t, err)
|
|
require.Empty(t, services)
|
|
}
|
|
|
|
func TestAPI_CatalogNode(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
|
|
name, err := c.Agent().NodeName()
|
|
require.NoError(t, err)
|
|
|
|
proxyReg := testUnmanagedProxyRegistration(t)
|
|
proxyReg.Node = name
|
|
proxyReg.SkipNodeUpdate = true
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
// Register a connect proxy to ensure all it's config fields are returned
|
|
_, err := catalog.Register(proxyReg, nil)
|
|
r.Check(err)
|
|
|
|
info, meta, err := catalog.Node(name, nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if meta.LastIndex == 0 {
|
|
r.Fatalf("Bad: %v", meta)
|
|
}
|
|
|
|
if len(info.Services) != 2 {
|
|
r.Fatalf("Bad: %v (len %d)", info, len(info.Services))
|
|
}
|
|
|
|
if _, ok := info.Node.TaggedAddresses["wan"]; !ok {
|
|
r.Fatalf("Bad: %v", info.Node.TaggedAddresses)
|
|
}
|
|
|
|
if info.Node.Datacenter != "dc1" {
|
|
r.Fatalf("Bad datacenter: %v", info)
|
|
}
|
|
|
|
if _, ok := info.Services["web-proxy1"]; !ok {
|
|
r.Fatalf("Missing proxy service: %v", info.Services)
|
|
}
|
|
|
|
if !reflect.DeepEqual(proxyReg.Service.Proxy, info.Services["web-proxy1"].Proxy) {
|
|
r.Fatalf("Bad proxy config:\nwant %v\n got: %v", proxyReg.Service.Proxy,
|
|
info.Services["web-proxy"].Proxy)
|
|
}
|
|
})
|
|
}
|
|
|
|
// TestAPI_CatalogNodeServiceList is the NodeServiceList (list-shaped
// response) counterpart of TestAPI_CatalogNode: it registers an
// unmanaged proxy on the agent's node and verifies the proxy is
// returned with its configuration intact.
func TestAPI_CatalogNodeServiceList(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	name, err := c.Agent().NodeName()
	require.NoError(t, err)

	proxyReg := testUnmanagedProxyRegistration(t)
	proxyReg.Node = name
	// Don't overwrite the agent's own node entry; just add the service.
	proxyReg.SkipNodeUpdate = true

	retry.Run(t, func(r *retry.R) {
		// Register a connect proxy to ensure all it's config fields are returned
		_, err := catalog.Register(proxyReg, nil)
		r.Check(err)

		info, meta, err := catalog.NodeServiceList(name, nil)
		if err != nil {
			r.Fatal(err)
		}

		if meta.LastIndex == 0 {
			r.Fatalf("Bad: %v", meta)
		}

		// The agent's built-in service plus the proxy we registered.
		if len(info.Services) != 2 {
			r.Fatalf("Bad: %v (len %d)", info, len(info.Services))
		}

		if _, ok := info.Node.TaggedAddresses["wan"]; !ok {
			r.Fatalf("Bad: %v", info.Node.TaggedAddresses)
		}

		if info.Node.Datacenter != "dc1" {
			r.Fatalf("Bad datacenter: %v", info)
		}

		// The response is a slice (not a map keyed by ID), so scan for
		// the proxy by its service ID.
		var proxySvc *AgentService
		for _, svc := range info.Services {
			if svc.ID == "web-proxy1" {
				proxySvc = svc
				break
			}
		}

		if proxySvc == nil {
			r.Fatalf("Missing proxy service: %v", info.Services)
			return
		}

		if !reflect.DeepEqual(proxyReg.Service.Proxy, proxySvc.Proxy) {
			r.Fatalf("Bad proxy config:\nwant %v\n got: %v", proxyReg.Service.Proxy,
				proxySvc.Proxy)
		}
	})
}
|
|
|
|
func TestAPI_CatalogNode_Filter(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
// this sets up the catalog entries with things we can filter on
|
|
testNodeServiceCheckRegistrations(t, c, "dc1")
|
|
|
|
catalog := c.Catalog()
|
|
|
|
// should have only 1 matching service
|
|
info, _, err := catalog.Node("bar", &QueryOptions{Filter: "connect in Tags"})
|
|
require.NoError(t, err)
|
|
require.Len(t, info.Services, 1)
|
|
require.Contains(t, info.Services, "webV1")
|
|
require.Equal(t, "web", info.Services["webV1"].Service)
|
|
|
|
// should get two services for the node
|
|
info, _, err = catalog.Node("baz", &QueryOptions{Filter: "connect in Tags"})
|
|
require.NoError(t, err)
|
|
require.Len(t, info.Services, 2)
|
|
}
|
|
|
|
func TestAPI_CatalogRegistration(t *testing.T) {
|
|
t.Parallel()
|
|
c, s := makeClient(t)
|
|
defer s.Stop()
|
|
|
|
catalog := c.Catalog()
|
|
|
|
service := &AgentService{
|
|
ID: "redis1",
|
|
Service: "redis",
|
|
Tags: []string{"primary", "v1"},
|
|
Port: 8000,
|
|
}
|
|
|
|
check := &AgentCheck{
|
|
Node: "foobar",
|
|
CheckID: "service:redis1-a",
|
|
Name: "Redis health check",
|
|
Notes: "Script based health check",
|
|
Status: HealthPassing,
|
|
ServiceID: "redis1",
|
|
}
|
|
|
|
checks := HealthChecks{
|
|
&HealthCheck{
|
|
Node: "foobar",
|
|
CheckID: "service:redis1-b",
|
|
Name: "Redis health check",
|
|
Notes: "Script based health check",
|
|
Status: HealthPassing,
|
|
ServiceID: "redis1",
|
|
},
|
|
}
|
|
|
|
reg := &CatalogRegistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
Service: service,
|
|
// Specifying both Check and Checks is accepted by Consul
|
|
Check: check,
|
|
Checks: checks,
|
|
}
|
|
// Register a connect proxy for that service too
|
|
proxy := &AgentService{
|
|
ID: "redis-proxy1",
|
|
Service: "redis-proxy",
|
|
Port: 8001,
|
|
Kind: ServiceKindConnectProxy,
|
|
Proxy: &AgentServiceConnectProxyConfig{
|
|
DestinationServiceName: service.Service,
|
|
},
|
|
}
|
|
proxyReg := &CatalogRegistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
Service: proxy,
|
|
}
|
|
retry.Run(t, func(r *retry.R) {
|
|
if _, err := catalog.Register(reg, nil); err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
if _, err := catalog.Register(proxyReg, nil); err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
node, _, err := catalog.Node("foobar", nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if _, ok := node.Services["redis1"]; !ok {
|
|
r.Fatal("missing service: redis1")
|
|
}
|
|
|
|
if _, ok := node.Services["redis-proxy1"]; !ok {
|
|
r.Fatal("missing service: redis-proxy1")
|
|
}
|
|
|
|
health, _, err := c.Health().Node("foobar", nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if health[0].CheckID != "service:redis1-a" {
|
|
r.Fatal("missing checkid service:redis1-a")
|
|
}
|
|
|
|
if health[1].CheckID != "service:redis1-b" {
|
|
r.Fatal("missing checkid service:redis1-b")
|
|
}
|
|
|
|
if v, ok := node.Node.Meta["somekey"]; !ok || v != "somevalue" {
|
|
r.Fatal("missing node meta pair somekey:somevalue")
|
|
}
|
|
})
|
|
|
|
// Test catalog deregistration of the previously registered service
|
|
dereg := &CatalogDeregistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
ServiceID: "redis1",
|
|
}
|
|
|
|
// ... and proxy
|
|
deregProxy := &CatalogDeregistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
ServiceID: "redis-proxy1",
|
|
}
|
|
|
|
if _, err := catalog.Deregister(dereg, nil); err != nil {
|
|
t.Fatalf("err: %v", err)
|
|
}
|
|
|
|
if _, err := catalog.Deregister(deregProxy, nil); err != nil {
|
|
t.Fatalf("err: %v", err)
|
|
}
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
node, _, err := catalog.Node("foobar", nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if _, ok := node.Services["redis1"]; ok {
|
|
r.Fatal("ServiceID:redis1 is not deregistered")
|
|
}
|
|
|
|
if _, ok := node.Services["redis-proxy1"]; ok {
|
|
r.Fatal("ServiceID:redis-proxy1 is not deregistered")
|
|
}
|
|
})
|
|
|
|
// Test deregistration of the previously registered check
|
|
dereg = &CatalogDeregistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
CheckID: "service:redis1-a",
|
|
}
|
|
|
|
if _, err := catalog.Deregister(dereg, nil); err != nil {
|
|
t.Fatalf("err: %v", err)
|
|
}
|
|
|
|
dereg = &CatalogDeregistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
CheckID: "service:redis1-b",
|
|
}
|
|
|
|
if _, err := catalog.Deregister(dereg, nil); err != nil {
|
|
t.Fatalf("err: %v", err)
|
|
}
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
health, _, err := c.Health().Node("foobar", nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if len(health) != 0 {
|
|
r.Fatal("CheckID:service:redis1-a or CheckID:service:redis1-a is not deregistered")
|
|
}
|
|
})
|
|
|
|
// Test node deregistration of the previously registered node
|
|
dereg = &CatalogDeregistration{
|
|
Datacenter: "dc1",
|
|
Node: "foobar",
|
|
Address: "192.168.10.10",
|
|
}
|
|
|
|
if _, err := catalog.Deregister(dereg, nil); err != nil {
|
|
t.Fatalf("err: %v", err)
|
|
}
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
node, _, err := catalog.Node("foobar", nil)
|
|
if err != nil {
|
|
r.Fatal(err)
|
|
}
|
|
|
|
if node != nil {
|
|
r.Fatalf("node is not deregistered: %v", node)
|
|
}
|
|
})
|
|
}
|
|
|
|
// TestAPI_CatalogEnableTagOverride registers a service with the
// default EnableTagOverride (false), verifies the flag is reported as
// false on both the node and service views, then re-registers with the
// flag set and verifies it is reported as true.
func TestAPI_CatalogEnableTagOverride(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()
	s.WaitForSerfCheck(t)

	catalog := c.Catalog()

	service := &AgentService{
		ID:      "redis1",
		Service: "redis",
		Tags:    []string{"primary", "v1"},
		Port:    8000,
	}

	reg := &CatalogRegistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		Service:    service,
	}

	// First pass: flag unset, so both endpoints must report false.
	retry.Run(t, func(r *retry.R) {
		if _, err := catalog.Register(reg, nil); err != nil {
			r.Fatal(err)
		}

		node, _, err := catalog.Node("foobar", nil)
		if err != nil {
			r.Fatal(err)
		}

		if _, ok := node.Services["redis1"]; !ok {
			r.Fatal("missing service: redis1")
		}
		if node.Services["redis1"].EnableTagOverride != false {
			r.Fatal("tag override set")
		}

		services, _, err := catalog.Service("redis", "", nil)
		if err != nil {
			r.Fatal(err)
		}

		if len(services) < 1 || services[0].ServiceName != "redis" {
			r.Fatal("missing service: redis")
		}
		if services[0].ServiceEnableTagOverride != false {
			r.Fatal("tag override set")
		}
	})

	// Second pass: re-register the same service with the flag on.
	// reg still points at service, so mutating it is enough.
	service.EnableTagOverride = true

	retry.Run(t, func(r *retry.R) {
		if _, err := catalog.Register(reg, nil); err != nil {
			r.Fatal(err)
		}

		node, _, err := catalog.Node("foobar", nil)
		if err != nil {
			r.Fatal(err)
		}

		if _, ok := node.Services["redis1"]; !ok {
			r.Fatal("missing service: redis1")
		}
		if node.Services["redis1"].EnableTagOverride != true {
			r.Fatal("tag override not set")
		}

		services, _, err := catalog.Service("redis", "", nil)
		if err != nil {
			r.Fatal(err)
		}

		if len(services) < 1 || services[0].ServiceName != "redis" {
			r.Fatal("missing service: redis")
		}
		if services[0].ServiceEnableTagOverride != true {
			r.Fatal("tag override not set")
		}
	})
}
|
|
|
|
// TestAPI_CatalogGatewayServices_Terminating verifies GatewayServices
// for a terminating gateway: a service named explicitly in the config
// entry keeps its own TLS settings, while a service matched only by
// the "*" wildcard inherits the wildcard's settings and is flagged
// with FromWildcard.
func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()
	s.WaitForSerfCheck(t)

	catalog := c.Catalog()

	// Register a service to be covered by a wildcard in the config entry
	svc := &AgentService{
		ID:      "redis",
		Service: "redis",
		Port:    6379,
	}
	reg := &CatalogRegistration{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "192.168.10.11",
		Service:    svc,
	}
	retry.Run(t, func(r *retry.R) {
		if _, err := catalog.Register(reg, nil); err != nil {
			r.Fatal(err)
		}
	})

	entries := c.ConfigEntries()

	// Associate the gateway and api/redis services
	gwEntry := TerminatingGatewayConfigEntry{
		Kind: TerminatingGateway,
		Name: "terminating",
		Services: []LinkedService{
			{
				Name:     "api",
				CAFile:   "api/ca.crt",
				CertFile: "api/client.crt",
				KeyFile:  "api/client.key",
				SNI:      "my-domain",
			},
			{
				// Wildcard entry: applies to any service not named above.
				Name:     "*",
				CAFile:   "ca.crt",
				CertFile: "client.crt",
				KeyFile:  "client.key",
				SNI:      "my-alt-domain",
			},
		},
	}
	retry.Run(t, func(r *retry.R) {
		if success, _, err := entries.Set(&gwEntry, nil); err != nil || !success {
			r.Fatal(err)
		}
	})

	expect := []*GatewayService{
		{
			Service:     CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition},
			Gateway:     CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition},
			GatewayKind: ServiceKindTerminatingGateway,
			CAFile:      "api/ca.crt",
			CertFile:    "api/client.crt",
			KeyFile:     "api/client.key",
			SNI:         "my-domain",
		},
		{
			Service:     CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition},
			Gateway:     CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition},
			GatewayKind: ServiceKindTerminatingGateway,
			CAFile:      "ca.crt",
			CertFile:    "client.crt",
			KeyFile:     "client.key",
			SNI:         "my-alt-domain",
			// redis was matched by "*", not named explicitly.
			FromWildcard: true,
		},
	}
	retry.Run(t, func(r *retry.R) {
		resp, _, err := catalog.GatewayServices("terminating", nil)
		assert.NoError(r, err)
		assert.Equal(r, expect, resp)
	})
}
|
|
|
|
// TestAPI_CatalogGatewayServices_Ingress verifies GatewayServices for
// an ingress gateway: each listener in the config entry maps its
// services to the listener's port, with the default "tcp" protocol.
func TestAPI_CatalogGatewayServices_Ingress(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	s.WaitForSerfCheck(t)

	entries := c.ConfigEntries()

	// Associate the gateway and api/redis services
	gwEntry := IngressGatewayConfigEntry{
		Kind: "ingress-gateway",
		Name: "ingress",
		Listeners: []IngressListener{
			{
				Port: 8888,
				Services: []IngressService{
					{
						Name: "api",
					},
				},
			},
			{
				Port: 9999,
				Services: []IngressService{
					{
						Name: "redis",
					},
				},
			},
		},
	}
	retry.Run(t, func(r *retry.R) {
		if success, _, err := entries.Set(&gwEntry, nil); err != nil || !success {
			r.Fatal(err)
		}
	})

	catalog := c.Catalog()

	expect := []*GatewayService{
		{
			Service:     CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition},
			Gateway:     CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition},
			GatewayKind: ServiceKindIngressGateway,
			Protocol:    "tcp",
			Port:        8888,
		},
		{
			Service:     CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition},
			Gateway:     CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition},
			GatewayKind: ServiceKindIngressGateway,
			Protocol:    "tcp",
			Port:        9999,
		},
	}
	retry.Run(t, func(r *retry.R) {
		resp, _, err := catalog.GatewayServices("ingress", nil)
		assert.NoError(r, err)
		assert.Equal(r, expect, resp)
	})
}
|