2014-04-23 19:57:06 +00:00
|
|
|
package agent
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2014-04-28 22:09:46 +00:00
|
|
|
"fmt"
|
2014-04-23 19:57:06 +00:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"net/http"
|
|
|
|
"net/http/httptest"
|
2019-04-16 16:00:15 +00:00
|
|
|
"net/url"
|
2014-04-23 19:57:06 +00:00
|
|
|
"path/filepath"
|
|
|
|
"testing"
|
2015-10-15 21:45:10 +00:00
|
|
|
|
2020-09-22 18:34:09 +00:00
|
|
|
"github.com/hashicorp/consul/agent/config"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
2020-04-08 18:37:24 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2017-09-25 18:40:42 +00:00
|
|
|
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
2020-05-11 17:35:17 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
2019-04-16 16:00:15 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2014-04-23 19:57:06 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestUiIndex(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2015-11-30 19:24:39 +00:00
|
|
|
// Make a test dir to serve UI files
|
2017-05-12 13:41:13 +00:00
|
|
|
uiDir := testutil.TempDir(t, "consul")
|
2015-11-30 19:24:39 +00:00
|
|
|
|
|
|
|
// Make the server
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2020-10-01 11:26:19 +00:00
|
|
|
ui_config {
|
|
|
|
dir = "`+uiDir+`"
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-27 15:49:14 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2014-04-23 19:57:06 +00:00
|
|
|
|
|
|
|
// Create file
|
2020-09-10 16:25:56 +00:00
|
|
|
path := filepath.Join(a.Config.UIConfig.Dir, "my-file")
|
2020-10-01 11:26:19 +00:00
|
|
|
if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil {
|
2014-04-23 20:10:18 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-04-23 19:57:06 +00:00
|
|
|
|
2020-10-01 11:26:19 +00:00
|
|
|
// Request the custom file
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/ui/my-file", nil)
|
2014-04-28 21:52:30 +00:00
|
|
|
req.URL.Scheme = "http"
|
2020-07-02 17:31:47 +00:00
|
|
|
req.URL.Host = a.HTTPAddr()
|
2014-04-23 19:57:06 +00:00
|
|
|
|
|
|
|
// Make the request
|
2015-10-22 14:47:50 +00:00
|
|
|
client := cleanhttp.DefaultClient()
|
2015-10-15 21:45:10 +00:00
|
|
|
resp, err := client.Do(req)
|
2014-04-28 21:52:30 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2020-04-06 13:11:04 +00:00
|
|
|
defer resp.Body.Close()
|
2014-04-23 19:57:06 +00:00
|
|
|
|
2014-12-04 23:25:06 +00:00
|
|
|
// Verify the response
|
2014-04-28 21:52:30 +00:00
|
|
|
if resp.StatusCode != 200 {
|
2014-04-23 19:57:06 +00:00
|
|
|
t.Fatalf("bad: %v", resp)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the body
|
|
|
|
out := bytes.NewBuffer(nil)
|
|
|
|
io.Copy(out, resp.Body)
|
2019-07-20 13:37:19 +00:00
|
|
|
if out.String() != "test" {
|
2014-04-23 19:57:06 +00:00
|
|
|
t.Fatalf("bad: %s", out.Bytes())
|
|
|
|
}
|
|
|
|
}
|
2014-04-28 21:52:30 +00:00
|
|
|
|
|
|
|
// TestUiNodes exercises the /v1/internal/ui/nodes/:dc endpoint and checks
// that both the agent's own node and an externally registered node are
// returned with non-nil (possibly empty) service and check lists.
func TestUiNodes(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a second node (beyond the agent itself) that carries no
	// services or checks of its own.
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
	}

	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodes(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	assertIndex(t, resp)

	// Should be 2 nodes, and all the empty lists should be non-nil
	nodes := obj.(structs.NodeDump)
	if len(nodes) != 2 ||
		nodes[0].Node != a.Config.NodeName ||
		nodes[0].Services == nil || len(nodes[0].Services) != 1 ||
		nodes[0].Checks == nil || len(nodes[0].Checks) != 1 ||
		nodes[1].Node != "test" ||
		nodes[1].Services == nil || len(nodes[1].Services) != 0 ||
		nodes[1].Checks == nil || len(nodes[1].Checks) != 0 {
		t.Fatalf("bad: %v", obj)
	}
}
|
2014-04-28 22:09:46 +00:00
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
// TestUiNodes_Filter verifies that the ?filter= query parameter on
// /v1/internal/ui/nodes restricts results using node-metadata expressions.
func TestUiNodes_Filter(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register two nodes with differing Meta.os values so the filter can
	// distinguish them.
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"os": "linux",
		},
	}

	var out struct{}
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	args = &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test2",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"os": "macos",
		},
	}
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1?filter="+url.QueryEscape("Meta.os == linux"), nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodes(resp, req)
	require.NoError(t, err)
	assertIndex(t, resp)

	// Only the "test" (linux) node should match the filter; its service
	// and check lists should be empty.
	nodes := obj.(structs.NodeDump)
	require.Len(t, nodes, 1)
	require.Equal(t, nodes[0].Node, "test")
	require.Empty(t, nodes[0].Services)
	require.Empty(t, nodes[0].Checks)
}
|
|
|
|
|
2014-04-28 22:09:46 +00:00
|
|
|
// TestUiNodeInfo exercises /v1/internal/ui/node/:name, first for the
// agent's own node and then for an externally registered node that has no
// services or checks (whose lists must still be non-nil).
func TestUiNodeInfo(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/internal/ui/node/%s", a.Config.NodeName), nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodeInfo(resp, req)
	require.NoError(t, err)
	require.Equal(t, resp.Code, http.StatusOK)
	assertIndex(t, resp)

	// Should be 1 node for the server
	node := obj.(*structs.NodeInfo)
	if node.Node != a.Config.NodeName {
		t.Fatalf("bad: %v", node)
	}

	// Register a bare node with no services or checks attached.
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
	}

	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ = http.NewRequest("GET", "/v1/internal/ui/node/test", nil)
	resp = httptest.NewRecorder()
	obj, err = a.srv.UINodeInfo(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	assertIndex(t, resp)

	// Should be non-nil empty lists for services and checks
	node = obj.(*structs.NodeInfo)
	if node.Node != "test" ||
		node.Services == nil || len(node.Services) != 0 ||
		node.Checks == nil || len(node.Checks) != 0 {
		t.Fatalf("bad: %v", node)
	}
}
|
2014-04-28 22:52:37 +00:00
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
// TestUiServices exercises the /v1/internal/ui/services catalog summary
// endpoint, both unfiltered and with a ?filter= expression. The fixture
// covers typical services, a connect proxy, external sources, and
// terminating gateways (one with instances, one without).
func TestUiServices(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	requests := []*structs.RegisterRequest{
		// register foo node
		{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:   "foo",
					Name:   "node check",
					Status: api.HealthPassing,
				},
			},
		},
		//register api service on node foo
		{
			Datacenter:     "dc1",
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "api",
				ID:      "api-1",
				Tags:    []string{"tag1", "tag2"},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo",
					Name:        "api svc check",
					ServiceName: "api",
					ServiceID:   "api-1",
					Status:      api.HealthWarning,
				},
			},
		},
		// register api-proxy svc on node foo
		{
			Datacenter:     "dc1",
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "api-proxy",
				ID:      "api-proxy-1",
				Tags:    []string{},
				Meta:    map[string]string{metaExternalSource: "k8s"},
				Port:    1234,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "api",
				},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo",
					Name:        "api proxy listening",
					ServiceName: "api-proxy",
					ServiceID:   "api-proxy-1",
					Status:      api.HealthPassing,
				},
			},
		},
		// register bar node with service web
		{
			Datacenter: "dc1",
			Node:       "bar",
			Address:    "127.0.0.2",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "web",
				ID:      "web-1",
				Tags:    []string{},
				Meta:    map[string]string{metaExternalSource: "k8s"},
				Port:    1234,
			},
			Checks: []*structs.HealthCheck{
				{
					Node:        "bar",
					Name:        "web svc check",
					Status:      api.HealthCritical,
					ServiceName: "web",
					ServiceID:   "web-1",
				},
			},
		},
		// register zip node with service cache
		{
			Datacenter: "dc1",
			Node:       "zip",
			Address:    "127.0.0.3",
			Service: &structs.NodeService{
				Service: "cache",
				Tags:    []string{},
			},
		},
	}

	for _, args := range requests {
		var out struct{}
		require.NoError(t, a.RPC("Catalog.Register", args, &out))
	}

	// Register a terminating gateway associated with api and cache
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				ID:      "terminating-gateway",
				Service: "terminating-gateway",
				Kind:    structs.ServiceKindTerminatingGateway,
				Port:    443,
			},
		}
		var regOutput struct{}
		require.NoError(t, a.RPC("Catalog.Register", &arg, &regOutput))

		args := &structs.TerminatingGatewayConfigEntry{
			Name: "terminating-gateway",
			Kind: structs.TerminatingGateway,
			Services: []structs.LinkedService{
				{
					Name: "api",
				},
				{
					Name: "cache",
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		var configOutput bool
		require.NoError(t, a.RPC("ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)

		// Web should not show up as ConnectedWithGateway since this one does not have any instances
		args = &structs.TerminatingGatewayConfigEntry{
			Name: "other-terminating-gateway",
			Kind: structs.TerminatingGateway,
			Services: []structs.LinkedService{
				{
					Name: "web",
				},
			},
		}

		req = structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		require.NoError(t, a.RPC("ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)
	}

	t.Run("No Filter", func(t *testing.T) {
		t.Parallel()
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services/dc1", nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		// Six distinct service names should be summarized: api, api-proxy,
		// cache, consul, terminating-gateway, and web.
		summary := obj.([]*ServiceListingSummary)
		require.Len(t, summary, 6)

		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.externalSourceSet = nil
			sum.checks = nil
		}

		expected := []*ServiceListingSummary{
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "api",
					Datacenter:     "dc1",
					Tags:           []string{"tag1", "tag2"},
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  2,
					ChecksWarning:  1,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				},
				ConnectedWithProxy:   true,
				ConnectedWithGateway: true,
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:            structs.ServiceKindConnectProxy,
					Name:            "api-proxy",
					Datacenter:      "dc1",
					Tags:            nil,
					Nodes:           []string{"foo"},
					InstanceCount:   1,
					ChecksPassing:   2,
					ChecksWarning:   0,
					ChecksCritical:  0,
					ExternalSources: []string{"k8s"},
					EnterpriseMeta:  *structs.DefaultEnterpriseMeta(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "cache",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{"zip"},
					InstanceCount:  1,
					ChecksPassing:  0,
					ChecksWarning:  0,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				},
				ConnectedWithGateway: true,
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "consul",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{a.Config.NodeName},
					InstanceCount:  1,
					ChecksPassing:  1,
					ChecksWarning:  0,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTerminatingGateway,
					Name:           "terminating-gateway",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  1,
					ChecksWarning:  0,
					ChecksCritical: 0,
					GatewayConfig:  GatewayConfig{AssociatedServiceCount: 2},
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:            structs.ServiceKindTypical,
					Name:            "web",
					Datacenter:      "dc1",
					Tags:            nil,
					Nodes:           []string{"bar"},
					InstanceCount:   1,
					ChecksPassing:   0,
					ChecksWarning:   0,
					ChecksCritical:  1,
					ExternalSources: []string{"k8s"},
					EnterpriseMeta:  *structs.DefaultEnterpriseMeta(),
				},
			},
		}
		require.ElementsMatch(t, expected, summary)
	})

	t.Run("Filtered", func(t *testing.T) {
		filterQuery := url.QueryEscape("Service.Service == web or Service.Service == api")
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services?filter="+filterQuery, nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		// Only the api and web services match the filter expression.
		summary := obj.([]*ServiceListingSummary)
		require.Len(t, summary, 2)

		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.externalSourceSet = nil
			sum.checks = nil
		}

		expected := []*ServiceListingSummary{
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "api",
					Datacenter:     "dc1",
					Tags:           []string{"tag1", "tag2"},
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  1,
					ChecksWarning:  1,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				},
				ConnectedWithProxy:   false,
				ConnectedWithGateway: false,
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:            structs.ServiceKindTypical,
					Name:            "web",
					Datacenter:      "dc1",
					Tags:            nil,
					Nodes:           []string{"bar"},
					InstanceCount:   1,
					ChecksPassing:   0,
					ChecksWarning:   0,
					ChecksCritical:  1,
					ExternalSources: []string{"k8s"},
					EnterpriseMeta:  *structs.DefaultEnterpriseMeta(),
				},
			},
		}
		require.ElementsMatch(t, expected, summary)
	})
}
|
2020-05-11 17:35:17 +00:00
|
|
|
|
|
|
|
func TestUIGatewayServiceNodes_Terminating(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// Register terminating gateway and a service that will be associated with it
|
|
|
|
{
|
|
|
|
arg := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "terminating-gateway",
|
|
|
|
Service: "terminating-gateway",
|
|
|
|
Kind: structs.ServiceKindTerminatingGateway,
|
|
|
|
Port: 443,
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "terminating connect",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "terminating-gateway",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var regOutput struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
|
|
|
arg = structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "db",
|
|
|
|
Service: "db",
|
|
|
|
Tags: []string{"primary"},
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "db-warning",
|
|
|
|
Status: api.HealthWarning,
|
|
|
|
ServiceID: "db",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
|
|
|
arg = structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
Address: "127.0.0.3",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "db2",
|
|
|
|
Service: "db",
|
|
|
|
Tags: []string{"backup"},
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "db2-passing",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "db2",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
|
|
|
// Register terminating-gateway config entry, linking it to db and redis (does not exist)
|
|
|
|
args := &structs.TerminatingGatewayConfigEntry{
|
|
|
|
Name: "terminating-gateway",
|
|
|
|
Kind: structs.TerminatingGateway,
|
|
|
|
Services: []structs.LinkedService{
|
|
|
|
{
|
|
|
|
Name: "db",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "redis",
|
|
|
|
CAFile: "/etc/certs/ca.pem",
|
|
|
|
CertFile: "/etc/certs/cert.pem",
|
|
|
|
KeyFile: "/etc/certs/key.pem",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req := structs.ConfigEntryRequest{
|
|
|
|
Op: structs.ConfigEntryUpsert,
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Entry: args,
|
|
|
|
}
|
|
|
|
var configOutput bool
|
|
|
|
require.NoError(t, a.RPC("ConfigEntry.Apply", &req, &configOutput))
|
|
|
|
require.True(t, configOutput)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Request
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-services-nodes/terminating-gateway", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.UIGatewayServicesNodes(resp, req)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
2020-09-30 14:23:19 +00:00
|
|
|
summary := obj.([]*ServiceSummary)
|
|
|
|
|
|
|
|
// internal accounting that users don't see can be blown away
|
|
|
|
for _, sum := range summary {
|
|
|
|
sum.externalSourceSet = nil
|
|
|
|
sum.checks = nil
|
|
|
|
}
|
|
|
|
|
2020-05-11 17:35:17 +00:00
|
|
|
expect := []*ServiceSummary{
|
|
|
|
{
|
2020-05-12 18:48:20 +00:00
|
|
|
Name: "redis",
|
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
2020-05-11 17:35:17 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "db",
|
2020-09-30 14:23:19 +00:00
|
|
|
Datacenter: "dc1",
|
2020-05-11 17:35:17 +00:00
|
|
|
Tags: []string{"backup", "primary"},
|
|
|
|
Nodes: []string{"bar", "baz"},
|
|
|
|
InstanceCount: 2,
|
|
|
|
ChecksPassing: 1,
|
|
|
|
ChecksWarning: 1,
|
|
|
|
ChecksCritical: 0,
|
2020-05-12 18:48:20 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
2020-05-11 17:35:17 +00:00
|
|
|
},
|
|
|
|
}
|
2020-09-30 14:23:19 +00:00
|
|
|
assert.ElementsMatch(t, expect, summary)
|
2020-05-11 17:35:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestUIGatewayServiceNodes_Ingress(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
2020-06-22 19:14:12 +00:00
|
|
|
a := NewTestAgent(t, `alt_domain = "alt.consul."`)
|
2020-05-11 17:35:17 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// Register ingress gateway and a service that will be associated with it
|
|
|
|
{
|
|
|
|
arg := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "ingress-gateway",
|
|
|
|
Service: "ingress-gateway",
|
|
|
|
Kind: structs.ServiceKindIngressGateway,
|
|
|
|
Port: 8443,
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "ingress connect",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "ingress-gateway",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var regOutput struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
|
|
|
arg = structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "db",
|
|
|
|
Service: "db",
|
|
|
|
Tags: []string{"primary"},
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "db-warning",
|
|
|
|
Status: api.HealthWarning,
|
|
|
|
ServiceID: "db",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
|
|
|
arg = structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
Address: "127.0.0.3",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "db2",
|
|
|
|
Service: "db",
|
|
|
|
Tags: []string{"backup"},
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "db2-passing",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "db2",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
2020-06-22 19:14:12 +00:00
|
|
|
// Set web protocol to http
|
|
|
|
svcDefaultsReq := structs.ConfigEntryRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Entry: &structs.ServiceConfigEntry{
|
|
|
|
Name: "web",
|
|
|
|
Protocol: "http",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var configOutput bool
|
|
|
|
require.NoError(t, a.RPC("ConfigEntry.Apply", &svcDefaultsReq, &configOutput))
|
|
|
|
require.True(t, configOutput)
|
|
|
|
|
2020-05-11 17:35:17 +00:00
|
|
|
// Register ingress-gateway config entry, linking it to db and redis (does not exist)
|
|
|
|
args := &structs.IngressGatewayConfigEntry{
|
|
|
|
Name: "ingress-gateway",
|
|
|
|
Kind: structs.IngressGateway,
|
|
|
|
Listeners: []structs.IngressListener{
|
|
|
|
{
|
|
|
|
Port: 8888,
|
|
|
|
Protocol: "tcp",
|
|
|
|
Services: []structs.IngressService{
|
|
|
|
{
|
|
|
|
Name: "db",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Port: 8080,
|
2020-06-22 19:14:12 +00:00
|
|
|
Protocol: "http",
|
2020-05-11 17:35:17 +00:00
|
|
|
Services: []structs.IngressService{
|
|
|
|
{
|
|
|
|
Name: "web",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2020-06-22 19:14:12 +00:00
|
|
|
{
|
|
|
|
Port: 8081,
|
|
|
|
Protocol: "http",
|
|
|
|
Services: []structs.IngressService{
|
|
|
|
{
|
|
|
|
Name: "web",
|
|
|
|
Hosts: []string{"*.test.example.com"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2020-05-11 17:35:17 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req := structs.ConfigEntryRequest{
|
|
|
|
Op: structs.ConfigEntryUpsert,
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Entry: args,
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("ConfigEntry.Apply", &req, &configOutput))
|
|
|
|
require.True(t, configOutput)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Request
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-services-nodes/ingress-gateway", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.UIGatewayServicesNodes(resp, req)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
2020-06-22 19:14:12 +00:00
|
|
|
// Construct expected addresses so that differences between OSS/Ent are handled by code
|
|
|
|
webDNS := serviceIngressDNSName("web", "dc1", "consul.", structs.DefaultEnterpriseMeta())
|
|
|
|
webDNSAlt := serviceIngressDNSName("web", "dc1", "alt.consul.", structs.DefaultEnterpriseMeta())
|
|
|
|
dbDNS := serviceIngressDNSName("db", "dc1", "consul.", structs.DefaultEnterpriseMeta())
|
|
|
|
dbDNSAlt := serviceIngressDNSName("db", "dc1", "alt.consul.", structs.DefaultEnterpriseMeta())
|
|
|
|
|
2020-05-11 17:35:17 +00:00
|
|
|
dump := obj.([]*ServiceSummary)
|
|
|
|
expect := []*ServiceSummary{
|
|
|
|
{
|
2020-06-22 19:14:12 +00:00
|
|
|
Name: "web",
|
|
|
|
GatewayConfig: GatewayConfig{
|
|
|
|
Addresses: []string{
|
|
|
|
fmt.Sprintf("%s:8080", webDNS),
|
|
|
|
fmt.Sprintf("%s:8080", webDNSAlt),
|
|
|
|
"*.test.example.com:8081",
|
|
|
|
},
|
|
|
|
},
|
2020-05-12 18:48:20 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
2020-05-11 17:35:17 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "db",
|
2020-09-30 14:23:19 +00:00
|
|
|
Datacenter: "dc1",
|
2020-05-11 17:35:17 +00:00
|
|
|
Tags: []string{"backup", "primary"},
|
|
|
|
Nodes: []string{"bar", "baz"},
|
|
|
|
InstanceCount: 2,
|
|
|
|
ChecksPassing: 1,
|
|
|
|
ChecksWarning: 1,
|
|
|
|
ChecksCritical: 0,
|
2020-06-22 19:14:12 +00:00
|
|
|
GatewayConfig: GatewayConfig{
|
|
|
|
Addresses: []string{
|
|
|
|
fmt.Sprintf("%s:8888", dbDNS),
|
|
|
|
fmt.Sprintf("%s:8888", dbDNSAlt),
|
|
|
|
},
|
|
|
|
},
|
2020-05-12 18:48:20 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
2020-05-11 17:35:17 +00:00
|
|
|
},
|
|
|
|
}
|
2020-06-22 19:14:12 +00:00
|
|
|
|
|
|
|
// internal accounting that users don't see can be blown away
|
|
|
|
for _, sum := range dump {
|
|
|
|
sum.GatewayConfig.addressesSet = nil
|
2020-09-30 14:23:19 +00:00
|
|
|
sum.checks = nil
|
2020-06-22 19:14:12 +00:00
|
|
|
}
|
2020-05-11 17:35:17 +00:00
|
|
|
assert.ElementsMatch(t, expect, dump)
|
|
|
|
}
|
2020-08-11 23:20:41 +00:00
|
|
|
|
|
|
|
func TestUIGatewayIntentions(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// Register terminating gateway and config entry linking it to postgres + redis
|
|
|
|
{
|
|
|
|
arg := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "terminating-gateway",
|
|
|
|
Service: "terminating-gateway",
|
|
|
|
Kind: structs.ServiceKindTerminatingGateway,
|
|
|
|
Port: 443,
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Name: "terminating connect",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "terminating-gateway",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var regOutput struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &arg, ®Output))
|
|
|
|
|
|
|
|
args := &structs.TerminatingGatewayConfigEntry{
|
|
|
|
Name: "terminating-gateway",
|
|
|
|
Kind: structs.TerminatingGateway,
|
|
|
|
Services: []structs.LinkedService{
|
|
|
|
{
|
|
|
|
Name: "postgres",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "redis",
|
|
|
|
CAFile: "/etc/certs/ca.pem",
|
|
|
|
CertFile: "/etc/certs/cert.pem",
|
|
|
|
KeyFile: "/etc/certs/key.pem",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req := structs.ConfigEntryRequest{
|
|
|
|
Op: structs.ConfigEntryUpsert,
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Entry: args,
|
|
|
|
}
|
|
|
|
var configOutput bool
|
|
|
|
require.NoError(t, a.RPC("ConfigEntry.Apply", &req, &configOutput))
|
|
|
|
require.True(t, configOutput)
|
|
|
|
}
|
|
|
|
|
|
|
|
// create some symmetric intentions to ensure we are only matching on destination
|
|
|
|
{
|
|
|
|
for _, v := range []string{"*", "mysql", "redis", "postgres"} {
|
|
|
|
req := structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpCreate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.SourceName = "api"
|
|
|
|
req.Intention.DestinationName = v
|
|
|
|
|
|
|
|
var reply string
|
|
|
|
assert.NoError(t, a.RPC("Intention.Apply", &req, &reply))
|
|
|
|
|
|
|
|
req = structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpCreate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.SourceName = v
|
|
|
|
req.Intention.DestinationName = "api"
|
|
|
|
assert.NoError(t, a.RPC("Intention.Apply", &req, &reply))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Request intentions matching the gateway named "terminating-gateway"
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-intentions/terminating-gateway", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.UIGatewayIntentions(resp, req)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
intentions := obj.(structs.Intentions)
|
|
|
|
assert.Len(t, intentions, 3)
|
|
|
|
|
|
|
|
// Only intentions with linked services as a destination should be returned, and wildcard matches should be deduped
|
|
|
|
expected := []string{"postgres", "*", "redis"}
|
|
|
|
actual := []string{
|
|
|
|
intentions[0].DestinationName,
|
|
|
|
intentions[1].DestinationName,
|
|
|
|
intentions[2].DestinationName,
|
|
|
|
}
|
|
|
|
assert.ElementsMatch(t, expected, actual)
|
|
|
|
}
|
2020-09-22 18:34:09 +00:00
|
|
|
|
|
|
|
func TestUIEndpoint_modifySummaryForGatewayService_UseRequestedDCInsteadOfConfigured(t *testing.T) {
|
|
|
|
dc := "dc2"
|
|
|
|
cfg := config.RuntimeConfig{Datacenter: "dc1", DNSDomain: "consul"}
|
|
|
|
sum := ServiceSummary{GatewayConfig: GatewayConfig{}}
|
|
|
|
gwsvc := structs.GatewayService{Service: structs.ServiceName{Name: "test"}, Port: 42}
|
|
|
|
modifySummaryForGatewayService(&cfg, dc, &sum, &gwsvc)
|
2020-09-25 15:31:42 +00:00
|
|
|
expected := serviceCanonicalDNSName("test", "ingress", "dc2", "consul", nil) + ":42"
|
|
|
|
require.Equal(t, expected, sum.GatewayConfig.Addresses[0])
|
2020-09-22 18:34:09 +00:00
|
|
|
}
|
2020-09-30 14:23:19 +00:00
|
|
|
|
|
|
|
func TestUIServiceTopology(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// Register terminating gateway and config entry linking it to postgres + redis
|
|
|
|
{
|
|
|
|
registrations := map[string]*structs.RegisterRequest{
|
|
|
|
"Node foo": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "foo:alive",
|
|
|
|
Name: "foo-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service api on foo": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "api",
|
|
|
|
Service: "api",
|
|
|
|
Port: 9090,
|
|
|
|
Address: "198.18.1.2",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "foo:api",
|
|
|
|
Name: "api-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "api",
|
|
|
|
ServiceName: "api",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service api-proxy": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "api-proxy",
|
|
|
|
Service: "api-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.2",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "api",
|
|
|
|
Upstreams: structs.Upstreams{
|
|
|
|
{
|
|
|
|
DestinationName: "web",
|
|
|
|
LocalBindPort: 8080,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "foo:api-proxy",
|
|
|
|
Name: "api proxy listening",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "api-proxy",
|
|
|
|
ServiceName: "api-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Node bar": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.3",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
CheckID: "bar:alive",
|
|
|
|
Name: "bar-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web on bar": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "web",
|
|
|
|
Service: "web",
|
|
|
|
Port: 80,
|
|
|
|
Address: "198.18.1.20",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
CheckID: "bar:web",
|
|
|
|
Name: "web-liveness",
|
|
|
|
Status: api.HealthWarning,
|
|
|
|
ServiceID: "web",
|
|
|
|
ServiceName: "web",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web-proxy on bar": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "web-proxy",
|
|
|
|
Service: "web-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.20",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "web",
|
|
|
|
Upstreams: structs.Upstreams{
|
|
|
|
{
|
|
|
|
DestinationName: "redis",
|
|
|
|
LocalBindPort: 123,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
CheckID: "bar:web-proxy",
|
|
|
|
Name: "web proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "web-proxy",
|
|
|
|
ServiceName: "web-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Node baz": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
Address: "127.0.0.4",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "baz",
|
|
|
|
CheckID: "baz:alive",
|
|
|
|
Name: "baz-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web on baz": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "web",
|
|
|
|
Service: "web",
|
|
|
|
Port: 80,
|
|
|
|
Address: "198.18.1.40",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "baz",
|
|
|
|
CheckID: "baz:web",
|
|
|
|
Name: "web-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "web",
|
|
|
|
ServiceName: "web",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web-proxy on baz": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "web-proxy",
|
|
|
|
Service: "web-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.40",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "web",
|
|
|
|
Upstreams: structs.Upstreams{
|
|
|
|
{
|
|
|
|
DestinationName: "redis",
|
|
|
|
LocalBindPort: 123,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "baz",
|
|
|
|
CheckID: "baz:web-proxy",
|
|
|
|
Name: "web proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "web-proxy",
|
|
|
|
ServiceName: "web-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Node zip": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zip",
|
|
|
|
Address: "127.0.0.5",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "zip",
|
|
|
|
CheckID: "zip:alive",
|
|
|
|
Name: "zip-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service redis on zip": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zip",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Port: 6379,
|
|
|
|
Address: "198.18.1.60",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "zip",
|
|
|
|
CheckID: "zip:redis",
|
|
|
|
Name: "redis-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "redis",
|
|
|
|
ServiceName: "redis",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service redis-proxy on zip": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zip",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "redis-proxy",
|
|
|
|
Service: "redis-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.60",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "redis",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "zip",
|
|
|
|
CheckID: "zip:redis-proxy",
|
|
|
|
Name: "redis proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "redis-proxy",
|
|
|
|
ServiceName: "redis-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
for _, args := range registrations {
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Run("api", func(t *testing.T) {
|
|
|
|
// Request topology for api
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/api", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.UIServiceTopology(resp, req)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
expect := ServiceTopology{
|
|
|
|
Upstreams: []*ServiceSummary{
|
|
|
|
{
|
|
|
|
Name: "web",
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Nodes: []string{"bar", "baz"},
|
|
|
|
InstanceCount: 2,
|
|
|
|
ChecksPassing: 3,
|
|
|
|
ChecksWarning: 1,
|
|
|
|
ChecksCritical: 2,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
FilteredByACLs: false,
|
|
|
|
}
|
|
|
|
result := obj.(ServiceTopology)
|
|
|
|
|
|
|
|
// Internal accounting that is not returned in JSON response
|
|
|
|
for _, u := range result.Upstreams {
|
|
|
|
u.externalSourceSet = nil
|
|
|
|
u.checks = nil
|
|
|
|
}
|
|
|
|
require.Equal(t, expect, result)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("web", func(t *testing.T) {
|
|
|
|
// Request topology for web
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/web", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.UIServiceTopology(resp, req)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
expect := ServiceTopology{
|
|
|
|
Upstreams: []*ServiceSummary{
|
|
|
|
{
|
|
|
|
Name: "redis",
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Nodes: []string{"zip"},
|
|
|
|
InstanceCount: 1,
|
|
|
|
ChecksPassing: 2,
|
|
|
|
ChecksCritical: 1,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Downstreams: []*ServiceSummary{
|
|
|
|
{
|
|
|
|
Name: "api",
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Nodes: []string{"foo"},
|
|
|
|
InstanceCount: 1,
|
|
|
|
ChecksPassing: 3,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
FilteredByACLs: false,
|
|
|
|
}
|
|
|
|
result := obj.(ServiceTopology)
|
|
|
|
|
|
|
|
// Internal accounting that is not returned in JSON response
|
|
|
|
for _, u := range result.Upstreams {
|
|
|
|
u.externalSourceSet = nil
|
|
|
|
u.checks = nil
|
|
|
|
}
|
|
|
|
for _, d := range result.Downstreams {
|
|
|
|
d.externalSourceSet = nil
|
|
|
|
d.checks = nil
|
|
|
|
}
|
|
|
|
require.Equal(t, expect, result)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("redis", func(t *testing.T) {
|
|
|
|
// Request topology for redis
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/redis", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.UIServiceTopology(resp, req)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
expect := ServiceTopology{
|
|
|
|
Downstreams: []*ServiceSummary{
|
|
|
|
{
|
|
|
|
Name: "web",
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Nodes: []string{"bar", "baz"},
|
|
|
|
InstanceCount: 2,
|
|
|
|
ChecksPassing: 3,
|
|
|
|
ChecksWarning: 1,
|
|
|
|
ChecksCritical: 2,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
FilteredByACLs: false,
|
|
|
|
}
|
|
|
|
result := obj.(ServiceTopology)
|
|
|
|
|
|
|
|
// Internal accounting that is not returned in JSON response
|
|
|
|
for _, d := range result.Downstreams {
|
|
|
|
d.externalSourceSet = nil
|
|
|
|
d.checks = nil
|
|
|
|
}
|
|
|
|
require.Equal(t, expect, result)
|
|
|
|
})
|
|
|
|
}
|