2014-01-06 21:21:48 +00:00
|
|
|
package agent
|
|
|
|
|
|
|
|
import (
|
2016-12-13 06:09:35 +00:00
|
|
|
"bytes"
|
2018-04-30 21:23:49 +00:00
|
|
|
"crypto/tls"
|
2018-03-21 17:55:39 +00:00
|
|
|
"crypto/x509"
|
2018-09-27 13:33:12 +00:00
|
|
|
"encoding/json"
|
2014-01-06 21:21:48 +00:00
|
|
|
"fmt"
|
2016-11-16 21:45:26 +00:00
|
|
|
"io"
|
2016-11-28 21:08:31 +00:00
|
|
|
"io/ioutil"
|
2014-01-06 21:21:48 +00:00
|
|
|
"net/http"
|
2015-01-15 09:17:35 +00:00
|
|
|
"net/http/httptest"
|
2019-04-16 16:00:15 +00:00
|
|
|
"net/url"
|
2014-01-06 21:21:48 +00:00
|
|
|
"os"
|
2015-07-30 19:02:37 +00:00
|
|
|
"reflect"
|
2019-09-02 15:38:29 +00:00
|
|
|
"sort"
|
2019-09-26 16:54:14 +00:00
|
|
|
"strconv"
|
2016-03-03 01:08:06 +00:00
|
|
|
"strings"
|
2014-01-06 21:21:48 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
2015-05-11 23:48:10 +00:00
|
|
|
|
2017-08-23 14:52:48 +00:00
|
|
|
"github.com/hashicorp/consul/acl"
|
2017-09-25 18:40:42 +00:00
|
|
|
"github.com/hashicorp/consul/agent/config"
|
2018-03-21 17:20:35 +00:00
|
|
|
"github.com/hashicorp/consul/agent/connect"
|
2018-10-17 20:20:35 +00:00
|
|
|
"github.com/hashicorp/consul/agent/debug"
|
2018-09-27 13:33:12 +00:00
|
|
|
"github.com/hashicorp/consul/agent/local"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2019-10-04 18:37:34 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2019-02-27 19:28:31 +00:00
|
|
|
tokenStore "github.com/hashicorp/consul/agent/token"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2018-06-14 12:52:48 +00:00
|
|
|
"github.com/hashicorp/consul/lib"
|
2016-11-16 21:45:26 +00:00
|
|
|
"github.com/hashicorp/consul/logger"
|
2019-12-06 19:01:34 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2019-04-16 16:00:15 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2016-06-07 20:24:51 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2019-02-20 16:23:38 +00:00
|
|
|
"github.com/hashicorp/go-uuid"
|
2015-05-11 23:48:10 +00:00
|
|
|
"github.com/hashicorp/serf/serf"
|
2018-03-08 18:54:05 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
2018-04-17 12:29:02 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2014-01-06 21:21:48 +00:00
|
|
|
)
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func makeReadOnlyAgentACL(t *testing.T, srv *HTTPServer) string {
|
2017-05-09 16:58:12 +00:00
|
|
|
args := map[string]interface{}{
|
2016-12-14 17:33:57 +00:00
|
|
|
"Name": "User Token",
|
|
|
|
"Type": "client",
|
2017-05-09 16:58:12 +00:00
|
|
|
"Rules": `agent "" { policy = "read" }`,
|
2016-12-14 17:33:57 +00:00
|
|
|
}
|
2017-05-09 16:58:12 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
|
2016-12-14 17:33:57 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := srv.ACLCreate(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
aclResp := obj.(aclCreateResponse)
|
|
|
|
return aclResp.ID
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_Services(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-01-21 01:06:44 +00:00
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-21 01:06:44 +00:00
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
2014-04-03 19:12:23 +00:00
|
|
|
Tags: []string{"master"},
|
2018-05-12 10:27:44 +00:00
|
|
|
Meta: map[string]string{
|
|
|
|
"foo": "bar",
|
|
|
|
},
|
|
|
|
Port: 5000,
|
2014-01-21 01:06:44 +00:00
|
|
|
}
|
2018-04-20 13:24:24 +00:00
|
|
|
require.NoError(t, a.State.AddService(srv1, ""))
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
2014-01-21 01:06:44 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
2018-04-20 13:24:24 +00:00
|
|
|
val := obj.(map[string]*api.AgentService)
|
|
|
|
assert.Lenf(t, val, 1, "bad services: %v", obj)
|
|
|
|
assert.Equal(t, 5000, val["mysql"].Port)
|
2018-05-12 10:27:44 +00:00
|
|
|
assert.Equal(t, srv1.Meta, val["mysql"].Meta)
|
2014-01-21 01:06:44 +00:00
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestAgent_ServicesFiltered(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
|
|
|
Tags: []string{"master"},
|
|
|
|
Meta: map[string]string{
|
|
|
|
"foo": "bar",
|
|
|
|
},
|
|
|
|
Port: 5000,
|
|
|
|
}
|
|
|
|
require.NoError(t, a.State.AddService(srv1, ""))
|
|
|
|
|
|
|
|
// Add another service
|
|
|
|
srv2 := &structs.NodeService{
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Tags: []string{"kv"},
|
|
|
|
Meta: map[string]string{
|
|
|
|
"foo": "bar",
|
|
|
|
},
|
|
|
|
Port: 1234,
|
|
|
|
}
|
|
|
|
require.NoError(t, a.State.AddService(srv2, ""))
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("foo in Meta"), nil)
|
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
val := obj.(map[string]*api.AgentService)
|
|
|
|
require.Len(t, val, 2)
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("kv in Tags"), nil)
|
|
|
|
obj, err = a.srv.AgentServices(nil, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
val = obj.(map[string]*api.AgentService)
|
|
|
|
require.Len(t, val, 1)
|
|
|
|
}
|
|
|
|
|
2018-03-12 20:05:06 +00:00
|
|
|
// This tests that the agent services endpoint (/v1/agent/services) returns
|
|
|
|
// Connect proxies.
|
2018-04-20 13:24:24 +00:00
|
|
|
func TestAgent_Services_ExternalConnectProxy(t *testing.T) {
|
2018-03-08 18:54:05 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
assert := assert.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2018-03-08 18:54:05 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-03-08 18:54:05 +00:00
|
|
|
srv1 := &structs.NodeService{
|
2018-09-12 16:07:47 +00:00
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "db-proxy",
|
|
|
|
Service: "db-proxy",
|
|
|
|
Port: 5000,
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "db",
|
|
|
|
Upstreams: structs.TestUpstreams(t),
|
|
|
|
},
|
2014-01-21 01:06:44 +00:00
|
|
|
}
|
2018-03-08 18:54:05 +00:00
|
|
|
a.State.AddService(srv1, "")
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
|
|
|
assert.Nil(err)
|
2018-04-20 13:24:24 +00:00
|
|
|
val := obj.(map[string]*api.AgentService)
|
2018-03-08 18:54:05 +00:00
|
|
|
assert.Len(val, 1)
|
2018-03-11 16:31:31 +00:00
|
|
|
actual := val["db-proxy"]
|
2018-04-20 13:24:24 +00:00
|
|
|
assert.Equal(api.ServiceKindConnectProxy, actual.Kind)
|
2018-09-12 16:07:47 +00:00
|
|
|
assert.Equal(srv1.Proxy.ToAPI(), actual.Proxy)
|
2014-01-21 01:06:44 +00:00
|
|
|
}
|
|
|
|
|
2018-09-27 13:33:12 +00:00
|
|
|
// Thie tests that a sidecar-registered service is returned as expected.
|
|
|
|
func TestAgent_Services_Sidecar(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
require := require.New(t)
|
|
|
|
assert := assert.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2018-09-27 13:33:12 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "db-sidecar-proxy",
|
|
|
|
Service: "db-sidecar-proxy",
|
|
|
|
Port: 5000,
|
|
|
|
// Set this internal state that we expect sidecar registrations to have.
|
|
|
|
LocallyRegisteredAsSidecar: true,
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "db",
|
|
|
|
Upstreams: structs.TestUpstreams(t),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
a.State.AddService(srv1, "")
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
|
|
|
require.NoError(err)
|
|
|
|
val := obj.(map[string]*api.AgentService)
|
|
|
|
assert.Len(val, 1)
|
|
|
|
actual := val["db-sidecar-proxy"]
|
|
|
|
require.NotNil(actual)
|
|
|
|
assert.Equal(api.ServiceKindConnectProxy, actual.Kind)
|
|
|
|
assert.Equal(srv1.Proxy.ToAPI(), actual.Proxy)
|
|
|
|
|
|
|
|
// Sanity check that LocalRegisteredAsSidecar is not in the output (assuming
|
2018-11-02 17:00:39 +00:00
|
|
|
// JSON encoding). Right now this is not the case because the services
|
2018-09-27 13:33:12 +00:00
|
|
|
// endpoint happens to use the api struct which doesn't include that field,
|
|
|
|
// but this test serves as a regression test incase we change the endpoint to
|
|
|
|
// return the internal struct later and accidentally expose some "internal"
|
|
|
|
// state.
|
|
|
|
output, err := json.Marshal(obj)
|
|
|
|
require.NoError(err)
|
|
|
|
assert.NotContains(string(output), "LocallyRegisteredAsSidecar")
|
|
|
|
assert.NotContains(string(output), "locally_registered_as_sidecar")
|
|
|
|
}
|
|
|
|
|
2019-06-18 00:52:01 +00:00
|
|
|
// Thie tests that a mesh gateway service is returned as expected.
|
|
|
|
func TestAgent_Services_MeshGateway(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindMeshGateway,
|
|
|
|
ID: "mg-dc1-01",
|
|
|
|
Service: "mg-dc1",
|
|
|
|
Port: 8443,
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
Config: map[string]interface{}{
|
|
|
|
"foo": "bar",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
a.State.AddService(srv1, "")
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
val := obj.(map[string]*api.AgentService)
|
|
|
|
require.Len(t, val, 1)
|
|
|
|
actual := val["mg-dc1-01"]
|
|
|
|
require.NotNil(t, actual)
|
|
|
|
require.Equal(t, api.ServiceKindMeshGateway, actual.Kind)
|
|
|
|
require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy)
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_Services_ACLFilter(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-07-14 05:33:47 +00:00
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
|
|
|
Tags: []string{"master"},
|
|
|
|
Port: 5000,
|
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.AddService(srv1, "")
|
2017-07-14 05:33:47 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
2016-12-14 22:16:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
2018-04-20 13:24:24 +00:00
|
|
|
val := obj.(map[string]*api.AgentService)
|
2016-12-14 22:16:46 +00:00
|
|
|
if len(val) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
2016-12-14 22:16:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
2018-04-20 13:24:24 +00:00
|
|
|
val := obj.(map[string]*api.AgentService)
|
2016-12-14 22:16:46 +00:00
|
|
|
if len(val) != 1 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2018-09-27 14:00:51 +00:00
|
|
|
// TestAgent_Service exercises the /v1/agent/service/:id endpoint: simple
// fetches for proxy and non-proxy services, hash-based blocking queries
// (timeout, stale hash, change detection, spurious wakeups, config reload),
// and ACL enforcement. The blocking cases rely on real wall-clock waits and
// on the exact statement ordering below, so the timing assertions are
// deliberately coarse.
func TestAgent_Service(t *testing.T) {
	t.Parallel()

	// The agent config pre-registers a plain "web" service with a WAN tagged
	// address; the sidecar proxy is registered via the API inside each subtest.
	a := NewTestAgent(t, t.Name(), TestACLConfig()+`
	services {
		name = "web"
		port = 8181
		tagged_addresses {
			wan {
				address = "198.18.0.1"
				port = 1818
			}
		}
	}
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	proxy := structs.TestConnectProxyConfig(t)
	proxy.DestinationServiceID = "web1"

	// Define a valid local sidecar proxy service
	sidecarProxy := &structs.ServiceDefinition{
		Kind: structs.ServiceKindConnectProxy,
		Name: "web-sidecar-proxy",
		Check: structs.CheckType{
			TCP:      "127.0.0.1:8000",
			Interval: 10 * time.Second,
		},
		Port:  8000,
		Proxy: &proxy,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}

	// Define an updated version. Be careful to copy it.
	updatedProxy := *sidecarProxy
	updatedProxy.Port = 9999

	// Mangle the proxy config/upstreams into the expected form with defaults
	// and API struct types.
	expectProxy := proxy
	expectProxy.Upstreams =
		structs.TestAddDefaultsToUpstreams(t, sidecarProxy.Proxy.Upstreams)

	// NOTE: ContentHash values below are hard-coded snapshots of the hash the
	// endpoint computes; they must be updated whenever the hashed fields or
	// hashing scheme change.
	expectedResponse := &api.AgentService{
		Kind:        api.ServiceKindConnectProxy,
		ID:          "web-sidecar-proxy",
		Service:     "web-sidecar-proxy",
		Port:        8000,
		Proxy:       expectProxy.ToAPI(),
		ContentHash: "4c7d5f8d3748be6d",
		Weights: api.AgentWeights{
			Passing: 1,
			Warning: 1,
		},
		Meta: map[string]string{},
		Tags: []string{},
	}

	// Copy and modify
	updatedResponse := *expectedResponse
	updatedResponse.Port = 9999
	updatedResponse.ContentHash = "713435ba1f5badcf"

	// Simple response for non-proxy service registered in TestAgent config
	expectWebResponse := &api.AgentService{
		ID:          "web",
		Service:     "web",
		Port:        8181,
		ContentHash: "6c247f8ffa5d1fb2",
		Weights: api.AgentWeights{
			Passing: 1,
			Warning: 1,
		},
		TaggedAddresses: map[string]api.ServiceAddress{
			"wan": api.ServiceAddress{
				Address: "198.18.0.1",
				Port:    1818,
			},
		},
		Meta: map[string]string{},
		Tags: []string{},
	}

	tests := []struct {
		name       string          // subtest name
		tokenRules string          // if non-empty, a token with these rules is created and used
		url        string          // request URL including any hash/wait params
		updateFunc func()          // optional concurrent mutation run while the request blocks
		wantWait   time.Duration   // minimum time the request is expected to block
		wantCode   int             // expected HTTP status (0 = don't check)
		wantErr    string          // substring expected in the returned error (case-insensitive)
		wantResp   *api.AgentService // expected response body (nil = expect nil)
	}{
		{
			name:     "simple fetch - proxy",
			url:      "/v1/agent/service/web-sidecar-proxy",
			wantCode: 200,
			wantResp: expectedResponse,
		},
		{
			name:     "simple fetch - non-proxy",
			url:      "/v1/agent/service/web",
			wantCode: 200,
			wantResp: expectWebResponse,
		},
		{
			name:     "blocking fetch timeout, no change",
			url:      "/v1/agent/service/web-sidecar-proxy?hash=" + expectedResponse.ContentHash + "&wait=100ms",
			wantWait: 100 * time.Millisecond,
			wantCode: 200,
			wantResp: expectedResponse,
		},
		{
			name:     "blocking fetch old hash should return immediately",
			url:      "/v1/agent/service/web-sidecar-proxy?hash=123456789abcd&wait=10m",
			wantCode: 200,
			wantResp: expectedResponse,
		},
		{
			name: "blocking fetch returns change",
			url:  "/v1/agent/service/web-sidecar-proxy?hash=" + expectedResponse.ContentHash,
			updateFunc: func() {
				time.Sleep(100 * time.Millisecond)
				// Re-register with new proxy config, make sure we copy the struct so we
				// don't alter it and affect later test cases.
				req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(updatedProxy))
				resp := httptest.NewRecorder()
				_, err := a.srv.AgentRegisterService(resp, req)
				require.NoError(t, err)
				require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String())
			},
			wantWait: 100 * time.Millisecond,
			wantCode: 200,
			wantResp: &updatedResponse,
		},
		{
			// This test exercises a case that caused a busy loop to eat CPU for the
			// entire duration of the blocking query. If a service gets re-registered
			// with same proxy config then the old proxy config chan is closed causing
			// blocked watchset.Watch to return false indicating a change. But since
			// the hash is the same when the blocking fn is re-called we should just
			// keep blocking on the next iteration. The bug hit was that the WatchSet
			// ws was not being reset in the loop and so when you try to `Watch` it
			// the second time it just returns immediately making the blocking loop
			// into a busy-poll!
			//
			// This test though doesn't catch that because busy poll still has the
			// correct external behavior. I don't want to instrument the loop to
			// assert it's not executing too fast here as I can't think of a clean way
			// and the issue is fixed now so this test doesn't actually catch the
			// error, but does provide an easy way to verify the behavior by hand:
			// 1. Make this test fail e.g. change wantErr to true
			// 2. Add a log.Println or similar into the blocking loop/function
			// 3. See whether it's called just once or many times in a tight loop.
			name: "blocking fetch interrupted with no change (same hash)",
			url:  "/v1/agent/service/web-sidecar-proxy?wait=200ms&hash=" + expectedResponse.ContentHash,
			updateFunc: func() {
				time.Sleep(100 * time.Millisecond)
				// Re-register with _same_ proxy config
				req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy))
				resp := httptest.NewRecorder()
				_, err := a.srv.AgentRegisterService(resp, req)
				require.NoError(t, err)
				require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String())
			},
			wantWait: 200 * time.Millisecond,
			wantCode: 200,
			wantResp: expectedResponse,
		},
		{
			// When we reload config, the agent pauses Anti-entropy, then clears all
			// services (which causes their watch chans to be closed) before loading
			// state from config/snapshot again). If we do that naively then we don't
			// just get a spurious wakeup on the watch if the service didn't change,
			// but we get it wakeup and then race with the reload and probably see no
			// services and return a 404 error which is gross. This test exercises
			// that - even though the registrations were from API not config, they are
			// persisted and cleared/reloaded from snapshot which has same effect.
			//
			// The fix for this test is to allow the same mechanism that pauses
			// Anti-entropy during reload to also pause the hash blocking loop so we
			// don't resume until the state is reloaded and we get a chance to see if
			// it actually changed or not.
			name: "blocking fetch interrupted by reload shouldn't 404 - no change",
			url:  "/v1/agent/service/web-sidecar-proxy?wait=200ms&hash=" + expectedResponse.ContentHash,
			updateFunc: func() {
				time.Sleep(100 * time.Millisecond)
				// Reload
				require.NoError(t, a.ReloadConfig(a.Config))
			},
			// Should eventually timeout since there is no actual change
			wantWait: 200 * time.Millisecond,
			wantCode: 200,
			wantResp: expectedResponse,
		},
		{
			// As above but test actually altering the service with the config reload.
			// This simulates the API registration being overridden by a different one
			// on disk during reload.
			name: "blocking fetch interrupted by reload shouldn't 404 - changes",
			url:  "/v1/agent/service/web-sidecar-proxy?wait=10m&hash=" + expectedResponse.ContentHash,
			updateFunc: func() {
				time.Sleep(100 * time.Millisecond)
				// Reload
				newConfig := *a.Config
				newConfig.Services = append(newConfig.Services, &updatedProxy)
				require.NoError(t, a.ReloadConfig(&newConfig))
			},
			wantWait: 100 * time.Millisecond,
			wantCode: 200,
			wantResp: &updatedResponse,
		},
		{
			name:     "err: non-existent proxy",
			url:      "/v1/agent/service/nope",
			wantCode: 404,
		},
		{
			name: "err: bad ACL for service",
			url:  "/v1/agent/service/web-sidecar-proxy",
			// Limited token doesn't grant read to the service
			tokenRules: `
			key "" {
				policy = "read"
			}
			`,
			// Note that because we return ErrPermissionDenied and handle writing
			// status at a higher level helper this actually gets a 200 in this test
			// case so just assert that it was an error.
			wantErr: "Permission denied",
		},
		{
			name: "good ACL for service",
			url:  "/v1/agent/service/web-sidecar-proxy",
			// Limited token doesn't grant read to the service
			tokenRules: `
			service "web-sidecar-proxy" {
				policy = "read"
			}
			`,
			wantCode: 200,
			wantResp: expectedResponse,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			// Register the basic service to ensure it's in a known state to start.
			{
				req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy))
				resp := httptest.NewRecorder()
				_, err := a.srv.AgentRegisterService(resp, req)
				require.NoError(err)
				require.Equal(200, resp.Code, "body: %s", resp.Body.String())
			}

			req, _ := http.NewRequest("GET", tt.url, nil)

			// Inject the root token for tests that don't care about ACL
			var token = "root"
			if tt.tokenRules != "" {
				// Create new token and use that.
				token = testCreateToken(t, a, tt.tokenRules)
			}
			req.Header.Set("X-Consul-Token", token)
			resp := httptest.NewRecorder()
			if tt.updateFunc != nil {
				// Run the mutation concurrently so it can interrupt the blocking query.
				go tt.updateFunc()
			}
			start := time.Now()
			obj, err := a.srv.AgentService(resp, req)
			elapsed := time.Since(start)

			if tt.wantErr != "" {
				require.Error(err)
				require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr))
			} else {
				require.NoError(err)
			}
			if tt.wantCode != 0 {
				require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String())
			}
			if tt.wantWait != 0 {
				assert.True(elapsed >= tt.wantWait, "should have waited at least %s, "+
					"took %s", tt.wantWait, elapsed)
			} else {
				// Non-blocking requests should return almost immediately.
				assert.True(elapsed < 10*time.Millisecond, "should not have waited, "+
					"took %s", elapsed)
			}

			if tt.wantResp != nil {
				assert.Equal(tt.wantResp, obj)
				assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash"))
			} else {
				// Janky but Equal doesn't help here because nil !=
				// *api.AgentService((*api.AgentService)(nil))
				assert.Nil(obj)
			}
		})
	}
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Checks(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-01-21 01:06:44 +00:00
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-21 01:06:44 +00:00
|
|
|
chk1 := &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-01-21 01:06:44 +00:00
|
|
|
CheckID: "mysql",
|
|
|
|
Name: "mysql",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2014-01-21 01:06:44 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.AddCheck(chk1, "")
|
2014-01-21 01:06:44 +00:00
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/checks", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentChecks(nil, req)
|
2014-01-21 01:06:44 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
2016-06-07 20:24:51 +00:00
|
|
|
val := obj.(map[types.CheckID]*structs.HealthCheck)
|
2014-01-21 01:06:44 +00:00
|
|
|
if len(val) != 1 {
|
|
|
|
t.Fatalf("bad checks: %v", obj)
|
|
|
|
}
|
2017-04-19 23:00:11 +00:00
|
|
|
if val["mysql"].Status != api.HealthPassing {
|
2014-01-21 01:06:44 +00:00
|
|
|
t.Fatalf("bad check: %v", obj)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestAgent_ChecksWithFilter(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
chk1 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql",
|
|
|
|
Name: "mysql",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
a.State.AddCheck(chk1, "")
|
|
|
|
|
|
|
|
chk2 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "redis",
|
|
|
|
Name: "redis",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
a.State.AddCheck(chk2, "")
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape("Name == `redis`"), nil)
|
|
|
|
obj, err := a.srv.AgentChecks(nil, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
val := obj.(map[types.CheckID]*structs.HealthCheck)
|
|
|
|
require.Len(t, val, 1)
|
|
|
|
_, ok := val["redis"]
|
|
|
|
require.True(t, ok)
|
|
|
|
}
|
|
|
|
|
2019-01-07 14:39:23 +00:00
|
|
|
// TestAgent_HealthServiceByID exercises /v1/agent/health/service/id/:id in
// both text and JSON formats, verifying that the endpoint aggregates the
// checks for a service (and the node's own checks) into a single status and
// the corresponding HTTP status code: passing=200, warning=429,
// critical=503, maintenance=503, unknown service=404.
func TestAgent_HealthServiceByID(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register three services; each will carry checks of a different worst
	// status below.
	service := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	service = &structs.NodeService{
		ID:      "mysql2",
		Service: "mysql2",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	service = &structs.NodeService{
		ID:      "mysql3",
		Service: "mysql3",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// mysql: passing check.
	chk1 := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "mysql",
		Name:      "mysql",
		ServiceID: "mysql",
		Status:    api.HealthPassing,
	}
	err := a.State.AddCheck(chk1, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// NOTE(review): chk2 has the same CheckID as chk1, so this add presumably
	// overwrites chk1 with an identical check rather than adding a second one
	// — confirm that is the intent.
	chk2 := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "mysql",
		Name:      "mysql",
		ServiceID: "mysql",
		Status:    api.HealthPassing,
	}
	err = a.State.AddCheck(chk2, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// mysql2: first passing...
	chk3 := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "mysql2",
		Name:      "mysql2",
		ServiceID: "mysql2",
		Status:    api.HealthPassing,
	}
	err = a.State.AddCheck(chk3, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// ...then re-added under the same CheckID as warning, so mysql2's
	// aggregated status becomes "warning".
	chk4 := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "mysql2",
		Name:      "mysql2",
		ServiceID: "mysql2",
		Status:    api.HealthWarning,
	}
	err = a.State.AddCheck(chk4, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// mysql3: first maintenance...
	chk5 := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "mysql3",
		Name:      "mysql3",
		ServiceID: "mysql3",
		Status:    api.HealthMaint,
	}
	err = a.State.AddCheck(chk5, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// ...then re-added under the same CheckID as critical, so mysql3's
	// aggregated status becomes "critical".
	chk6 := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "mysql3",
		Name:      "mysql3",
		ServiceID: "mysql3",
		Status:    api.HealthCritical,
	}
	err = a.State.AddCheck(chk6, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	// eval hits the endpoint for url in both output formats and asserts the
	// returned status code and aggregated-status payload.
	eval := func(t *testing.T, url string, expectedCode int, expected string) {
		t.Helper()
		t.Run("format=text", func(t *testing.T) {
			t.Helper()
			req, _ := http.NewRequest("GET", url+"?format=text", nil)
			resp := httptest.NewRecorder()
			data, err := a.srv.AgentHealthServiceByID(resp, req)
			// The endpoint reports its status via a CodeWithPayloadError even
			// on success.
			codeWithPayload, ok := err.(CodeWithPayloadError)
			if !ok {
				t.Fatalf("Err: %v", err)
			}
			if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
				t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload)
			}
			body, ok := data.(string)
			if !ok {
				t.Fatalf("Cannot get result as string in := %#v", data)
			}
			if got, want := body, expected; got != want {
				t.Fatalf("got body %q want %q", got, want)
			}
			if got, want := codeWithPayload.Reason, expected; got != want {
				t.Fatalf("got body %q want %q", got, want)
			}
		})
		t.Run("format=json", func(t *testing.T) {
			req, _ := http.NewRequest("GET", url, nil)
			resp := httptest.NewRecorder()
			dataRaw, err := a.srv.AgentHealthServiceByID(resp, req)
			codeWithPayload, ok := err.(CodeWithPayloadError)
			if !ok {
				t.Fatalf("Err: %v", err)
			}
			if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
				t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload)
			}
			data, ok := dataRaw.(*api.AgentServiceChecksInfo)
			if !ok {
				t.Fatalf("Cannot connvert result to JSON: %#v", dataRaw)
			}
			// A 404 carries no service payload, so only compare the aggregated
			// status for found services.
			if codeWithPayload.StatusCode != http.StatusNotFound {
				if data != nil && data.AggregatedStatus != expected {
					t.Fatalf("got body %v want %v", data, expected)
				}
			}
		})
	}

	t.Run("passing checks", func(t *testing.T) {
		eval(t, "/v1/agent/health/service/id/mysql", http.StatusOK, "passing")
	})
	t.Run("warning checks", func(t *testing.T) {
		eval(t, "/v1/agent/health/service/id/mysql2", http.StatusTooManyRequests, "warning")
	})
	t.Run("critical checks", func(t *testing.T) {
		eval(t, "/v1/agent/health/service/id/mysql3", http.StatusServiceUnavailable, "critical")
	})
	t.Run("unknown serviceid", func(t *testing.T) {
		eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, "ServiceId mysql1 not found")
	})

	// A critical node-level check (no ServiceID) drags every service's
	// aggregated status down to critical.
	nodeCheck := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "diskCheck",
		Name:    "diskCheck",
		Status:  api.HealthCritical,
	}
	err = a.State.AddCheck(nodeCheck, "")

	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	t.Run("critical check on node", func(t *testing.T) {
		eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "critical")
	})

	// Swap the node check for node maintenance mode, which similarly overrides
	// service status (reported as "maintenance").
	err = a.State.RemoveCheck(nodeCheck.CheckID)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	nodeCheck = &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "_node_maintenance",
		Name:    "_node_maintenance",
		Status:  api.HealthMaint,
	}
	err = a.State.AddCheck(nodeCheck, "")
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	t.Run("maintenance check on node", func(t *testing.T) {
		eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "maintenance")
	})
}
|
|
|
|
|
|
|
|
func TestAgent_HealthServiceByName(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2019-01-07 14:39:23 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
service := &structs.NodeService{
|
|
|
|
ID: "mysql1",
|
|
|
|
Service: "mysql-pool-r",
|
|
|
|
}
|
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
service = &structs.NodeService{
|
|
|
|
ID: "mysql2",
|
|
|
|
Service: "mysql-pool-r",
|
|
|
|
}
|
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
service = &structs.NodeService{
|
|
|
|
ID: "mysql3",
|
|
|
|
Service: "mysql-pool-rw",
|
|
|
|
}
|
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
service = &structs.NodeService{
|
|
|
|
ID: "mysql4",
|
|
|
|
Service: "mysql-pool-rw",
|
|
|
|
}
|
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
service = &structs.NodeService{
|
|
|
|
ID: "httpd1",
|
|
|
|
Service: "httpd",
|
|
|
|
}
|
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
service = &structs.NodeService{
|
|
|
|
ID: "httpd2",
|
|
|
|
Service: "httpd",
|
|
|
|
}
|
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk1 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql1",
|
|
|
|
Name: "mysql1",
|
|
|
|
ServiceID: "mysql1",
|
|
|
|
ServiceName: "mysql-pool-r",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
err := a.State.AddCheck(chk1, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk2 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql1",
|
|
|
|
Name: "mysql1",
|
|
|
|
ServiceID: "mysql1",
|
|
|
|
ServiceName: "mysql-pool-r",
|
|
|
|
Status: api.HealthWarning,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk2, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk3 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql2",
|
|
|
|
Name: "mysql2",
|
|
|
|
ServiceID: "mysql2",
|
|
|
|
ServiceName: "mysql-pool-r",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk3, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk4 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql2",
|
|
|
|
Name: "mysql2",
|
|
|
|
ServiceID: "mysql2",
|
|
|
|
ServiceName: "mysql-pool-r",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk4, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk5 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql3",
|
|
|
|
Name: "mysql3",
|
|
|
|
ServiceID: "mysql3",
|
|
|
|
ServiceName: "mysql-pool-rw",
|
|
|
|
Status: api.HealthWarning,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk5, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk6 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "mysql4",
|
|
|
|
Name: "mysql4",
|
|
|
|
ServiceID: "mysql4",
|
|
|
|
ServiceName: "mysql-pool-rw",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk6, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk7 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "httpd1",
|
|
|
|
Name: "httpd1",
|
|
|
|
ServiceID: "httpd1",
|
|
|
|
ServiceName: "httpd",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk7, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk8 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "httpd2",
|
|
|
|
Name: "httpd2",
|
|
|
|
ServiceID: "httpd2",
|
|
|
|
ServiceName: "httpd",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(chk8, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
eval := func(t *testing.T, url string, expectedCode int, expected string) {
|
|
|
|
t.Helper()
|
|
|
|
t.Run("format=text", func(t *testing.T) {
|
|
|
|
t.Helper()
|
|
|
|
req, _ := http.NewRequest("GET", url+"?format=text", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
data, err := a.srv.AgentHealthServiceByName(resp, req)
|
|
|
|
codeWithPayload, ok := err.(CodeWithPayloadError)
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
|
|
|
|
t.Fatalf("returned bad status: %d. Body: %q", resp.Code, resp.Body.String())
|
|
|
|
}
|
|
|
|
if got, want := codeWithPayload.Reason, expected; got != want {
|
|
|
|
t.Fatalf("got reason %q want %q", got, want)
|
|
|
|
}
|
|
|
|
if got, want := data, expected; got != want {
|
|
|
|
t.Fatalf("got body %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
t.Run("format=json", func(t *testing.T) {
|
|
|
|
t.Helper()
|
|
|
|
req, _ := http.NewRequest("GET", url, nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
dataRaw, err := a.srv.AgentHealthServiceByName(resp, req)
|
|
|
|
codeWithPayload, ok := err.(CodeWithPayloadError)
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
data, ok := dataRaw.([]api.AgentServiceChecksInfo)
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("Cannot connvert result to JSON")
|
|
|
|
}
|
|
|
|
if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
|
|
|
|
t.Fatalf("returned bad code: %d. Body: %#v", resp.Code, data)
|
|
|
|
}
|
|
|
|
if resp.Code != http.StatusNotFound {
|
|
|
|
if codeWithPayload.Reason != expected {
|
|
|
|
t.Fatalf("got wrong status %#v want %#v", codeWithPayload, expected)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Run("passing checks", func(t *testing.T) {
|
|
|
|
eval(t, "/v1/agent/health/service/name/httpd", http.StatusOK, "passing")
|
|
|
|
})
|
|
|
|
t.Run("warning checks", func(t *testing.T) {
|
|
|
|
eval(t, "/v1/agent/health/service/name/mysql-pool-rw", http.StatusTooManyRequests, "warning")
|
|
|
|
})
|
|
|
|
t.Run("critical checks", func(t *testing.T) {
|
|
|
|
eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical")
|
|
|
|
})
|
|
|
|
t.Run("unknown serviceName", func(t *testing.T) {
|
|
|
|
eval(t, "/v1/agent/health/service/name/test", http.StatusNotFound, "ServiceName test Not Found")
|
|
|
|
})
|
|
|
|
nodeCheck := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "diskCheck",
|
|
|
|
Name: "diskCheck",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(nodeCheck, "")
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
t.Run("critical check on node", func(t *testing.T) {
|
|
|
|
eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical")
|
|
|
|
})
|
|
|
|
|
|
|
|
err = a.State.RemoveCheck(nodeCheck.CheckID)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
nodeCheck = &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "_node_maintenance",
|
|
|
|
Name: "_node_maintenance",
|
|
|
|
Status: api.HealthMaint,
|
|
|
|
}
|
|
|
|
err = a.State.AddCheck(nodeCheck, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
t.Run("maintenance check on node", func(t *testing.T) {
|
|
|
|
eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "maintenance")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_Checks_ACLFilter(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
chk1 := &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2016-12-14 22:16:46 +00:00
|
|
|
CheckID: "mysql",
|
|
|
|
Name: "mysql",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.AddCheck(chk1, "")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/checks", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentChecks(nil, req)
|
2016-12-14 22:16:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
val := obj.(map[types.CheckID]*structs.HealthCheck)
|
|
|
|
if len(val) != 0 {
|
|
|
|
t.Fatalf("bad checks: %v", obj)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/checks?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentChecks(nil, req)
|
2016-12-14 22:16:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
val := obj.(map[types.CheckID]*structs.HealthCheck)
|
|
|
|
if len(val) != 1 {
|
|
|
|
t.Fatalf("bad checks: %v", obj)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Self(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2017-09-25 18:40:42 +00:00
|
|
|
node_meta {
|
|
|
|
somekey = "somevalue"
|
|
|
|
}
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-05-25 23:59:48 +00:00
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/self", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentSelf(nil, req)
|
2014-05-25 23:59:48 +00:00
|
|
|
if err != nil {
|
2015-07-30 19:02:37 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
2014-05-25 23:59:48 +00:00
|
|
|
}
|
|
|
|
|
2017-04-21 00:46:29 +00:00
|
|
|
val := obj.(Self)
|
2017-09-25 18:40:42 +00:00
|
|
|
if int(val.Member.Port) != a.Config.SerfPortLAN {
|
2014-05-27 22:09:28 +00:00
|
|
|
t.Fatalf("incorrect port: %v", obj)
|
|
|
|
}
|
|
|
|
|
2017-10-04 17:43:17 +00:00
|
|
|
if val.DebugConfig["SerfPortLAN"].(int) != a.Config.SerfPortLAN {
|
2014-05-25 23:59:48 +00:00
|
|
|
t.Fatalf("incorrect port: %v", obj)
|
|
|
|
}
|
2015-07-30 19:02:37 +00:00
|
|
|
|
2017-08-14 14:36:07 +00:00
|
|
|
cs, err := a.GetLANCoordinate()
|
2015-07-30 19:02:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
if c := cs[a.config.SegmentName]; !reflect.DeepEqual(c, val.Coord) {
|
2015-07-30 19:02:37 +00:00
|
|
|
t.Fatalf("coordinates are not equal: %v != %v", c, val.Coord)
|
|
|
|
}
|
2017-08-14 14:36:07 +00:00
|
|
|
delete(val.Meta, structs.MetaSegmentKey) // Added later, not in config.
|
2017-09-25 18:40:42 +00:00
|
|
|
if !reflect.DeepEqual(a.config.NodeMeta, val.Meta) {
|
|
|
|
t.Fatalf("meta fields are not equal: %v != %v", a.config.NodeMeta, val.Meta)
|
2016-12-13 06:09:35 +00:00
|
|
|
}
|
2014-05-25 23:59:48 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Self_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/self", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentSelf(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("agent master token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/self?token=towel", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentSelf(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("read-only token", func(t *testing.T) {
|
2017-05-21 07:11:09 +00:00
|
|
|
ro := makeReadOnlyAgentACL(t, a.srv)
|
2017-05-09 17:46:11 +00:00
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/self?token=%s", ro), nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentSelf(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
}
|
|
|
|
|
2017-08-08 20:05:38 +00:00
|
|
|
func TestAgent_Metrics_ACLDeny(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-08-08 20:05:38 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-08-08 20:05:38 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/metrics", nil)
|
2017-11-07 05:50:04 +00:00
|
|
|
if _, err := a.srv.AgentMetrics(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-08-08 20:05:38 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("agent master token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/metrics?token=towel", nil)
|
2017-11-07 05:50:04 +00:00
|
|
|
if _, err := a.srv.AgentMetrics(nil, req); err != nil {
|
2017-08-08 20:05:38 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("read-only token", func(t *testing.T) {
|
|
|
|
ro := makeReadOnlyAgentACL(t, a.srv)
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/metrics?token=%s", ro), nil)
|
2017-11-07 05:50:04 +00:00
|
|
|
if _, err := a.srv.AgentMetrics(nil, req); err != nil {
|
2017-08-08 20:05:38 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Reload(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2018-08-06 23:46:09 +00:00
|
|
|
dc1 := "dc1"
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2017-09-25 18:40:42 +00:00
|
|
|
acl_enforce_version_8 = false
|
|
|
|
services = [
|
|
|
|
{
|
|
|
|
name = "redis"
|
|
|
|
}
|
|
|
|
]
|
|
|
|
watches = [
|
|
|
|
{
|
2018-08-06 23:46:09 +00:00
|
|
|
datacenter = "`+dc1+`"
|
2017-09-25 18:40:42 +00:00
|
|
|
type = "key"
|
|
|
|
key = "test"
|
|
|
|
handler = "true"
|
|
|
|
}
|
|
|
|
]
|
2018-04-08 10:57:01 +00:00
|
|
|
limits = {
|
|
|
|
rpc_rate=1
|
|
|
|
rpc_max_burst=100
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
`)
|
2017-06-03 19:22:47 +00:00
|
|
|
defer a.Shutdown()
|
2016-11-30 18:29:42 +00:00
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, dc1)
|
2017-08-28 12:17:13 +00:00
|
|
|
if a.State.Service("redis") == nil {
|
|
|
|
t.Fatal("missing redis service")
|
2016-11-30 18:29:42 +00:00
|
|
|
}
|
|
|
|
|
2019-12-06 19:01:34 +00:00
|
|
|
cfg2 := TestConfig(testutil.TestLogger(t), config.Source{
|
2017-09-25 18:40:42 +00:00
|
|
|
Name: "reload",
|
|
|
|
Format: "hcl",
|
|
|
|
Data: `
|
|
|
|
data_dir = "` + a.Config.DataDir + `"
|
|
|
|
node_id = "` + string(a.Config.NodeID) + `"
|
|
|
|
node_name = "` + a.Config.NodeName + `"
|
|
|
|
|
|
|
|
acl_enforce_version_8 = false
|
|
|
|
services = [
|
|
|
|
{
|
|
|
|
name = "redis-reloaded"
|
|
|
|
}
|
|
|
|
]
|
2018-04-08 10:57:01 +00:00
|
|
|
limits = {
|
|
|
|
rpc_rate=2
|
|
|
|
rpc_max_burst=200
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
`,
|
|
|
|
})
|
2016-11-30 18:29:42 +00:00
|
|
|
|
2017-06-24 19:52:41 +00:00
|
|
|
if err := a.ReloadConfig(cfg2); err != nil {
|
2017-06-03 19:22:47 +00:00
|
|
|
t.Fatalf("got error %v want nil", err)
|
2016-11-30 18:29:42 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
if a.State.Service("redis-reloaded") == nil {
|
|
|
|
t.Fatal("missing redis-reloaded service")
|
2016-11-30 18:29:42 +00:00
|
|
|
}
|
2017-06-24 19:52:41 +00:00
|
|
|
|
2018-04-08 10:57:01 +00:00
|
|
|
if a.config.RPCRateLimit != 2 {
|
|
|
|
t.Fatalf("RPC rate not set correctly. Got %v. Want 2", a.config.RPCRateLimit)
|
|
|
|
}
|
|
|
|
|
|
|
|
if a.config.RPCMaxBurst != 200 {
|
|
|
|
t.Fatalf("RPC max burst not set correctly. Got %v. Want 200", a.config.RPCMaxBurst)
|
|
|
|
}
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
for _, wp := range a.watchPlans {
|
2017-06-24 19:52:41 +00:00
|
|
|
if !wp.IsStopped() {
|
|
|
|
t.Fatalf("Reloading configs should stop watch plans of the previous configuration")
|
|
|
|
}
|
|
|
|
}
|
2016-11-30 18:29:42 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Reload_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/reload", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("read-only token", func(t *testing.T) {
|
2017-05-21 07:11:09 +00:00
|
|
|
ro := makeReadOnlyAgentACL(t, a.srv)
|
2017-05-09 17:46:11 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/reload?token=%s", ro), nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
|
|
|
// This proves we call the ACL function, and we've got the other reload
|
|
|
|
// test to prove we do the reload, which should be sufficient.
|
|
|
|
// The reload logic is a little complex to set up so isn't worth
|
|
|
|
// repeating again here.
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_Members(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-01-06 21:21:48 +00:00
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/members", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentMembers(nil, req)
|
2014-01-06 21:21:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
val := obj.([]serf.Member)
|
|
|
|
if len(val) == 0 {
|
|
|
|
t.Fatalf("bad members: %v", obj)
|
|
|
|
}
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
if int(val[0].Port) != a.Config.SerfPortLAN {
|
2014-01-06 21:21:48 +00:00
|
|
|
t.Fatalf("not lan: %v", obj)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Members_WAN(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-01-06 21:21:48 +00:00
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/members?wan=true", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentMembers(nil, req)
|
2014-01-06 21:21:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
val := obj.([]serf.Member)
|
|
|
|
if len(val) == 0 {
|
|
|
|
t.Fatalf("bad members: %v", obj)
|
|
|
|
}
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
if int(val[0].Port) != a.Config.SerfPortWAN {
|
2014-01-06 21:21:48 +00:00
|
|
|
t.Fatalf("not wan: %v", obj)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_Members_ACLFilter(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/members", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentMembers(nil, req)
|
2016-12-14 22:16:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
val := obj.([]serf.Member)
|
|
|
|
if len(val) != 0 {
|
|
|
|
t.Fatalf("bad members: %v", obj)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/members?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentMembers(nil, req)
|
2016-12-14 22:16:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
val := obj.([]serf.Member)
|
|
|
|
if len(val) != 1 {
|
|
|
|
t.Fatalf("bad members: %v", obj)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Join(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a1 := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a1.Shutdown()
|
2019-02-14 15:59:14 +00:00
|
|
|
a2 := NewTestAgent(t, t.Name(), "")
|
2014-01-06 21:21:48 +00:00
|
|
|
defer a2.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
|
|
|
testrpc.WaitForLeader(t, a2.RPC, "dc1")
|
2014-01-06 21:21:48 +00:00
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a1.srv.AgentJoin(nil, req)
|
2014-01-06 21:21:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("Err: %v", obj)
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:11:09 +00:00
|
|
|
if len(a1.LANMembers()) != 2 {
|
2014-01-06 21:21:48 +00:00
|
|
|
t.Fatalf("should have 2 members")
|
|
|
|
}
|
2016-10-25 20:46:54 +00:00
|
|
|
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(a2.LANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d LAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2014-01-06 21:21:48 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Join_WAN(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a1 := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a1.Shutdown()
|
2019-02-14 15:59:14 +00:00
|
|
|
a2 := NewTestAgent(t, t.Name(), "")
|
2014-01-06 21:21:48 +00:00
|
|
|
defer a2.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
|
|
|
testrpc.WaitForLeader(t, a2.RPC, "dc1")
|
2014-01-06 21:21:48 +00:00
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortWAN)
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?wan=true", addr), nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a1.srv.AgentJoin(nil, req)
|
2014-01-06 21:21:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("Err: %v", obj)
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:11:09 +00:00
|
|
|
if len(a1.WANMembers()) != 2 {
|
2016-10-25 20:46:54 +00:00
|
|
|
t.Fatalf("should have 2 members")
|
|
|
|
}
|
|
|
|
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(a2.WANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2014-01-06 21:21:48 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Join_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a1 := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a1.Shutdown()
|
2019-02-14 15:59:14 +00:00
|
|
|
a2 := NewTestAgent(t, t.Name(), "")
|
2016-12-14 17:33:57 +00:00
|
|
|
defer a2.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
|
|
|
testrpc.WaitForLeader(t, a2.RPC, "dc1")
|
2017-05-21 07:11:09 +00:00
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("agent master token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=towel", addr), nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
_, err := a1.srv.AgentJoin(nil, req)
|
2016-12-14 17:33:57 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("read-only token", func(t *testing.T) {
|
2017-05-21 07:11:09 +00:00
|
|
|
ro := makeReadOnlyAgentACL(t, a1.srv)
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=%s", addr, ro), nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
}
|
|
|
|
|
2017-06-21 04:43:55 +00:00
|
|
|
// mockNotifier records the last state string passed to Notify so tests can
// assert that the agent emitted the expected join notification.
type mockNotifier struct{ s string }

// Notify stores the notification state and always reports success.
func (n *mockNotifier) Notify(state string) error {
	n.s = state
	return nil
}
|
|
|
|
|
|
|
|
func TestAgent_JoinLANNotify(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a1 := NewTestAgent(t, t.Name(), "")
|
2017-06-21 04:43:55 +00:00
|
|
|
defer a1.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
2017-06-21 04:43:55 +00:00
|
|
|
|
2019-02-14 15:59:14 +00:00
|
|
|
a2 := NewTestAgent(t, t.Name(), `
|
2017-09-25 18:40:42 +00:00
|
|
|
server = false
|
|
|
|
bootstrap = false
|
|
|
|
`)
|
2017-06-21 04:43:55 +00:00
|
|
|
defer a2.Shutdown()
|
|
|
|
|
|
|
|
notif := &mockNotifier{}
|
|
|
|
a1.joinLANNotifier = notif
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
|
2017-06-21 04:43:55 +00:00
|
|
|
_, err := a1.JoinLAN([]string{addr})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if got, want := notif.s, "READY=1"; got != want {
|
|
|
|
t.Fatalf("got joinLAN notification %q want %q", got, want)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Leave(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a1 := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a1.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
2016-11-30 18:29:42 +00:00
|
|
|
|
2019-02-14 15:59:14 +00:00
|
|
|
a2 := NewTestAgent(t, t.Name(), `
|
2017-09-25 18:40:42 +00:00
|
|
|
server = false
|
|
|
|
bootstrap = false
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a2.Shutdown()
|
2016-11-30 18:29:42 +00:00
|
|
|
|
|
|
|
// Join first
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
|
2017-05-21 07:11:09 +00:00
|
|
|
_, err := a1.JoinLAN([]string{addr})
|
2016-11-30 18:29:42 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Graceful leave now
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a2.srv.AgentLeave(nil, req)
|
2016-11-30 18:29:42 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("Err: %v", obj)
|
|
|
|
}
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-21 07:11:09 +00:00
|
|
|
m := a1.LANMembers()
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := m[1].Status, serf.StatusLeft; got != want {
|
|
|
|
r.Fatalf("got status %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
2016-11-30 18:29:42 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Leave_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-10 07:08:39 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("read-only token", func(t *testing.T) {
|
2017-05-21 07:11:09 +00:00
|
|
|
ro := makeReadOnlyAgentACL(t, a.srv)
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/leave?token=%s", ro), nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2017-05-10 07:08:39 +00:00
|
|
|
|
|
|
|
// this sub-test will change the state so that there is no leader.
|
|
|
|
// it must therefore be the last one in this list.
|
|
|
|
t.Run("agent master token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/leave?token=towel", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentLeave(nil, req); err != nil {
|
2017-05-10 07:08:39 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_ForceLeave(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a1 := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a1.Shutdown()
|
2019-02-14 15:59:14 +00:00
|
|
|
a2 := NewTestAgent(t, t.Name(), "")
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
|
|
|
testrpc.WaitForLeader(t, a2.RPC, "dc1")
|
2014-01-06 21:21:48 +00:00
|
|
|
|
|
|
|
// Join first
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
|
2017-05-21 07:11:09 +00:00
|
|
|
_, err := a1.JoinLAN([]string{addr})
|
2014-01-06 21:21:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
// this test probably needs work
|
2014-01-06 21:21:48 +00:00
|
|
|
a2.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
// Wait for agent being marked as failed, so we wait for full shutdown of Agent
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
m := a1.LANMembers()
|
|
|
|
if got, want := m[1].Status, serf.StatusFailed; got != want {
|
|
|
|
r.Fatalf("got status %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
2014-01-06 21:21:48 +00:00
|
|
|
|
|
|
|
// Force leave now
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s", a2.Config.NodeName), nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a1.srv.AgentForceLeave(nil, req)
|
2014-01-06 21:21:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("Err: %v", obj)
|
|
|
|
}
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-21 07:11:09 +00:00
|
|
|
m := a1.LANMembers()
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := m[1].Status, serf.StatusLeft; got != want {
|
|
|
|
r.Fatalf("got status %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2014-01-06 21:21:48 +00:00
|
|
|
}
|
2014-01-30 23:51:15 +00:00
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_ForceLeave_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2019-12-02 19:06:15 +00:00
|
|
|
uri := fmt.Sprintf("/v1/agent/force-leave/%s", a.Config.NodeName)
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2019-12-02 19:06:15 +00:00
|
|
|
req, _ := http.NewRequest("PUT", uri, nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("agent master token", func(t *testing.T) {
|
2019-12-02 19:06:15 +00:00
|
|
|
req, _ := http.NewRequest("PUT", uri+"?token=towel", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentForceLeave(nil, req); err != nil {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("read-only token", func(t *testing.T) {
|
2017-05-21 07:11:09 +00:00
|
|
|
ro := makeReadOnlyAgentACL(t, a.srv)
|
2019-12-02 19:06:15 +00:00
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", ro), nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 17:33:57 +00:00
|
|
|
}
|
|
|
|
|
2019-10-04 21:10:02 +00:00
|
|
|
func TestAgent_ForceLeavePrune(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
a1 := NewTestAgent(t, t.Name()+"-a1", "")
|
|
|
|
defer a1.Shutdown()
|
|
|
|
a2 := NewTestAgent(t, t.Name()+"-a2", "")
|
|
|
|
testrpc.WaitForLeader(t, a1.RPC, "dc1")
|
|
|
|
testrpc.WaitForLeader(t, a2.RPC, "dc1")
|
|
|
|
|
|
|
|
// Join first
|
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
|
|
|
|
_, err := a1.JoinLAN([]string{addr})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// this test probably needs work
|
|
|
|
a2.Shutdown()
|
|
|
|
// Wait for agent being marked as failed, so we wait for full shutdown of Agent
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
m := a1.LANMembers()
|
|
|
|
for _, member := range m {
|
|
|
|
if member.Name == a2.Config.NodeName {
|
|
|
|
if member.Status != serf.StatusFailed {
|
|
|
|
r.Fatalf("got status %q want %q", member.Status, serf.StatusFailed)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Force leave now
|
|
|
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s?prune=true", a2.Config.NodeName), nil)
|
|
|
|
obj, err := a1.srv.AgentForceLeave(nil, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("Err: %v", obj)
|
|
|
|
}
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
m := len(a1.LANMembers())
|
|
|
|
if m != 1 {
|
|
|
|
r.Fatalf("want one member, got %v", m)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_RegisterCheck(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-30 23:51:15 +00:00
|
|
|
|
2017-06-15 16:46:06 +00:00
|
|
|
args := &structs.CheckDefinition{
|
2014-01-30 23:51:15 +00:00
|
|
|
Name: "test",
|
2017-05-15 19:49:13 +00:00
|
|
|
TTL: 15 * time.Second,
|
2014-01-30 23:51:15 +00:00
|
|
|
}
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentRegisterCheck(nil, req)
|
2014-01-30 23:51:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2016-06-07 20:24:51 +00:00
|
|
|
checkID := types.CheckID("test")
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[checkID]; !ok {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("missing test check")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, ok := a.checkTTLs[checkID]; !ok {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("missing test check ttl")
|
|
|
|
}
|
2015-04-28 20:06:02 +00:00
|
|
|
|
|
|
|
// Ensure the token was configured
|
2017-08-28 12:17:13 +00:00
|
|
|
if token := a.State.CheckToken(checkID); token == "" {
|
2015-04-28 20:06:02 +00:00
|
|
|
t.Fatalf("missing token")
|
|
|
|
}
|
2015-05-11 23:48:10 +00:00
|
|
|
|
2015-04-12 00:53:48 +00:00
|
|
|
// By default, checks start in critical state.
|
2017-08-28 12:17:13 +00:00
|
|
|
state := a.State.Checks()[checkID]
|
2017-04-19 23:00:11 +00:00
|
|
|
if state.Status != api.HealthCritical {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("bad: %v", state)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-18 18:28:39 +00:00
|
|
|
// This verifies all the forms of the new args-style check that we need to
|
|
|
|
// support as a result of https://github.com/hashicorp/consul/issues/3587.
|
|
|
|
func TestAgent_RegisterCheck_Scripts(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2017-10-18 18:28:39 +00:00
|
|
|
enable_script_checks = true
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2017-10-18 18:28:39 +00:00
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
name string
|
|
|
|
check map[string]interface{}
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
"== Consul 1.0.0",
|
|
|
|
map[string]interface{}{
|
|
|
|
"Name": "test",
|
|
|
|
"Interval": "2s",
|
|
|
|
"ScriptArgs": []string{"true"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"> Consul 1.0.0 (fixup)",
|
|
|
|
map[string]interface{}{
|
|
|
|
"Name": "test",
|
|
|
|
"Interval": "2s",
|
|
|
|
"script_args": []string{"true"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"> Consul 1.0.0",
|
|
|
|
map[string]interface{}{
|
|
|
|
"Name": "test",
|
|
|
|
"Interval": "2s",
|
|
|
|
"Args": []string{"true"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
for _, tt := range tests {
|
|
|
|
t.Run(tt.name+" as node check", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(tt.check))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp.Code != http.StatusOK {
|
|
|
|
t.Fatalf("bad: %d", resp.Code)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run(tt.name+" as top-level service check", func(t *testing.T) {
|
|
|
|
args := map[string]interface{}{
|
|
|
|
"Name": "a",
|
|
|
|
"Port": 1234,
|
|
|
|
"Check": tt.check,
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
if _, err := a.srv.AgentRegisterService(resp, req); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp.Code != http.StatusOK {
|
|
|
|
t.Fatalf("bad: %d", resp.Code)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run(tt.name+" as slice-based service check", func(t *testing.T) {
|
|
|
|
args := map[string]interface{}{
|
|
|
|
"Name": "a",
|
|
|
|
"Port": 1234,
|
|
|
|
"Checks": []map[string]interface{}{tt.check},
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
if _, err := a.srv.AgentRegisterService(resp, req); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp.Code != http.StatusOK {
|
|
|
|
t.Fatalf("bad: %d", resp.Code)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-11 12:22:11 +00:00
|
|
|
func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2018-10-11 12:22:11 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
args := &structs.CheckDefinition{
|
|
|
|
Name: "test",
|
|
|
|
ScriptArgs: []string{"true"},
|
|
|
|
Interval: time.Second,
|
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
|
|
|
|
res := httptest.NewRecorder()
|
|
|
|
_, err := a.srv.AgentRegisterCheck(res, req)
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("expected error but got nil")
|
|
|
|
}
|
|
|
|
if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
|
|
|
|
t.Fatalf("expected script disabled error, got: %s", err)
|
|
|
|
}
|
|
|
|
checkID := types.CheckID("test")
|
|
|
|
if _, ok := a.State.Checks()[checkID]; ok {
|
|
|
|
t.Fatalf("check registered with exec disable")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2018-10-11 12:22:11 +00:00
|
|
|
enable_local_script_checks = true
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
args := &structs.CheckDefinition{
|
|
|
|
Name: "test",
|
|
|
|
ScriptArgs: []string{"true"},
|
|
|
|
Interval: time.Second,
|
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
|
|
|
|
res := httptest.NewRecorder()
|
|
|
|
_, err := a.srv.AgentRegisterCheck(res, req)
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("expected error but got nil")
|
|
|
|
}
|
|
|
|
if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
|
|
|
|
t.Fatalf("expected script disabled error, got: %s", err)
|
|
|
|
}
|
|
|
|
checkID := types.CheckID("test")
|
|
|
|
if _, ok := a.State.Checks()[checkID]; ok {
|
|
|
|
t.Fatalf("check registered with exec disable")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_RegisterCheck_Passing(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-04-12 00:53:48 +00:00
|
|
|
|
2017-06-15 16:46:06 +00:00
|
|
|
args := &structs.CheckDefinition{
|
2017-05-15 19:49:13 +00:00
|
|
|
Name: "test",
|
|
|
|
TTL: 15 * time.Second,
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2015-04-12 00:53:48 +00:00
|
|
|
}
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentRegisterCheck(nil, req)
|
2015-04-12 00:53:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2016-06-07 20:24:51 +00:00
|
|
|
checkID := types.CheckID("test")
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[checkID]; !ok {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("missing test check")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, ok := a.checkTTLs[checkID]; !ok {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("missing test check ttl")
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
state := a.State.Checks()[checkID]
|
2017-04-19 23:00:11 +00:00
|
|
|
if state.Status != api.HealthPassing {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("bad: %v", state)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_RegisterCheck_BadStatus(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-04-12 00:53:48 +00:00
|
|
|
|
2017-06-15 16:46:06 +00:00
|
|
|
args := &structs.CheckDefinition{
|
2017-05-15 19:49:13 +00:00
|
|
|
Name: "test",
|
|
|
|
TTL: 15 * time.Second,
|
2015-04-12 00:53:48 +00:00
|
|
|
Status: "fluffy",
|
|
|
|
}
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
|
2015-04-12 00:53:48 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 400 {
|
|
|
|
t.Fatalf("accepted bad status")
|
|
|
|
}
|
2014-01-30 23:51:15 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-04-30 23:00:57 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfigNew())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2019-04-30 23:00:57 +00:00
|
|
|
nodeCheck := &structs.CheckDefinition{
|
2016-12-14 22:16:46 +00:00
|
|
|
Name: "test",
|
2017-05-15 19:49:13 +00:00
|
|
|
TTL: 15 * time.Second,
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
2017-05-09 16:58:12 +00:00
|
|
|
|
2019-04-30 23:00:57 +00:00
|
|
|
svc := &structs.ServiceDefinition{
|
|
|
|
ID: "foo:1234",
|
|
|
|
Name: "foo",
|
|
|
|
Port: 1234,
|
|
|
|
}
|
|
|
|
|
|
|
|
svcCheck := &structs.CheckDefinition{
|
|
|
|
Name: "test2",
|
|
|
|
ServiceID: "foo:1234",
|
|
|
|
TTL: 15 * time.Second,
|
|
|
|
}
|
|
|
|
|
|
|
|
// ensure the service is ready for registering a check for it.
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(svc))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
_, err := a.srv.AgentRegisterService(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// create a policy that has write on service foo
|
|
|
|
policyReq := &structs.ACLPolicy{
|
|
|
|
Name: "write-foo",
|
|
|
|
Rules: `service "foo" { policy = "write"}`,
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq))
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
_, err = a.srv.ACLPolicyCreate(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// create a policy that has write on the node name of the agent
|
|
|
|
policyReq = &structs.ACLPolicy{
|
|
|
|
Name: "write-node",
|
|
|
|
Rules: fmt.Sprintf(`node "%s" { policy = "write" }`, a.config.NodeName),
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq))
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
_, err = a.srv.ACLPolicyCreate(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// create a token using the write-foo policy
|
|
|
|
tokenReq := &structs.ACLToken{
|
|
|
|
Description: "write-foo",
|
|
|
|
Policies: []structs.ACLTokenPolicyLink{
|
|
|
|
structs.ACLTokenPolicyLink{
|
|
|
|
Name: "write-foo",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq))
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
tokInf, err := a.srv.ACLTokenCreate(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
svcToken, ok := tokInf.(*structs.ACLToken)
|
|
|
|
require.True(t, ok)
|
|
|
|
require.NotNil(t, svcToken)
|
|
|
|
|
|
|
|
// create a token using the write-node policy
|
|
|
|
tokenReq = &structs.ACLToken{
|
|
|
|
Description: "write-node",
|
|
|
|
Policies: []structs.ACLTokenPolicyLink{
|
|
|
|
structs.ACLTokenPolicyLink{
|
|
|
|
Name: "write-node",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq))
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
tokInf, err = a.srv.ACLTokenCreate(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
nodeToken, ok := tokInf.(*structs.ACLToken)
|
|
|
|
require.True(t, ok)
|
|
|
|
require.NotNil(t, nodeToken)
|
|
|
|
|
|
|
|
t.Run("no token - node check", func(t *testing.T) {
|
2019-09-26 02:55:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(nodeCheck))
|
|
|
|
_, err := a.srv.AgentRegisterCheck(nil, req)
|
|
|
|
require.True(r, acl.IsErrPermissionDenied(err))
|
|
|
|
})
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2019-04-30 23:00:57 +00:00
|
|
|
t.Run("svc token - node check", func(t *testing.T) {
|
2019-09-26 02:55:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(nodeCheck))
|
|
|
|
_, err := a.srv.AgentRegisterCheck(nil, req)
|
|
|
|
require.True(r, acl.IsErrPermissionDenied(err))
|
|
|
|
})
|
2019-04-30 23:00:57 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("node token - node check", func(t *testing.T) {
|
2019-09-26 02:55:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(nodeCheck))
|
|
|
|
_, err := a.srv.AgentRegisterCheck(nil, req)
|
|
|
|
require.NoError(r, err)
|
|
|
|
})
|
2019-04-30 23:00:57 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("no token - svc check", func(t *testing.T) {
|
2019-09-26 02:55:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(svcCheck))
|
|
|
|
_, err := a.srv.AgentRegisterCheck(nil, req)
|
|
|
|
require.True(r, acl.IsErrPermissionDenied(err))
|
|
|
|
})
|
2017-05-09 17:46:11 +00:00
|
|
|
})
|
2019-04-30 23:00:57 +00:00
|
|
|
|
|
|
|
t.Run("node token - svc check", func(t *testing.T) {
|
2019-09-26 02:55:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(svcCheck))
|
|
|
|
_, err := a.srv.AgentRegisterCheck(nil, req)
|
|
|
|
require.True(r, acl.IsErrPermissionDenied(err))
|
|
|
|
})
|
2019-04-30 23:00:57 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("svc token - svc check", func(t *testing.T) {
|
2019-09-26 02:55:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(svcCheck))
|
|
|
|
_, err := a.srv.AgentRegisterCheck(nil, req)
|
|
|
|
require.NoError(r, err)
|
|
|
|
})
|
2019-04-30 23:00:57 +00:00
|
|
|
})
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_DeregisterCheck(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-30 23:51:15 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentDeregisterCheck(nil, req)
|
2014-01-30 23:51:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()["test"]; ok {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("have test check")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_DeregisterCheckACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentDeregisterCheck(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentDeregisterCheck(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_PassCheck(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-30 23:51:15 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentCheckPass(nil, req)
|
2014-01-30 23:51:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2017-08-28 12:17:13 +00:00
|
|
|
state := a.State.Checks()["test"]
|
2017-04-19 23:00:11 +00:00
|
|
|
if state.Status != api.HealthPassing {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("bad: %v", state)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_PassCheck_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentCheckPass(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentCheckPass(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_WarnCheck(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-30 23:51:15 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentCheckWarn(nil, req)
|
2014-01-30 23:51:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2017-08-28 12:17:13 +00:00
|
|
|
state := a.State.Checks()["test"]
|
2017-04-19 23:00:11 +00:00
|
|
|
if state.Status != api.HealthWarning {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("bad: %v", state)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_WarnCheck_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentCheckWarn(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentCheckWarn(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_FailCheck(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2014-01-30 23:51:15 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentCheckFail(nil, req)
|
2014-01-30 23:51:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2017-08-28 12:17:13 +00:00
|
|
|
state := a.State.Checks()["test"]
|
2017-04-19 23:00:11 +00:00
|
|
|
if state.Status != api.HealthCritical {
|
2014-01-30 23:51:15 +00:00
|
|
|
t.Fatalf("bad: %v", state)
|
|
|
|
}
|
|
|
|
}
|
2014-01-30 23:56:03 +00:00
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_FailCheck_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentCheckFail(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentCheckFail(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
// TestAgent_UpdateCheck exercises the /v1/agent/check/update/:id endpoint:
// setting each valid status, truncation of oversized check output (bounded
// by check_output_max_size), and rejection of an unknown status with 400.
func TestAgent_UpdateCheck(t *testing.T) {
	t.Parallel()
	// Configure a small output cap so the truncation subtest is cheap.
	maxChecksSize := 256
	a := NewTestAgent(t, t.Name(), fmt.Sprintf("check_output_max_size=%d", maxChecksSize))
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a TTL check that the update endpoint can act on.
	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// One case per valid health status; each should be accepted and
	// reflected verbatim in local state.
	cases := []checkUpdate{
		checkUpdate{api.HealthPassing, "hello-passing"},
		checkUpdate{api.HealthCritical, "hello-critical"},
		checkUpdate{api.HealthWarning, "hello-warning"},
	}

	for _, c := range cases {
		t.Run(c.Status, func(t *testing.T) {
			req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(c))
			resp := httptest.NewRecorder()
			obj, err := a.srv.AgentCheckUpdate(resp, req)
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			if obj != nil {
				t.Fatalf("bad: %v", obj)
			}
			if resp.Code != 200 {
				t.Fatalf("expected 200, got %d", resp.Code)
			}

			// The update must be visible in the agent's local state.
			state := a.State.Checks()["test"]
			if state.Status != c.Status || state.Output != c.Output {
				t.Fatalf("bad: %v", state)
			}
		})
	}

	t.Run("log output limit", func(t *testing.T) {
		// Send output far larger than the configured cap.
		args := checkUpdate{
			Status: api.HealthPassing,
			Output: strings.Repeat("-= bad -=", 5*maxChecksSize),
		}
		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.AgentCheckUpdate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if obj != nil {
			t.Fatalf("bad: %v", obj)
		}
		if resp.Code != 200 {
			t.Fatalf("expected 200, got %d", resp.Code)
		}

		// Since we append some notes about truncating, we just do a
		// rough check that the output buffer was cut down so this test
		// isn't super brittle.
		state := a.State.Checks()["test"]
		if state.Status != api.HealthPassing || len(state.Output) > 2*maxChecksSize {
			t.Fatalf("bad: %v, (len:=%d)", state, len(state.Output))
		}
	})

	t.Run("bogus status", func(t *testing.T) {
		// An unknown status string must be rejected with HTTP 400.
		args := checkUpdate{Status: "itscomplicated"}
		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.AgentCheckUpdate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if obj != nil {
			t.Fatalf("bad: %v", obj)
		}
		if resp.Code != 400 {
			t.Fatalf("expected 400, got %d", resp.Code)
		}
	})
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_UpdateCheck_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
|
2017-06-15 16:46:06 +00:00
|
|
|
chkType := &structs.CheckType{TTL: 15 * time.Second}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
|
|
|
args := checkUpdate{api.HealthPassing, "hello-passing"}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentCheckUpdate(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
|
|
|
args := checkUpdate{api.HealthPassing, "hello-passing"}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test?token=root", jsonReader(args))
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentCheckUpdate(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_RegisterService(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testAgent_RegisterService registers a service with one inline and two
// extra TTL checks via /v1/agent/service/register and verifies the service,
// its meta/weights, all three TTL checks, and the request token land in the
// agent's local state. extraHCL is appended to the test agent config.
func testAgent_RegisterService(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, t.Name(), extraHCL)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Check: structs.CheckType{
			TTL: 15 * time.Second,
		},
		Checks: []*structs.CheckType{
			&structs.CheckType{
				TTL: 20 * time.Second,
			},
			&structs.CheckType{
				TTL: 30 * time.Second,
			},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	// token=abc123 lets us verify below that the token is stored with the service.
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))

	obj, err := a.srv.AgentRegisterService(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure the service
	if _, ok := a.State.Services()["test"]; !ok {
		t.Fatalf("missing test service")
	}
	if val := a.State.Service("test").Meta["hello"]; val != "world" {
		t.Fatalf("Missing meta: %v", a.State.Service("test").Meta)
	}
	if val := a.State.Service("test").Weights.Passing; val != 100 {
		t.Fatalf("Expected 100 for Weights.Passing, got: %v", val)
	}
	if val := a.State.Service("test").Weights.Warning; val != 3 {
		t.Fatalf("Expected 3 for Weights.Warning, got: %v", val)
	}

	// Ensure we have a check mapping
	checks := a.State.Checks()
	if len(checks) != 3 {
		t.Fatalf("bad: %v", checks)
	}
	// All three checks were defined with TTL, so each must report type "ttl".
	for _, c := range checks {
		if c.Type != "ttl" {
			t.Fatalf("expected ttl check type, got %s", c.Type)
		}
	}

	// The agent must be tracking one TTL timer per check.
	if len(a.checkTTLs) != 3 {
		t.Fatalf("missing test check ttls: %v", a.checkTTLs)
	}

	// Ensure the token was configured
	if token := a.State.ServiceToken("test"); token == "" {
		t.Fatalf("missing token")
	}
}
|
|
|
|
|
2019-09-02 15:38:29 +00:00
|
|
|
func TestAgent_RegisterService_ReRegister(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ReRegister(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ReRegister(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testAgent_RegisterService_ReRegister registers the same service twice with
// overlapping but different check sets and verifies that, without the
// replace-existing-checks option, the union of both check sets is retained.
func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, t.Name(), extraHCL)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// First registration carries check_1 and check_2.
	args := &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Checks: []*structs.CheckType{
			&structs.CheckType{
				CheckID: types.CheckID("check_1"),
				TTL:     20 * time.Second,
			},
			&structs.CheckType{
				CheckID: types.CheckID("check_2"),
				TTL:     30 * time.Second,
			},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
	_, err := a.srv.AgentRegisterService(nil, req)
	require.NoError(t, err)

	// Second registration keeps check_1 but swaps check_2 for check_3.
	args = &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Checks: []*structs.CheckType{
			&structs.CheckType{
				CheckID: types.CheckID("check_1"),
				TTL:     20 * time.Second,
			},
			&structs.CheckType{
				CheckID: types.CheckID("check_3"),
				TTL:     30 * time.Second,
			},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ = http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
	_, err = a.srv.AgentRegisterService(nil, req)
	require.NoError(t, err)

	// Without replace-existing-checks all checks seen across both
	// registrations must remain: check_1, check_2, and check_3.
	checks := a.State.Checks()
	require.Equal(t, 3, len(checks))

	checkIDs := []string{}
	for id := range checks {
		checkIDs = append(checkIDs, string(id))
	}
	sort.Strings(checkIDs)
	require.Equal(t, []string{"check_1", "check_2", "check_3"}, checkIDs)
}
|
|
|
|
|
|
|
|
func TestAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testAgent_RegisterService_ReRegister_ReplaceExistingChecks registers the
// same service twice with ?replace-existing-checks and verifies that checks
// absent from the second registration are removed, while an anonymous check
// keeps its auto-generated ID across registrations.
func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, extraHCL string) {
	t.Helper()
	a := NewTestAgent(t, t.Name(), extraHCL)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// First registration: one anonymous check plus check_2.
	args := &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Checks: []*structs.CheckType{
			&structs.CheckType{
				// explicitly not setting the check id to let it be auto-generated
				// we want to ensure that we are testing out the cases with autogenerated names/ids
				TTL: 20 * time.Second,
			},
			&structs.CheckType{
				CheckID: types.CheckID("check_2"),
				TTL:     30 * time.Second,
			},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args))
	_, err := a.srv.AgentRegisterService(nil, req)
	require.NoError(t, err)

	// Second registration: the anonymous check again, check_2 dropped,
	// check_3 added.
	args = &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Checks: []*structs.CheckType{
			&structs.CheckType{
				TTL: 20 * time.Second,
			},
			&structs.CheckType{
				CheckID: types.CheckID("check_3"),
				TTL:     30 * time.Second,
			},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ = http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args))
	_, err = a.srv.AgentRegisterService(nil, req)
	require.NoError(t, err)

	// With replace-existing-checks only the second registration's checks
	// survive: the auto-generated "service:test:1" and check_3 (check_2 gone).
	checks := a.State.Checks()
	require.Equal(t, 2, len(checks))

	checkIDs := []string{}
	for id := range checks {
		checkIDs = append(checkIDs, string(id))
	}
	sort.Strings(checkIDs)
	require.ElementsMatch(t, []string{"service:test:1", "check_3"}, checkIDs)
}
|
|
|
|
|
2017-10-10 23:40:59 +00:00
|
|
|
func TestAgent_RegisterService_TranslateKeys(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ACLDeny(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ACLDeny(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testAgent_RegisterService_TranslateKeys registers a connect-proxy service
// (with a sidecar) from a raw snake_case JSON payload and verifies the keys
// were translated into the proper struct fields, that opaque config/meta maps
// were NOT translated, and that the sidecar's TCP check targets the expected
// local service address. The test runs for both an IPv4 and an IPv6 address.
func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
	t.Helper()

	tests := []struct {
		ip                    string
		expectedTCPCheckStart string
	}{
		{"127.0.0.1", "127.0.0.1:"}, // private network address
		{"::1", "[::1]:"},           // shared address space
	}
	for _, tt := range tests {
		t.Run(tt.ip, func(t *testing.T) {
			a := NewTestAgent(t, t.Name(), `
	connect {}
`+extraHCL)
			defer a.Shutdown()
			testrpc.WaitForTestAgent(t, a.RPC, "dc1")

			// Raw JSON (not marshaled structs) so we exercise the HTTP
			// endpoint's case-insensitive snake_case key translation.
			json := `
	{
		"name":"test",
		"port":8000,
		"enable_tag_override": true,
		"tagged_addresses": {
			"lan": {
				"address": "1.2.3.4",
				"port": 5353
			},
			"wan": {
				"address": "2.3.4.5",
				"port": 53
			}
		},
		"meta": {
			"some": "meta",
			"enable_tag_override": "meta is 'opaque' so should not get translated"
		},
		"kind": "connect-proxy",` +
				// Note the uppercase P is important here - it ensures translation works
				// correctly in case-insensitive way. Without it this test can pass even
				// when translation is broken for other valid inputs.
				`"Proxy": {
			"destination_service_name": "web",
			"destination_service_id": "web",
			"local_service_port": 1234,
			"local_service_address": "` + tt.ip + `",
			"config": {
				"destination_type": "proxy.config is 'opaque' so should not get translated"
			},
			"upstreams": [
				{
					"destination_type": "service",
					"destination_namespace": "default",
					"destination_name": "db",
					"local_bind_address": "` + tt.ip + `",
					"local_bind_port": 1234,
					"config": {
						"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated"
					}
				}
			]
		},
		"connect": {
			"sidecar_service": {
				"name":"test-proxy",
				"port":8001,
				"enable_tag_override": true,
				"meta": {
					"some": "meta",
					"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated"
				},
				"kind": "connect-proxy",
				"proxy": {
					"destination_service_name": "test",
					"destination_service_id": "test",
					"local_service_port": 4321,
					"local_service_address": "` + tt.ip + `",
					"upstreams": [
						{
							"destination_type": "service",
							"destination_namespace": "default",
							"destination_name": "db",
							"local_bind_address": "` + tt.ip + `",
							"local_bind_port": 1234,
							"config": {
								"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated"
							}
						}
					]
				}
			}
		},
		"weights":{
			"passing": 16
		}
	}`
			req, _ := http.NewRequest("PUT", "/v1/agent/service/register", strings.NewReader(json))

			rr := httptest.NewRecorder()
			obj, err := a.srv.AgentRegisterService(rr, req)
			require.NoError(t, err)
			require.Nil(t, obj)
			require.Equal(t, 200, rr.Code, "body: %s", rr.Body)

			// Expected local-state representation of the registered service
			// after key translation.
			svc := &structs.NodeService{
				ID:      "test",
				Service: "test",
				TaggedAddresses: map[string]structs.ServiceAddress{
					"lan": {
						Address: "1.2.3.4",
						Port:    5353,
					},
					"wan": {
						Address: "2.3.4.5",
						Port:    53,
					},
				},
				Meta: map[string]string{
					"some":                "meta",
					"enable_tag_override": "meta is 'opaque' so should not get translated",
				},
				Port:              8000,
				EnableTagOverride: true,
				Weights:           &structs.Weights{Passing: 16, Warning: 0},
				Kind:              structs.ServiceKindConnectProxy,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "web",
					DestinationServiceID:   "web",
					LocalServiceAddress:    tt.ip,
					LocalServicePort:       1234,
					Config: map[string]interface{}{
						"destination_type": "proxy.config is 'opaque' so should not get translated",
					},
					Upstreams: structs.Upstreams{
						{
							DestinationType:      structs.UpstreamDestTypeService,
							DestinationName:      "db",
							DestinationNamespace: "default",
							LocalBindAddress:     tt.ip,
							LocalBindPort:        1234,
							Config: map[string]interface{}{
								"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated",
							},
						},
					},
				},
				Connect: structs.ServiceConnect{
					// The sidecar service is nilled since it is only config sugar and
					// shouldn't be represented in state. We assert that the translations
					// there worked by inspecting the registered sidecar below.
					SidecarService: nil,
				},
			}

			got := a.State.Service("test")
			require.Equal(t, svc, got)

			// Expected local-state representation of the registered sidecar.
			sidecarSvc := &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				ID:      "test-sidecar-proxy",
				Service: "test-proxy",
				Meta: map[string]string{
					"some":                "meta",
					"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated",
				},
				Port:                       8001,
				EnableTagOverride:          true,
				Weights:                    &structs.Weights{Passing: 1, Warning: 1},
				LocallyRegisteredAsSidecar: true,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "test",
					DestinationServiceID:   "test",
					LocalServiceAddress:    tt.ip,
					LocalServicePort:       4321,
					Upstreams: structs.Upstreams{
						{
							DestinationType:      structs.UpstreamDestTypeService,
							DestinationName:      "db",
							DestinationNamespace: "default",
							LocalBindAddress:     tt.ip,
							LocalBindPort:        1234,
							Config: map[string]interface{}{
								"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated",
							},
						},
					},
				},
			}
			gotSidecar := a.State.Service("test-sidecar-proxy")
			// Look for at least one registered TCP check whose target starts
			// with the expected "<addr>:" prefix for this test case.
			hasNoCorrectTCPCheck := true
			for _, v := range a.checkTCPs {
				if strings.HasPrefix(v.TCP, tt.expectedTCPCheckStart) {
					hasNoCorrectTCPCheck = false
					break
				}
				fmt.Println("TCP Check:= ", v)
			}
			if hasNoCorrectTCPCheck {
				t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
			}
			require.Equal(t, sidecarSvc, gotSidecar)
		})
	}
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_RegisterService_ACLDeny(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ACLDeny(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ACLDeny(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_RegisterService_ACLDeny(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig()+" "+extraHCL)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-06-15 16:46:06 +00:00
|
|
|
args := &structs.ServiceDefinition{
|
2016-12-14 22:16:46 +00:00
|
|
|
Name: "test",
|
|
|
|
Tags: []string{"master"},
|
|
|
|
Port: 8000,
|
2017-06-15 16:46:06 +00:00
|
|
|
Check: structs.CheckType{
|
2016-12-14 22:16:46 +00:00
|
|
|
TTL: 15 * time.Second,
|
|
|
|
},
|
2017-06-15 16:46:06 +00:00
|
|
|
Checks: []*structs.CheckType{
|
|
|
|
&structs.CheckType{
|
2016-12-14 22:16:46 +00:00
|
|
|
TTL: 20 * time.Second,
|
|
|
|
},
|
2017-06-15 16:46:06 +00:00
|
|
|
&structs.CheckType{
|
2016-12-14 22:16:46 +00:00
|
|
|
TTL: 30 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentRegisterService(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(args))
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentRegisterService(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
2017-05-08 16:34:45 +00:00
|
|
|
func TestAgent_RegisterService_InvalidAddress(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_UnmanagedConnectProxy(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_InvalidAddress(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_RegisterService_InvalidAddress(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, t.Name(), extraHCL)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2017-05-08 16:34:45 +00:00
|
|
|
|
2017-05-09 07:29:13 +00:00
|
|
|
for _, addr := range []string{"0.0.0.0", "::", "[::]"} {
|
|
|
|
t.Run("addr "+addr, func(t *testing.T) {
|
2017-06-15 16:46:06 +00:00
|
|
|
args := &structs.ServiceDefinition{
|
2017-05-09 07:29:13 +00:00
|
|
|
Name: "test",
|
|
|
|
Address: addr,
|
|
|
|
Port: 8000,
|
|
|
|
}
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
|
2017-05-09 07:29:13 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
_, err := a.srv.AgentRegisterService(resp, req)
|
2017-05-09 16:58:12 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("got error %v want nil", err)
|
|
|
|
}
|
2017-05-09 07:29:13 +00:00
|
|
|
if got, want := resp.Code, 400; got != want {
|
|
|
|
t.Fatalf("got code %d want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := resp.Body.String(), "Invalid service address"; got != want {
|
|
|
|
t.Fatalf("got body %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
2017-05-08 16:34:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-17 12:29:02 +00:00
|
|
|
// This tests local agent service registration of a unmanaged connect proxy.
|
|
|
|
// This verifies that it is put in the local state store properly for syncing
|
2019-08-09 19:19:30 +00:00
|
|
|
// later.
|
2018-04-17 12:29:02 +00:00
|
|
|
func TestAgent_RegisterService_UnmanagedConnectProxy(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_UnmanagedConnectProxy(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_UnmanagedConnectProxy(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testAgent_RegisterService_UnmanagedConnectProxy registers an unmanaged
// connect proxy via the HTTP API and verifies it lands in local state with
// the connect-proxy kind, the default upstream destination type filled in,
// and the request token stored with the service.
func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL string) {
	t.Helper()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), extraHCL)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a proxy. Note that the destination doesn't exist here on this
	// agent or in the catalog at all. This is intended and part of the design.
	args := &api.AgentServiceRegistration{
		Kind: api.ServiceKindConnectProxy,
		Name: "connect-proxy",
		Port: 8000,
		Proxy: &api.AgentServiceConnectProxyConfig{
			DestinationServiceName: "web",
			Upstreams: []api.Upstream{
				{
					// No type to force default
					DestinationName: "db",
					LocalBindPort:   1234,
				},
				{
					DestinationType: "prepared_query",
					DestinationName: "geo-cache",
					LocalBindPort:   1235,
				},
			},
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(resp, req)
	require.NoError(t, err)
	assert.Nil(obj)

	// Ensure the service
	svc, ok := a.State.Services()["connect-proxy"]
	assert.True(ok, "has service")
	assert.Equal(structs.ServiceKindConnectProxy, svc.Kind)
	// Registration must set that default type
	args.Proxy.Upstreams[0].DestinationType = api.UpstreamDestTypeService
	assert.Equal(args.Proxy, svc.Proxy.ToAPI())

	// Ensure the token was configured
	assert.Equal("abc123", a.State.ServiceToken("connect-proxy"))
}
|
|
|
|
|
2018-09-27 13:33:12 +00:00
|
|
|
func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) *structs.NodeService {
|
|
|
|
ns := &structs.NodeService{
|
|
|
|
ID: svc + "-sidecar-proxy",
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
Service: svc + "-sidecar-proxy",
|
|
|
|
Port: 2222,
|
2019-01-08 10:13:49 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2018-09-27 13:33:12 +00:00
|
|
|
// Note that LocallyRegisteredAsSidecar should be true on the internal
|
|
|
|
// NodeService, but that we never want to see it in the HTTP response as
|
|
|
|
// it's internal only state. This is being compared directly to local state
|
|
|
|
// so should be present here.
|
|
|
|
LocallyRegisteredAsSidecar: true,
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: svc,
|
|
|
|
DestinationServiceID: svc,
|
|
|
|
LocalServiceAddress: "127.0.0.1",
|
|
|
|
LocalServicePort: port,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
for _, fn := range fns {
|
|
|
|
fn(ns)
|
|
|
|
}
|
|
|
|
return ns
|
|
|
|
}
|
|
|
|
|
2019-02-20 16:23:38 +00:00
|
|
|
// testCreateToken creates a Policy for the provided rules and a Token linked to that Policy.
|
2018-09-27 14:00:51 +00:00
|
|
|
func testCreateToken(t *testing.T, a *TestAgent, rules string) string {
|
2019-02-20 16:23:38 +00:00
|
|
|
policyName, err := uuid.GenerateUUID() // we just need a unique name for the test and UUIDs are definitely unique
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
policyID := testCreatePolicy(t, a, policyName, rules)
|
|
|
|
|
2018-09-27 14:00:51 +00:00
|
|
|
args := map[string]interface{}{
|
2019-12-06 16:14:56 +00:00
|
|
|
"Description": "User Token",
|
2019-02-20 16:23:38 +00:00
|
|
|
"Policies": []map[string]interface{}{
|
|
|
|
map[string]interface{}{
|
|
|
|
"ID": policyID,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Local": false,
|
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.ACLTokenCreate(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, obj)
|
|
|
|
aclResp := obj.(*structs.ACLToken)
|
|
|
|
return aclResp.SecretID
|
|
|
|
}
|
|
|
|
|
|
|
|
func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string {
|
|
|
|
args := map[string]interface{}{
|
|
|
|
"Name": name,
|
2018-09-27 14:00:51 +00:00
|
|
|
"Rules": rules,
|
|
|
|
}
|
2019-02-20 16:23:38 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(args))
|
2018-09-27 14:00:51 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2019-02-20 16:23:38 +00:00
|
|
|
obj, err := a.srv.ACLPolicyCreate(resp, req)
|
2018-09-27 14:00:51 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, obj)
|
2019-02-20 16:23:38 +00:00
|
|
|
aclResp := obj.(*structs.ACLPolicy)
|
2018-09-27 14:00:51 +00:00
|
|
|
return aclResp.ID
|
|
|
|
}
|
|
|
|
|
2018-09-27 13:33:12 +00:00
|
|
|
// This tests local agent service registration with a sidecar service. Note we
|
|
|
|
// only test simple defaults for the sidecar here since the actual logic for
|
|
|
|
// handling sidecar defaults and port assignment is tested thoroughly in
|
|
|
|
// TestAgent_sidecarServiceFromNodeService. Note it also tests Deregister
|
|
|
|
// explicitly too since setup is identical.
|
|
|
|
func TestAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterServiceDeregisterService_Sidecar(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterServiceDeregisterService_Sidecar(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testAgent_RegisterServiceDeregisterService_Sidecar is the shared body for
// TestAgent_RegisterServiceDeregisterService_Sidecar. extraHCL is appended to
// the agent config so the same table of cases runs with and without the
// service manager enabled.
func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL string) {
	t.Helper()

	tests := []struct {
		name string
		// preRegister/preRegister2 are registered directly with the agent
		// before the HTTP registration in `json` is made, to set up ID-clash
		// and update scenarios.
		preRegister, preRegister2 *structs.NodeService
		// Use raw JSON payloads rather than encoding to avoid subtleties with some
		// internal representations and different ways they encode and decode. We
		// rely on the payload being Unmarshalable to structs.ServiceDefinition
		// directly.
		json       string
		enableACL  bool
		tokenRules string
		// wantNS is the sidecar NodeService expected in local state after the
		// registration (nil means no sidecar should exist).
		wantNS  *structs.NodeService
		wantErr string
		// wantSidecarIDLeftAfterDereg is true when a service that merely
		// shares the sidecar's ID must survive deregistration of the parent.
		wantSidecarIDLeftAfterDereg bool
		// assertStateFn allows extra assertions against local state.
		assertStateFn func(t *testing.T, state *local.State)
	}{
		{
			name: "sanity check no sidecar case",
			json: `
			{
				"name": "web",
				"port": 1111
			}
			`,
			wantNS:  nil,
			wantErr: "",
		},
		{
			name: "default sidecar",
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {}
				}
			}
			`,
			wantNS:  testDefaultSidecar("web", 1111),
			wantErr: "",
		},
		{
			name: "ACL OK defaults",
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {}
				}
			}
			`,
			enableACL: true,
			tokenRules: `
			service "web-sidecar-proxy" {
				policy = "write"
			}
			service "web" {
				policy = "write"
			}`,
			wantNS:  testDefaultSidecar("web", 1111),
			wantErr: "",
		},
		{
			name: "ACL denied",
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {}
				}
			}
			`,
			enableACL:  true,
			tokenRules: ``, // No token rules means no valid token
			wantNS:     nil,
			wantErr:    "Permission denied",
		},
		{
			name: "ACL OK for service but not for sidecar",
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {}
				}
			}
			`,
			enableACL: true,
			// This will become more common/reasonable when ACLs support exact match.
			tokenRules: `
			service "web-sidecar-proxy" {
				policy = "deny"
			}
			service "web" {
				policy = "write"
			}`,
			wantNS:  nil,
			wantErr: "Permission denied",
		},
		{
			name: "ACL OK for service and sidecar but not sidecar's overridden destination",
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"proxy": {
							"DestinationServiceName": "foo"
						}
					}
				}
			}
			`,
			enableACL: true,
			tokenRules: `
			service "web-sidecar-proxy" {
				policy = "write"
			}
			service "web" {
				policy = "write"
			}`,
			wantNS:  nil,
			wantErr: "Permission denied",
		},
		{
			name: "ACL OK for service but not for overridden sidecar",
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"name": "foo-sidecar-proxy"
					}
				}
			}
			`,
			enableACL: true,
			tokenRules: `
			service "web-sidecar-proxy" {
				policy = "write"
			}
			service "web" {
				policy = "write"
			}`,
			wantNS:  nil,
			wantErr: "Permission denied",
		},
		{
			name: "ACL OK for service but and overridden for sidecar",
			// This test ensures that if the sidecar embeds it's own token with
			// different privs from the main request token it will be honored for the
			// sidecar registration. We use the test root token since that should have
			// permission.
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"name": "foo",
						"token": "root"
					}
				}
			}
			`,
			enableACL: true,
			tokenRules: `
			service "web-sidecar-proxy" {
				policy = "write"
			}
			service "web" {
				policy = "write"
			}`,
			wantNS: testDefaultSidecar("web", 1111, func(ns *structs.NodeService) {
				ns.Service = "foo"
			}),
			wantErr: "",
		},
		{
			name: "invalid check definition in sidecar",
			// Note no interval in the TCP check should fail validation
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"check": {
							"TCP": "foo"
						}
					}
				}
			}
			`,
			wantNS:  nil,
			wantErr: "invalid check in sidecar_service",
		},
		{
			name: "invalid checks definitions in sidecar",
			// Note no interval in the TCP check should fail validation
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"checks": [{
							"TCP": "foo"
						}]
					}
				}
			}
			`,
			wantNS:  nil,
			wantErr: "invalid check in sidecar_service",
		},
		{
			name: "invalid check status in sidecar",
			// Note no interval in the TCP check should fail validation
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"check": {
							"TCP": "foo",
							"Interval": 10,
							"Status": "unsupported-status"
						}
					}
				}
			}
			`,
			wantNS:  nil,
			wantErr: "Status for checks must 'passing', 'warning', 'critical'",
		},
		{
			name: "invalid checks status in sidecar",
			// Note no interval in the TCP check should fail validation
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"checks": [{
							"TCP": "foo",
							"Interval": 10,
							"Status": "unsupported-status"
						}]
					}
				}
			}
			`,
			wantNS:  nil,
			wantErr: "Status for checks must 'passing', 'warning', 'critical'",
		},
		{
			name: "another service registered with same ID as a sidecar should not be deregistered",
			// Add another service with the same ID that a sidecar for web would have
			preRegister: &structs.NodeService{
				ID:      "web-sidecar-proxy",
				Service: "fake-sidecar",
				Port:    9999,
			},
			// Register web with NO SIDECAR
			json: `
			{
				"name": "web",
				"port": 1111
			}
			`,
			// Note here that although the registration here didn't register it, we
			// should still see the NodeService we pre-registered here.
			wantNS: &structs.NodeService{
				ID:      "web-sidecar-proxy",
				Service: "fake-sidecar",
				Port:    9999,
				Weights: &structs.Weights{
					Passing: 1,
					Warning: 1,
				},
			},
			// After we deregister the web service above, the fake sidecar with
			// clashing ID SHOULD NOT have been removed since it wasn't part of the
			// original registration.
			wantSidecarIDLeftAfterDereg: true,
		},
		{
			name: "updates to sidecar should work",
			// Add a valid sidecar already registered
			preRegister: &structs.NodeService{
				ID:                         "web-sidecar-proxy",
				Service:                    "web-sidecar-proxy",
				LocallyRegisteredAsSidecar: true,
				Port:                       9999,
			},
			// Register web with Sidecar on different port
			json: `
			{
				"name": "web",
				"port": 1111,
				"connect": {
					"SidecarService": {
						"Port": 6666
					}
				}
			}
			`,
			// Note here that although the registration here didn't register it, we
			// should still see the NodeService we pre-registered here.
			wantNS: &structs.NodeService{
				Kind:                       "connect-proxy",
				ID:                         "web-sidecar-proxy",
				Service:                    "web-sidecar-proxy",
				LocallyRegisteredAsSidecar: true,
				Port:                       6666,
				Weights: &structs.Weights{
					Passing: 1,
					Warning: 1,
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "web",
					DestinationServiceID:   "web",
					LocalServiceAddress:    "127.0.0.1",
					LocalServicePort:       1111,
				},
			},
		},
		{
			name: "update that removes sidecar should NOT deregister it",
			// Add web with a valid sidecar already registered
			preRegister: &structs.NodeService{
				ID:      "web",
				Service: "web",
				Port:    1111,
			},
			preRegister2: testDefaultSidecar("web", 1111),
			// Register (update) web and remove sidecar (and port for sanity check)
			json: `
			{
				"name": "web",
				"port": 2222
			}
			`,
			// Sidecar should still be there such that API can update registration
			// without accidentally removing a sidecar. This is equivalent to embedded
			// checks which are not removed by just not being included in an update.
			// We will document that sidecar registrations via API must be explicitiy
			// deregistered.
			wantNS: testDefaultSidecar("web", 1111),
			// Sanity check the rest of the update happened though.
			assertStateFn: func(t *testing.T, state *local.State) {
				svcs := state.Services()
				svc, ok := svcs["web"]
				require.True(t, ok)
				require.Equal(t, 2222, svc.Port)
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			// Constrain auto ports to 1 available to make it deterministic
			hcl := `ports {
				sidecar_min_port = 2222
				sidecar_max_port = 2222
			}
			`
			if tt.enableACL {
				hcl = hcl + TestACLConfig()
			}

			a := NewTestAgent(t, t.Name(), hcl+" "+extraHCL)
			defer a.Shutdown()
			testrpc.WaitForLeader(t, a.RPC, "dc1")

			if tt.preRegister != nil {
				require.NoError(a.AddService(tt.preRegister, nil, false, "", ConfigSourceLocal))
			}
			if tt.preRegister2 != nil {
				require.NoError(a.AddService(tt.preRegister2, nil, false, "", ConfigSourceLocal))
			}

			// Create an ACL token with require policy
			var token string
			if tt.enableACL && tt.tokenRules != "" {
				token = testCreateToken(t, a, tt.tokenRules)
			}

			br := bytes.NewBufferString(tt.json)

			req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token="+token, br)
			resp := httptest.NewRecorder()
			obj, err := a.srv.AgentRegisterService(resp, req)
			if tt.wantErr != "" {
				// Error cases return before any state assertions; compare
				// case-insensitively since error wording may vary in casing.
				require.Error(err, "response code=%d, body:\n%s",
					resp.Code, resp.Body.String())
				require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr))
				return
			}
			require.NoError(err)
			assert.Nil(obj)
			require.Equal(200, resp.Code, "request failed with body: %s",
				resp.Body.String())

			// Sanity the target service registration
			svcs := a.State.Services()

			// Parse the expected definition into a ServiceDefinition
			var sd structs.ServiceDefinition
			err = json.Unmarshal([]byte(tt.json), &sd)
			require.NoError(err)
			require.NotEmpty(sd.Name)

			svcID := sd.ID
			if svcID == "" {
				svcID = sd.Name
			}
			svc, ok := svcs[svcID]
			require.True(ok, "has service "+svcID)
			assert.Equal(sd.Name, svc.Service)
			assert.Equal(sd.Port, svc.Port)
			// Ensure that the actual registered service _doesn't_ still have it's
			// sidecar info since it's duplicate and we don't want that synced up to
			// the catalog or included in responses particularly - it's just
			// registration syntax sugar.
			assert.Nil(svc.Connect.SidecarService)

			if tt.wantNS == nil {
				// Sanity check that there was no service registered, we rely on there
				// being no services at start of test so we can just use the count.
				assert.Len(svcs, 1, "should be no sidecar registered")
				return
			}

			// Ensure sidecar
			svc, ok = svcs[tt.wantNS.ID]
			require.True(ok, "no sidecar registered at "+tt.wantNS.ID)
			assert.Equal(tt.wantNS, svc)

			if tt.assertStateFn != nil {
				tt.assertStateFn(t, a.State)
			}

			// Now verify deregistration also removes sidecar (if there was one and it
			// was added via sidecar not just coincidental ID clash)
			{
				req := httptest.NewRequest("PUT",
					"/v1/agent/service/deregister/"+svcID+"?token="+token, nil)
				resp := httptest.NewRecorder()
				obj, err := a.srv.AgentDeregisterService(resp, req)
				require.NoError(err)
				require.Nil(obj)

				svcs := a.State.Services()
				svc, ok = svcs[tt.wantNS.ID]
				if tt.wantSidecarIDLeftAfterDereg {
					require.True(ok, "removed non-sidecar service at "+tt.wantNS.ID)
				} else {
					require.False(ok, "sidecar not deregistered with service "+svcID)
				}
			}
		})
	}
}
|
|
|
|
|
2018-03-12 20:05:06 +00:00
|
|
|
// This tests that connect proxy validation is done for local agent
|
|
|
|
// registration. This doesn't need to test validation exhaustively since
|
|
|
|
// that is done via a table test in the structs package.
|
2018-04-17 12:29:02 +00:00
|
|
|
func TestAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_UnmanagedConnectProxyInvalid(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_UnmanagedConnectProxyInvalid(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
2018-03-11 01:42:30 +00:00
|
|
|
|
|
|
|
assert := assert.New(t)
|
2019-09-24 15:04:48 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), extraHCL)
|
2018-03-11 01:42:30 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-03-11 01:42:30 +00:00
|
|
|
|
|
|
|
args := &structs.ServiceDefinition{
|
2018-09-12 16:07:47 +00:00
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
Name: "connect-proxy",
|
|
|
|
Proxy: &structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "db",
|
|
|
|
},
|
2018-03-11 01:42:30 +00:00
|
|
|
Check: structs.CheckType{
|
|
|
|
TTL: 15 * time.Second,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.AgentRegisterService(resp, req)
|
|
|
|
assert.Nil(err)
|
|
|
|
assert.Nil(obj)
|
|
|
|
assert.Equal(http.StatusBadRequest, resp.Code)
|
|
|
|
assert.Contains(resp.Body.String(), "Port")
|
|
|
|
|
|
|
|
// Ensure the service doesn't exist
|
|
|
|
_, ok := a.State.Services()["connect-proxy"]
|
|
|
|
assert.False(ok)
|
|
|
|
}
|
|
|
|
|
2018-06-04 05:20:16 +00:00
|
|
|
// Tests agent registration of a service that is connect native.
|
|
|
|
func TestAgent_RegisterService_ConnectNative(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ConnectNative(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ConnectNative(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_RegisterService_ConnectNative(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
2018-06-04 05:20:16 +00:00
|
|
|
|
|
|
|
assert := assert.New(t)
|
2019-09-24 15:04:48 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), extraHCL)
|
2018-06-04 05:20:16 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-06-04 05:20:16 +00:00
|
|
|
|
|
|
|
// Register a proxy. Note that the destination doesn't exist here on
|
|
|
|
// this agent or in the catalog at all. This is intended and part
|
|
|
|
// of the design.
|
|
|
|
args := &structs.ServiceDefinition{
|
|
|
|
Name: "web",
|
|
|
|
Port: 8000,
|
|
|
|
Check: structs.CheckType{
|
|
|
|
TTL: 15 * time.Second,
|
|
|
|
},
|
2018-06-05 17:51:05 +00:00
|
|
|
Connect: &structs.ServiceConnect{
|
2018-06-04 05:20:16 +00:00
|
|
|
Native: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.AgentRegisterService(resp, req)
|
|
|
|
assert.Nil(err)
|
|
|
|
assert.Nil(obj)
|
|
|
|
|
|
|
|
// Ensure the service
|
|
|
|
svc, ok := a.State.Services()["web"]
|
|
|
|
assert.True(ok, "has service")
|
2018-06-05 17:51:05 +00:00
|
|
|
assert.True(svc.Connect.Native)
|
2018-06-04 05:20:16 +00:00
|
|
|
}
|
|
|
|
|
2018-10-11 12:22:11 +00:00
|
|
|
func TestAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ScriptCheck_ExecDisable(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ScriptCheck_ExecDisable(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, t.Name(), extraHCL)
|
2018-10-11 12:22:11 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
args := &structs.ServiceDefinition{
|
|
|
|
Name: "test",
|
|
|
|
Meta: map[string]string{"hello": "world"},
|
|
|
|
Tags: []string{"master"},
|
|
|
|
Port: 8000,
|
|
|
|
Check: structs.CheckType{
|
|
|
|
Name: "test-check",
|
|
|
|
Interval: time.Second,
|
|
|
|
ScriptArgs: []string{"true"},
|
|
|
|
},
|
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 100,
|
|
|
|
Warning: 3,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
|
|
|
|
|
|
|
|
_, err := a.srv.AgentRegisterService(nil, req)
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("expected error but got nil")
|
|
|
|
}
|
|
|
|
if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
|
|
|
|
t.Fatalf("expected script disabled error, got: %s", err)
|
|
|
|
}
|
|
|
|
checkID := types.CheckID("test-check")
|
|
|
|
if _, ok := a.State.Checks()[checkID]; ok {
|
|
|
|
t.Fatalf("check registered with exec disable")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T) {
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t, "")
|
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
|
|
|
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2018-10-11 12:22:11 +00:00
|
|
|
enable_local_script_checks = true
|
2019-09-24 15:04:48 +00:00
|
|
|
`+extraHCL)
|
2018-10-11 12:22:11 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
args := &structs.ServiceDefinition{
|
|
|
|
Name: "test",
|
|
|
|
Meta: map[string]string{"hello": "world"},
|
|
|
|
Tags: []string{"master"},
|
|
|
|
Port: 8000,
|
|
|
|
Check: structs.CheckType{
|
|
|
|
Name: "test-check",
|
|
|
|
Interval: time.Second,
|
|
|
|
ScriptArgs: []string{"true"},
|
|
|
|
},
|
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 100,
|
|
|
|
Warning: 3,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
|
|
|
|
|
|
|
|
_, err := a.srv.AgentRegisterService(nil, req)
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("expected error but got nil")
|
|
|
|
}
|
|
|
|
if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
|
|
|
|
t.Fatalf("expected script disabled error, got: %s", err)
|
|
|
|
}
|
|
|
|
checkID := types.CheckID("test-check")
|
|
|
|
if _, ok := a.State.Checks()[checkID]; ok {
|
|
|
|
t.Fatalf("check registered with exec disable")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_DeregisterService(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-30 23:56:03 +00:00
|
|
|
|
|
|
|
service := &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
2014-01-30 23:56:03 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.AgentDeregisterService(nil, req)
|
2014-01-30 23:56:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if obj != nil {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Services()["test"]; ok {
|
2014-01-30 23:56:03 +00:00
|
|
|
t.Fatalf("have test service")
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()["test"]; ok {
|
2014-01-30 23:56:03 +00:00
|
|
|
t.Fatalf("have test check")
|
|
|
|
}
|
|
|
|
}
|
2015-01-15 09:17:35 +00:00
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_DeregisterService_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
service := &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentDeregisterService(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test?token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentDeregisterService(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-15 09:17:35 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("not enabled", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 400 {
|
|
|
|
t.Fatalf("expected 400, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
})
|
2015-01-15 09:17:35 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no service id", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/?enable=true", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 400 {
|
|
|
|
t.Fatalf("expected 400, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
})
|
2015-01-15 18:51:00 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("bad service id", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/_nope_?enable=true", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 404 {
|
|
|
|
t.Fatalf("expected 404, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
})
|
2015-01-15 09:17:35 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_ServiceMaintenance_Enable(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-15 09:17:35 +00:00
|
|
|
|
|
|
|
// Register the service
|
|
|
|
service := &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
2015-01-15 09:17:35 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-01-15 18:51:00 +00:00
|
|
|
// Force the service into maintenance mode
|
2015-09-10 19:08:08 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=mytoken", nil)
|
2015-01-15 09:17:35 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
|
2015-01-15 09:17:35 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 200 {
|
|
|
|
t.Fatalf("expected 200, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the maintenance check was registered
|
2015-01-15 20:20:57 +00:00
|
|
|
checkID := serviceMaintCheckID("test")
|
2017-08-28 12:17:13 +00:00
|
|
|
check, ok := a.State.Checks()[checkID]
|
2015-01-21 20:21:57 +00:00
|
|
|
if !ok {
|
2015-01-15 09:17:35 +00:00
|
|
|
t.Fatalf("should have registered maintenance check")
|
|
|
|
}
|
2015-01-21 20:21:57 +00:00
|
|
|
|
2015-09-10 19:08:08 +00:00
|
|
|
// Ensure the token was added
|
2017-08-28 12:17:13 +00:00
|
|
|
if token := a.State.CheckToken(checkID); token != "mytoken" {
|
2015-09-10 19:08:08 +00:00
|
|
|
t.Fatalf("expected 'mytoken', got '%s'", token)
|
|
|
|
}
|
|
|
|
|
2015-01-21 20:21:57 +00:00
|
|
|
// Ensure the reason was set in notes
|
|
|
|
if check.Notes != "broken" {
|
|
|
|
t.Fatalf("bad: %#v", check)
|
|
|
|
}
|
2015-01-15 09:17:35 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_ServiceMaintenance_Disable(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-15 09:17:35 +00:00
|
|
|
|
|
|
|
// Register the service
|
|
|
|
service := &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
2015-01-15 09:17:35 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Force the service into maintenance mode
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.EnableServiceMaintenance("test", "", ""); err != nil {
|
2015-01-15 09:17:35 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Leave maintenance mode
|
2015-01-15 18:51:00 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=false", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
|
2015-01-15 09:17:35 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 200 {
|
|
|
|
t.Fatalf("expected 200, got %d", resp.Code)
|
|
|
|
}
|
2015-01-15 18:51:00 +00:00
|
|
|
|
|
|
|
// Ensure the maintenance check was removed
|
2015-01-15 20:20:57 +00:00
|
|
|
checkID := serviceMaintCheckID("test")
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[checkID]; ok {
|
2015-01-15 18:51:00 +00:00
|
|
|
t.Fatalf("should have removed maintenance check")
|
|
|
|
}
|
2015-01-15 09:17:35 +00:00
|
|
|
}
|
2015-01-15 19:26:14 +00:00
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_ServiceMaintenance_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
|
|
|
// Register the service.
|
|
|
|
service := &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
|
2016-12-14 22:16:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentServiceMaintenance(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_NodeMaintenance_BadRequest(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-15 19:26:14 +00:00
|
|
|
|
|
|
|
// Fails when no enable flag provided
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
|
2015-01-15 19:26:14 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 400 {
|
|
|
|
t.Fatalf("expected 400, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_NodeMaintenance_Enable(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-15 19:26:14 +00:00
|
|
|
|
|
|
|
// Force the node into maintenance mode
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=mytoken", nil)
|
2015-01-15 19:26:14 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
|
2015-01-15 19:26:14 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 200 {
|
|
|
|
t.Fatalf("expected 200, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the maintenance check was registered
|
2017-08-28 12:17:13 +00:00
|
|
|
check, ok := a.State.Checks()[structs.NodeMaint]
|
2015-01-21 20:21:57 +00:00
|
|
|
if !ok {
|
2015-01-15 19:26:14 +00:00
|
|
|
t.Fatalf("should have registered maintenance check")
|
|
|
|
}
|
2015-01-21 20:21:57 +00:00
|
|
|
|
2015-09-10 19:08:08 +00:00
|
|
|
// Check that the token was used
|
2017-08-28 12:17:13 +00:00
|
|
|
if token := a.State.CheckToken(structs.NodeMaint); token != "mytoken" {
|
2015-09-10 19:08:08 +00:00
|
|
|
t.Fatalf("expected 'mytoken', got '%s'", token)
|
|
|
|
}
|
|
|
|
|
2015-01-21 20:21:57 +00:00
|
|
|
// Ensure the reason was set in notes
|
|
|
|
if check.Notes != "broken" {
|
|
|
|
t.Fatalf("bad: %#v", check)
|
|
|
|
}
|
2015-01-15 19:26:14 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_NodeMaintenance_Disable(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-15 19:26:14 +00:00
|
|
|
|
|
|
|
// Force the node into maintenance mode
|
2017-05-21 07:11:09 +00:00
|
|
|
a.EnableNodeMaintenance("", "")
|
2015-01-15 19:26:14 +00:00
|
|
|
|
|
|
|
// Leave maintenance mode
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=false", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
|
2015-01-15 19:26:14 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 200 {
|
|
|
|
t.Fatalf("expected 200, got %d", resp.Code)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the maintenance check was removed
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[structs.NodeMaint]; ok {
|
2015-01-15 19:26:14 +00:00
|
|
|
t.Fatalf("should have removed maintenance check")
|
|
|
|
}
|
|
|
|
}
|
2015-01-14 01:52:17 +00:00
|
|
|
|
2016-12-14 22:16:46 +00:00
|
|
|
func TestAgent_NodeMaintenance_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("no token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentNodeMaintenance(nil, req); !acl.IsErrPermissionDenied(err) {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Run("root token", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=root", nil)
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentNodeMaintenance(nil, req); err != nil {
|
2017-05-09 17:46:11 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2016-12-14 22:16:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_RegisterCheck_Service(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-14 01:52:17 +00:00
|
|
|
|
2017-06-15 16:46:06 +00:00
|
|
|
args := &structs.ServiceDefinition{
|
2015-01-14 01:52:17 +00:00
|
|
|
Name: "memcache",
|
|
|
|
Port: 8000,
|
2017-06-15 16:46:06 +00:00
|
|
|
Check: structs.CheckType{
|
2015-01-14 01:52:17 +00:00
|
|
|
TTL: 15 * time.Second,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2017-05-09 16:58:12 +00:00
|
|
|
// First register the service
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentRegisterService(nil, req); err != nil {
|
2015-01-14 01:52:17 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now register an additional check
|
2017-06-15 16:46:06 +00:00
|
|
|
checkArgs := &structs.CheckDefinition{
|
2015-01-14 01:52:17 +00:00
|
|
|
Name: "memcache_check2",
|
|
|
|
ServiceID: "memcache",
|
2017-05-15 19:49:13 +00:00
|
|
|
TTL: 15 * time.Second,
|
2015-01-14 01:52:17 +00:00
|
|
|
}
|
2017-09-26 06:11:19 +00:00
|
|
|
req, _ = http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(checkArgs))
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentRegisterCheck(nil, req); err != nil {
|
2015-01-14 01:52:17 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2017-08-28 12:17:13 +00:00
|
|
|
result := a.State.Checks()
|
2015-01-14 01:52:17 +00:00
|
|
|
if _, ok := result["service:memcache"]; !ok {
|
|
|
|
t.Fatalf("missing memcached check")
|
|
|
|
}
|
|
|
|
if _, ok := result["memcache_check2"]; !ok {
|
|
|
|
t.Fatalf("missing memcache_check2 check")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure the new check is associated with the service
|
|
|
|
if result["memcache_check2"].ServiceID != "memcache" {
|
|
|
|
t.Fatalf("bad: %#v", result["memcached_check2"])
|
|
|
|
}
|
2019-10-17 18:33:11 +00:00
|
|
|
|
|
|
|
// Make sure the new check has the right type
|
|
|
|
if result["memcache_check2"].Type != "ttl" {
|
|
|
|
t.Fatalf("expected TTL type, got %s", result["memcache_check2"].Type)
|
|
|
|
}
|
2015-01-14 01:52:17 +00:00
|
|
|
}
|
2016-11-16 21:45:26 +00:00
|
|
|
|
2016-12-14 17:33:57 +00:00
|
|
|
func TestAgent_Monitor(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2016-11-16 21:45:26 +00:00
|
|
|
logWriter := logger.NewLogWriter(512)
|
2019-09-05 17:24:36 +00:00
|
|
|
a := NewTestAgentWithFields(t, true, TestAgent{
|
2017-05-21 07:11:09 +00:00
|
|
|
LogWriter: logWriter,
|
2017-05-22 23:43:34 +00:00
|
|
|
LogOutput: io.MultiWriter(os.Stderr, logWriter),
|
2019-05-21 23:07:06 +00:00
|
|
|
HCL: `node_name = "invalid!"`,
|
2019-09-05 17:24:36 +00:00
|
|
|
})
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2016-11-16 21:45:26 +00:00
|
|
|
|
2016-11-28 21:08:31 +00:00
|
|
|
// Try passing an invalid log level
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil)
|
2016-11-16 21:45:26 +00:00
|
|
|
resp := newClosableRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentMonitor(resp, req); err != nil {
|
2016-11-28 21:08:31 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp.Code != 400 {
|
|
|
|
t.Fatalf("bad: %v", resp.Code)
|
|
|
|
}
|
|
|
|
body, _ := ioutil.ReadAll(resp.Body)
|
|
|
|
if !strings.Contains(string(body), "Unknown log level") {
|
|
|
|
t.Fatalf("bad: %s", body)
|
|
|
|
}
|
|
|
|
|
2016-12-15 02:13:30 +00:00
|
|
|
// Try to stream logs until we see the expected log line
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2016-12-15 02:13:30 +00:00
|
|
|
req, _ = http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
|
|
|
|
resp = newClosableRecorder()
|
|
|
|
done := make(chan struct{})
|
|
|
|
go func() {
|
2017-05-21 07:11:09 +00:00
|
|
|
if _, err := a.srv.AgentMonitor(resp, req); err != nil {
|
2016-12-15 02:13:30 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
2016-11-16 21:45:26 +00:00
|
|
|
}
|
2016-12-15 02:13:30 +00:00
|
|
|
close(done)
|
|
|
|
}()
|
2016-11-16 21:45:26 +00:00
|
|
|
|
2016-12-15 02:13:30 +00:00
|
|
|
resp.Close()
|
|
|
|
<-done
|
|
|
|
|
2017-04-29 16:34:02 +00:00
|
|
|
got := resp.Body.Bytes()
|
2019-05-21 23:07:06 +00:00
|
|
|
want := []byte(`[WARN] agent: Node name "invalid!" will not be discoverable via DNS`)
|
2017-04-29 16:34:02 +00:00
|
|
|
if !bytes.Contains(got, want) {
|
|
|
|
r.Fatalf("got %q and did not find %q", got, want)
|
2016-11-16 21:45:26 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-11-16 21:45:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// closableRecorder wraps httptest.ResponseRecorder with a channel so it
// can satisfy the CloseNotify contract used by the streaming monitor
// endpoint in the tests above.
type closableRecorder struct {
	*httptest.ResponseRecorder
	// closer is closed by Close and returned by CloseNotify.
	closer chan bool
}
|
|
|
|
|
|
|
|
func newClosableRecorder() *closableRecorder {
|
|
|
|
r := httptest.NewRecorder()
|
|
|
|
closer := make(chan bool)
|
|
|
|
return &closableRecorder{r, closer}
|
|
|
|
}
|
|
|
|
|
2016-12-15 02:13:30 +00:00
|
|
|
// Close signals any CloseNotify listener that the simulated client has
// gone away by closing the underlying channel.
func (r *closableRecorder) Close() {
	close(r.closer)
}
|
|
|
|
|
2016-11-16 21:45:26 +00:00
|
|
|
// CloseNotify returns a channel that is closed when Close is called,
// mirroring the http.CloseNotifier interface.
func (r *closableRecorder) CloseNotify() <-chan bool {
	return r.closer
}
|
2016-12-14 17:33:57 +00:00
|
|
|
|
|
|
|
func TestAgent_Monitor_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2016-12-14 17:33:57 +00:00
|
|
|
|
|
|
|
// Try without a token.
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/monitor", nil)
|
2017-08-23 14:52:48 +00:00
|
|
|
if _, err := a.srv.AgentMonitor(nil, req); !acl.IsErrPermissionDenied(err) {
|
2016-12-14 17:33:57 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This proves we call the ACL function, and we've got the other monitor
|
|
|
|
// test to prove monitor works, which should be sufficient. The monitor
|
|
|
|
// logic is a little complex to set up so isn't worth repeating again
|
|
|
|
// here.
|
|
|
|
}
|
2017-07-26 18:03:43 +00:00
|
|
|
|
2019-10-04 18:37:34 +00:00
|
|
|
// TestAgent_TokenTriggersFullSync verifies that updating any of the
// agent's token endpoints (/v1/agent/token/<kind>) both stores the token
// and lets the agent complete an anti-entropy sync using it.
func TestAgent_TokenTriggersFullSync(t *testing.T) {
	t.Parallel()

	// body builds the JSON payload the token endpoint expects.
	body := func(token string) io.Reader {
		return jsonReader(&api.AgentToken{Token: token})
	}

	// createNodePolicy creates an ACL policy granting node:write on every
	// node prefix, using the root token.
	createNodePolicy := func(t *testing.T, a *TestAgent, policyName string) *structs.ACLPolicy {
		policy := &structs.ACLPolicy{
			Name:  policyName,
			Rules: `node_prefix "" { policy = "write" }`,
		}

		req, err := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonBody(policy))
		require.NoError(t, err)

		resp := httptest.NewRecorder()
		obj, err := a.srv.ACLPolicyCreate(resp, req)
		require.NoError(t, err)

		policy, ok := obj.(*structs.ACLPolicy)
		require.True(t, ok)
		return policy
	}

	// createNodeToken creates the node-write policy and then a token
	// linked to it; the returned token is what the test hands to the
	// agent token endpoints.
	createNodeToken := func(t *testing.T, a *TestAgent, policyName string) *structs.ACLToken {
		createNodePolicy(t, a, policyName)

		token := &structs.ACLToken{
			Description: "test",
			Policies: []structs.ACLTokenPolicyLink{
				structs.ACLTokenPolicyLink{Name: policyName},
			},
		}

		req, err := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonBody(token))
		require.NoError(t, err)

		resp := httptest.NewRecorder()
		obj, err := a.srv.ACLTokenCreate(resp, req)
		require.NoError(t, err)

		token, ok := obj.(*structs.ACLToken)
		require.True(t, ok)
		return token
	}

	// Each case covers one endpoint path (legacy and current names) and
	// the token.Store accessor that should reflect the update.
	cases := []struct {
		path       string
		tokenGetFn func(*token.Store) string
	}{
		{
			path:       "acl_agent_token",
			tokenGetFn: (*token.Store).AgentToken,
		},
		{
			path:       "agent",
			tokenGetFn: (*token.Store).AgentToken,
		},
		{
			path:       "acl_token",
			tokenGetFn: (*token.Store).UserToken,
		},
		{
			path:       "default",
			tokenGetFn: (*token.Store).UserToken,
		},
	}

	for _, tt := range cases {
		tt := tt
		t.Run(tt.path, func(t *testing.T) {
			url := fmt.Sprintf("/v1/agent/token/%s?token=root", tt.path)

			// Start with all agent tokens blanked so the update is the
			// only source of credentials for the sync.
			a := NewTestAgent(t, t.Name(), TestACLConfig()+`
				acl {
					tokens {
						default = ""
						agent = ""
						agent_master = ""
						replication = ""
					}
				}
			`)
			defer a.Shutdown()
			testrpc.WaitForLeader(t, a.RPC, "dc1")

			// create node policy and token
			token := createNodeToken(t, a, "test")

			req, err := http.NewRequest("PUT", url, body(token.SecretID))
			require.NoError(t, err)

			resp := httptest.NewRecorder()
			_, err = a.srv.AgentToken(resp, req)
			require.NoError(t, err)

			require.Equal(t, http.StatusOK, resp.Code)
			require.Equal(t, token.SecretID, tt.tokenGetFn(a.tokens))

			// The new token must be enough for the agent to finish an
			// anti-entropy sync.
			testrpc.WaitForTestAgent(t, a.RPC, "dc1",
				testrpc.WithToken("root"),
				testrpc.WaitForAntiEntropySync())
		})
	}
}
|
|
|
|
|
2017-07-26 18:03:43 +00:00
|
|
|
// TestAgent_Token exercises the /v1/agent/token/<kind> endpoint for both
// the legacy and current path names, covering set and clear operations
// for the default/user, agent, agent_master, and replication tokens.
func TestAgent_Token(t *testing.T) {
	t.Parallel()

	// The behavior of this handler when ACLs are disabled is vetted over
	// in TestACL_Disabled_Response since there's already good infra set
	// up over there to test this, and it calls the common function.
	a := NewTestAgent(t, t.Name(), TestACLConfig()+`
		acl {
			tokens {
				default = ""
				agent = ""
				agent_master = ""
				replication = ""
			}
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// tokens captures one snapshot of all four agent tokens plus the
	// source each was set from (config vs. API).
	type tokens struct {
		user         string
		userSource   tokenStore.TokenSource
		agent        string
		agentSource  tokenStore.TokenSource
		master       string
		masterSource tokenStore.TokenSource
		repl         string
		replSource   tokenStore.TokenSource
	}

	// resetTokens forces the agent's token store into a known state
	// before each table case runs.
	resetTokens := func(init tokens) {
		a.tokens.UpdateUserToken(init.user, init.userSource)
		a.tokens.UpdateAgentToken(init.agent, init.agentSource)
		a.tokens.UpdateAgentMasterToken(init.master, init.masterSource)
		a.tokens.UpdateReplicationToken(init.repl, init.replSource)
	}

	body := func(token string) io.Reader {
		return jsonReader(&api.AgentToken{Token: token})
	}

	badJSON := func() io.Reader {
		return jsonReader(false)
	}

	// init is the store state before the request; raw is the expected
	// stored value+source afterwards; effective is what the convenience
	// accessors (with fallbacks, e.g. agent falling back to user) return.
	tests := []struct {
		name        string
		method, url string
		body        io.Reader
		code        int
		init        tokens
		raw         tokens
		effective   tokens
	}{
		{
			name:   "bad token name",
			method: "PUT",
			url:    "nope?token=root",
			body:   body("X"),
			code:   http.StatusNotFound,
		},
		{
			name:   "bad JSON",
			method: "PUT",
			url:    "acl_token?token=root",
			body:   badJSON(),
			code:   http.StatusBadRequest,
		},
		{
			name:      "set user legacy",
			method:    "PUT",
			url:       "acl_token?token=root",
			body:      body("U"),
			code:      http.StatusOK,
			raw:       tokens{user: "U", userSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "U"},
		},
		{
			name:      "set default",
			method:    "PUT",
			url:       "default?token=root",
			body:      body("U"),
			code:      http.StatusOK,
			raw:       tokens{user: "U", userSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "U"},
		},
		{
			name:      "set agent legacy",
			method:    "PUT",
			url:       "acl_agent_token?token=root",
			body:      body("A"),
			code:      http.StatusOK,
			init:      tokens{user: "U", agent: "U"},
			raw:       tokens{user: "U", agent: "A", agentSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "A"},
		},
		{
			name:      "set agent",
			method:    "PUT",
			url:       "agent?token=root",
			body:      body("A"),
			code:      http.StatusOK,
			init:      tokens{user: "U", agent: "U"},
			raw:       tokens{user: "U", agent: "A", agentSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "A"},
		},
		{
			name:      "set master legacy",
			method:    "PUT",
			url:       "acl_agent_master_token?token=root",
			body:      body("M"),
			code:      http.StatusOK,
			raw:       tokens{master: "M", masterSource: tokenStore.TokenSourceAPI},
			effective: tokens{master: "M"},
		},
		{
			name:      "set master ",
			method:    "PUT",
			url:       "agent_master?token=root",
			body:      body("M"),
			code:      http.StatusOK,
			raw:       tokens{master: "M", masterSource: tokenStore.TokenSourceAPI},
			effective: tokens{master: "M"},
		},
		{
			name:      "set repl legacy",
			method:    "PUT",
			url:       "acl_replication_token?token=root",
			body:      body("R"),
			code:      http.StatusOK,
			raw:       tokens{repl: "R", replSource: tokenStore.TokenSourceAPI},
			effective: tokens{repl: "R"},
		},
		{
			name:      "set repl",
			method:    "PUT",
			url:       "replication?token=root",
			body:      body("R"),
			code:      http.StatusOK,
			raw:       tokens{repl: "R", replSource: tokenStore.TokenSourceAPI},
			effective: tokens{repl: "R"},
		},
		{
			name:   "clear user legacy",
			method: "PUT",
			url:    "acl_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{user: "U"},
			raw:    tokens{userSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear default",
			method: "PUT",
			url:    "default?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{user: "U"},
			raw:    tokens{userSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear agent legacy",
			method: "PUT",
			url:    "acl_agent_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{agent: "A"},
			raw:    tokens{agentSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear agent",
			method: "PUT",
			url:    "agent?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{agent: "A"},
			raw:    tokens{agentSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear master legacy",
			method: "PUT",
			url:    "acl_agent_master_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{master: "M"},
			raw:    tokens{masterSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear master",
			method: "PUT",
			url:    "agent_master?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{master: "M"},
			raw:    tokens{masterSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear repl legacy",
			method: "PUT",
			url:    "acl_replication_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{repl: "R"},
			raw:    tokens{replSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear repl",
			method: "PUT",
			url:    "replication?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{repl: "R"},
			raw:    tokens{replSource: tokenStore.TokenSourceAPI},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resetTokens(tt.init)
			url := fmt.Sprintf("/v1/agent/token/%s", tt.url)
			resp := httptest.NewRecorder()
			req, _ := http.NewRequest(tt.method, url, tt.body)
			_, err := a.srv.AgentToken(resp, req)
			require.NoError(t, err)
			require.Equal(t, tt.code, resp.Code)
			require.Equal(t, tt.effective.user, a.tokens.UserToken())
			require.Equal(t, tt.effective.agent, a.tokens.AgentToken())
			require.Equal(t, tt.effective.master, a.tokens.AgentMasterToken())
			require.Equal(t, tt.effective.repl, a.tokens.ReplicationToken())

			// Also verify the raw stored values and their sources.
			tok, src := a.tokens.UserTokenAndSource()
			require.Equal(t, tt.raw.user, tok)
			require.Equal(t, tt.raw.userSource, src)

			tok, src = a.tokens.AgentTokenAndSource()
			require.Equal(t, tt.raw.agent, tok)
			require.Equal(t, tt.raw.agentSource, src)

			tok, src = a.tokens.AgentMasterTokenAndSource()
			require.Equal(t, tt.raw.master, tok)
			require.Equal(t, tt.raw.masterSource, src)

			tok, src = a.tokens.ReplicationTokenAndSource()
			require.Equal(t, tt.raw.repl, tok)
			require.Equal(t, tt.raw.replSource, src)
		})
	}

	// This one returns an error that is interpreted by the HTTP wrapper, so
	// doesn't fit into our table above.
	t.Run("permission denied", func(t *testing.T) {
		resetTokens(tokens{})
		req, _ := http.NewRequest("PUT", "/v1/agent/token/acl_token", body("X"))
		_, err := a.srv.AgentToken(nil, req)
		require.True(t, acl.IsErrPermissionDenied(err))
		require.Equal(t, "", a.tokens.UserToken())
	})
}
|
2018-03-21 17:20:35 +00:00
|
|
|
|
|
|
|
func TestAgentConnectCARoots_empty(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
2018-05-09 16:15:29 +00:00
|
|
|
require := require.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "connect { enabled = false }")
|
2018-03-21 17:20:35 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-03-21 17:20:35 +00:00
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2018-07-25 19:26:27 +00:00
|
|
|
_, err := a.srv.AgentConnectCARoots(resp, req)
|
|
|
|
require.Error(err)
|
|
|
|
require.Contains(err.Error(), "Connect must be enabled")
|
2018-03-21 17:20:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestAgentConnectCARoots_list verifies the CA roots endpoint: it lists
// the configured roots without secret material, serves repeat requests
// from the cache, and refreshes the cache in the background when the
// active CA changes.
func TestAgentConnectCARoots_list(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Set some CAs. Note that NewTestAgent already bootstraps one CA so this just
	// adds a second and makes it active.
	ca2 := connect.TestCAConfigSet(t, a, nil)

	// List
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentConnectCARoots(resp, req)
	require.NoError(err)

	value := obj.(structs.IndexedCARoots)
	assert.Equal(value.ActiveRootID, ca2.ID)
	// Would like to assert that it's the same as the TestAgent domain but the
	// only way to access that state via this package is by RPC to the server
	// implementation running in TestAgent which is more or less a tautology.
	assert.NotEmpty(value.TrustDomain)
	assert.Len(value.Roots, 2)

	// We should never have the secret information
	for _, r := range value.Roots {
		assert.Equal("", r.SigningCert)
		assert.Equal("", r.SigningKey)
	}

	// First fetch populates the cache, so this one is a miss.
	assert.Equal("MISS", resp.Header().Get("X-Cache"))

	// Test caching
	{
		// List it again
		resp2 := httptest.NewRecorder()
		obj2, err := a.srv.AgentConnectCARoots(resp2, req)
		require.NoError(err)
		assert.Equal(obj, obj2)

		// Should cache hit this time and not make request
		assert.Equal("HIT", resp2.Header().Get("X-Cache"))
	}

	// Test that caching is updated in the background
	{
		// Set a new CA
		ca := connect.TestCAConfigSet(t, a, nil)

		// Retry because the background cache refresh is asynchronous.
		retry.Run(t, func(r *retry.R) {
			// List it again
			resp := httptest.NewRecorder()
			obj, err := a.srv.AgentConnectCARoots(resp, req)
			r.Check(err)

			value := obj.(structs.IndexedCARoots)
			if ca.ID != value.ActiveRootID {
				r.Fatalf("%s != %s", ca.ID, value.ActiveRootID)
			}
			// There are now 3 CAs because we didn't complete rotation on the original
			// 2
			if len(value.Roots) != 3 {
				r.Fatalf("bad len: %d", len(value.Roots))
			}

			// Should be a cache hit! The data should've updated in the cache
			// in the background so this should've been fetched directly from
			// the cache.
			if resp.Header().Get("X-Cache") != "HIT" {
				r.Fatalf("should be a cache hit")
			}
		})
	}
}
|
2018-03-21 17:55:39 +00:00
|
|
|
|
2018-05-07 04:46:22 +00:00
|
|
|
// TestAgentConnectCALeafCert_aclDefaultDeny verifies that with ACLs in
// default-deny mode, a request for a Connect leaf certificate made without
// any token is rejected with a permission-denied error.
func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register a service with Connect enabled (using the root token so the
	// registration itself succeeds despite default-deny ACLs).
	{
		reg := &structs.ServiceDefinition{
			ID:      "test-id",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
			Connect: &structs.ServiceConnect{},
		}

		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
	}

	// Request a leaf cert with NO token: default-deny must reject it.
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
	resp := httptest.NewRecorder()
	_, err := a.srv.AgentConnectCALeafCert(resp, req)
	require.Error(err)
	require.True(acl.IsErrPermissionDenied(err))
}
|
|
|
|
|
2019-08-09 19:19:30 +00:00
|
|
|
// TestAgentConnectCALeafCert_aclServiceWrite verifies that a token carrying
// service:write on the target service is sufficient to obtain a Connect leaf
// certificate for that service.
func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register a service with Connect enabled (root token bypasses the
	// default-deny policy for the registration itself).
	{
		reg := &structs.ServiceDefinition{
			ID:      "test-id",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
			Connect: &structs.ServiceConnect{},
		}

		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
	}

	// Create an ACL with service:write for our service (via the legacy
	// /v1/acl/create endpoint).
	var token string
	{
		args := map[string]interface{}{
			"Name":  "User Token",
			"Type":  "client",
			"Rules": `service "test" { policy = "write" }`,
		}
		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.ACLCreate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		aclResp := obj.(aclCreateResponse)
		token = aclResp.ID
	}

	// With service:write the leaf cert request must succeed.
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
	require.NoError(err)

	// Get the issued cert
	_, ok := obj.(*structs.IssuedCert)
	require.True(ok)
}
|
|
|
|
|
2019-08-09 19:19:30 +00:00
|
|
|
// TestAgentConnectCALeafCert_aclServiceReadDeny verifies that service:read
// alone is NOT sufficient to obtain a Connect leaf certificate — issuing a
// leaf requires service:write on the target service.
func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register a service with Connect enabled (root token bypasses the
	// default-deny policy for the registration itself).
	{
		reg := &structs.ServiceDefinition{
			ID:      "test-id",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
			Connect: &structs.ServiceConnect{},
		}

		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
	}

	// Create an ACL with service:read for our service (read-only — should
	// not be enough to request a leaf cert).
	var token string
	{
		args := map[string]interface{}{
			"Name":  "User Token",
			"Type":  "client",
			"Rules": `service "test" { policy = "read" }`,
		}
		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.ACLCreate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		aclResp := obj.(aclCreateResponse)
		token = aclResp.ID
	}

	// The leaf cert request with a read-only token must be denied.
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil)
	resp := httptest.NewRecorder()
	_, err := a.srv.AgentConnectCALeafCert(resp, req)
	require.Error(err)
	require.True(acl.IsErrPermissionDenied(err))
}
|
|
|
|
|
2018-03-21 17:55:39 +00:00
|
|
|
// TestAgentConnectCALeafCert_good exercises the happy path of the leaf cert
// endpoint for a locally-registered service: the first fetch is a cache MISS,
// repeat fetches are HITs, and a CA rotation causes the cached leaf to be
// re-issued in the background (new cert AND new private key).
func TestAgentConnectCALeafCert_good(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// CA already setup by default by NewTestAgent but force a new one so we can
	// verify it was signed easily.
	ca1 := connect.TestCAConfigSet(t, a, nil)

	{
		// Register a local service
		args := &structs.ServiceDefinition{
			ID:      "foo",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
		}
		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		if !assert.Equal(200, resp.Code) {
			t.Log("Body: ", resp.Body.String())
		}
	}

	// First fetch: nothing cached yet so this must be a MISS.
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
	require.NoError(err)
	require.Equal("MISS", resp.Header().Get("X-Cache"))

	// Get the issued cert
	issued, ok := obj.(*structs.IssuedCert)
	assert.True(ok)

	// Verify that the cert is signed by the CA
	requireLeafValidUnderCA(t, issued, ca1)

	// Verify blocking index
	assert.True(issued.ModifyIndex > 0)
	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
		resp.Header().Get("X-Consul-Index"))

	// Test caching
	{
		// Fetch it again
		resp := httptest.NewRecorder()
		obj2, err := a.srv.AgentConnectCALeafCert(resp, req)
		require.NoError(err)
		require.Equal(obj, obj2)

		// Should cache hit this time and not make request
		require.Equal("HIT", resp.Header().Get("X-Cache"))
	}

	// Test that caching is updated in the background
	{
		// Set a new CA
		ca := connect.TestCAConfigSet(t, a, nil)

		retry.Run(t, func(r *retry.R) {
			resp := httptest.NewRecorder()
			// Try and sign again (note no index/wait arg since cache should update in
			// background even if we aren't actively blocking)
			obj, err := a.srv.AgentConnectCALeafCert(resp, req)
			r.Check(err)

			issued2 := obj.(*structs.IssuedCert)
			if issued.CertPEM == issued2.CertPEM {
				r.Fatalf("leaf has not updated")
			}

			// Got a new leaf. Sanity check it's a whole new key as well as different
			// cert.
			if issued.PrivateKeyPEM == issued2.PrivateKeyPEM {
				r.Fatalf("new leaf has same private key as before")
			}

			// Verify that the cert is signed by the new CA
			requireLeafValidUnderCA(t, issued2, ca)

			// Should be a cache hit! The data should've updated in the cache
			// in the background so this should've been fetched directly from
			// the cache.
			if resp.Header().Get("X-Cache") != "HIT" {
				r.Fatalf("should be a cache hit")
			}
		})
	}
}
|
|
|
|
|
2019-08-09 19:19:30 +00:00
|
|
|
// Test we can request a leaf cert for a service we have permission for
|
|
|
|
// but is not local to this agent.
|
|
|
|
func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// CA already setup by default by NewTestAgent but force a new one so we can
	// verify it was signed easily.
	ca1 := connect.TestCAConfigSet(t, a, nil)

	{
		// Register a non-local service (central catalog) — the service lives on
		// node "foo", not on this agent, but we can still request a leaf for it.
		args := &structs.RegisterRequest{
			Node:    "foo",
			Address: "127.0.0.1",
			Service: &structs.NodeService{
				Service: "test",
				Address: "127.0.0.1",
				Port:    8080,
			},
		}
		req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args))
		resp := httptest.NewRecorder()
		_, err := a.srv.CatalogRegister(resp, req)
		require.NoError(err)
		if !assert.Equal(200, resp.Code) {
			t.Log("Body: ", resp.Body.String())
		}
	}

	// First fetch: nothing cached yet so this must be a MISS.
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
	require.NoError(err)
	require.Equal("MISS", resp.Header().Get("X-Cache"))

	// Get the issued cert
	issued, ok := obj.(*structs.IssuedCert)
	assert.True(ok)

	// Verify that the cert is signed by the CA
	requireLeafValidUnderCA(t, issued, ca1)

	// Verify blocking index
	assert.True(issued.ModifyIndex > 0)
	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
		resp.Header().Get("X-Consul-Index"))

	// Test caching
	{
		// Fetch it again
		resp := httptest.NewRecorder()
		obj2, err := a.srv.AgentConnectCALeafCert(resp, req)
		require.NoError(err)
		require.Equal(obj, obj2)

		// Should cache hit this time and not make request
		require.Equal("HIT", resp.Header().Get("X-Cache"))
	}

	// Test Blocking - see https://github.com/hashicorp/consul/issues/4462
	{
		// Fetch it again with a short wait; a blocking query at the current
		// index must return when the wait expires, not hang.
		resp := httptest.NewRecorder()
		blockingReq, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/connect/ca/leaf/test?wait=125ms&index=%d", issued.ModifyIndex), nil)
		doneCh := make(chan struct{})
		go func() {
			a.srv.AgentConnectCALeafCert(resp, blockingReq)
			close(doneCh)
		}()

		select {
		case <-time.After(500 * time.Millisecond):
			require.FailNow("Shouldn't block for this long - not respecting wait parameter in the query")

		case <-doneCh:
		}
	}

	// Test that caching is updated in the background
	{
		// Set a new CA
		ca := connect.TestCAConfigSet(t, a, nil)

		retry.Run(t, func(r *retry.R) {
			resp := httptest.NewRecorder()
			// Try and sign again (note no index/wait arg since cache should update in
			// background even if we aren't actively blocking)
			obj, err := a.srv.AgentConnectCALeafCert(resp, req)
			r.Check(err)

			issued2 := obj.(*structs.IssuedCert)
			if issued.CertPEM == issued2.CertPEM {
				r.Fatalf("leaf has not updated")
			}

			// Got a new leaf. Sanity check it's a whole new key as well as different
			// cert.
			if issued.PrivateKeyPEM == issued2.PrivateKeyPEM {
				r.Fatalf("new leaf has same private key as before")
			}

			// Verify that the cert is signed by the new CA
			requireLeafValidUnderCA(t, issued2, ca)

			// Should be a cache hit! The data should've updated in the cache
			// in the background so this should've been fetched directly from
			// the cache.
			if resp.Header().Get("X-Cache") != "HIT" {
				r.Fatalf("should be a cache hit")
			}
		})
	}
}
|
|
|
|
|
2019-09-26 16:54:14 +00:00
|
|
|
// TestAgentConnectCALeafCert_secondaryDC_good exercises leaf cert issuance in
// a secondary datacenter: leaves issued in dc2 must chain to the primary's
// active root, the cache must not churn leaves at idle, and a CA rotation in
// the primary must propagate and re-issue the cached leaf in the background.
func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)

	a1 := NewTestAgent(t, t.Name()+"-dc1", `
		datacenter = "dc1"
		primary_datacenter = "dc1"
	`)
	defer a1.Shutdown()
	testrpc.WaitForTestAgent(t, a1.RPC, "dc1")

	a2 := NewTestAgent(t, t.Name()+"-dc2", `
		datacenter = "dc2"
		primary_datacenter = "dc1"
	`)
	defer a2.Shutdown()
	testrpc.WaitForTestAgent(t, a2.RPC, "dc2")

	// Wait for the WAN join.
	addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN)
	_, err := a2.JoinWAN([]string{addr})
	require.NoError(err)

	testrpc.WaitForLeader(t, a1.RPC, "dc1")
	testrpc.WaitForLeader(t, a2.RPC, "dc2")
	retry.Run(t, func(r *retry.R) {
		if got, want := len(a1.WANMembers()), 2; got < want {
			r.Fatalf("got %d WAN members want at least %d", got, want)
		}
	})

	// CA already setup by default by NewTestAgent but force a new one so we can
	// verify it was signed easily.
	dc1_ca1 := connect.TestCAConfigSet(t, a1, nil)

	// Wait until root is updated in both dcs.
	waitForActiveCARoot(t, a1.srv, dc1_ca1)
	waitForActiveCARoot(t, a2.srv, dc1_ca1)

	{
		// Register a local service in the SECONDARY
		args := &structs.ServiceDefinition{
			ID:      "foo",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
		}
		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
		resp := httptest.NewRecorder()
		_, err := a2.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		if !assert.Equal(200, resp.Code) {
			t.Log("Body: ", resp.Body.String())
		}
	}

	// First fetch from the secondary: must be a cache MISS.
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
	resp := httptest.NewRecorder()
	obj, err := a2.srv.AgentConnectCALeafCert(resp, req)
	require.NoError(err)
	require.Equal("MISS", resp.Header().Get("X-Cache"))

	// Get the issued cert
	issued, ok := obj.(*structs.IssuedCert)
	assert.True(ok)

	// Verify that the cert is signed by the CA
	requireLeafValidUnderCA(t, issued, dc1_ca1)

	// Verify blocking index
	assert.True(issued.ModifyIndex > 0)
	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
		resp.Header().Get("X-Consul-Index"))

	// Test caching
	{
		// Fetch it again
		resp := httptest.NewRecorder()
		obj2, err := a2.srv.AgentConnectCALeafCert(resp, req)
		require.NoError(err)
		require.Equal(obj, obj2)

		// Should cache hit this time and not make request
		require.Equal("HIT", resp.Header().Get("X-Cache"))
	}

	// Test that we aren't churning leaves for no reason at idle.
	{
		// Block at the current index; any wake-up within the window below is a
		// failure (the leaf should be stable while nothing changes).
		ch := make(chan error, 1)
		go func() {
			req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil)
			resp := httptest.NewRecorder()
			obj, err := a2.srv.AgentConnectCALeafCert(resp, req)
			if err != nil {
				ch <- err
			} else {
				issued2 := obj.(*structs.IssuedCert)
				if issued.CertPEM == issued2.CertPEM {
					ch <- fmt.Errorf("leaf woke up unexpectedly with same cert")
				} else {
					ch <- fmt.Errorf("leaf woke up unexpectedly with new cert")
				}
			}
		}()

		start := time.Now()

		// Before applying the fix from PR-6513 this would reliably wake up
		// after ~20ms with a new cert. Since this test is necessarily a bit
		// timing dependent we'll chill out for 5 seconds which should be enough
		// time to disprove the original bug.
		select {
		case <-time.After(5 * time.Second):
		case err := <-ch:
			dur := time.Since(start)
			t.Fatalf("unexpected return from blocking query; leaf churned during idle period, took %s: %v", dur, err)
		}
	}

	// Set a new CA
	dc1_ca2 := connect.TestCAConfigSet(t, a2, nil)

	// Wait until root is updated in both dcs.
	waitForActiveCARoot(t, a1.srv, dc1_ca2)
	waitForActiveCARoot(t, a2.srv, dc1_ca2)

	// Test that caching is updated in the background
	retry.Run(t, func(r *retry.R) {
		resp := httptest.NewRecorder()
		// Try and sign again (note no index/wait arg since cache should update in
		// background even if we aren't actively blocking)
		obj, err := a2.srv.AgentConnectCALeafCert(resp, req)
		r.Check(err)

		issued2 := obj.(*structs.IssuedCert)
		if issued.CertPEM == issued2.CertPEM {
			r.Fatalf("leaf has not updated")
		}

		// Got a new leaf. Sanity check it's a whole new key as well as different
		// cert.
		if issued.PrivateKeyPEM == issued2.PrivateKeyPEM {
			r.Fatalf("new leaf has same private key as before")
		}

		// Verify that the cert is signed by the new CA
		requireLeafValidUnderCA(t, issued2, dc1_ca2)

		// Should be a cache hit! The data should've updated in the cache
		// in the background so this should've been fetched directly from
		// the cache.
		if resp.Header().Get("X-Cache") != "HIT" {
			r.Fatalf("should be a cache hit")
		}
	})
}
|
|
|
|
|
|
|
|
func waitForActiveCARoot(t *testing.T, srv *HTTPServer, expect *structs.CARoot) {
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := srv.AgentConnectCARoots(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
roots, ok := obj.(structs.IndexedCARoots)
|
|
|
|
if !ok {
|
|
|
|
r.Fatalf("response is wrong type %T", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
var root *structs.CARoot
|
|
|
|
for _, r := range roots.Roots {
|
|
|
|
if r.ID == roots.ActiveRootID {
|
|
|
|
root = r
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if root == nil {
|
|
|
|
r.Fatal("no active root")
|
|
|
|
}
|
|
|
|
if root.ID != expect.ID {
|
|
|
|
r.Fatalf("current active root is %s; waiting for %s", root.ID, expect.ID)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// requireLeafValidUnderCA asserts that the issued leaf certificate chains to
// the given CA root (via any intermediates embedded in the leaf PEM) and that
// the issued private key actually matches the certificate.
func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, ca *structs.CARoot) {
	leaf, intermediates, err := connect.ParseLeafCerts(issued.CertPEM)
	require.NoError(t, err)

	// Build a pool containing only the expected root so verification fails if
	// the leaf was signed by any other CA.
	roots := x509.NewCertPool()
	require.True(t, roots.AppendCertsFromPEM([]byte(ca.RootCert)))

	_, err = leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
	})
	require.NoError(t, err)

	// Verify the private key matches. tls.LoadX509Keypair does this for us!
	_, err = tls.X509KeyPair([]byte(issued.CertPEM), []byte(issued.PrivateKeyPEM))
	require.NoError(t, err)
}
|
|
|
|
|
|
|
|
func makeTelemetryDefaults(targetID string) lib.TelemetryConfig {
|
|
|
|
return lib.TelemetryConfig{
|
|
|
|
FilterDefault: true,
|
|
|
|
MetricsPrefix: "consul.proxy." + targetID,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-26 00:52:26 +00:00
|
|
|
// TestAgentConnectAuthorize_badBody verifies that a request body that cannot
// be decoded into a ConnectAuthorizeRequest is rejected with a decode error.
func TestAgentConnectAuthorize_badBody(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// A JSON array is not a valid ConnectAuthorizeRequest object.
	args := []string{}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is translated to an HTTP status code outside
	// the endpoint handler, so the recorder still shows 200 here; we assert on
	// the returned error instead.
	assert.Contains(err.Error(), "decode failed")
}
|
|
|
|
|
|
|
|
// TestAgentConnectAuthorize_noTarget verifies that an authorize request with
// no Target service is rejected.
func TestAgentConnectAuthorize_noTarget(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// Empty request: Target is required.
	args := &structs.ConnectAuthorizeRequest{}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is translated to an HTTP status code outside
	// the endpoint handler, so the recorder still shows 200 here; we assert on
	// the returned error instead.
	assert.Contains(err.Error(), "Target service must be specified")
}
|
|
|
|
|
|
|
|
// Client ID is not in the valid URI format
|
|
|
|
// Client ID is not in the valid URI format
func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// "tubes" is not a parseable URI, let alone a Connect SPIFFE identifier.
	args := &structs.ConnectAuthorizeRequest{
		Target:        "web",
		ClientCertURI: "tubes",
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is translated to an HTTP status code outside
	// the endpoint handler, so the recorder still shows 200 here; we assert on
	// the returned error instead.
	assert.Contains(err.Error(), "ClientCertURI not a valid Connect identifier")
}
|
|
|
|
|
|
|
|
// Client ID is a valid URI but its not a service URI
|
|
|
|
// Client ID is a valid URI but its not a service URI
func TestAgentConnectAuthorize_idNotService(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// A valid SPIFFE URI, but it identifies a cluster, not a service.
	args := &structs.ConnectAuthorizeRequest{
		Target:        "web",
		ClientCertURI: "spiffe://1234.consul",
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is translated to an HTTP status code outside
	// the endpoint handler, so the recorder still shows 200 here; we assert on
	// the returned error instead.
	assert.Contains(err.Error(), "ClientCertURI not a valid Service identifier")
}
|
|
|
|
|
|
|
|
// Test when there is an intention allowing the connection
|
|
|
|
func TestAgentConnectAuthorize_allow(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
2018-04-17 23:26:58 +00:00
|
|
|
require := require.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2018-03-26 00:52:26 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-03-26 00:52:26 +00:00
|
|
|
target := "db"
|
|
|
|
|
|
|
|
// Create some intentions
|
2018-04-17 23:26:58 +00:00
|
|
|
var ixnId string
|
2018-03-26 00:52:26 +00:00
|
|
|
{
|
|
|
|
req := structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpCreate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.SourceNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.SourceName = "web"
|
|
|
|
req.Intention.DestinationNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.DestinationName = target
|
|
|
|
req.Intention.Action = structs.IntentionActionAllow
|
|
|
|
|
2018-04-17 23:26:58 +00:00
|
|
|
require.Nil(a.RPC("Intention.Apply", &req, &ixnId))
|
2018-03-26 00:52:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-05-10 16:04:33 +00:00
|
|
|
Target: target,
|
|
|
|
ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
|
2018-03-26 00:52:26 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
2018-04-17 23:26:58 +00:00
|
|
|
require.Nil(err)
|
|
|
|
require.Equal(200, resp.Code)
|
2018-06-15 12:13:54 +00:00
|
|
|
require.Equal("MISS", resp.Header().Get("X-Cache"))
|
2018-03-26 00:52:26 +00:00
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
2018-04-17 23:26:58 +00:00
|
|
|
require.True(obj.Authorized)
|
|
|
|
require.Contains(obj.Reason, "Matched")
|
|
|
|
|
|
|
|
// Make the request again
|
|
|
|
{
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
|
|
|
require.Nil(err)
|
|
|
|
require.Equal(200, resp.Code)
|
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
|
|
|
require.True(obj.Authorized)
|
|
|
|
require.Contains(obj.Reason, "Matched")
|
|
|
|
|
2018-06-15 12:13:54 +00:00
|
|
|
// That should've been a cache hit.
|
|
|
|
require.Equal("HIT", resp.Header().Get("X-Cache"))
|
|
|
|
}
|
2018-04-17 23:26:58 +00:00
|
|
|
|
|
|
|
// Change the intention
|
|
|
|
{
|
|
|
|
req := structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpUpdate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.ID = ixnId
|
|
|
|
req.Intention.SourceNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.SourceName = "web"
|
|
|
|
req.Intention.DestinationNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.DestinationName = target
|
|
|
|
req.Intention.Action = structs.IntentionActionDeny
|
|
|
|
|
|
|
|
require.Nil(a.RPC("Intention.Apply", &req, &ixnId))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Short sleep lets the cache background refresh happen
|
|
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
|
|
|
|
// Make the request again
|
|
|
|
{
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
|
|
|
require.Nil(err)
|
|
|
|
require.Equal(200, resp.Code)
|
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
|
|
|
require.False(obj.Authorized)
|
|
|
|
require.Contains(obj.Reason, "Matched")
|
|
|
|
|
2018-06-15 12:13:54 +00:00
|
|
|
// That should've been a cache hit, too, since it updated in the
|
|
|
|
// background.
|
|
|
|
require.Equal("HIT", resp.Header().Get("X-Cache"))
|
|
|
|
}
|
2018-03-26 00:52:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Test when there is an intention denying the connection
|
|
|
|
// Test when there is an intention denying the connection
func TestAgentConnectAuthorize_deny(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	target := "db"

	// Create a deny intention web -> db.
	{
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpCreate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "web"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionDeny

		var reply string
		assert.Nil(a.RPC("Intention.Apply", &req, &reply))
	}

	// Authorize web -> db: the deny intention must match and refuse it.
	args := &structs.ConnectAuthorizeRequest{
		Target:        target,
		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	assert.Nil(err)
	assert.Equal(200, resp.Code)

	obj := respRaw.(*connectAuthorizeResp)
	assert.False(obj.Authorized)
	assert.Contains(obj.Reason, "Matched")
}
|
2018-03-26 01:50:05 +00:00
|
|
|
|
2018-11-12 20:20:12 +00:00
|
|
|
// Test when there is an intention allowing service with a different trust
|
|
|
|
// domain. We allow this because migration between trust domains shouldn't cause
|
|
|
|
// an outage even if we have stale info about current trusted domains. It's safe
|
|
|
|
// because the CA root is either unique to this cluster and not used to sign
|
|
|
|
// anything external, or path validation can be used to ensure that the CA can
|
|
|
|
// only issue certs that are valid for the specific cluster trust domain at x509
|
|
|
|
// level which is enforced by TLS handshake.
|
|
|
|
func TestAgentConnectAuthorize_allowTrustDomain(t *testing.T) {
|
2018-05-09 19:30:43 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
assert := assert.New(t)
|
2018-10-03 19:37:53 +00:00
|
|
|
require := require.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2018-05-09 19:30:43 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-05-09 19:30:43 +00:00
|
|
|
target := "db"
|
|
|
|
|
|
|
|
// Create some intentions
|
|
|
|
{
|
|
|
|
req := structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpCreate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.SourceNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.SourceName = "web"
|
|
|
|
req.Intention.DestinationNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.DestinationName = target
|
|
|
|
req.Intention.Action = structs.IntentionActionAllow
|
|
|
|
|
|
|
|
var reply string
|
2018-10-03 19:37:53 +00:00
|
|
|
require.NoError(a.RPC("Intention.Apply", &req, &reply))
|
2018-05-09 19:30:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-05-10 16:04:33 +00:00
|
|
|
Target: target,
|
|
|
|
ClientCertURI: "spiffe://fake-domain.consul/ns/default/dc/dc1/svc/web",
|
2018-05-09 19:30:43 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
2018-10-03 19:37:53 +00:00
|
|
|
require.NoError(err)
|
2018-05-09 19:30:43 +00:00
|
|
|
assert.Equal(200, resp.Code)
|
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
2018-11-12 20:20:12 +00:00
|
|
|
require.True(obj.Authorized)
|
|
|
|
require.Contains(obj.Reason, "Matched")
|
2018-05-09 19:30:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-05 11:53:42 +00:00
|
|
|
func TestAgentConnectAuthorize_denyWildcard(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
assert := assert.New(t)
|
2018-10-03 19:37:53 +00:00
|
|
|
require := require.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
2018-04-05 11:53:42 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-04-05 11:53:42 +00:00
|
|
|
|
|
|
|
target := "db"
|
|
|
|
|
|
|
|
// Create some intentions
|
|
|
|
{
|
|
|
|
// Deny wildcard to DB
|
|
|
|
req := structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpCreate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.SourceNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.SourceName = "*"
|
|
|
|
req.Intention.DestinationNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.DestinationName = target
|
|
|
|
req.Intention.Action = structs.IntentionActionDeny
|
|
|
|
|
|
|
|
var reply string
|
2018-10-03 19:37:53 +00:00
|
|
|
require.NoError(a.RPC("Intention.Apply", &req, &reply))
|
2018-04-05 11:53:42 +00:00
|
|
|
}
|
|
|
|
{
|
|
|
|
// Allow web to DB
|
|
|
|
req := structs.IntentionRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.IntentionOpCreate,
|
|
|
|
Intention: structs.TestIntention(t),
|
|
|
|
}
|
|
|
|
req.Intention.SourceNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.SourceName = "web"
|
|
|
|
req.Intention.DestinationNS = structs.IntentionDefaultNamespace
|
|
|
|
req.Intention.DestinationName = target
|
|
|
|
req.Intention.Action = structs.IntentionActionAllow
|
|
|
|
|
|
|
|
var reply string
|
|
|
|
assert.Nil(a.RPC("Intention.Apply", &req, &reply))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Web should be allowed
|
|
|
|
{
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-05-10 16:04:33 +00:00
|
|
|
Target: target,
|
|
|
|
ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
|
2018-04-05 11:53:42 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
2018-10-03 19:37:53 +00:00
|
|
|
require.NoError(err)
|
2018-04-05 11:53:42 +00:00
|
|
|
assert.Equal(200, resp.Code)
|
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
|
|
|
assert.True(obj.Authorized)
|
|
|
|
assert.Contains(obj.Reason, "Matched")
|
|
|
|
}
|
|
|
|
|
|
|
|
// API should be denied
|
|
|
|
{
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-05-10 16:04:33 +00:00
|
|
|
Target: target,
|
|
|
|
ClientCertURI: connect.TestSpiffeIDService(t, "api").URI().String(),
|
2018-04-05 11:53:42 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
2018-10-03 19:37:53 +00:00
|
|
|
require.NoError(err)
|
2018-04-05 11:53:42 +00:00
|
|
|
assert.Equal(200, resp.Code)
|
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
|
|
|
assert.False(obj.Authorized)
|
|
|
|
assert.Contains(obj.Reason, "Matched")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-26 01:50:05 +00:00
|
|
|
// Test that authorize fails without service:write for the target service.
|
|
|
|
func TestAgentConnectAuthorize_serviceWrite(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
assert := assert.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2018-03-26 01:50:05 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2018-03-26 01:50:05 +00:00
|
|
|
|
|
|
|
// Create an ACL
|
|
|
|
var token string
|
|
|
|
{
|
|
|
|
args := map[string]interface{}{
|
|
|
|
"Name": "User Token",
|
|
|
|
"Type": "client",
|
|
|
|
"Rules": `service "foo" { policy = "read" }`,
|
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.ACLCreate(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
aclResp := obj.(aclCreateResponse)
|
|
|
|
token = aclResp.ID
|
|
|
|
}
|
|
|
|
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-03-28 21:29:35 +00:00
|
|
|
Target: "foo",
|
|
|
|
ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
|
2018-03-26 01:50:05 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST",
|
|
|
|
"/v1/agent/connect/authorize?token="+token, jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
_, err := a.srv.AgentConnectAuthorize(resp, req)
|
|
|
|
assert.True(acl.IsErrPermissionDenied(err))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test when no intentions match w/ a default deny policy
|
|
|
|
func TestAgentConnectAuthorize_defaultDeny(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
assert := assert.New(t)
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
2018-03-26 01:50:05 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2018-03-26 01:50:05 +00:00
|
|
|
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-03-28 21:29:35 +00:00
|
|
|
Target: "foo",
|
|
|
|
ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
|
2018-03-26 01:50:05 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
|
|
|
assert.Nil(err)
|
|
|
|
assert.Equal(200, resp.Code)
|
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
|
|
|
assert.False(obj.Authorized)
|
|
|
|
assert.Contains(obj.Reason, "Default behavior")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test when no intentions match w/ a default allow policy
|
|
|
|
func TestAgentConnectAuthorize_defaultAllow(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
assert := assert.New(t)
|
2018-08-06 23:46:09 +00:00
|
|
|
dc1 := "dc1"
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2018-08-06 23:46:09 +00:00
|
|
|
acl_datacenter = "`+dc1+`"
|
2018-03-26 01:50:05 +00:00
|
|
|
acl_default_policy = "allow"
|
|
|
|
acl_master_token = "root"
|
|
|
|
acl_agent_token = "root"
|
|
|
|
acl_agent_master_token = "towel"
|
|
|
|
acl_enforce_version_8 = true
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, dc1)
|
2018-03-26 01:50:05 +00:00
|
|
|
|
|
|
|
args := &structs.ConnectAuthorizeRequest{
|
2018-03-28 21:29:35 +00:00
|
|
|
Target: "foo",
|
|
|
|
ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
|
2018-03-26 01:50:05 +00:00
|
|
|
}
|
|
|
|
req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args))
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
|
|
|
|
assert.Nil(err)
|
|
|
|
assert.Equal(200, resp.Code)
|
2018-08-06 23:46:09 +00:00
|
|
|
assert.NotNil(respRaw)
|
2018-03-26 01:50:05 +00:00
|
|
|
|
|
|
|
obj := respRaw.(*connectAuthorizeResp)
|
|
|
|
assert.True(obj.Authorized)
|
|
|
|
assert.Contains(obj.Reason, "Default behavior")
|
|
|
|
}
|
2018-06-13 08:34:20 +00:00
|
|
|
|
2018-10-17 20:20:35 +00:00
|
|
|
func TestAgent_Host(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
dc1 := "dc1"
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2018-10-17 20:20:35 +00:00
|
|
|
acl_datacenter = "`+dc1+`"
|
|
|
|
acl_default_policy = "allow"
|
|
|
|
acl_master_token = "master"
|
|
|
|
acl_agent_token = "agent"
|
|
|
|
acl_agent_master_token = "towel"
|
|
|
|
acl_enforce_version_8 = true
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/host?token=master", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentHost(resp, req)
|
|
|
|
assert.Nil(err)
|
|
|
|
assert.Equal(http.StatusOK, resp.Code)
|
|
|
|
assert.NotNil(respRaw)
|
|
|
|
|
|
|
|
obj := respRaw.(*debug.HostInfo)
|
|
|
|
assert.NotNil(obj.CollectionTime)
|
|
|
|
assert.Empty(obj.Errors)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_HostBadACL(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
dc1 := "dc1"
|
2019-02-14 15:59:14 +00:00
|
|
|
a := NewTestAgent(t, t.Name(), `
|
2018-10-17 20:20:35 +00:00
|
|
|
acl_datacenter = "`+dc1+`"
|
|
|
|
acl_default_policy = "deny"
|
|
|
|
acl_master_token = "root"
|
|
|
|
acl_agent_token = "agent"
|
|
|
|
acl_agent_master_token = "towel"
|
|
|
|
acl_enforce_version_8 = true
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/host?token=agent", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
respRaw, err := a.srv.AgentHost(resp, req)
|
|
|
|
assert.EqualError(err, "ACL not found")
|
|
|
|
assert.Equal(http.StatusOK, resp.Code)
|
|
|
|
assert.Nil(respRaw)
|
|
|
|
}
|
2019-09-26 02:55:52 +00:00
|
|
|
|
|
|
|
// Thie tests that a proxy with an ExposeConfig is returned as expected.
|
|
|
|
func TestAgent_Services_ExposeConfig(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, t.Name(), "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "proxy-id",
|
|
|
|
Service: "proxy-name",
|
|
|
|
Port: 8443,
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
Expose: structs.ExposeConfig{
|
|
|
|
Checks: true,
|
|
|
|
Paths: []structs.ExposePath{
|
|
|
|
{
|
|
|
|
ListenerPort: 8080,
|
|
|
|
LocalPathPort: 21500,
|
|
|
|
Protocol: "http2",
|
|
|
|
Path: "/metrics",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
a.State.AddService(srv1, "")
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
|
|
|
obj, err := a.srv.AgentServices(nil, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
val := obj.(map[string]*api.AgentService)
|
|
|
|
require.Len(t, val, 1)
|
|
|
|
actual := val["proxy-id"]
|
|
|
|
require.NotNil(t, actual)
|
|
|
|
require.Equal(t, api.ServiceKindConnectProxy, actual.Kind)
|
|
|
|
require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy)
|
|
|
|
}
|