package agent

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/agent/checks"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/freeport"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/types"
	uuid "github.com/hashicorp/go-uuid"
	"github.com/pascaldekloe/goe/verify"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
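
// externalIP returns the first non-loopback IPv4 address found on the
// host's network interfaces.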
func externalIP() (string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", fmt.Errorf("Unable to lookup network interfaces: %v", err)
	}
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			if ipnet.IP.To4() != nil {
				return ipnet.IP.String(), nil
			}
		}
	}
	return "", fmt.Errorf("Unable to find a non-loopback interface")
}
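
// TestAgent_MultiStartStop starts and shuts down several agents in parallel
// subtests to shake out lifecycle races.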
func TestAgent_MultiStartStop(t *testing.T) {
	for i := 0; i < 10; i++ {
		t.Run("", func(t *testing.T) {
			t.Parallel()
			a := NewTestAgent(t, t.Name(), "")
			time.Sleep(250 * time.Millisecond)
			a.Shutdown()
		})
	}
}
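
// TestAgent_ConnectClusterIDConfig checks how the Connect CA cluster_id is
// defaulted and validated: a missing ID falls back to the test cluster ID,
// while a non-UUID value is fatal.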
func TestAgent_ConnectClusterIDConfig(t *testing.T) {
	tests := []struct {
		name          string
		hcl           string
		wantClusterID string
		wantPanic     bool
	}{
		{
			name:          "default TestAgent has fixed cluster id",
			hcl:           "",
			wantClusterID: connect.TestClusterID,
		},
		{
			name:          "no cluster ID specified sets to test ID",
			hcl:           "connect { enabled = true }",
			wantClusterID: connect.TestClusterID,
		},
		{
			name: "non-UUID cluster_id is fatal",
			hcl: `connect {
				enabled = true
				ca_config {
					cluster_id = "fake-id"
				}
			}`,
			wantClusterID: "",
			wantPanic:     true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Indirection to support panic recovery cleanly
			testFn := func() {
				a := &TestAgent{Name: "test", HCL: tt.hcl}
				a.ExpectConfigError = tt.wantPanic
				a.Start(t)
				defer a.Shutdown()

				cfg := a.consulConfig()
				assert.Equal(t, tt.wantClusterID, cfg.CAConfig.ClusterID)
			}

			if tt.wantPanic {
				require.Panics(t, testFn)
			} else {
				testFn()
			}
		})
	}
}

func TestAgent_StartStop(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	if err := a.Leave(); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := a.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	select {
	case <-a.ShutdownCh():
	default:
		t.Fatalf("should be closed")
	}
}
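
// TestAgent_RPCPing verifies that a freshly started agent answers a
// Status.Ping RPC.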
func TestAgent_RPCPing(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	var out struct{}
	if err := a.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
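
// TestAgent_TokenStore verifies that ACL tokens from the configuration are
// loaded into the agent's token store.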
func TestAgent_TokenStore(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), `
		acl_token = "user"
		acl_agent_token = "agent"
		acl_agent_master_token = "master"`,
	)
	defer a.Shutdown()

	if got, want := a.tokens.UserToken(), "user"; got != want {
		t.Fatalf("got %q want %q", got, want)
	}
	if got, want := a.tokens.AgentToken(), "agent"; got != want {
		t.Fatalf("got %q want %q", got, want)
	}
	if got, want := a.tokens.IsAgentMasterToken("master"), true; got != want {
		t.Fatalf("got %v want %v", got, want)
	}
}
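
// TestAgent_ReconnectConfigSettings checks the default and configured Serf
// LAN/WAN reconnect timeouts.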
func TestAgent_ReconnectConfigSettings(t *testing.T) {
	t.Parallel()
	func() {
		a := NewTestAgent(t, t.Name(), "")
		defer a.Shutdown()

		lan := a.consulConfig().SerfLANConfig.ReconnectTimeout
		if lan != 3*24*time.Hour {
			t.Fatalf("bad: %s", lan.String())
		}

		wan := a.consulConfig().SerfWANConfig.ReconnectTimeout
		if wan != 3*24*time.Hour {
			t.Fatalf("bad: %s", wan.String())
		}
	}()

	func() {
		a := NewTestAgent(t, t.Name(), `
			reconnect_timeout = "24h"
			reconnect_timeout_wan = "36h"
		`)
		defer a.Shutdown()

		lan := a.consulConfig().SerfLANConfig.ReconnectTimeout
		if lan != 24*time.Hour {
			t.Fatalf("bad: %s", lan.String())
		}

		wan := a.consulConfig().SerfWANConfig.ReconnectTimeout
		if wan != 36*time.Hour {
			t.Fatalf("bad: %s", wan.String())
		}
	}()
}

func TestAgent_ReconnectConfigWanDisabled(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), `
		ports { serf_wan = -1 }
		reconnect_timeout_wan = "36h"
	`)
	defer a.Shutdown()

	// This is also testing that we don't panic like before #4515
	require.Nil(t, a.consulConfig().SerfWANConfig)
}
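
// TestAgent_setupNodeID exercises node ID assignment: auto-generation,
// persistence to the node-id file, and validation of IDs supplied via the
// config or the file.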
func TestAgent_setupNodeID(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_id = ""
	`)
	defer a.Shutdown()

	cfg := a.config

	// The auto-assigned ID should be valid.
	id := a.consulConfig().NodeID
	if _, err := uuid.ParseUUID(string(id)); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Running again should get the same ID (persisted in the file).
	cfg.NodeID = ""
	if err := a.setupNodeID(cfg); err != nil {
		t.Fatalf("err: %v", err)
	}
	if newID := a.consulConfig().NodeID; id != newID {
		t.Fatalf("bad: %q vs %q", id, newID)
	}

	// Set an invalid ID via the config.
	cfg.NodeID = types.NodeID("nope")
	err := a.setupNodeID(cfg)
	if err == nil || !strings.Contains(err.Error(), "uuid string is wrong length") {
		t.Fatalf("err: %v", err)
	}

	// Set a valid ID via the config.
	newID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	cfg.NodeID = types.NodeID(strings.ToUpper(newID))
	if err := a.setupNodeID(cfg); err != nil {
		t.Fatalf("err: %v", err)
	}
	if id := a.consulConfig().NodeID; string(id) != newID {
		t.Fatalf("bad: %q vs. %q", id, newID)
	}

	// Set an invalid ID via the file.
	fileID := filepath.Join(cfg.DataDir, "node-id")
	if err := ioutil.WriteFile(fileID, []byte("adf4238a!882b!9ddc!4a9d!5b6758e4159e"), 0600); err != nil {
		t.Fatalf("err: %v", err)
	}
	cfg.NodeID = ""
	err = a.setupNodeID(cfg)
	if err == nil || !strings.Contains(err.Error(), "uuid is improperly formatted") {
		t.Fatalf("err: %v", err)
	}

	// Set a valid ID via the file.
	if err := ioutil.WriteFile(fileID, []byte("ADF4238a-882b-9ddc-4a9d-5b6758e4159e"), 0600); err != nil {
		t.Fatalf("err: %v", err)
	}
	cfg.NodeID = ""
	if err := a.setupNodeID(cfg); err != nil {
		t.Fatalf("err: %v", err)
	}
	if id := a.consulConfig().NodeID; string(id) != "adf4238a-882b-9ddc-4a9d-5b6758e4159e" {
		t.Fatalf("bad: %q vs. %q", id, newID)
	}
}
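
// TestAgent_makeNodeID verifies that generated node IDs are random by
// default and stable once host-based IDs are enabled.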
func TestAgent_makeNodeID(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_id = ""
	`)
	defer a.Shutdown()

	// We should get a valid host-based ID initially.
	id, err := a.makeNodeID()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := uuid.ParseUUID(string(id)); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Calling again should yield a random ID by default.
	another, err := a.makeNodeID()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if id == another {
		t.Fatalf("bad: %s vs %s", id, another)
	}

	// Turn on host-based IDs and try again. We should get the same ID
	// each time (and a different one from the random one above).
	a.Config.DisableHostNodeID = false
	id, err = a.makeNodeID()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if id == another {
		t.Fatalf("bad: %s vs %s", id, another)
	}

	// Calling again should yield the host-based ID.
	another, err = a.makeNodeID()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if id != another {
		t.Fatalf("bad: %s vs %s", id, another)
	}
}
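
// TestAgent_AddService registers services with various check definitions and
// verifies the resulting local state, health checks, and TTL checks.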
func TestAgent_AddService(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
	`)
	defer a.Shutdown()

	tests := []struct {
		desc       string
		srv        *structs.NodeService
		wantSrv    func(ns *structs.NodeService)
		chkTypes   []*structs.CheckType
		healthChks map[string]*structs.HealthCheck
	}{
		{
			"one check",
			&structs.NodeService{
				ID:      "svcid1",
				Service: "svcname1",
				Tags:    []string{"tag1"},
				Weights: nil, // nil weights...
				Port:    8100,
			},
			// ... should be populated to avoid "IsSame" returning true during AE.
			func(ns *structs.NodeService) {
				ns.Weights = &structs.Weights{
					Passing: 1,
					Warning: 1,
				}
			},
			[]*structs.CheckType{
				&structs.CheckType{
					CheckID: "check1",
					Name:    "name1",
					TTL:     time.Minute,
					Notes:   "note1",
				},
			},
			map[string]*structs.HealthCheck{
				"check1": &structs.HealthCheck{
					Node:        "node1",
					CheckID:     "check1",
					Name:        "name1",
					Status:      "critical",
					Notes:       "note1",
					ServiceID:   "svcid1",
					ServiceName: "svcname1",
					ServiceTags: []string{"tag1"},
				},
			},
		},
		{
			"multiple checks",
			&structs.NodeService{
				ID:      "svcid2",
				Service: "svcname2",
				Weights: &structs.Weights{
					Passing: 2,
					Warning: 1,
				},
				Tags: []string{"tag2"},
				Port: 8200,
			},
			nil, // No change expected
			[]*structs.CheckType{
				&structs.CheckType{
					CheckID: "check1",
					Name:    "name1",
					TTL:     time.Minute,
					Notes:   "note1",
				},
				&structs.CheckType{
					CheckID: "check-noname",
					TTL:     time.Minute,
				},
				&structs.CheckType{
					Name: "check-noid",
					TTL:  time.Minute,
				},
				&structs.CheckType{
					TTL: time.Minute,
				},
			},
			map[string]*structs.HealthCheck{
				"check1": &structs.HealthCheck{
					Node:        "node1",
					CheckID:     "check1",
					Name:        "name1",
					Status:      "critical",
					Notes:       "note1",
					ServiceID:   "svcid2",
					ServiceName: "svcname2",
					ServiceTags: []string{"tag2"},
				},
				"check-noname": &structs.HealthCheck{
					Node:        "node1",
					CheckID:     "check-noname",
					Name:        "Service 'svcname2' check",
					Status:      "critical",
					ServiceID:   "svcid2",
					ServiceName: "svcname2",
					ServiceTags: []string{"tag2"},
				},
				"service:svcid2:3": &structs.HealthCheck{
					Node:        "node1",
					CheckID:     "service:svcid2:3",
					Name:        "check-noid",
					Status:      "critical",
					ServiceID:   "svcid2",
					ServiceName: "svcname2",
					ServiceTags: []string{"tag2"},
				},
				"service:svcid2:4": &structs.HealthCheck{
					Node:        "node1",
					CheckID:     "service:svcid2:4",
					Name:        "Service 'svcname2' check",
					Status:      "critical",
					ServiceID:   "svcid2",
					ServiceName: "svcname2",
					ServiceTags: []string{"tag2"},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			// check the service registration
			t.Run(tt.srv.ID, func(t *testing.T) {
				err := a.AddService(tt.srv, tt.chkTypes, false, "", ConfigSourceLocal)
				if err != nil {
					t.Fatalf("err: %v", err)
				}

				got := a.State.Services()[tt.srv.ID]
				// Make a copy since tt.srv still points to the struct held in
				// local state, so mutating it in place would make the
				// comparison a tautology!
				want := *tt.srv
				if tt.wantSrv != nil {
					tt.wantSrv(&want)
				}
				require.Equal(t, &want, got)
				require.True(t, got.IsSame(&want))
			})

			// check the health checks
			for k, v := range tt.healthChks {
				t.Run(k, func(t *testing.T) {
					got := a.State.Checks()[types.CheckID(k)]
					require.Equal(t, v, got)
				})
			}

			// check the ttl checks
			for k := range tt.healthChks {
				t.Run(k+" ttl", func(t *testing.T) {
					chk := a.checkTTLs[types.CheckID(k)]
					if chk == nil {
						t.Fatal("got nil want TTL check")
					}
					if got, want := string(chk.CheckID), k; got != want {
						t.Fatalf("got CheckID %v want %v", got, want)
					}
					if got, want := chk.TTL, time.Minute; got != want {
						t.Fatalf("got TTL %v want %v", got, want)
					}
				})
			}
		})
	}
}

func TestAgent_AddServices_AliasUpdateCheckNotReverted(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
	`)
	defer a.Shutdown()

	// It's tricky to get an UpdateCheck call to be timed properly so it lands
	// right in the middle of an addServiceInternal call, so we cheat a bit and
	// rely upon alias checks to do that work for us. We add enough services
	// that probabilistically one of them is going to end up properly in the
	// critical section.
	//
	// The first number I picked here (10) surprisingly failed every time prior
	// to PR #6144 solving the underlying problem.
	const numServices = 10

	services := make([]*structs.ServiceDefinition, numServices)
	checkIDs := make([]types.CheckID, numServices)
	for i := 0; i < numServices; i++ {
		name := fmt.Sprintf("web-%d", i)

		services[i] = &structs.ServiceDefinition{
			ID:   name,
			Name: name,
			Port: 8080 + i,
			Checks: []*structs.CheckType{
				&structs.CheckType{
					Name:         "alias-for-fake-service",
					AliasService: "fake",
				},
			},
		}

		checkIDs[i] = types.CheckID("service:" + name)
	}

	// Add all of the services quickly as you might do from config file snippets.
	for _, service := range services {
		ns := service.NodeService()

		chkTypes, err := service.CheckTypes()
		require.NoError(t, err)

		require.NoError(t, a.AddService(ns, chkTypes, false, service.Token, ConfigSourceLocal))
	}

	retry.Run(t, func(r *retry.R) {
		gotChecks := a.State.Checks()
		for id, check := range gotChecks {
			require.Equal(r, "passing", check.Status, "check %q is wrong", id)
			require.Equal(r, "No checks found.", check.Output, "check %q is wrong", id)
		}
	})
}
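
// TestAgent_AddServiceNoExec verifies that script checks are rejected from
// both local and remote sources when script execution is disabled.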
func TestAgent_AddServiceNoExec(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		ID:      "svcid1",
		Service: "svcname1",
		Tags:    []string{"tag1"},
		Port:    8100,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}

	err := a.AddService(srv, []*structs.CheckType{chk}, false, "", ConfigSourceLocal)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}

	err = a.AddService(srv, []*structs.CheckType{chk}, false, "", ConfigSourceRemote)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}
}

func TestAgent_AddServiceNoRemoteExec(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
		enable_local_script_checks = true
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		ID:      "svcid1",
		Service: "svcname1",
		Tags:    []string{"tag1"},
		Port:    8100,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}

	err := a.AddService(srv, []*structs.CheckType{chk}, false, "", ConfigSourceRemote)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}
}

func TestAgent_RemoveService(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Remove a service that doesn't exist
	if err := a.RemoveService("redis", false); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Remove without an ID
	if err := a.RemoveService("", false); err == nil {
		t.Fatalf("should have errored")
	}

	// Removing a service with a single check works
	{
		srv := &structs.NodeService{
			ID:      "memcache",
			Service: "memcache",
			Port:    8000,
		}
		chkTypes := []*structs.CheckType{&structs.CheckType{TTL: time.Minute}}

		if err := a.AddService(srv, chkTypes, false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Add a check after the fact with a specific check ID
		check := &structs.CheckDefinition{
			ID:        "check2",
			Name:      "check2",
			ServiceID: "memcache",
			TTL:       time.Minute,
		}
		hc := check.HealthCheck("node1")
		if err := a.AddCheck(hc, check.CheckType(), false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %s", err)
		}

		if err := a.RemoveService("memcache", false); err != nil {
			t.Fatalf("err: %s", err)
		}
		if _, ok := a.State.Checks()["service:memcache"]; ok {
			t.Fatalf("have memcache check")
		}
		if _, ok := a.State.Checks()["check2"]; ok {
			t.Fatalf("have check2 check")
		}
	}

	// Removing a service with multiple checks works
	{
		// add a service to remove
		srv := &structs.NodeService{
			ID:      "redis",
			Service: "redis",
			Port:    8000,
		}
		chkTypes := []*structs.CheckType{
			&structs.CheckType{TTL: time.Minute},
			&structs.CheckType{TTL: 30 * time.Second},
		}
		if err := a.AddService(srv, chkTypes, false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %v", err)
		}

		// add another service that won't be affected
		srv = &structs.NodeService{
			ID:      "mysql",
			Service: "mysql",
			Port:    3306,
		}
		chkTypes = []*structs.CheckType{
			&structs.CheckType{TTL: time.Minute},
			&structs.CheckType{TTL: 30 * time.Second},
		}
		if err := a.AddService(srv, chkTypes, false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Remove the service
		if err := a.RemoveService("redis", false); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Ensure the service is gone from the state mapping
		if _, ok := a.State.Services()["redis"]; ok {
			t.Fatalf("have redis service")
		}

		// Ensure checks were removed
		if _, ok := a.State.Checks()["service:redis:1"]; ok {
			t.Fatalf("check redis:1 should be removed")
		}
		if _, ok := a.State.Checks()["service:redis:2"]; ok {
			t.Fatalf("check redis:2 should be removed")
		}

		// Ensure the redis checks are removed
		if _, ok := a.checkTTLs["service:redis:1"]; ok {
			t.Fatalf("check ttl for redis:1 should be removed")
		}
		if check := a.State.Check(types.CheckID("service:redis:1")); check != nil {
			t.Fatalf("check ttl for redis:1 should be removed")
		}
		if _, ok := a.checkTTLs["service:redis:2"]; ok {
			t.Fatalf("check ttl for redis:2 should be removed")
		}
		if check := a.State.Check(types.CheckID("service:redis:2")); check != nil {
			t.Fatalf("check ttl for redis:2 should be removed")
		}

		// check the mysql service is unaffected
		if _, ok := a.checkTTLs["service:mysql:1"]; !ok {
			t.Fatalf("check ttl for mysql:1 should not be removed")
		}
		if check := a.State.Check(types.CheckID("service:mysql:1")); check == nil {
			t.Fatalf("check ttl for mysql:1 should not be removed")
		}
		if _, ok := a.checkTTLs["service:mysql:2"]; !ok {
			t.Fatalf("check ttl for mysql:2 should not be removed")
		}
		if check := a.State.Check(types.CheckID("service:mysql:2")); check == nil {
			t.Fatalf("check ttl for mysql:2 should not be removed")
		}
	}
}
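
// TestAgent_RemoveServiceRemovesAllChecks verifies that removing a service
// also removes checks added by later re-registrations of that service.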
func TestAgent_RemoveServiceRemovesAllChecks(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
	`)
	defer a.Shutdown()

	svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000}
	chk1 := &structs.CheckType{CheckID: "chk1", Name: "chk1", TTL: time.Minute}
	chk2 := &structs.CheckType{CheckID: "chk2", Name: "chk2", TTL: 2 * time.Minute}
	hchk1 := &structs.HealthCheck{Node: "node1", CheckID: "chk1", Name: "chk1", Status: "critical", ServiceID: "redis", ServiceName: "redis"}
	hchk2 := &structs.HealthCheck{Node: "node1", CheckID: "chk2", Name: "chk2", Status: "critical", ServiceID: "redis", ServiceName: "redis"}

	// register service with chk1
	if err := a.AddService(svc, []*structs.CheckType{chk1}, false, "", ConfigSourceLocal); err != nil {
		t.Fatal("Failed to register service", err)
	}

	// verify chk1 exists
	if a.State.Checks()["chk1"] == nil {
		t.Fatal("Could not find health check chk1")
	}

	// update the service with chk2
	if err := a.AddService(svc, []*structs.CheckType{chk2}, false, "", ConfigSourceLocal); err != nil {
		t.Fatal("Failed to update service", err)
	}

	// check that both checks are there
	if got, want := a.State.Checks()["chk1"], hchk1; !verify.Values(t, "", got, want) {
		t.FailNow()
	}
	if got, want := a.State.Checks()["chk2"], hchk2; !verify.Values(t, "", got, want) {
		t.FailNow()
	}

	// Remove service
	if err := a.RemoveService("redis", false); err != nil {
		t.Fatal("Failed to remove service", err)
	}

	// Check that both checks are gone
	if a.State.Checks()["chk1"] != nil {
		t.Fatal("Found health check chk1 want nil")
	}
	if a.State.Checks()["chk2"] != nil {
		t.Fatal("Found health check chk2 want nil")
	}
}

// TestAgent_IndexChurn is designed to detect a class of issues where
// we would have unnecessary catalog churn from anti-entropy. See issues
// #3259, #3642, #3845, and #3866.
func TestAgent_IndexChurn(t *testing.T) {
	t.Parallel()

	t.Run("no tags", func(t *testing.T) {
		verifyIndexChurn(t, nil)
	})

	t.Run("with tags", func(t *testing.T) {
		verifyIndexChurn(t, []string{"foo", "bar"})
	})
}

// verifyIndexChurn registers some things and runs anti-entropy a bunch of times
// in a row to make sure there are no index bumps.
func verifyIndexChurn(t *testing.T, tags []string) {
	t.Helper()

	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	weights := &structs.Weights{
		Passing: 1,
		Warning: 1,
	}
	// Ensure we have a leader before we start adding the services
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Port:    8000,
		Tags:    tags,
		Weights: weights,
	}
	if err := a.AddService(svc, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	chk := &structs.HealthCheck{
		CheckID:   "redis-check",
		Name:      "Service-level check",
		ServiceID: "redis",
		Status:    api.HealthCritical,
	}
	chkt := &structs.CheckType{
		TTL: time.Hour,
	}
	if err := a.AddCheck(chk, chkt, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	chk = &structs.HealthCheck{
		CheckID: "node-check",
		Name:    "Node-level check",
		Status:  api.HealthCritical,
	}
	chkt = &structs.CheckType{
		TTL: time.Hour,
	}
	if err := a.AddCheck(chk, chkt, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.sync.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	args := &structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "redis",
	}
	var before structs.IndexedCheckServiceNodes

	// This sleep gives the serfHealth check time to be added to the agent.
	// 375ms has proven sufficient in practice; we use 500ms so that we never
	// see flakiness.
	time.Sleep(500 * time.Millisecond)

	if err := a.RPC("Health.ServiceNodes", args, &before); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, name := range before.Nodes[0].Checks {
		a.logger.Println("[DEBUG] Checks Registered: ", name.Name)
	}
	if got, want := len(before.Nodes), 1; got != want {
		t.Fatalf("got %d want %d", got, want)
	}
	if got, want := len(before.Nodes[0].Checks), 3; /* incl. serfHealth */ got != want {
		t.Fatalf("got %d want %d", got, want)
	}

	for i := 0; i < 10; i++ {
		a.logger.Println("[INFO] # ", i+1, "Sync in progress ")
		if err := a.sync.State.SyncFull(); err != nil {
			t.Fatalf("err: %v", err)
		}
	}
	// If this test fails here, the Consul-X-Index for the RPC has changed,
	// which means these supposedly idempotent ops are not actually idempotent.
	var after structs.IndexedCheckServiceNodes
	if err := a.RPC("Health.ServiceNodes", args, &after); err != nil {
		t.Fatalf("err: %v", err)
	}
	verify.Values(t, "", after, before)
}

func TestAgent_AddCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_script_checks = true
	`)
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have a check mapping
	sChk, ok := a.State.Checks()["mem"]
	if !ok {
		t.Fatalf("missing mem check")
	}

	// Ensure our check is in the right state
	if sChk.Status != api.HealthCritical {
		t.Fatalf("check not critical")
	}

	// Ensure a monitor is set up
	if _, ok := a.checkMonitors["mem"]; !ok {
		t.Fatalf("missing mem monitor")
	}
}

func TestAgent_AddCheck_StartPassing(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_script_checks = true
	`)
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthPassing,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have a check mapping
	sChk, ok := a.State.Checks()["mem"]
	if !ok {
		t.Fatalf("missing mem check")
	}

	// Ensure our check is in the right state
	if sChk.Status != api.HealthPassing {
		t.Fatalf("check not passing")
	}

	// Ensure a monitor is set up
	if _, ok := a.checkMonitors["mem"]; !ok {
		t.Fatalf("missing mem monitor")
	}
}

func TestAgent_AddCheck_MinInterval(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_script_checks = true
	`)
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   time.Microsecond,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have a check mapping
	if _, ok := a.State.Checks()["mem"]; !ok {
		t.Fatalf("missing mem check")
	}

	// Ensure the monitor was set up with the enforced minimum interval
	if mon, ok := a.checkMonitors["mem"]; !ok {
		t.Fatalf("missing mem monitor")
	} else if mon.Interval != checks.MinInterval {
		t.Fatalf("bad mem monitor interval")
	}
}

func TestAgent_AddCheck_MissingService(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_script_checks = true
	`)
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:      "foo",
		CheckID:   "baz",
		Name:      "baz check 1",
		ServiceID: "baz",
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   time.Microsecond,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err == nil || err.Error() != `ServiceID "baz" does not exist` {
		t.Fatalf("expected service id error, got: %v", err)
	}
}
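
// TestAgent_AddCheck_RestoreState verifies that persisted check state is
// restored when the check is re-registered.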
func TestAgent_AddCheck_RestoreState(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Create some state and persist it
	ttl := &checks.CheckTTL{
		CheckID: "baz",
		TTL:     time.Minute,
	}
	err := a.persistCheckState(ttl, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Build and register the check definition and initial state
	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "baz",
		Name:    "baz check 1",
	}
	chk := &structs.CheckType{
		TTL: time.Minute,
	}
	err = a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the check status was restored during registration
	checks := a.State.Checks()
	check, ok := checks["baz"]
	if !ok {
		t.Fatalf("missing check")
	}
	if check.Status != api.HealthPassing {
		t.Fatalf("bad: %#v", check)
	}
	if check.Output != "yup" {
		t.Fatalf("bad: %#v", check)
	}
}

func TestAgent_AddCheck_ExecDisable(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}

	// Ensure we don't have a check mapping
	if memChk := a.State.Checks()["mem"]; memChk != nil {
		t.Fatalf("should be missing mem check")
	}

	err = a.AddCheck(health, chk, false, "", ConfigSourceRemote)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}

	// Ensure we don't have a check mapping
	if memChk := a.State.Checks()["mem"]; memChk != nil {
		t.Fatalf("should be missing mem check")
	}
}

func TestAgent_AddCheck_ExecRemoteDisable(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), `
		enable_local_script_checks = true
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceRemote)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent from remote calls") {
		t.Fatalf("err: %v", err)
	}

	// Ensure we don't have a check mapping
	if memChk := a.State.Checks()["mem"]; memChk != nil {
		t.Fatalf("should be missing mem check")
	}
}

func TestAgent_AddCheck_GRPC(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "grpchealth",
		Name:    "grpc health checking protocol",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		GRPC:     "localhost:12345/package.Service",
		Interval: 15 * time.Second,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have a check mapping
	sChk, ok := a.State.Checks()["grpchealth"]
	if !ok {
		t.Fatalf("missing grpchealth check")
	}

	// Ensure our check is in the right state
	if sChk.Status != api.HealthCritical {
		t.Fatalf("check not critical")
	}

	// Ensure a gRPC check is set up
	if _, ok := a.checkGRPCs["grpchealth"]; !ok {
		t.Fatalf("missing grpchealth check")
	}
}

func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
	// t.Parallel() don't even think about making this parallel

	// This test is very contrived and tests for the absence of race conditions
	// related to the implementation of alias checks. As such it is slow,
	// serial, full of sleeps and retries, and not generally a great test to
	// run all of the time.
	//
	// That said it made it incredibly easy to root out various race conditions
	// quite successfully.
	//
	// The original set of races was between:
	//
	// - agent startup reloading Services and Checks from disk
	// - API requests to also re-register those same Services and Checks
	// - the as-yet-to-be-stopped CheckAlias goroutines

	if os.Getenv("SLOWTEST") != "1" {
		t.Skip("skipping slow test; set SLOWTEST=1 to run")
		return
	}

	// We do this so that the agent logs and the informational messages from
	// the test itself are interwoven properly.
	logf := func(t *testing.T, a *TestAgent, format string, args ...interface{}) {
		a.logger.Printf("[INFO] testharness: "+format, args...)
	}

	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	cfg := `
		server = false
		bootstrap = false
		enable_central_service_config = false
		data_dir = "` + dataDir + `"
	`
	a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a.LogOutput = testutil.TestWriter(t)
	a.Start(t)
	defer os.RemoveAll(dataDir)
	defer a.Shutdown()

	testCtx, testCancel := context.WithCancel(context.Background())
	defer testCancel()

	testHTTPServer := launchHTTPCheckServer(t, testCtx)
	defer testHTTPServer.Close()

	registerServicesAndChecks := func(t *testing.T, a *TestAgent) {
		// add one persistent service with a simple check
		require.NoError(t, a.AddService(
			&structs.NodeService{
				ID:      "ping",
				Service: "ping",
				Port:    8000,
			},
			[]*structs.CheckType{
				&structs.CheckType{
					HTTP:     testHTTPServer.URL,
					Method:   "GET",
					Interval: 5 * time.Second,
					Timeout:  1 * time.Second,
				},
			},
			true, "", ConfigSourceLocal,
		))

		// add one persistent sidecar service with an alias check in the manner
		// of how sidecar_service would add it
		require.NoError(t, a.AddService(
			&structs.NodeService{
				ID:      "ping-sidecar-proxy",
				Service: "ping-sidecar-proxy",
				Port:    9000,
			},
			[]*structs.CheckType{
				&structs.CheckType{
					Name:         "Connect Sidecar Aliasing ping",
					AliasService: "ping",
				},
			},
			true, "", ConfigSourceLocal,
		))
	}

	retryUntilCheckState := func(t *testing.T, a *TestAgent, checkID string, expectedStatus string) {
		t.Helper()
		retry.Run(t, func(r *retry.R) {
			chk := a.State.CheckState(types.CheckID(checkID))
			if chk == nil {
				r.Fatalf("check=%q is completely missing", checkID)
			}
			if chk.Check.Status != expectedStatus {
				logf(t, a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Check.Status)
				r.Fatalf("check=%q expected status %q but got %q", checkID, expectedStatus, chk.Check.Status)
			}
			logf(t, a, "check %q has reached desired status %q", checkID, expectedStatus)
		})
	}

	registerServicesAndChecks(t, a)

	time.Sleep(1 * time.Second)

	retryUntilCheckState(t, a, "service:ping", api.HealthPassing)
	retryUntilCheckState(t, a, "service:ping-sidecar-proxy", api.HealthPassing)

	logf(t, a, "==== POWERING DOWN ORIGINAL ====")

	require.NoError(t, a.Shutdown())

	time.Sleep(1 * time.Second)

	futureHCL := cfg + `
		node_id = "` + string(a.Config.NodeID) + `"
		node_name = "` + a.Config.NodeName + `"
	`

	restartOnce := func(idx int, t *testing.T) {
		t.Helper()

		// Reload and retain former NodeID and data directory.
		a2 := &TestAgent{Name: t.Name(), HCL: futureHCL, DataDir: dataDir}
		a2.LogOutput = testutil.TestWriter(t)
		a2.Start(t)
		defer a2.Shutdown()
		a = nil

		// reregister during standup; we use an adjustable timing to try and force a race
		sleepDur := time.Duration(idx+1) * 500 * time.Millisecond
		time.Sleep(sleepDur)
		logf(t, a2, "re-registering checks and services after a delay of %v", sleepDur)
		for i := 0; i < 20; i++ { // RACE RACE RACE!
			registerServicesAndChecks(t, a2)
			time.Sleep(50 * time.Millisecond)
		}

		time.Sleep(1 * time.Second)

		retryUntilCheckState(t, a2, "service:ping", api.HealthPassing)

		logf(t, a2, "giving the alias check a chance to notice...")
		time.Sleep(5 * time.Second)

		retryUntilCheckState(t, a2, "service:ping-sidecar-proxy", api.HealthPassing)
	}

	for i := 0; i < 20; i++ {
		name := "restart-" + strconv.Itoa(i)
		ok := t.Run(name, func(t *testing.T) {
			restartOnce(i, t)
		})
		require.True(t, ok, name+" failed")
	}
}
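
// launchHTTPCheckServer starts an httptest server on a free port that always
// responds 200 OK, for use as an HTTP check target.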
func launchHTTPCheckServer(t *testing.T, ctx context.Context) *httptest.Server {
	ports := freeport.GetT(t, 1)
	port := ports[0]

	addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(port))

	var lc net.ListenConfig
	listener, err := lc.Listen(ctx, "tcp", addr)
	require.NoError(t, err)

	handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("OK\n"))
	})

	srv := &httptest.Server{
		Listener: listener,
		Config:   &http.Server{Handler: handler},
	}
	srv.Start()
	return srv
}
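
// TestAgent_AddCheck_Alias registers an alias check and verifies its initial
// state and empty token.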
func TestAgent_AddCheck_Alias(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "aliashealth",
		Name:    "Alias health check",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		AliasService: "foo",
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	require.NoError(err)

	// Ensure we have a check mapping
	sChk, ok := a.State.Checks()["aliashealth"]
	require.True(ok, "missing aliashealth check")
	require.NotNil(sChk)
	require.Equal(api.HealthCritical, sChk.Status)

	chkImpl, ok := a.checkAliases["aliashealth"]
	require.True(ok, "missing aliashealth check")
	require.Equal("", chkImpl.RPCReq.Token)

	cs := a.State.CheckState("aliashealth")
	require.NotNil(cs)
	require.Equal("", cs.Token)
}

func TestAgent_AddCheck_Alias_setToken(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "aliashealth",
		Name:    "Alias health check",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		AliasService: "foo",
	}
	err := a.AddCheck(health, chk, false, "foo", ConfigSourceLocal)
	require.NoError(err)

	cs := a.State.CheckState("aliashealth")
	require.NotNil(cs)
	require.Equal("foo", cs.Token)

	chkImpl, ok := a.checkAliases["aliashealth"]
	require.True(ok, "missing aliashealth check")
	require.Equal("foo", chkImpl.RPCReq.Token)
}
|
|
|
|
|
|
|
|
func TestAgent_AddCheck_Alias_userToken(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), `
		acl_token = "hello"
	`)
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "aliashealth",
		Name:    "Alias health check",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		AliasService: "foo",
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	require.NoError(err)

	cs := a.State.CheckState("aliashealth")
	require.NotNil(cs)
	require.Equal("", cs.Token) // State token should still be empty

	chkImpl, ok := a.checkAliases["aliashealth"]
	require.True(ok, "missing aliashealth check")
	require.Equal("hello", chkImpl.RPCReq.Token) // Check should use the token
}

func TestAgent_AddCheck_Alias_userAndSetToken(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), `
		acl_token = "hello"
	`)
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "aliashealth",
		Name:    "Alias health check",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		AliasService: "foo",
	}
	err := a.AddCheck(health, chk, false, "goodbye", ConfigSourceLocal)
	require.NoError(err)

	cs := a.State.CheckState("aliashealth")
	require.NotNil(cs)
	require.Equal("goodbye", cs.Token)

	chkImpl, ok := a.checkAliases["aliashealth"]
	require.True(ok, "missing aliashealth check")
	require.Equal("goodbye", chkImpl.RPCReq.Token)
}

func TestAgent_RemoveCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_script_checks = true
	`)
	defer a.Shutdown()

	// Remove check that doesn't exist
	if err := a.RemoveCheck("mem", false); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Remove without an ID
	if err := a.RemoveCheck("", false); err == nil {
		t.Fatalf("should have errored")
	}

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Remove check
	if err := a.RemoveCheck("mem", false); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure the check mapping was removed
	if _, ok := a.State.Checks()["mem"]; ok {
		t.Fatalf("have mem check")
	}

	// Ensure the check monitor was removed as well
	if _, ok := a.checkMonitors["mem"]; ok {
		t.Fatalf("have mem monitor")
	}
}

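// TestAgent_HTTPCheck_TLSSkipVerify verifies that an HTTP check against a
// TLS server with a self-signed certificate passes when TLSSkipVerify is set.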
func TestAgent_HTTPCheck_TLSSkipVerify(t *testing.T) {
	t.Parallel()

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "GOOD")
	})
	server := httptest.NewTLSServer(handler)
	defer server.Close()

	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "tls",
		Name:    "tls check",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		HTTP:          server.URL,
		Interval:      20 * time.Millisecond,
		TLSSkipVerify: true,
	}

	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	retry.Run(t, func(r *retry.R) {
		status := a.State.Checks()["tls"]
		if status.Status != api.HealthPassing {
			r.Fatalf("bad: %v", status.Status)
		}
		if !strings.Contains(status.Output, "GOOD") {
			r.Fatalf("bad: %v", status.Output)
		}
	})
}

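// With enable_agent_tls_for_checks set, HTTP checks reuse the agent's own
// TLS client configuration (server_name, key/cert, and CA material), so a
// check against the agent's HTTPS endpoint should pass without TLSSkipVerify.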
func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) {
	t.Parallel()

	run := func(t *testing.T, ca string) {
		a := &TestAgent{
			Name:   t.Name(),
			UseTLS: true,
			HCL: `
				enable_agent_tls_for_checks = true

				verify_incoming = true
				server_name = "consul.test"
				key_file = "../test/client_certs/server.key"
				cert_file = "../test/client_certs/server.crt"
			` + ca,
		}
		a.Start(t)
		defer a.Shutdown()

		health := &structs.HealthCheck{
			Node:    "foo",
			CheckID: "tls",
			Name:    "tls check",
			Status:  api.HealthCritical,
		}

		url := fmt.Sprintf("https://%s/v1/agent/self", a.srv.ln.Addr().String())
		chk := &structs.CheckType{
			HTTP:     url,
			Interval: 20 * time.Millisecond,
		}

		err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		retry.Run(t, func(r *retry.R) {
			status := a.State.Checks()["tls"]
			if status.Status != api.HealthPassing {
				r.Fatalf("bad: %v", status.Status)
			}
			if !strings.Contains(status.Output, "200 OK") {
				r.Fatalf("bad: %v", status.Output)
			}
		})
	}

	// We need to test both methods of passing the CA info to ensure that
	// we propagate all the fields correctly. All the other fields are
	// covered by the HCL in the test run function.
	tests := []struct {
		desc   string
		config string
	}{
		{"ca_file", `ca_file = "../test/client_certs/rootca.crt"`},
		{"ca_path", `ca_path = "../test/client_certs/path"`},
	}
	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			run(t, tt.config)
		})
	}
}

func TestAgent_updateTTLCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	checkBufSize := 100
	health := &structs.HealthCheck{
		Node:    "foo",
		CheckID: "mem",
		Name:    "memory util",
		Status:  api.HealthCritical,
	}
	chk := &structs.CheckType{
		TTL:           15 * time.Second,
		OutputMaxSize: checkBufSize,
	}

	// Add check and update it.
	err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := a.updateTTLCheck("mem", api.HealthPassing, "foo"); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have a check mapping.
	status := a.State.Checks()["mem"]
	if status.Status != api.HealthPassing {
		t.Fatalf("bad: %v", status)
	}
	if status.Output != "foo" {
		t.Fatalf("bad: %v", status)
	}

	if err := a.updateTTLCheck("mem", api.HealthCritical, strings.Repeat("--bad-- ", 5*checkBufSize)); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have a check mapping.
	status = a.State.Checks()["mem"]
	if status.Status != api.HealthCritical {
		t.Fatalf("bad: %v", status)
	}
	if len(status.Output) > checkBufSize*2 {
		t.Fatalf("bad: %v", len(status.Output))
	}
}

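// The persistence tests below exercise the on-disk service records kept
// under servicesDir: each record is a persistedService wrapper (token plus
// definition) stored in a file named after the hash of the service ID.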
func TestAgent_PersistService(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	cfg := `
		server = false
		bootstrap = false
		data_dir = "` + dataDir + `"
	`
	a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a.Start(t)
	defer os.RemoveAll(dataDir)
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	file := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc.ID))

	// Service is not persisted unless requested
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := os.Stat(file); err == nil {
		t.Fatalf("should not persist")
	}

	// Persists to file if requested
	if err := a.AddService(svc, nil, true, "mytoken", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}
	expected, err := json.Marshal(persistedService{
		Token:   "mytoken",
		Service: svc,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	content, err := ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !bytes.Equal(expected, content) {
		t.Fatalf("bad: %s", string(content))
	}

	// Updates service definition on disk
	svc.Port = 8001
	if err := a.AddService(svc, nil, true, "mytoken", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	expected, err = json.Marshal(persistedService{
		Token:   "mytoken",
		Service: svc,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	content, err = ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !bytes.Equal(expected, content) {
		t.Fatalf("bad: %s", string(content))
	}
	a.Shutdown()

	// Should load it back during later start
	a2 := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a2.Start(t)
	defer a2.Shutdown()

	restored := a2.State.ServiceState(svc.ID)
	if restored == nil {
		t.Fatalf("service %q missing", svc.ID)
	}
	if got, want := restored.Token, "mytoken"; got != want {
		t.Fatalf("got token %q want %q", got, want)
	}
	if got, want := restored.Service.Port, 8001; got != want {
		t.Fatalf("got port %d want %d", got, want)
	}
}

func TestAgent_persistedService_compat(t *testing.T) {
	t.Parallel()
	// Tests backwards compatibility of persisted services from pre-0.5.1
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
		Weights: &structs.Weights{Passing: 1, Warning: 1},
	}

	// Encode the NodeService directly. This is what previous versions
	// would serialize to the file (without the wrapper)
	encoded, err := json.Marshal(svc)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Write the content to the file
	file := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc.ID))
	if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := ioutil.WriteFile(file, encoded, 0600); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Load the services
	if err := a.loadServices(a.Config); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the service was restored
	services := a.State.Services()
	result, ok := services["redis"]
	if !ok {
		t.Fatalf("missing service")
	}
	require.Equal(t, svc, result)
}

func TestAgent_PurgeService(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	file := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc.ID))
	if err := a.AddService(svc, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Not removed
	if err := a.RemoveService(svc.ID, false); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Re-add the service
	if err := a.AddService(svc, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Removed
	if err := a.RemoveService(svc.ID, true); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("bad: %#v", err)
	}
}

func TestAgent_PurgeServiceOnDuplicate(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	cfg := `
		data_dir = "` + dataDir + `"
		server = false
		bootstrap = false
	`
	a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a.Start(t)
	defer a.Shutdown()
	defer os.RemoveAll(dataDir)

	svc1 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	// First persist the service
	if err := a.AddService(svc1, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	a.Shutdown()

	// Try bringing the agent back up with the service already
	// existing in the config
	a2 := &TestAgent{Name: t.Name() + "-a2", HCL: cfg + `
		service = {
			id = "redis"
			name = "redis"
			tags = ["bar"]
			port = 9000
		}
	`, DataDir: dataDir}
	a2.Start(t)
	defer a2.Shutdown()

	file := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc1.ID))
	if _, err := os.Stat(file); err == nil {
		t.Fatalf("should have removed persisted service")
	}
	result := a2.State.Service("redis")
	if result == nil {
		t.Fatalf("missing service registration")
	}
	if !reflect.DeepEqual(result.Tags, []string{"bar"}) || result.Port != 9000 {
		t.Fatalf("bad: %#v", result)
	}
}

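// The managed proxy persistence tests mirror the service persistence tests:
// proxy records live under proxyDir and carry a generated ProxyToken in the
// persistedProxy wrapper.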
func TestAgent_PersistProxy(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	cfg := `
		server = false
		bootstrap = false
		data_dir = "` + dataDir + `"
	`
	a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a.Start(t)
	defer os.RemoveAll(dataDir)
	defer a.Shutdown()

	require := require.New(t)
	assert := assert.New(t)

	// Add a service to proxy (precondition for AddProxy)
	svc1 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	require.NoError(a.AddService(svc1, nil, true, "", ConfigSourceLocal))

	// Add a proxy for it
	proxy := &structs.ConnectManagedProxy{
		TargetServiceID: svc1.ID,
		Command:         []string{"/bin/sleep", "3600"},
	}

	file := filepath.Join(a.Config.DataDir, proxyDir, stringHash("redis-proxy"))

	// Proxy is not persisted unless requested
	require.NoError(a.AddProxy(proxy, false, false, "", ConfigSourceLocal))
	_, err := os.Stat(file)
	require.Error(err, "proxy should not be persisted")

	// Proxy is persisted if requested
	require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))
	_, err = os.Stat(file)
	require.NoError(err, "proxy should be persisted")

	content, err := ioutil.ReadFile(file)
	require.NoError(err)

	var gotProxy persistedProxy
	require.NoError(json.Unmarshal(content, &gotProxy))
	assert.Equal(proxy.Command, gotProxy.Proxy.Command)
	assert.Len(gotProxy.ProxyToken, 36) // sanity check for UUID

	// Updates the proxy definition on disk
	proxy.Config = map[string]interface{}{
		"foo": "bar",
	}
	require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))

	content, err = ioutil.ReadFile(file)
	require.NoError(err)

	require.NoError(json.Unmarshal(content, &gotProxy))
	assert.Equal(gotProxy.Proxy.Command, proxy.Command)
	assert.Equal(gotProxy.Proxy.Config, proxy.Config)
	assert.Len(gotProxy.ProxyToken, 36) // sanity check for UUID

	a.Shutdown()

	// Should load it back during later start
	a2 := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a2.Start(t)
	defer a2.Shutdown()

	restored := a2.State.Proxy("redis-proxy")
	require.NotNil(restored)
	assert.Equal(gotProxy.ProxyToken, restored.ProxyToken)
	// Ensure the port that was auto picked at random is the same again
	assert.Equal(gotProxy.Proxy.ProxyService.Port, restored.Proxy.ProxyService.Port)
	assert.Equal(gotProxy.Proxy.Command, restored.Proxy.Command)
}

func TestAgent_PurgeProxy(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	require := require.New(t)

	// Add a service to proxy (precondition for AddProxy)
	svc1 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	require.NoError(a.AddService(svc1, nil, true, "", ConfigSourceLocal))

	// Add a proxy for it
	proxy := &structs.ConnectManagedProxy{
		TargetServiceID: svc1.ID,
		Command:         []string{"/bin/sleep", "3600"},
	}
	proxyID := "redis-proxy"
	require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))

	file := filepath.Join(a.Config.DataDir, proxyDir, stringHash("redis-proxy"))

	// Not removed
	require.NoError(a.RemoveProxy(proxyID, false))
	_, err := os.Stat(file)
	require.NoError(err, "should not be removed")

	// Re-add the proxy
	require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))

	// Removed
	require.NoError(a.RemoveProxy(proxyID, true))
	_, err = os.Stat(file)
	require.Error(err, "should be removed")
}

func TestAgent_PurgeProxyOnDuplicate(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	cfg := `
		data_dir = "` + dataDir + `"
		server = false
		bootstrap = false
	`
	a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a.Start(t)
	defer a.Shutdown()
	defer os.RemoveAll(dataDir)

	require := require.New(t)

	// Add a service to proxy (precondition for AddProxy)
	svc1 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	require.NoError(a.AddService(svc1, nil, true, "", ConfigSourceLocal))

	// Add a proxy for it
	proxy := &structs.ConnectManagedProxy{
		TargetServiceID: svc1.ID,
		Command:         []string{"/bin/sleep", "3600"},
	}
	proxyID := "redis-proxy"
	require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))

	a.Shutdown()

	// Try bringing the agent back up with the service already
	// existing in the config
	a2 := &TestAgent{Name: t.Name() + "-a2", HCL: cfg + `
		service = {
			id = "redis"
			name = "redis"
			tags = ["bar"]
			port = 9000
			connect {
				proxy {
					command = ["/bin/sleep", "3600"]
				}
			}
		}
	`, DataDir: dataDir}
	a2.Start(t)
	defer a2.Shutdown()

	file := filepath.Join(a.Config.DataDir, proxyDir, stringHash(proxyID))
	_, err := os.Stat(file)
	require.NoError(err, "Config File based proxies should be persisted too")

	result := a2.State.Proxy(proxyID)
	require.NotNil(result)
	require.Equal(proxy.Command, result.Proxy.Command)
}

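// Persisted checks work the same way under checksDir, with one twist: a
// restored check always starts in the critical state until it runs again,
// which the assertions below rely on.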
func TestAgent_PersistCheck(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	cfg := `
		data_dir = "` + dataDir + `"
		server = false
		bootstrap = false
		enable_script_checks = true
	`
	a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
	a.Start(t)
	defer os.RemoveAll(dataDir)
	defer a.Shutdown()

	check := &structs.HealthCheck{
		Node:    a.config.NodeName,
		CheckID: "mem",
		Name:    "memory check",
		Status:  api.HealthPassing,
	}
	chkType := &structs.CheckType{
		ScriptArgs: []string{"/bin/true"},
		Interval:   10 * time.Second,
	}

	file := filepath.Join(a.Config.DataDir, checksDir, checkIDHash(check.CheckID))

	// Not persisted if not requested
	if err := a.AddCheck(check, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := os.Stat(file); err == nil {
		t.Fatalf("should not persist")
	}

	// Should persist if requested
	if err := a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}
	expected, err := json.Marshal(persistedCheck{
		Check:   check,
		ChkType: chkType,
		Token:   "mytoken",
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	content, err := ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !bytes.Equal(expected, content) {
		t.Fatalf("bad: %s != %s", string(content), expected)
	}

	// Updates the check definition on disk
	check.Name = "mem1"
	if err := a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	expected, err = json.Marshal(persistedCheck{
		Check:   check,
		ChkType: chkType,
		Token:   "mytoken",
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	content, err = ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !bytes.Equal(expected, content) {
		t.Fatalf("bad: %s", string(content))
	}
	a.Shutdown()

	// Should load it back during later start
	a2 := &TestAgent{Name: t.Name() + "-a2", HCL: cfg, DataDir: dataDir}
	a2.Start(t)
	defer a2.Shutdown()

	result := a2.State.Check(check.CheckID)
	if result == nil {
		t.Fatalf("bad: %#v", a2.State.Checks())
	}
	if result.Status != api.HealthCritical {
		t.Fatalf("bad: %#v", result)
	}
	if result.Name != "mem1" {
		t.Fatalf("bad: %#v", result)
	}

	// Should have restored the monitor
	if _, ok := a2.checkMonitors[check.CheckID]; !ok {
		t.Fatalf("bad: %#v", a2.checkMonitors)
	}
	if a2.State.CheckState(check.CheckID).Token != "mytoken" {
		t.Fatalf("bad: %s", a2.State.CheckState(check.CheckID).Token)
	}
}

func TestAgent_PurgeCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "mem",
		Name:    "memory check",
		Status:  api.HealthPassing,
	}

	file := filepath.Join(a.Config.DataDir, checksDir, checkIDHash(check.CheckID))
	if err := a.AddCheck(check, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Not removed
	if err := a.RemoveCheck(check.CheckID, false); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Removed
	if err := a.RemoveCheck(check.CheckID, true); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("bad: %#v", err)
	}
}

func TestAgent_PurgeCheckOnDuplicate(t *testing.T) {
	t.Parallel()
	nodeID := NodeID()
	dataDir := testutil.TempDir(t, "agent")
	a := NewTestAgent(t, t.Name(), `
		node_id = "`+nodeID+`"
		node_name = "Node `+nodeID+`"
		data_dir = "`+dataDir+`"
		server = false
		bootstrap = false
		enable_script_checks = true
	`)
	defer os.RemoveAll(dataDir)
	defer a.Shutdown()

	check1 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "mem",
		Name:    "memory check",
		Status:  api.HealthPassing,
	}

	// First persist the check
	if err := a.AddCheck(check1, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	a.Shutdown()

	// Start again with the check registered in config
	a2 := NewTestAgent(t, t.Name()+"-a2", `
		node_id = "`+nodeID+`"
		node_name = "Node `+nodeID+`"
		data_dir = "`+dataDir+`"
		server = false
		bootstrap = false
		enable_script_checks = true
		check = {
			id = "mem"
			name = "memory check"
			notes = "my cool notes"
			args = ["/bin/check-redis.py"]
			interval = "30s"
		}
	`)
	defer a2.Shutdown()

	file := filepath.Join(dataDir, checksDir, checkIDHash(check1.CheckID))
	if _, err := os.Stat(file); err == nil {
		t.Fatalf("should have removed persisted check")
	}
	result := a2.State.Check("mem")
	if result == nil {
		t.Fatalf("missing check registration")
	}
	expected := &structs.HealthCheck{
		Node:    a2.Config.NodeName,
		CheckID: "mem",
		Name:    "memory check",
		Status:  api.HealthCritical,
		Notes:   "my cool notes",
	}
	if got, want := result, expected; !verify.Values(t, "", got, want) {
		t.FailNow()
	}
}

func TestAgent_loadChecks_token(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		check = {
			id = "rabbitmq"
			name = "rabbitmq"
			token = "abc123"
			ttl = "10s"
		}
	`)
	defer a.Shutdown()

	checks := a.State.Checks()
	if _, ok := checks["rabbitmq"]; !ok {
		t.Fatalf("missing check")
	}
	if token := a.State.CheckToken("rabbitmq"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_unloadChecks(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}
	found := false
	for check := range a.State.Checks() {
		if check == check1.CheckID {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("check should have been registered")
	}

	// Unload all of the checks
	if err := a.unloadChecks(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Make sure it was unloaded
	for check := range a.State.Checks() {
		if check == check1.CheckID {
			t.Fatalf("should have unloaded checks")
		}
	}
}

func TestAgent_loadServices_token(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()
	if _, ok := services["rabbitmq"]; !ok {
		t.Fatalf("missing service")
	}
	if token := a.State.ServiceToken("rabbitmq"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

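// The sidecar_service tests verify that the sidecar registration expands
// into a separate "<name>-sidecar-proxy" service, and that tokens, tags, and
// meta are inherited from the parent service unless explicitly overridden.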
func TestAgent_loadServices_sidecar(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect = {
				sidecar_service {}
			}
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()
	if _, ok := services["rabbitmq"]; !ok {
		t.Fatalf("missing service")
	}
	if token := a.State.ServiceToken("rabbitmq"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
	if _, ok := services["rabbitmq-sidecar-proxy"]; !ok {
		t.Fatalf("missing service")
	}
	if token := a.State.ServiceToken("rabbitmq-sidecar-proxy"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Sanity check: the rabbitmq service should NOT have sidecar info in state
	// since it's done its job and should be registration syntax sugar only.
	assert.Nil(t, services["rabbitmq"].Connect.SidecarService)
}

func TestAgent_loadServices_sidecarSeparateToken(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect = {
				sidecar_service {
					token = "789xyz"
				}
			}
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()
	if _, ok := services["rabbitmq"]; !ok {
		t.Fatalf("missing service")
	}
	if token := a.State.ServiceToken("rabbitmq"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
	if _, ok := services["rabbitmq-sidecar-proxy"]; !ok {
		t.Fatalf("missing service")
	}
	if token := a.State.ServiceToken("rabbitmq-sidecar-proxy"); token != "789xyz" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_loadServices_sidecarInheritMeta(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			tags = ["a", "b"],
			meta = {
				environment = "prod"
			}
			connect = {
				sidecar_service {}
			}
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()

	svc, ok := services["rabbitmq"]
	require.True(t, ok, "missing service")
	require.Len(t, svc.Tags, 2)
	require.Len(t, svc.Meta, 1)

	sidecar, ok := services["rabbitmq-sidecar-proxy"]
	require.True(t, ok, "missing sidecar service")
	require.ElementsMatch(t, svc.Tags, sidecar.Tags)
	require.Len(t, sidecar.Meta, 1)
	meta, ok := sidecar.Meta["environment"]
	require.True(t, ok, "missing sidecar service meta")
	require.Equal(t, "prod", meta)
}

func TestAgent_loadServices_sidecarOverrideMeta(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			tags = ["a", "b"],
			meta = {
				environment = "prod"
			}
			connect = {
				sidecar_service {
					tags = ["foo"],
					meta = {
						environment = "qa"
					}
				}
			}
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()

	svc, ok := services["rabbitmq"]
	require.True(t, ok, "missing service")
	require.Len(t, svc.Tags, 2)
	require.Len(t, svc.Meta, 1)

	sidecar, ok := services["rabbitmq-sidecar-proxy"]
	require.True(t, ok, "missing sidecar service")
	require.Len(t, sidecar.Tags, 1)
	require.Equal(t, "foo", sidecar.Tags[0])
	require.Len(t, sidecar.Meta, 1)
	meta, ok := sidecar.Meta["environment"]
	require.True(t, ok, "missing sidecar service meta")
	require.Equal(t, "qa", meta)
}

func TestAgent_unloadServices(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	// Register the service
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	found := false
	for id := range a.State.Services() {
		if id == svc.ID {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("should have registered service")
	}

	// Unload all services
	if err := a.unloadServices(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(a.State.Services()) != 0 {
		t.Fatalf("should have unloaded services")
	}
}

func TestAgent_loadProxies(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect {
				proxy {
					config {
						bind_port = 1234
					}
				}
			}
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()
	if _, ok := services["rabbitmq"]; !ok {
		t.Fatalf("missing service")
	}
	if token := a.State.ServiceToken("rabbitmq"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
	if _, ok := services["rabbitmq-proxy"]; !ok {
		t.Fatalf("missing proxy service")
	}
	if token := a.State.ServiceToken("rabbitmq-proxy"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
	proxies := a.State.Proxies()
	if _, ok := proxies["rabbitmq-proxy"]; !ok {
		t.Fatalf("missing proxy")
	}
}

func TestAgent_loadProxies_nilProxy(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect {
			}
		}
	`)
	defer a.Shutdown()

	services := a.State.Services()
	require.Contains(t, services, "rabbitmq")
	require.Equal(t, "abc123", a.State.ServiceToken("rabbitmq"))
	require.NotContains(t, services, "rabbitmq-proxy")
	require.Empty(t, a.State.Proxies())
}

func TestAgent_unloadProxies(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect {
				proxy {
					config {
						bind_port = 1234
					}
				}
			}
		}
	`)
	defer a.Shutdown()

	// Sanity check it's there
	require.NotNil(t, a.State.Proxy("rabbitmq-proxy"))

	// Unload all proxies
	if err := a.unloadProxies(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(a.State.Proxies()) != 0 {
		t.Fatalf("should have unloaded proxies")
	}
}

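// Service maintenance mode registers a synthetic critical check for the
// service; the check's notes carry the operator-supplied reason, falling
// back to defaultServiceMaintReason when none is given.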
func TestAgent_Service_MaintenanceMode(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	// Register the service
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Enter maintenance mode for the service
	if err := a.EnableServiceMaintenance("redis", "broken", "mytoken"); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Make sure the critical health check was added
	checkID := serviceMaintCheckID("redis")
	check, ok := a.State.Checks()[checkID]
	if !ok {
		t.Fatalf("should have registered critical maintenance check")
	}

	// Check that the token was used to register the check
	if token := a.State.CheckToken(checkID); token != "mytoken" {
		t.Fatalf("expected 'mytoken', got: '%s'", token)
	}

	// Ensure the reason was set in notes
	if check.Notes != "broken" {
		t.Fatalf("bad: %#v", check)
	}

	// Leave maintenance mode
	if err := a.DisableServiceMaintenance("redis"); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the check was deregistered
	if _, ok := a.State.Checks()[checkID]; ok {
		t.Fatalf("should have deregistered maintenance check")
	}

	// Enter service maintenance mode without providing a reason
	if err := a.EnableServiceMaintenance("redis", "", ""); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the check was registered with the default notes
	check, ok = a.State.Checks()[checkID]
	if !ok {
		t.Fatalf("should have registered critical check")
	}
	if check.Notes != defaultServiceMaintReason {
		t.Fatalf("bad: %#v", check)
	}
}

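// TestAgent_Service_Reap drives a TTL check through fail/pass cycles and
// expects the service to be deregistered only after it stays critical past
// DeregisterCriticalServiceAfter; the timings are intentionally short, which
// is why this test and the NoReap variant do not run in parallel.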
func TestAgent_Service_Reap(t *testing.T) {
	// t.Parallel() // timing test. no parallel
	a := NewTestAgent(t, t.Name(), `
		check_reap_interval = "50ms"
		check_deregister_interval_min = "0s"
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	chkTypes := []*structs.CheckType{
		&structs.CheckType{
			Status:                         api.HealthPassing,
			TTL:                            25 * time.Millisecond,
			DeregisterCriticalServiceAfter: 200 * time.Millisecond,
		},
	}

	// Register the service.
	if err := a.AddService(svc, chkTypes, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure it's there and there's no critical check yet.
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have critical checks")
	}

	// Wait for the check TTL to fail but before the check is reaped.
	time.Sleep(100 * time.Millisecond)
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) != 1 {
		t.Fatalf("should have a critical check")
	}

	// Pass the TTL.
	if err := a.updateTTLCheck("service:redis", api.HealthPassing, "foo"); err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have critical checks")
	}

	// Wait for the check TTL to fail again.
	time.Sleep(100 * time.Millisecond)
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) != 1 {
		t.Fatalf("should have a critical check")
	}

	// Wait for the reap.
	time.Sleep(400 * time.Millisecond)
	if _, ok := a.State.Services()["redis"]; ok {
		t.Fatalf("redis service should have been reaped")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have critical checks")
	}
}

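// TestAgent_Service_NoReap verifies that a critical TTL check without
// DeregisterCriticalServiceAfter set never causes its service to be reaped.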
func TestAgent_Service_NoReap(t *testing.T) {
	// t.Parallel() // timing test. no parallel
	a := NewTestAgent(t, t.Name(), `
		check_reap_interval = "50ms"
		check_deregister_interval_min = "0s"
	`)
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	chkTypes := []*structs.CheckType{
		&structs.CheckType{
			Status: api.HealthPassing,
			TTL:    25 * time.Millisecond,
		},
	}

	// Register the service.
	if err := a.AddService(svc, chkTypes, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure it's there and there's no critical check yet.
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have critical checks")
	}

	// Wait for the check TTL to fail.
	time.Sleep(200 * time.Millisecond)
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) != 1 {
		t.Fatalf("should have a critical check")
	}

	// Wait a while and make sure it doesn't reap.
	time.Sleep(200 * time.Millisecond)
	if _, ok := a.State.Services()["redis"]; !ok {
		t.Fatalf("should have redis service")
	}
	if checks := a.State.CriticalCheckStates(); len(checks) != 1 {
		t.Fatalf("should have a critical check")
	}
}

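// TestAgent_AddService_restoresSnapshot verifies that re-registering a
// service keeps the recorded status of its existing check instead of
// resetting it to critical.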
func TestAgent_AddService_restoresSnapshot(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Re-registering the service preserves the state of the check
	chkTypes := []*structs.CheckType{&structs.CheckType{TTL: 30 * time.Second}}
	if err := a.AddService(svc, chkTypes, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}
	check, ok := a.State.Checks()["service:redis"]
	if !ok {
		t.Fatalf("missing check")
	}
	if check.Status != api.HealthPassing {
		t.Fatalf("bad: %s", check.Status)
	}
}

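// TestAgent_AddCheck_restoresSnapshot verifies that re-registering a check
// with no explicit status restores the previously recorded status.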
func TestAgent_AddCheck_restoresSnapshot(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Re-registering the check preserves its state
	check1.Status = ""
	if err := a.AddCheck(check1, &structs.CheckType{TTL: 30 * time.Second}, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}
	check, ok := a.State.Checks()["service:redis"]
	if !ok {
		t.Fatalf("missing check")
	}
	if check.Status != api.HealthPassing {
		t.Fatalf("bad: %s", check.Status)
	}
}

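// TestAgent_NodeMaintenanceMode covers entering and leaving node-wide
// maintenance mode: the synthetic critical check, the token used to register
// it, its notes, and the default reason when none is given.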
func TestAgent_NodeMaintenanceMode(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Enter maintenance mode for the node
	a.EnableNodeMaintenance("broken", "mytoken")

	// Make sure the critical health check was added
	check, ok := a.State.Checks()[structs.NodeMaint]
	if !ok {
		t.Fatalf("should have registered critical node check")
	}

	// Check that the token was used to register the check
	if token := a.State.CheckToken(structs.NodeMaint); token != "mytoken" {
		t.Fatalf("expected 'mytoken', got: '%s'", token)
	}

	// Ensure the reason was set in notes
	if check.Notes != "broken" {
		t.Fatalf("bad: %#v", check)
	}

	// Leave maintenance mode
	a.DisableNodeMaintenance()

	// Ensure the check was deregistered
	if _, ok := a.State.Checks()[structs.NodeMaint]; ok {
		t.Fatalf("should have deregistered critical node check")
	}

	// Enter maintenance mode without passing a reason
	a.EnableNodeMaintenance("", "")

	// Make sure the check was registered with the default note
	check, ok = a.State.Checks()[structs.NodeMaint]
	if !ok {
		t.Fatalf("should have registered critical node check")
	}
	if check.Notes != defaultNodeMaintReason {
		t.Fatalf("bad: %#v", check)
	}
}

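// TestAgent_checkStateSnapshot verifies that check state captured by
// snapshotCheckState survives an unload/load cycle of the agent's checks.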
func TestAgent_checkStateSnapshot(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Snapshot the state
	snap := a.snapshotCheckState()

	// Unload all of the checks
	if err := a.unloadChecks(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Reload the checks and restore the snapshot.
	if err := a.loadChecks(a.Config, snap); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Search for the check
	out, ok := a.State.Checks()[check1.CheckID]
	if !ok {
		t.Fatalf("check should have been registered")
	}

	// Make sure state was restored
	if out.Status != api.HealthPassing {
		t.Fatalf("should have restored check state")
	}
}

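// TestAgent_loadChecks_checkFails verifies that a persisted check referencing
// an unknown service is purged from disk when the checks are loaded.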
func TestAgent_loadChecks_checkFails(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Persist a health check with an invalid service ID
	check := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "service:redis",
		Name:      "redischeck",
		Status:    api.HealthPassing,
		ServiceID: "nope",
	}
	if err := a.persistCheck(check, nil); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check to make sure the check was persisted
	checkHash := checkIDHash(check.CheckID)
	checkPath := filepath.Join(a.Config.DataDir, checksDir, checkHash)
	if _, err := os.Stat(checkPath); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Try loading the checks from the persisted files
	if err := a.loadChecks(a.Config, nil); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the erroneous check was purged
	if _, err := os.Stat(checkPath); err == nil {
		t.Fatalf("should have purged check")
	}
}

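// TestAgent_persistCheckState verifies the on-disk JSON layout written for a
// TTL check's state, including its expiration timestamp.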
func TestAgent_persistCheckState(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Create the TTL check to persist
	check := &checks.CheckTTL{
		CheckID: "check1",
		TTL:     10 * time.Minute,
	}

	// Persist some check state for the check
	err := a.persistCheckState(check, api.HealthCritical, "nope")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check the persisted file exists and has the content
	file := filepath.Join(a.Config.DataDir, checkStateDir, stringHash("check1"))
	buf, err := ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Decode the state
	var p persistedCheckState
	if err := json.Unmarshal(buf, &p); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check the fields
	if p.CheckID != "check1" {
		t.Fatalf("bad: %#v", p)
	}
	if p.Output != "nope" {
		t.Fatalf("bad: %#v", p)
	}
	if p.Status != api.HealthCritical {
		t.Fatalf("bad: %#v", p)
	}

	// Check the expiration time was set
	if p.Expires < time.Now().Unix() {
		t.Fatalf("bad: %#v", p)
	}
}

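// TestAgent_loadCheckState verifies that expired persisted state is discarded
// and purged from disk, while unexpired state is restored onto the check.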
func TestAgent_loadCheckState(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Create a check whose state will expire immediately
	check := &checks.CheckTTL{
		CheckID: "check1",
		TTL:     0,
	}

	// Persist the check state
	err := a.persistCheckState(check, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Try to load the state
	health := &structs.HealthCheck{
		CheckID: "check1",
		Status:  api.HealthCritical,
	}
	if err := a.loadCheckState(health); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Should not have restored the status due to expiration
	if health.Status != api.HealthCritical {
		t.Fatalf("bad: %#v", health)
	}
	if health.Output != "" {
		t.Fatalf("bad: %#v", health)
	}

	// Should have purged the state. The state is persisted under
	// checkStateDir, so stat that path rather than checksDir.
	file := filepath.Join(a.Config.DataDir, checkStateDir, stringHash("check1"))
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("should have purged state")
	}

	// Set a TTL which will not expire before we check it
	check.TTL = time.Minute
	err = a.persistCheckState(check, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Try to load
	if err := a.loadCheckState(health); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Should have restored
	if health.Status != api.HealthPassing {
		t.Fatalf("bad: %#v", health)
	}
	if health.Output != "yup" {
		t.Fatalf("bad: %#v", health)
	}
}

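// TestAgent_purgeCheckState verifies that purging check state is idempotent
// and removes the persisted state file from the data dir.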
func TestAgent_purgeCheckState(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// No error if the state does not exist
	if err := a.purgeCheckState("check1"); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Persist some state to the data dir
	check := &checks.CheckTTL{
		CheckID: "check1",
		TTL:     time.Minute,
	}
	err := a.persistCheckState(check, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Purge the check state
	if err := a.purgeCheckState("check1"); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Removed the file
	file := filepath.Join(a.Config.DataDir, checkStateDir, stringHash("check1"))
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("should have removed file")
	}
}

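// TestAgent_GetCoordinate ensures LAN coordinate lookups work for both client
// and server agents.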
func TestAgent_GetCoordinate(t *testing.T) {
	t.Parallel()
	check := func(server bool) {
		a := NewTestAgent(t, t.Name(), `
			server = `+strconv.FormatBool(server)+`
		`)
		defer a.Shutdown()

		// This doesn't verify the returned coordinate, but it makes
		// sure that the agent chooses the correct Serf instance,
		// depending on how it's configured as a client or a server.
		// If it chooses the wrong one, this will crash.
		if _, err := a.GetLANCoordinate(); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	check(true)
	check(false)
}

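// TestAgent_reloadWatches exercises watch reloading: plain key watches are
// accepted over HTTP or HTTPS, connect watches are rejected in agent config,
// and reloading fails when no HTTP(S) endpoint is available.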
func TestAgent_reloadWatches(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	// Normal watch with http addr set, should succeed
	newConf := *a.config
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err != nil {
		t.Fatalf("bad: %s", err)
	}

	// Should fail to reload with connect watches
	newConf.Watches = []map[string]interface{}{
		{
			"type": "connect_roots",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err == nil || !strings.Contains(err.Error(), "not allowed in agent config") {
		t.Fatalf("bad: %s", err)
	}

	// Should still succeed with only HTTPS addresses
	newConf.HTTPSAddrs = newConf.HTTPAddrs
	newConf.HTTPAddrs = make([]net.Addr, 0)
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err != nil {
		t.Fatalf("bad: %s", err)
	}

	// Should fail to reload with no http or https addrs
	newConf.HTTPSAddrs = make([]net.Addr, 0)
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err == nil || !strings.Contains(err.Error(), "watch plans require an HTTP or HTTPS endpoint") {
		t.Fatalf("bad: %s", err)
	}
}

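// TestAgent_reloadWatchesHTTPS verifies that watches can be reloaded on an
// agent that only exposes a TLS endpoint.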
func TestAgent_reloadWatchesHTTPS(t *testing.T) {
	t.Parallel()
	a := TestAgent{Name: t.Name(), UseTLS: true}
	a.Start(t)
	defer a.Shutdown()

	// Normal watch with http addr set, should succeed
	newConf := *a.config
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err != nil {
		t.Fatalf("bad: %s", err)
	}
}

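// TestAgent_AddProxy runs table-driven cases for registering a managed proxy:
// defaulting of exec mode and commands from proxy_defaults, bind address and
// port handling, and the TCP readiness check derived from them.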
func TestAgent_AddProxy(t *testing.T) {
	t.Parallel()

	tests := []struct {
		desc             string
		proxy, wantProxy *structs.ConnectManagedProxy
		wantTCPCheck     string
		wantErr          bool
	}{
		{
			desc: "basic proxy adding, unregistered service",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo": "bar",
				},
				TargetServiceID: "db", // non-existent service.
			},
			// Target service must be registered.
			wantErr: true,
		},
		{
			desc: "basic proxy adding, registered service",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo": "bar",
				},
				TargetServiceID: "web",
			},
			// Proxy will inherit agent's 0.0.0.0 bind address but we can't check
			// that so we should default to localhost in that case.
			wantTCPCheck: "127.0.0.1:20000",
			wantErr:      false,
		},
		{
			desc: "default global exec mode",
			proxy: &structs.ConnectManagedProxy{
				Command:         []string{"consul", "connect", "proxy"},
				TargetServiceID: "web",
			},
			wantProxy: &structs.ConnectManagedProxy{
				ExecMode:        structs.ProxyExecModeScript,
				Command:         []string{"consul", "connect", "proxy"},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.0.0.1:20000",
			wantErr:      false,
		},
		{
			desc: "default daemon command",
			proxy: &structs.ConnectManagedProxy{
				ExecMode:        structs.ProxyExecModeDaemon,
				TargetServiceID: "web",
			},
			wantProxy: &structs.ConnectManagedProxy{
				ExecMode:        structs.ProxyExecModeDaemon,
				Command:         []string{"foo", "bar"},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.0.0.1:20000",
			wantErr:      false,
		},
		{
			desc: "default script command",
			proxy: &structs.ConnectManagedProxy{
				ExecMode:        structs.ProxyExecModeScript,
				TargetServiceID: "web",
			},
			wantProxy: &structs.ConnectManagedProxy{
				ExecMode:        structs.ProxyExecModeScript,
				Command:         []string{"bar", "foo"},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.0.0.1:20000",
			wantErr:      false,
		},
		{
			desc: "managed proxy with custom bind port",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo":          "bar",
					"bind_address": "127.10.10.10",
					"bind_port":    1234,
				},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.10.10.10:1234",
			wantErr:      false,
		},
		{
			// This test is necessary since JSON and HCL both will parse
			// numbers as a float64.
			desc: "managed proxy with custom bind port (float64)",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo":          "bar",
					"bind_address": "127.10.10.10",
					"bind_port":    float64(1234),
				},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.10.10.10:1234",
			wantErr:      false,
		},
		{
			desc: "managed proxy with overridden but unspecified ipv6 bind address",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo":          "bar",
					"bind_address": "[::]",
				},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.0.0.1:20000",
			wantErr:      false,
		},
		{
			desc: "managed proxy with overridden check address",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo":               "bar",
					"tcp_check_address": "127.20.20.20",
				},
				TargetServiceID: "web",
			},
			wantTCPCheck: "127.20.20.20:20000",
			wantErr:      false,
		},
		{
			desc: "managed proxy with disabled check",
			proxy: &structs.ConnectManagedProxy{
				ExecMode: structs.ProxyExecModeDaemon,
				Command:  []string{"consul", "connect", "proxy"},
				Config: map[string]interface{}{
					"foo":               "bar",
					"disable_tcp_check": true,
				},
				TargetServiceID: "web",
			},
			wantTCPCheck: "",
			wantErr:      false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			require := require.New(t)

			a := NewTestAgent(t, t.Name(), `
				node_name = "node1"

				# Explicit test because proxies inheriting this value must have a health
				# check on a different IP.
				bind_addr = "0.0.0.0"

				connect {
					proxy_defaults {
						exec_mode = "script"
						daemon_command = ["foo", "bar"]
						script_command = ["bar", "foo"]
					}
				}

				ports {
					proxy_min_port = 20000
					proxy_max_port = 20000
				}
			`)
			defer a.Shutdown()

			// Register a target service we can use
			reg := &structs.NodeService{
				Service: "web",
				Port:    8080,
			}
			require.NoError(a.AddService(reg, nil, false, "", ConfigSourceLocal))

			err := a.AddProxy(tt.proxy, false, false, "", ConfigSourceLocal)
			if tt.wantErr {
				require.Error(err)
				return
			}
			require.NoError(err)

			// Test the ID was created as we expect.
			got := a.State.Proxy("web-proxy")
			wantProxy := tt.wantProxy
			if wantProxy == nil {
				wantProxy = tt.proxy
			}
			wantProxy.ProxyService = got.Proxy.ProxyService
			require.Equal(wantProxy, got.Proxy)

			// Ensure a TCP check was created for the service.
			gotCheck := a.State.Check("service:web-proxy")
			if tt.wantTCPCheck == "" {
				require.Nil(gotCheck)
			} else {
				require.NotNil(gotCheck)
				require.Equal("Connect Proxy Listening", gotCheck.Name)

				// Confusingly, a.State.Check("service:web-proxy") will return the
				// state but its Definition field will be empty. This appears to be
				// expected when adding Checks as part of `AddService`. Notice how
				// `AddService` tests in this file don't assert on that state but
				// instead look at the agent's check state directly to ensure the
				// right thing was registered. We'll do the same for now.
				gotTCP, ok := a.checkTCPs["service:web-proxy"]
				require.True(ok)
				require.Equal(tt.wantTCPCheck, gotTCP.TCP)
				require.Equal(10*time.Second, gotTCP.Interval)
			}
		})
	}
}

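// TestAgent_RemoveProxy verifies that removing a managed proxy deletes its
// state and service entry, and that removing an unknown proxy is an error.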
func TestAgent_RemoveProxy(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
	`)
	defer a.Shutdown()
	require := require.New(t)

	// Register a target service we can use
	reg := &structs.NodeService{
		Service: "web",
		Port:    8080,
	}
	require.NoError(a.AddService(reg, nil, false, "", ConfigSourceLocal))

	// Add a proxy for web
	pReg := &structs.ConnectManagedProxy{
		TargetServiceID: "web",
		ExecMode:        structs.ProxyExecModeDaemon,
		Command:         []string{"foo"},
	}
	require.NoError(a.AddProxy(pReg, false, false, "", ConfigSourceLocal))

	// Test the ID was created as we expect.
	gotProxy := a.State.Proxy("web-proxy")
	require.NotNil(gotProxy)

	err := a.RemoveProxy("web-proxy", false)
	require.NoError(err)

	gotProxy = a.State.Proxy("web-proxy")
	require.Nil(gotProxy)
	require.Nil(a.State.Service("web-proxy"), "web-proxy service")

	// Removing invalid proxy should be an error
	err = a.RemoveProxy("foobar", false)
	require.Error(err)
}

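// TestAgent_ReLoadProxiesFromConfig verifies that proxies loaded from config
// keep the same auto-generated proxy token across unload/load cycles and
// disappear when their config does.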
func TestAgent_ReLoadProxiesFromConfig(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		node_name = "node1"
	`)
	defer a.Shutdown()
	require := require.New(t)

	// Register a target service we can use
	reg := &structs.NodeService{
		Service: "web",
		Port:    8080,
	}
	require.NoError(a.AddService(reg, nil, false, "", ConfigSourceLocal))

	proxies := a.State.Proxies()
	require.Len(proxies, 0)

	config := config.RuntimeConfig{
		Services: []*structs.ServiceDefinition{
			&structs.ServiceDefinition{
				Name: "web",
				Connect: &structs.ServiceConnect{
					Native: false,
					Proxy:  &structs.ServiceDefinitionConnectProxy{},
				},
			},
		},
	}

	require.NoError(a.loadProxies(&config))

	// Ensure we loaded the proxy
	proxies = a.State.Proxies()
	require.Len(proxies, 1)

	// Store the auto-generated token
	ptok := ""
	pid := ""
	for id := range proxies {
		pid = id
		ptok = proxies[id].ProxyToken
		break
	}

	// Reload the proxies and ensure the proxy token is the same
	require.NoError(a.unloadProxies())
	proxies = a.State.Proxies()
	require.Len(proxies, 0)
	require.NoError(a.loadProxies(&config))
	proxies = a.State.Proxies()
	require.Len(proxies, 1)
	require.Equal(ptok, proxies[pid].ProxyToken)

	// Make sure when the config goes away so does the proxy
	require.NoError(a.unloadProxies())
	proxies = a.State.Proxies()
	require.Len(proxies, 0)

	// a.config contains no services or proxies
	require.NoError(a.loadProxies(a.config))
	proxies = a.State.Proxies()
	require.Len(proxies, 0)
}

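// TestAgent_SetupProxyManager verifies that the proxy manager refuses to
// start without a usable HTTP API address and starts once one is configured.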
func TestAgent_SetupProxyManager(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	defer os.RemoveAll(dataDir)
	hcl := `
		ports { http = -1 }
		data_dir = "` + dataDir + `"
	`
	a, err := NewUnstartedAgent(t, t.Name(), hcl)
	require.NoError(t, err)
	require.Error(t, a.setupProxyManager(), "setupProxyManager should fail with invalid HTTP API config")

	hcl = `
		ports { http = 8001 }
		data_dir = "` + dataDir + `"
	`
	a, err = NewUnstartedAgent(t, t.Name(), hcl)
	require.NoError(t, err)
	require.NoError(t, a.setupProxyManager())
}

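// TestAgent_loadTokens covers the precedence rules between configured ACL
// tokens and the persisted token file, including partial files and the error
// paths for unparseable content.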
func TestAgent_loadTokens(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		acl = {
			enabled = true
			tokens = {
				agent = "alfa"
				agent_master = "bravo"
				default = "charlie"
				replication = "delta"
			}
		}
	`)
	defer a.Shutdown()
	require := require.New(t)

	tokensFullPath := filepath.Join(a.config.DataDir, tokensPath)

	t.Run("original-configuration", func(t *testing.T) {
		require.Equal("alfa", a.tokens.AgentToken())
		require.Equal("bravo", a.tokens.AgentMasterToken())
		require.Equal("charlie", a.tokens.UserToken())
		require.Equal("delta", a.tokens.ReplicationToken())
	})

	t.Run("updated-configuration", func(t *testing.T) {
		cfg := &config.RuntimeConfig{
			ACLToken:            "echo",
			ACLAgentToken:       "foxtrot",
			ACLAgentMasterToken: "golf",
			ACLReplicationToken: "hotel",
		}
		// ensures no error for missing persisted tokens file
		require.NoError(a.loadTokens(cfg))
		require.Equal("echo", a.tokens.UserToken())
		require.Equal("foxtrot", a.tokens.AgentToken())
		require.Equal("golf", a.tokens.AgentMasterToken())
		require.Equal("hotel", a.tokens.ReplicationToken())
	})

	t.Run("persisted-tokens", func(t *testing.T) {
		cfg := &config.RuntimeConfig{
			ACLToken:            "echo",
			ACLAgentToken:       "foxtrot",
			ACLAgentMasterToken: "golf",
			ACLReplicationToken: "hotel",
		}

		tokens := `{
			"agent" : "india",
			"agent_master" : "juliett",
			"default": "kilo",
			"replication" : "lima"
		}`

		require.NoError(ioutil.WriteFile(tokensFullPath, []byte(tokens), 0600))
		require.NoError(a.loadTokens(cfg))

		// no updates since token persistence is not enabled
		require.Equal("echo", a.tokens.UserToken())
		require.Equal("foxtrot", a.tokens.AgentToken())
		require.Equal("golf", a.tokens.AgentMasterToken())
		require.Equal("hotel", a.tokens.ReplicationToken())

		a.config.ACLEnableTokenPersistence = true
		require.NoError(a.loadTokens(cfg))

		require.Equal("india", a.tokens.AgentToken())
		require.Equal("juliett", a.tokens.AgentMasterToken())
		require.Equal("kilo", a.tokens.UserToken())
		require.Equal("lima", a.tokens.ReplicationToken())
	})

	t.Run("persisted-tokens-override", func(t *testing.T) {
		tokens := `{
			"agent" : "mike",
			"agent_master" : "november",
			"default": "oscar",
			"replication" : "papa"
		}`

		cfg := &config.RuntimeConfig{
			ACLToken:            "quebec",
			ACLAgentToken:       "romeo",
			ACLAgentMasterToken: "sierra",
			ACLReplicationToken: "tango",
		}

		require.NoError(ioutil.WriteFile(tokensFullPath, []byte(tokens), 0600))
		require.NoError(a.loadTokens(cfg))

		require.Equal("mike", a.tokens.AgentToken())
		require.Equal("november", a.tokens.AgentMasterToken())
		require.Equal("oscar", a.tokens.UserToken())
		require.Equal("papa", a.tokens.ReplicationToken())
	})

	t.Run("partial-persisted", func(t *testing.T) {
		tokens := `{
			"agent" : "uniform",
			"agent_master" : "victor"
		}`

		cfg := &config.RuntimeConfig{
			ACLToken:            "whiskey",
			ACLAgentToken:       "xray",
			ACLAgentMasterToken: "yankee",
			ACLReplicationToken: "zulu",
		}

		require.NoError(ioutil.WriteFile(tokensFullPath, []byte(tokens), 0600))
		require.NoError(a.loadTokens(cfg))

		require.Equal("uniform", a.tokens.AgentToken())
		require.Equal("victor", a.tokens.AgentMasterToken())
		require.Equal("whiskey", a.tokens.UserToken())
		require.Equal("zulu", a.tokens.ReplicationToken())
	})

	t.Run("persistence-error-not-json", func(t *testing.T) {
		cfg := &config.RuntimeConfig{
			ACLToken:            "one",
			ACLAgentToken:       "two",
			ACLAgentMasterToken: "three",
			ACLReplicationToken: "four",
		}

		require.NoError(ioutil.WriteFile(tokensFullPath, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600))
		err := a.loadTokens(cfg)
		require.Error(err)

		require.Equal("one", a.tokens.UserToken())
		require.Equal("two", a.tokens.AgentToken())
		require.Equal("three", a.tokens.AgentMasterToken())
		require.Equal("four", a.tokens.ReplicationToken())
	})

	t.Run("persistence-error-wrong-top-level", func(t *testing.T) {
		cfg := &config.RuntimeConfig{
			ACLToken:            "alfa",
			ACLAgentToken:       "bravo",
			ACLAgentMasterToken: "charlie",
			ACLReplicationToken: "foxtrot",
		}

		require.NoError(ioutil.WriteFile(tokensFullPath, []byte("[1,2,3]"), 0600))
		err := a.loadTokens(cfg)
		require.Error(err)

		require.Equal("alfa", a.tokens.UserToken())
		require.Equal("bravo", a.tokens.AgentToken())
		require.Equal("charlie", a.tokens.AgentMasterToken())
		require.Equal("foxtrot", a.tokens.ReplicationToken())
	})
}

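// TestAgent_ReloadConfigOutgoingRPCConfig verifies that reloading TLS
// settings is reflected in the outgoing RPC client configuration.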
func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	defer os.RemoveAll(dataDir)
	hcl := `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = false
	`
	a := NewTestAgent(t, t.Name(), hcl)
	defer a.Shutdown()
	tlsConf := a.tlsConfigurator.OutgoingRPCConfig()
	require.True(t, tlsConf.InsecureSkipVerify)
	require.Len(t, tlsConf.ClientCAs.Subjects(), 1)
	require.Len(t, tlsConf.RootCAs.Subjects(), 1)

	hcl = `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_path = "../test/ca_path"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = true
	`
	c := TestConfig(config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
	require.NoError(t, a.ReloadConfig(c))
	tlsConf = a.tlsConfigurator.OutgoingRPCConfig()
	require.False(t, tlsConf.InsecureSkipVerify)
	require.Len(t, tlsConf.RootCAs.Subjects(), 2)
	require.Len(t, tlsConf.ClientCAs.Subjects(), 2)
}

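// TestAgent_ReloadConfigIncomingRPCConfig verifies that reloading TLS
// settings propagates to new incoming RPC connections via GetConfigForClient.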
func TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	defer os.RemoveAll(dataDir)
	hcl := `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = false
	`
	a := NewTestAgent(t, t.Name(), hcl)
	defer a.Shutdown()
	tlsConf := a.tlsConfigurator.IncomingRPCConfig()
	require.NotNil(t, tlsConf.GetConfigForClient)
	tlsConf, err := tlsConf.GetConfigForClient(nil)
	require.NoError(t, err)
	require.NotNil(t, tlsConf)
	require.True(t, tlsConf.InsecureSkipVerify)
	require.Len(t, tlsConf.ClientCAs.Subjects(), 1)
	require.Len(t, tlsConf.RootCAs.Subjects(), 1)

	hcl = `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_path = "../test/ca_path"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = true
	`
	c := TestConfig(config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
	require.NoError(t, a.ReloadConfig(c))
	tlsConf, err = tlsConf.GetConfigForClient(nil)
	require.NoError(t, err)
	require.False(t, tlsConf.InsecureSkipVerify)
	require.Len(t, tlsConf.ClientCAs.Subjects(), 2)
	require.Len(t, tlsConf.RootCAs.Subjects(), 2)
}

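// TestAgent_ReloadConfigTLSConfigFailure verifies that a reload with an
// invalid TLS config fails and leaves the previous TLS settings in place.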
func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	defer os.RemoveAll(dataDir)
	hcl := `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = false
	`
	a := NewTestAgent(t, t.Name(), hcl)
	defer a.Shutdown()
	tlsConf := a.tlsConfigurator.IncomingRPCConfig()

	hcl = `
		data_dir = "` + dataDir + `"
		verify_incoming = true
	`
	c := TestConfig(config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
	require.Error(t, a.ReloadConfig(c))
	tlsConf, err := tlsConf.GetConfigForClient(nil)
	require.NoError(t, err)
	require.Equal(t, tls.NoClientCert, tlsConf.ClientAuth)
	require.Len(t, tlsConf.ClientCAs.Subjects(), 1)
	require.Len(t, tlsConf.RootCAs.Subjects(), 1)
}

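// TestAgent_consulConfig verifies that auto_encrypt.allow_tls flows through
// to the consul server config.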
func TestAgent_consulConfig(t *testing.T) {
	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	defer os.RemoveAll(dataDir)
	hcl := `
		data_dir = "` + dataDir + `"
		verify_incoming = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		auto_encrypt { allow_tls = true }
	`
	a := NewTestAgent(t, t.Name(), hcl)
	defer a.Shutdown()
	require.True(t, a.consulConfig().AutoEncryptAllowTLS)
}