package nomad

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"strings"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
	vapi "github.com/hashicorp/vault/api"
	"github.com/kr/pretty"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
)

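// TestClientEndpoint_Register asserts that a client node can register over
// RPC, that the node is persisted to the FSM with its computed class, and
// that the node connection is tracked and dropped once the connection closes.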
func TestClientEndpoint_Register(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check that we have the client connections
	nodes := s1.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.CreateIndex != resp.Index {
		t.Fatalf("index mis-match")
	}
	if out.ComputedClass == "" {
		t.Fatal("ComputedClass not set")
	}

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := s1.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}

// This test asserts that we only track node connections if they are not from
// forwarded RPCs. This is essential, otherwise we will think a Yamux session
// to a Nomad server is actually the session to the node.
func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})
	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})
	defer cleanupS2()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	// Determine the non-leader server
	var leader, nonLeader *Server
	if s1.IsLeader() {
		leader = s1
		nonLeader = s2
	} else {
		leader = s2
		nonLeader = s1
	}

	// Send the requests to the non-leader
	codec := rpcClient(t, nonLeader)

	// Check that we have no client connections
	require.Empty(nonLeader.connectedNodes())
	require.Empty(leader.connectedNodes())

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check that we have the client connections on the non-leader
	nodes := nonLeader.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check that we have no client connections on the leader
	nodes = leader.connectedNodes()
	require.Empty(nodes)

	// Check for the node in the FSM
	state := leader.State()
	testutil.WaitForResult(func() (bool, error) {
		out, err := state.NodeByID(nil, node.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("expected node")
		}
		if out.CreateIndex != resp.Index {
			return false, fmt.Errorf("index mis-match")
		}
		if out.ComputedClass == "" {
			return false, fmt.Errorf("ComputedClass not set")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := nonLeader.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}

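// TestClientEndpoint_Register_SecretMismatch asserts that re-registering a
// node with a different SecretID is rejected.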
func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the node's SecretID
	node.SecretID = uuid.Generate()
	err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "Not registering") {
		t.Fatalf("Expecting error regarding mismatching secret id: %v", err)
	}
}

// Test the deprecated single node deregistration path
func TestClientEndpoint_DeregisterOne(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deregister
	dereg := &structs.NodeDeregisterRequest{
		NodeID:       node.ID,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected node")
	}
}

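// TestClientEndpoint_Deregister_ACL asserts that batch node deregistration
// requires a token with node write permissions (or a root token).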
func TestClientEndpoint_Deregister_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the nodes
	node := mock.Node()
	node1 := mock.Node()
	state := s1.fsm.State()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 1, node); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))

	// Deregister without any token and expect it to fail
	dereg := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err == nil {
		t.Fatalf("node de-register succeeded")
	}

	// Deregister with a valid token
	dereg.AuthToken = validToken.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for the node in the FSM
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected node")
	}

	// Deregister with an invalid token.
	dereg1 := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node1.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	dereg1.AuthToken = invalidToken.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err == nil {
		t.Fatalf("rpc should not have succeeded")
	}

	// Try with a root token
	dereg1.AuthToken = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
}

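// TestClientEndpoint_Deregister_Vault asserts that deregistering a node
// revokes the Vault accessors that were created for it.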
func TestClientEndpoint_Deregister_Vault(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Swap the server's Vault client
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Put some Vault accessors in the state store for that node
	state := s1.fsm.State()
	va1 := mock.VaultAccessor()
	va1.NodeID = node.ID
	va2 := mock.VaultAccessor()
	va2.NodeID = node.ID
	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})

	// Deregister
	dereg := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for the node in the FSM
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected node")
	}

	// Check that the endpoint revoked the tokens
	if l := len(tvc.RevokedTokens); l != 2 {
		t.Fatalf("Deregister revoked %d tokens; want 2", l)
	}
}

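// TestClientEndpoint_UpdateStatus asserts that a node status update returns a
// heartbeat TTL, is persisted to the FSM, and that the node connection is
// tracked and dropped once the connection closes.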
func TestClientEndpoint_UpdateStatus(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Update the status
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check that we have the client connections
	nodes := s1.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp2.Index {
		t.Fatalf("index mis-match")
	}

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := s1.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}

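// TestClientEndpoint_UpdateStatus_Vault asserts that updating a node's status
// to down revokes the Vault accessors that were created for it.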
func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Swap the server's Vault client
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Put some Vault accessors in the state store for that node
	state := s1.fsm.State()
	va1 := mock.VaultAccessor()
	va1.NodeID = node.ID
	va2 := mock.VaultAccessor()
	va2.NodeID = node.ID
	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})

	// Update the status to be down
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusDown,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check that the endpoint revoked the tokens
	if l := len(tvc.RevokedTokens); l != 2 {
		t.Fatalf("Deregister revoked %d tokens; want 2", l)
	}
}

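// TestClientEndpoint_UpdateStatus_HeartbeatRecovery asserts that a node
// registered as down can heartbeat back to the init status and that a
// re-registration node event is recorded.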
func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request but make the node down
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Update the status
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2))
	require.NotZero(resp2.Index)

	// Check for heartbeat interval
	ttl := resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.NotNil(out)
	require.EqualValues(resp2.Index, out.ModifyIndex)
	require.Len(out.Events, 2)
	require.Equal(NodeHeartbeatEventReregistered, out.Events[1].Message)
}

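// TestClientEndpoint_Register_GetEvals asserts that registering a ready node
// creates an evaluation for registered system jobs, as do subsequent
// down/ready transitions.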
func TestClientEndpoint_Register_GetEvals(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Register a system job.
	job := mock.SystemJob()
	state := s1.fsm.State()
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the register request going directly to ready
	node := mock.Node()
	node.Status = structs.NodeStatusReady
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for an eval caused by the system job.
	if len(resp.EvalIDs) != 1 {
		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
	}

	evalID := resp.EvalIDs[0]
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, evalID)
	if err != nil {
		t.Fatalf("could not get eval %v", evalID)
	}

	if eval.Type != "system" {
		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
	}

	// Check for the node in the FSM
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp.Index {
		t.Fatalf("index mis-match")
	}

	// Transition it to down and then ready
	node.Status = structs.NodeStatusDown
	reg = &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(resp.EvalIDs) != 1 {
		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
	}

	node.Status = structs.NodeStatusReady
	reg = &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(resp.EvalIDs) != 1 {
		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
	}
}

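// TestClientEndpoint_UpdateStatus_GetEvals asserts that updating a node's
// status to ready creates an evaluation for registered system jobs.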
func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Register a system job.
	job := mock.SystemJob()
	state := s1.fsm.State()
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the register request
	node := mock.Node()
	node.Status = structs.NodeStatusInit
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Update the status
	update := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusReady,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", update, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for an eval caused by the system job.
	if len(resp2.EvalIDs) != 1 {
		t.Fatalf("expected one eval; got %#v", resp2.EvalIDs)
	}

	evalID := resp2.EvalIDs[0]
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, evalID)
	if err != nil {
		t.Fatalf("could not get eval %v", evalID)
	}

	if eval.Type != "system" {
		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for the node in the FSM
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp2.Index {
		t.Fatalf("index mis-match")
	}
}

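// TestClientEndpoint_UpdateStatus_HeartbeatOnly asserts that a status update
// that does not change the node's status returns a fresh heartbeat TTL and
// the current server list without raising the Raft index.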
func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS1()

	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS2()

	s3, cleanupS3 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS3()
	servers := []*Server{s1, s2, s3}
	TestJoin(t, s1, s2, s3)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for heartbeat servers
	serverAddrs := resp.Servers
	if len(serverAddrs) == 0 {
		t.Fatalf("bad: %#v", serverAddrs)
	}

	// Update the status, static state
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       node.Status,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}
}

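// TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise asserts that the
// heartbeat response advertises the configured ClientRPCAdvertise address.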
func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	advAddr := "127.0.1.1:1234"
	adv, err := net.ResolveTCPAddr("tcp", advAddr)
	require.Nil(err)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.ClientRPCAdvertise = adv
	})
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for heartbeat servers
	require.Len(resp.Servers, 1)
	require.Equal(resp.Servers[0].RPCAdvertiseAddr, advAddr)
}

// TestClientEndpoint_UpdateDrain asserts the ability to initiate drain
// against a node and cancel that drain. It also asserts:
// * an evaluation is created when the node becomes eligible
// * drain metadata is properly persisted in Node.LastDrain
func TestClientEndpoint_UpdateDrain(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Disable drainer to prevent drain from completing during test
	s1.nodeDrainer.SetEnabled(false, nil)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	beforeUpdate := time.Now()
	strategy := &structs.DrainStrategy{
		DrainSpec: structs.DrainSpec{
			Deadline: 10 * time.Second,
		},
	}

	// Update the status
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID:        node.ID,
		DrainStrategy: strategy,
		Meta:          map[string]string{"message": "this node is not needed"},
		WriteRequest:  structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
	require.NotZero(resp2.Index)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.Nil(err)
	require.NotNil(out.DrainStrategy)
	require.Equal(strategy.Deadline, out.DrainStrategy.Deadline)
	require.Len(out.Events, 2)
	require.Equal(NodeDrainEventDrainSet, out.Events[1].Message)
	require.NotNil(out.LastDrain)
	require.Equal(structs.DrainMetadata{
		StartedAt: out.LastDrain.UpdatedAt,
		UpdatedAt: out.LastDrain.StartedAt,
		Status:    structs.DrainStatusDraining,
		Meta:      map[string]string{"message": "this node is not needed"},
	}, *out.LastDrain)

	// before+deadline should be before the forced deadline
	require.True(beforeUpdate.Add(strategy.Deadline).Before(out.DrainStrategy.ForceDeadline))

	// now+deadline should be after the forced deadline
	require.True(time.Now().Add(strategy.Deadline).After(out.DrainStrategy.ForceDeadline))

	drainStartedAt := out.DrainStrategy.StartedAt
	// StartedAt should be close to the time the drain started
	require.WithinDuration(beforeUpdate, drainStartedAt, 1*time.Second)

	// StartedAt shouldn't change if a new request comes while still draining
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
	ws = memdb.NewWatchSet()
	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.True(out.DrainStrategy.StartedAt.Equal(drainStartedAt))

	// Register a system job
	job := mock.SystemJob()
	require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, job))

	// Update the eligibility and expect evals
	dereg.DrainStrategy = nil
	dereg.MarkEligible = true
	dereg.Meta = map[string]string{"cancelled": "yes"}
	var resp3 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
	require.NotZero(resp3.Index)
	require.NotZero(resp3.EvalCreateIndex)
	require.Len(resp3.EvalIDs, 1)

	// Check for updated node in the FSM
	ws = memdb.NewWatchSet()
	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.Len(out.Events, 4)
	require.Equal(NodeDrainEventDrainDisabled, out.Events[3].Message)
	require.NotNil(out.LastDrain)
	require.False(out.LastDrain.UpdatedAt.Before(out.LastDrain.StartedAt))
	require.Equal(structs.DrainMetadata{
		StartedAt: out.LastDrain.StartedAt,
		UpdatedAt: out.LastDrain.UpdatedAt,
		Status:    structs.DrainStatusCanceled,
		Meta:      map[string]string{"cancelled": "yes"},
	}, *out.LastDrain)

	// Check that calling UpdateDrain with the same DrainStrategy does not emit
	// a node event.
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
	ws = memdb.NewWatchSet()
	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.Len(out.Events, 4)
}

// TestClientEndpoint_UpdatedDrainAndCompleted asserts that drain metadata
// is properly persisted in Node.LastDrain as the node drain is updated and
// completes.
func TestClientEndpoint_UpdatedDrainAndCompleted(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()

	// Disable drainer for now
	s1.nodeDrainer.SetEnabled(false, nil)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	strategy := &structs.DrainStrategy{
		DrainSpec: structs.DrainSpec{
			Deadline: 10 * time.Second,
		},
	}

	// Update the status
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID:        node.ID,
		DrainStrategy: strategy,
		Meta: map[string]string{
			"message": "first drain",
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
	require.NotZero(resp2.Index)

	// Check for the node in the FSM
	out, err := state.NodeByID(nil, node.ID)
	require.Nil(err)
	require.NotNil(out.DrainStrategy)
	require.NotNil(out.LastDrain)
	firstDrainUpdate := out.LastDrain.UpdatedAt
	require.Equal(structs.DrainMetadata{
		StartedAt: firstDrainUpdate,
		UpdatedAt: firstDrainUpdate,
		Status:    structs.DrainStatusDraining,
		Meta:      map[string]string{"message": "first drain"},
	}, *out.LastDrain)

	time.Sleep(1 * time.Second)

	// Update the drain
	dereg.DrainStrategy.DrainSpec.Deadline *= 2
	dereg.Meta["message"] = "second drain"
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
	require.NotZero(resp2.Index)

	out, err = state.NodeByID(nil, node.ID)
	require.Nil(err)
	require.NotNil(out.DrainStrategy)
	require.NotNil(out.LastDrain)
	secondDrainUpdate := out.LastDrain.UpdatedAt
	require.True(secondDrainUpdate.After(firstDrainUpdate))
	require.Equal(structs.DrainMetadata{
		StartedAt: firstDrainUpdate,
		UpdatedAt: secondDrainUpdate,
		Status:    structs.DrainStatusDraining,
		Meta:      map[string]string{"message": "second drain"},
	}, *out.LastDrain)

	time.Sleep(1 * time.Second)

	// Enable the drainer, wait for completion
	s1.nodeDrainer.SetEnabled(true, state)

	testutil.WaitForResult(func() (bool, error) {
		out, err = state.NodeByID(nil, node.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("could not find node")
		}
		return out.DrainStrategy == nil, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	require.True(out.LastDrain.UpdatedAt.After(secondDrainUpdate))
	require.Equal(structs.DrainMetadata{
		StartedAt: firstDrainUpdate,
		UpdatedAt: out.LastDrain.UpdatedAt,
		Status:    structs.DrainStatusComplete,
		Meta:      map[string]string{"message": "second drain"},
	}, *out.LastDrain)
}

// TestClientEndpoint_UpdatedDrainNoop asserts that drain metadata is properly
// persisted in Node.LastDrain when calls to Node.UpdateDrain() don't affect
// the drain status.
func TestClientEndpoint_UpdatedDrainNoop(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Update the status
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID: node.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Second,
			},
		},
		Meta: map[string]string{
			"message": "drain",
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &drainResp))
	require.NotZero(drainResp.Index)

	var out *structs.Node
	testutil.WaitForResult(func() (bool, error) {
		var err error
		out, err = state.NodeByID(nil, node.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("could not find node")
		}
		return out.DrainStrategy == nil && out.SchedulingEligibility == structs.NodeSchedulingIneligible, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	require.Equal(structs.DrainStatusComplete, out.LastDrain.Status)
	require.Equal(map[string]string{"message": "drain"}, out.LastDrain.Meta)
	prevDrain := out.LastDrain

	// call again with Drain Strategy nil; should be a no-op because drain is already complete
	dereg.DrainStrategy = nil
	dereg.Meta = map[string]string{
		"new_message": "is new",
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &drainResp))
	require.NotZero(drainResp.Index)

	out, err := state.NodeByID(nil, node.ID)
	require.Nil(err)
	require.Nil(out.DrainStrategy)
	require.NotNil(out.LastDrain)
	require.Equal(prevDrain, out.LastDrain)
}

// TestClientEndpoint_UpdateDrain_ACL asserts that Node.UpdateDrain() enforces
// node.write ACLs, and that the token accessor ID is properly persisted in
// Node.LastDrain.AccessorID
func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	require := require.New(t)

	// Create the node
	node := mock.Node()
	state := s1.fsm.State()

	require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")

	// Create the policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))

	// Update the status without a token and expect failure
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID: node.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	{
		var resp structs.NodeDrainUpdateResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
		require.NotNil(err, "RPC")
		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a valid token
	dereg.AuthToken = validToken.SecretID
	{
		var resp structs.NodeDrainUpdateResponse
		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
		out, err := state.NodeByID(nil, node.ID)
		require.NoError(err)
		require.Equal(validToken.AccessorID, out.LastDrain.AccessorID)
	}

	// Try with an invalid token
	dereg.AuthToken = invalidToken.SecretID
	{
		var resp structs.NodeDrainUpdateResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
		require.NotNil(err, "RPC")
		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a root token
	dereg.DrainStrategy.DrainSpec.Deadline = 20 * time.Second
	dereg.AuthToken = root.SecretID
	{
		var resp structs.NodeDrainUpdateResponse
		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
		out, err := state.NodeByID(nil, node.ID)
		require.NoError(err)
		require.Equal(root.AccessorID, out.LastDrain.AccessorID)
	}
}

|
|
|
|
|
2016-08-09 20:11:58 +00:00
|
|
|
// This test ensures that Nomad marks client state of allocations which are in
|
|
|
|
// pending/running state to lost when a node is marked as down.
|
2016-08-09 17:16:17 +00:00
|
|
|
func TestClientEndpoint_Drain_Down(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-08-09 17:16:17 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
2018-02-23 18:42:43 +00:00
|
|
|
require := require.New(t)
|
2016-08-09 17:16:17 +00:00
|
|
|
|
2016-08-09 20:11:58 +00:00
|
|
|
// Register a node
|
2016-08-09 17:16:17 +00:00
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.NodeUpdateResponse
|
2018-02-23 18:42:43 +00:00
|
|
|
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
|
2016-08-09 17:16:17 +00:00
|
|
|
|
2016-08-09 20:11:58 +00:00
|
|
|
// Register a service job
|
2016-08-09 17:16:17 +00:00
|
|
|
var jobResp structs.JobRegisterResponse
|
|
|
|
job := mock.Job()
|
2016-08-09 20:11:58 +00:00
|
|
|
	job.TaskGroups[0].Count = 1
	jobReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp))

	// Register a system job
	var jobResp1 structs.JobRegisterResponse
	job1 := mock.SystemJob()
	job1.TaskGroups[0].Count = 1
	jobReq1 := &structs.JobRegisterRequest{
		Job: job1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job1.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq1, &jobResp1))

	// Wait for the scheduler to create an allocation
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		allocs, err := s1.fsm.state.AllocsByJob(ws, job.Namespace, job.ID, true)
		if err != nil {
			return false, err
		}
		allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
		if err != nil {
			return false, err
		}
		return len(allocs) > 0 && len(allocs1) > 0, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Drain the node
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID: node.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: -1 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))

	// Mark the node as down
	node.Status = structs.NodeStatusDown
	reg = &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Ensure that the allocation has transitioned to lost
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		summary, err := s1.fsm.state.JobSummaryByID(ws, job.Namespace, job.ID)
		if err != nil {
			return false, err
		}
		expectedSummary := &structs.JobSummary{
			JobID:     job.ID,
			Namespace: job.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					Queued: 1,
					Lost:   1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp.JobModifyIndex,
			ModifyIndex: summary.ModifyIndex,
		}
		if !reflect.DeepEqual(summary, expectedSummary) {
			return false, fmt.Errorf("Service: expected: %#v, actual: %#v", expectedSummary, summary)
		}

		summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.Namespace, job1.ID)
		if err != nil {
			return false, err
		}
		expectedSummary1 := &structs.JobSummary{
			JobID:     job1.ID,
			Namespace: job1.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					Lost: 1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp1.JobModifyIndex,
			ModifyIndex: summary1.ModifyIndex,
		}
		if !reflect.DeepEqual(summary1, expectedSummary1) {
			return false, fmt.Errorf("System: expected: %#v, actual: %#v", expectedSummary1, summary1)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
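
// TestClientEndpoint_UpdateEligibility verifies that marking a node ineligible
// updates the FSM and records a node event without creating evaluations, and
// that marking it eligible again creates an evaluation for the registered
// system job.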
func TestClientEndpoint_UpdateEligibility(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Update the eligibility
	elig := &structs.NodeUpdateEligibilityRequest{
		NodeID:       node.ID,
		Eligibility:  structs.NodeSchedulingIneligible,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeEligibilityUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp2))
	require.NotZero(resp2.Index)
	require.Zero(resp2.EvalCreateIndex)
	require.Empty(resp2.EvalIDs)

	// Check for the node in the FSM
	state := s1.fsm.State()
	out, err := state.NodeByID(nil, node.ID)
	require.Nil(err)
	require.Equal(out.SchedulingEligibility, structs.NodeSchedulingIneligible)
	require.Len(out.Events, 2)
	require.Equal(NodeEligibilityEventIneligible, out.Events[1].Message)

	// Register a system job
	job := mock.SystemJob()
	require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, job))

	// Update the eligibility and expect evals
	elig.Eligibility = structs.NodeSchedulingEligible
	var resp3 structs.NodeEligibilityUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp3))
	require.NotZero(resp3.Index)
	require.NotZero(resp3.EvalCreateIndex)
	require.Len(resp3.EvalIDs, 1)

	out, err = state.NodeByID(nil, node.ID)
	require.Nil(err)
	require.Len(out.Events, 3)
	require.Equal(NodeEligibilityEventEligible, out.Events[2].Message)
}
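
// TestClientEndpoint_UpdateEligibility_ACL verifies that Node.UpdateEligibility
// enforces ACLs: requests without a token or with a read-only node policy are
// rejected, while node-write and root tokens are accepted.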
func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	require := require.New(t)

	// Create the node
	node := mock.Node()
	state := s1.fsm.State()

	require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")

	// Create the policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))

	// Update the eligibility without a token and expect failure
	dereg := &structs.NodeUpdateEligibilityRequest{
		NodeID:       node.ID,
		Eligibility:  structs.NodeSchedulingIneligible,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	{
		var resp structs.NodeEligibilityUpdateResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
		require.NotNil(err, "RPC")
		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a valid token
	dereg.AuthToken = validToken.SecretID
	{
		var resp structs.NodeEligibilityUpdateResponse
		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
	}

	// Try with an invalid token
	dereg.AuthToken = invalidToken.SecretID
	{
		var resp structs.NodeEligibilityUpdateResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
		require.NotNil(err, "RPC")
		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a root token
	dereg.AuthToken = root.SecretID
	{
		var resp structs.NodeEligibilityUpdateResponse
		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
	}
}
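
// TestClientEndpoint_GetNode verifies that a registered node can be read back
// with its computed class and registration event populated, and that looking
// up an unknown node ID returns no node.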
func TestClientEndpoint_GetNode(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Lookup the node
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.SingleNodeResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}

	if resp2.Node.ComputedClass == "" {
		t.Fatalf("bad ComputedClass: %#v", resp2.Node)
	}

	// Update the status updated at value
	node.StatusUpdatedAt = resp2.Node.StatusUpdatedAt
	node.SecretID = ""
	node.Events = resp2.Node.Events
	require.Equal(t, node, resp2.Node)

	// assert that the node register event was set correctly
	if len(resp2.Node.Events) != 1 {
		t.Fatalf("Did not set node events: %#v", resp2.Node)
	}
	if resp2.Node.Events[0].Message != state.NodeRegisterEventRegistered {
		t.Fatalf("Did not set node register event correctly: %#v", resp2.Node)
	}

	// Lookup non-existing node
	get.NodeID = "12345678-abcd-efab-cdef-123456789abc"
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}
	if resp2.Node != nil {
		t.Fatalf("unexpected node")
	}
}
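
// TestClientEndpoint_GetNode_ACL verifies that Node.GetNode enforces ACLs:
// lookups succeed with a node-read token, the node's own SecretID, or a root
// token, and fail without a token or with a deny policy.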
func TestClientEndpoint_GetNode_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	assert := assert.New(t)

	// Create the node
	node := mock.Node()
	state := s1.fsm.State()
	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")

	// Create the policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))

	// Lookup the node without a token and expect failure
	req := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	{
		var resp structs.SingleNodeResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
		assert.NotNil(err, "RPC")
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a valid token
	req.AuthToken = validToken.SecretID
	{
		var resp structs.SingleNodeResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
		assert.Equal(node.ID, resp.Node.ID)
	}

	// Try with a Node.SecretID
	req.AuthToken = node.SecretID
	{
		var resp structs.SingleNodeResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
		assert.Equal(node.ID, resp.Node.ID)
	}

	// Try with an invalid token
	req.AuthToken = invalidToken.SecretID
	{
		var resp structs.SingleNodeResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
		assert.NotNil(err, "RPC")
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a root token
	req.AuthToken = root.SecretID
	{
		var resp structs.SingleNodeResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
		assert.Equal(node.ID, resp.Node.ID)
	}
}
|
|
|
|
|
2015-10-30 02:00:02 +00:00
|
|
|
func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2015-10-29 22:48:44 +00:00
|
|
|
state := s1.fsm.State()
|
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the node
|
|
|
|
node1 := mock.Node()
|
|
|
|
node2 := mock.Node()
|
|
|
|
|
|
|
|
// First create an unrelated node.
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
2020-10-19 13:30:15 +00:00
|
|
|
if err := state.UpsertNode(structs.MsgTypeTestSetup, 100, node1); err != nil {
|
2015-10-29 22:48:44 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Upsert the node we are watching later
|
|
|
|
time.AfterFunc(200*time.Millisecond, func() {
|
2020-10-19 13:30:15 +00:00
|
|
|
if err := state.UpsertNode(structs.MsgTypeTestSetup, 200, node2); err != nil {
|
2015-10-29 22:48:44 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Lookup the node
|
2015-10-30 02:00:02 +00:00
|
|
|
req := &structs.NodeSpecificRequest{
|
2015-10-29 22:48:44 +00:00
|
|
|
NodeID: node2.ID,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: "global",
|
2017-02-08 06:10:33 +00:00
|
|
|
MinQueryIndex: 150,
|
2015-10-29 22:48:44 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
var resp structs.SingleNodeResponse
|
|
|
|
start := time.Now()
|
2015-10-30 02:00:02 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
|
2015-10-29 22:48:44 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-10-30 15:27:47 +00:00
|
|
|
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
|
2015-10-29 22:48:44 +00:00
|
|
|
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
|
|
|
|
}
|
2015-10-30 02:00:02 +00:00
|
|
|
if resp.Index != 200 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp.Index, 200)
|
2015-10-29 22:48:44 +00:00
|
|
|
}
|
|
|
|
if resp.Node == nil || resp.Node.ID != node2.ID {
|
|
|
|
t.Fatalf("bad: %#v", resp.Node)
|
|
|
|
}
|
2015-10-30 02:00:02 +00:00
|
|
|
|
|
|
|
// Node update triggers watches
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
|
|
|
nodeUpdate := mock.Node()
|
|
|
|
nodeUpdate.ID = node2.ID
|
|
|
|
nodeUpdate.Status = structs.NodeStatusDown
|
2020-10-19 13:30:15 +00:00
|
|
|
if err := state.UpsertNode(structs.MsgTypeTestSetup, 300, nodeUpdate); err != nil {
|
2015-10-30 02:00:02 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
req.QueryOptions.MinQueryIndex = 250
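	// Block above the previous write at index 200 so the query waits for the
	// node update applied at index 300 above.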
|
|
|
|
var resp2 structs.SingleNodeResponse
|
|
|
|
start = time.Now()
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp2); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-10-30 15:27:47 +00:00
|
|
|
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
|
2015-10-30 02:00:02 +00:00
|
|
|
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
|
|
|
|
}
|
|
|
|
if resp2.Index != 300 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 300)
|
|
|
|
}
|
|
|
|
if resp2.Node == nil || resp2.Node.Status != structs.NodeStatusDown {
|
|
|
|
t.Fatalf("bad: %#v", resp2.Node)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Node delete triggers watches
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
2020-10-19 13:30:15 +00:00
|
|
|
if err := state.DeleteNode(structs.MsgTypeTestSetup, 400, []string{node2.ID}); err != nil {
|
2015-10-30 02:00:02 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
req.QueryOptions.MinQueryIndex = 350
|
|
|
|
var resp3 structs.SingleNodeResponse
|
|
|
|
start = time.Now()
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp3); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-10-30 15:27:47 +00:00
|
|
|
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
|
2015-10-30 02:00:02 +00:00
|
|
|
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
|
|
|
|
}
|
|
|
|
if resp3.Index != 400 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 400)
|
|
|
|
}
|
|
|
|
if resp3.Node != nil {
|
|
|
|
t.Fatalf("bad: %#v", resp3.Node)
|
|
|
|
}
|
2015-10-29 22:48:44 +00:00
|
|
|
}
|
|
|
|
|
2015-08-23 02:17:49 +00:00
|
|
|
func TestClientEndpoint_GetAllocs(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2015-08-23 02:17:49 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
2015-09-14 01:18:40 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
2015-08-23 02:17:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
2015-09-07 03:31:32 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
|
2015-08-23 02:17:49 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
node.CreateIndex = resp.Index
|
|
|
|
node.ModifyIndex = resp.Index
|
|
|
|
|
|
|
|
// Inject fake evaluations
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
state := s1.fsm.State()
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
|
2015-08-23 02:17:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocs
|
|
|
|
get := &structs.NodeSpecificRequest{
|
|
|
|
NodeID: node.ID,
|
2015-09-14 01:18:40 +00:00
|
|
|
QueryOptions: structs.QueryOptions{Region: "global"},
|
2015-08-23 02:17:49 +00:00
|
|
|
}
|
|
|
|
var resp2 structs.NodeAllocsResponse
|
2015-09-07 03:31:32 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
|
2015-08-23 02:17:49 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp2.Index != 100 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
|
|
|
|
t.Fatalf("bad: %#v", resp2.Allocs)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup non-existing node
|
|
|
|
get.NodeID = "foobarbaz"
|
2015-09-07 03:31:32 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
|
2015-08-23 02:17:49 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp2.Index != 100 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
|
|
|
|
}
|
|
|
|
if len(resp2.Allocs) != 0 {
|
|
|
|
t.Fatalf("unexpected node")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-16 21:21:29 +00:00
|
|
|
func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) {
|
2017-09-15 04:42:19 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, root, cleanupS1 := TestACLServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-09-15 04:42:19 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
// Create the node
|
2017-09-15 21:27:11 +00:00
|
|
|
allocDefaultNS := mock.Alloc()
|
2017-09-15 04:42:19 +00:00
|
|
|
node := mock.Node()
|
2017-09-15 21:27:11 +00:00
|
|
|
allocDefaultNS.NodeID = node.ID
|
2017-09-15 04:42:19 +00:00
|
|
|
state := s1.fsm.State()
|
2020-10-19 13:30:15 +00:00
|
|
|
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
|
2017-09-15 21:27:11 +00:00
|
|
|
assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
|
2017-10-16 21:21:29 +00:00
|
|
|
allocs := []*structs.Allocation{allocDefaultNS}
|
2020-10-19 13:30:15 +00:00
|
|
|
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, allocs), "UpsertAllocs")
|
2017-09-15 04:42:19 +00:00
|
|
|
|
|
|
|
// Create the namespace policy and tokens
|
2017-10-04 22:08:10 +00:00
|
|
|
validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
|
|
|
|
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
|
|
|
|
invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
|
|
|
|
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
|
2017-09-15 04:42:19 +00:00
|
|
|
|
|
|
|
req := &structs.NodeSpecificRequest{
|
2017-10-13 16:56:56 +00:00
|
|
|
NodeID: node.ID,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: "global",
|
|
|
|
},
|
2017-09-15 04:42:19 +00:00
|
|
|
}
|
2017-10-13 16:56:56 +00:00
|
|
|
|
|
|
|
// Lookup the node without a token and expect failure
|
2017-09-15 04:42:19 +00:00
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
2017-09-15 17:41:28 +00:00
|
|
|
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
|
|
|
|
assert.NotNil(err, "RPC")
|
|
|
|
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
|
2017-09-15 04:42:19 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 21:27:11 +00:00
|
|
|
// Try with a valid token for the default namespace
|
2017-10-13 16:56:56 +00:00
|
|
|
req.AuthToken = validDefaultToken.SecretID
|
2017-09-15 04:42:19 +00:00
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
|
2017-09-15 21:27:11 +00:00
|
|
|
assert.Len(resp.Allocs, 1)
|
|
|
|
assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
|
|
|
|
}
|
|
|
|
|
2017-09-15 04:42:19 +00:00
|
|
|
// Try with a invalid token
|
2017-10-13 16:56:56 +00:00
|
|
|
req.AuthToken = invalidToken.SecretID
|
2017-09-15 04:42:19 +00:00
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
|
|
|
|
assert.NotNil(err, "RPC")
|
|
|
|
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try with a root token
|
2017-10-13 16:56:56 +00:00
|
|
|
req.AuthToken = root.SecretID
|
2017-09-15 04:42:19 +00:00
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
|
2017-10-16 21:21:29 +00:00
|
|
|
assert.Len(resp.Allocs, 1)
|
2017-09-15 21:27:11 +00:00
|
|
|
for _, alloc := range resp.Allocs {
|
|
|
|
switch alloc.ID {
|
2017-10-16 21:21:29 +00:00
|
|
|
case allocDefaultNS.ID:
|
2017-09-15 21:27:11 +00:00
|
|
|
// expected
|
|
|
|
default:
|
|
|
|
t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
|
|
|
|
}
|
|
|
|
}
|
2017-09-15 04:42:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-21 04:16:25 +00:00
|
|
|
func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
s1, root, cleanupS1 := TestACLServer(t, nil)
|
|
|
|
defer cleanupS1()
|
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
// Create the namespaces
|
|
|
|
ns1 := mock.Namespace()
|
|
|
|
ns2 := mock.Namespace()
|
|
|
|
ns1.Name = "altnamespace"
|
|
|
|
ns2.Name = "should-only-be-displayed-for-root-ns"
|
|
|
|
|
|
|
|
// Create the allocs
|
|
|
|
allocDefaultNS := mock.Alloc()
|
|
|
|
allocAltNS := mock.Alloc()
|
|
|
|
allocAltNS.Namespace = ns1.Name
|
|
|
|
allocOtherNS := mock.Alloc()
|
|
|
|
allocOtherNS.Namespace = ns2.Name
|
|
|
|
|
|
|
|
node := mock.Node()
|
|
|
|
allocDefaultNS.NodeID = node.ID
|
|
|
|
allocAltNS.NodeID = node.ID
|
|
|
|
allocOtherNS.NodeID = node.ID
|
|
|
|
state := s1.fsm.State()
|
|
|
|
assert.Nil(state.UpsertNamespaces(1, []*structs.Namespace{ns1, ns2}), "UpsertNamespaces")
|
|
|
|
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 2, node), "UpsertNode")
|
|
|
|
assert.Nil(state.UpsertJobSummary(3, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
|
|
|
|
assert.Nil(state.UpsertJobSummary(4, mock.JobSummary(allocAltNS.JobID)), "UpsertJobSummary")
|
|
|
|
assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(allocOtherNS.JobID)), "UpsertJobSummary")
|
|
|
|
allocs := []*structs.Allocation{allocDefaultNS, allocAltNS, allocOtherNS}
|
|
|
|
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, allocs), "UpsertAllocs")
|
|
|
|
|
|
|
|
// Create the namespace policy and tokens
|
|
|
|
validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
|
|
|
|
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
|
|
|
|
validNoNSToken := mock.CreatePolicyAndToken(t, state, 1003, "test-alt-valid", mock.NodePolicy(acl.PolicyRead))
|
|
|
|
invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
|
|
|
|
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
|
|
|
|
|
|
|
|
// Lookup the node without a token and expect failure
|
|
|
|
req := &structs.NodeSpecificRequest{
|
|
|
|
NodeID: node.ID,
|
|
|
|
QueryOptions: structs.QueryOptions{Region: "global"},
|
|
|
|
}
|
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
|
|
|
|
assert.NotNil(err, "RPC")
|
|
|
|
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try with a valid token for the default namespace
|
|
|
|
req.AuthToken = validDefaultToken.SecretID
|
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
|
|
|
|
assert.Len(resp.Allocs, 1)
|
|
|
|
assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try with a valid token for a namespace with no allocs on this node
|
|
|
|
req.AuthToken = validNoNSToken.SecretID
|
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
|
|
|
|
assert.Len(resp.Allocs, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try with a invalid token
|
|
|
|
req.AuthToken = invalidToken.SecretID
|
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
|
|
|
|
assert.NotNil(err, "RPC")
|
|
|
|
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try with a root token
|
|
|
|
req.AuthToken = root.SecretID
|
|
|
|
{
|
|
|
|
var resp structs.NodeAllocsResponse
|
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
|
|
|
|
assert.Len(resp.Allocs, 3)
|
|
|
|
for _, alloc := range resp.Allocs {
|
|
|
|
switch alloc.ID {
|
|
|
|
case allocDefaultNS.ID, allocAltNS.ID, allocOtherNS.ID:
|
|
|
|
// expected
|
|
|
|
default:
|
|
|
|
t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-29 14:29:52 +00:00
|
|
|
func TestClientEndpoint_GetClientAllocs(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2018-01-05 21:50:04 +00:00
|
|
|
require := require.New(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-01-29 14:29:52 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
2018-01-05 21:50:04 +00:00
|
|
|
// Check that we have no client connections
|
|
|
|
require.Empty(s1.connectedNodes())
|
|
|
|
|
2016-01-29 14:29:52 +00:00
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
2018-01-05 21:50:04 +00:00
|
|
|
state := s1.fsm.State()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 98, node))
|
2016-01-29 14:29:52 +00:00
|
|
|
|
|
|
|
// Inject fake evaluations
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.NodeID = node.ID
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
|
2016-01-29 14:29:52 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocs
|
|
|
|
get := &structs.NodeSpecificRequest{
|
|
|
|
NodeID: node.ID,
|
2016-08-16 06:11:57 +00:00
|
|
|
SecretID: node.SecretID,
|
2016-01-29 14:29:52 +00:00
|
|
|
QueryOptions: structs.QueryOptions{Region: "global"},
|
|
|
|
}
|
|
|
|
var resp2 structs.NodeClientAllocsResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp2.Index != 100 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
|
|
|
|
t.Fatalf("bad: %#v", resp2.Allocs)
|
|
|
|
}
|
|
|
|
|
2018-01-05 21:50:04 +00:00
|
|
|
// Check that we have the client connections
|
|
|
|
nodes := s1.connectedNodes()
|
|
|
|
require.Len(nodes, 1)
|
2018-01-12 23:57:07 +00:00
|
|
|
require.Contains(nodes, node.ID)
|
2018-01-05 21:50:04 +00:00
|
|
|
|
2016-08-16 06:11:57 +00:00
|
|
|
// Lookup node with bad SecretID
|
|
|
|
get.SecretID = "foobarbaz"
|
2016-01-29 14:29:52 +00:00
|
|
|
var resp3 structs.NodeClientAllocsResponse
|
2016-08-16 06:11:57 +00:00
|
|
|
err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp3)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "does not match") {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup non-existing node
|
2017-09-29 16:58:48 +00:00
|
|
|
get.NodeID = uuid.Generate()
|
2016-08-16 06:11:57 +00:00
|
|
|
var resp4 structs.NodeClientAllocsResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp4); err != nil {
|
2016-01-29 14:29:52 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-08-16 06:11:57 +00:00
|
|
|
if resp4.Index != 100 {
|
2016-01-29 14:29:52 +00:00
|
|
|
t.Fatalf("Bad index: %d %d", resp3.Index, 100)
|
|
|
|
}
|
2016-08-16 06:11:57 +00:00
|
|
|
if len(resp4.Allocs) != 0 {
|
2016-01-29 14:29:52 +00:00
|
|
|
t.Fatalf("unexpected node %#v", resp3.Allocs)
|
|
|
|
}
|
2018-01-05 21:50:04 +00:00
|
|
|
|
|
|
|
// Close the connection and check that we remove the client connections
|
|
|
|
require.Nil(codec.Close())
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
nodes := s1.connectedNodes()
|
|
|
|
return len(nodes) == 0, nil
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("should have no clients")
|
|
|
|
})
|
2016-01-29 14:29:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-01-29 14:29:52 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
node.CreateIndex = resp.Index
|
|
|
|
node.ModifyIndex = resp.Index
|
|
|
|
|
|
|
|
// Inject fake evaluations async
|
2017-10-25 18:06:25 +00:00
|
|
|
now := time.Now().UTC().UnixNano()
|
2016-01-29 14:29:52 +00:00
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.NodeID = node.ID
|
2017-10-25 18:06:25 +00:00
|
|
|
alloc.ModifyTime = now
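	// The stored alloc's ModifyTime is compared against this timestamp after
	// the blocking query below.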
|
2016-01-29 14:29:52 +00:00
|
|
|
state := s1.fsm.State()
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
2016-01-29 14:29:52 +00:00
|
|
|
start := time.Now()
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
|
2016-01-29 14:29:52 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Lookup the allocs in a blocking query
|
|
|
|
req := &structs.NodeSpecificRequest{
|
2016-08-16 06:11:57 +00:00
|
|
|
NodeID: node.ID,
|
|
|
|
SecretID: node.SecretID,
|
2016-01-29 14:29:52 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: "global",
|
|
|
|
MinQueryIndex: 50,
|
|
|
|
MaxQueryTime: time.Second,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var resp2 structs.NodeClientAllocsResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should block at least 100ms
|
|
|
|
if time.Since(start) < 100*time.Millisecond {
|
|
|
|
t.Fatalf("too fast")
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp2.Index != 100 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
|
|
|
|
t.Fatalf("bad: %#v", resp2.Allocs)
|
|
|
|
}
|
|
|
|
|
2017-10-30 18:20:44 +00:00
|
|
|
iter, err := state.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
getAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
allocs = append(allocs, raw.(*structs.Allocation))
|
|
|
|
}
|
|
|
|
return allocs
|
|
|
|
}
|
|
|
|
out := getAllocs(iter)
|
|
|
|
|
|
|
|
if len(out) != 1 {
|
|
|
|
t.Fatalf("Expected to get one allocation but got:%v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
if out[0].ModifyTime != now {
|
|
|
|
t.Fatalf("Invalid modify time %v", out[0].ModifyTime)
|
2017-10-25 18:06:25 +00:00
|
|
|
}
|
|
|
|
|
2016-01-29 14:29:52 +00:00
|
|
|
// Alloc updates fire watches
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
|
|
|
allocUpdate := mock.Alloc()
|
|
|
|
allocUpdate.NodeID = alloc.NodeID
|
|
|
|
allocUpdate.ID = alloc.ID
|
|
|
|
allocUpdate.ClientStatus = structs.AllocClientStatusRunning
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate})
|
2016-01-29 14:29:52 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
req.QueryOptions.MinQueryIndex = 150
|
|
|
|
var resp3 structs.NodeClientAllocsResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if time.Since(start) < 100*time.Millisecond {
|
|
|
|
t.Fatalf("too fast")
|
|
|
|
}
|
|
|
|
if resp3.Index != 200 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp3.Index, 200)
|
|
|
|
}
|
|
|
|
if len(resp3.Allocs) != 1 || resp3.Allocs[alloc.ID] != 200 {
|
|
|
|
t.Fatalf("bad: %#v", resp3.Allocs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 23:59:37 +00:00
|
|
|
func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) {
|
|
|
|
t.Parallel()
|
2017-10-27 16:50:10 +00:00
|
|
|
assert := assert.New(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-10-26 23:59:37 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
2017-10-27 16:50:10 +00:00
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
|
2017-10-26 23:59:37 +00:00
|
|
|
node.CreateIndex = resp.Index
|
|
|
|
node.ModifyIndex = resp.Index
|
|
|
|
|
|
|
|
// Inject fake allocations async
|
|
|
|
alloc1 := mock.Alloc()
|
|
|
|
alloc1.NodeID = node.ID
|
|
|
|
alloc2 := mock.Alloc()
|
|
|
|
alloc2.NodeID = node.ID
|
|
|
|
state := s1.fsm.State()
|
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
|
|
|
|
start := time.Now()
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
2020-10-19 13:30:15 +00:00
|
|
|
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1, alloc2}))
|
2017-10-26 23:59:37 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Lookup the allocs in a blocking query
|
|
|
|
req := &structs.NodeSpecificRequest{
|
|
|
|
NodeID: node.ID,
|
|
|
|
SecretID: node.SecretID,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: "global",
|
|
|
|
MinQueryIndex: 50,
|
|
|
|
MaxQueryTime: time.Second,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var resp2 structs.NodeClientAllocsResponse
|
2017-10-27 16:50:10 +00:00
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2))
|
2017-10-26 23:59:37 +00:00
|
|
|
|
|
|
|
// Should block at least 100ms
|
|
|
|
if time.Since(start) < 100*time.Millisecond {
|
|
|
|
t.Fatalf("too fast")
|
|
|
|
}
|
|
|
|
|
2017-10-27 16:50:10 +00:00
|
|
|
assert.EqualValues(100, resp2.Index)
|
|
|
|
if assert.Len(resp2.Allocs, 2) {
|
|
|
|
assert.EqualValues(100, resp2.Allocs[alloc1.ID])
|
2017-10-26 23:59:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Delete an allocation
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
2017-10-27 16:50:10 +00:00
|
|
|
assert.Nil(state.DeleteEval(200, nil, []string{alloc2.ID}))
|
2017-10-26 23:59:37 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
req.QueryOptions.MinQueryIndex = 150
|
|
|
|
var resp3 structs.NodeClientAllocsResponse
|
2017-10-27 16:50:10 +00:00
|
|
|
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3))
|
2017-10-26 23:59:37 +00:00
|
|
|
|
|
|
|
if time.Since(start) < 100*time.Millisecond {
|
|
|
|
t.Fatalf("too fast")
|
|
|
|
}
|
2020-10-08 18:27:52 +00:00
|
|
|
assert.EqualValues(200, resp3.Index)
|
2017-10-27 16:50:10 +00:00
|
|
|
if assert.Len(resp3.Allocs, 1) {
|
|
|
|
assert.EqualValues(100, resp3.Allocs[alloc1.ID])
|
2017-10-26 23:59:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-10 00:23:26 +00:00
|
|
|
// A MigrateToken should not be created if an allocation shares the same node
// with its previous allocation
func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Inject fake allocations
	prevAlloc := mock.Alloc()
	prevAlloc.NodeID = node.ID
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	alloc.PreviousAllocation = prevAlloc.ID
	alloc.DesiredStatus = structs.AllocClientStatusComplete
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{prevAlloc, alloc})
	assert.Nil(err)

	// Lookup the allocs
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.NodeClientAllocsResponse

	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2)
	assert.Nil(err)

	assert.Equal(uint64(100), resp2.Index)
	assert.Equal(2, len(resp2.Allocs))
	assert.Equal(uint64(100), resp2.Allocs[alloc.ID])
	assert.Equal(0, len(resp2.MigrateTokens))
}
|
|
|
|
|
2015-08-23 02:17:49 +00:00
|
|
|
func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2015-08-23 02:17:49 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
2015-09-14 01:18:40 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
2015-08-23 02:17:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
2015-09-07 03:31:32 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
|
2015-08-23 02:17:49 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
node.CreateIndex = resp.Index
|
|
|
|
node.ModifyIndex = resp.Index
|
|
|
|
|
|
|
|
// Inject fake evaluations async
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
state := s1.fsm.State()
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
2015-08-23 02:17:49 +00:00
|
|
|
start := time.Now()
|
2015-10-30 02:00:02 +00:00
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
|
2015-08-23 02:17:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2015-10-30 02:00:02 +00:00
|
|
|
})
|
2015-08-23 02:17:49 +00:00
|
|
|
|
|
|
|
// Lookup the allocs in a blocking query
|
2015-10-30 02:00:02 +00:00
|
|
|
req := &structs.NodeSpecificRequest{
|
2015-08-23 02:17:49 +00:00
|
|
|
NodeID: node.ID,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2015-09-14 01:18:40 +00:00
|
|
|
Region: "global",
|
2015-08-23 02:17:49 +00:00
|
|
|
MinQueryIndex: 50,
|
|
|
|
MaxQueryTime: time.Second,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var resp2 structs.NodeAllocsResponse
|
2015-10-30 02:00:02 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp2); err != nil {
|
2015-08-23 02:17:49 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should block at least 100ms
|
|
|
|
if time.Since(start) < 100*time.Millisecond {
|
|
|
|
t.Fatalf("too fast")
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp2.Index != 100 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
|
|
|
|
t.Fatalf("bad: %#v", resp2.Allocs)
|
|
|
|
}
|
2015-10-30 02:00:02 +00:00
|
|
|
|
|
|
|
// Alloc updates fire watches
|
|
|
|
time.AfterFunc(100*time.Millisecond, func() {
|
|
|
|
allocUpdate := mock.Alloc()
|
|
|
|
allocUpdate.NodeID = alloc.NodeID
|
|
|
|
allocUpdate.ID = alloc.ID
|
|
|
|
allocUpdate.ClientStatus = structs.AllocClientStatusRunning
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
|
2020-10-02 20:13:49 +00:00
|
|
|
err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate})
|
2015-10-30 02:00:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
req.QueryOptions.MinQueryIndex = 150
|
|
|
|
var resp3 structs.NodeAllocsResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp3); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if time.Since(start) < 100*time.Millisecond {
|
|
|
|
t.Fatalf("too fast")
|
|
|
|
}
|
|
|
|
if resp3.Index != 200 {
|
|
|
|
t.Fatalf("Bad index: %d %d", resp3.Index, 200)
|
|
|
|
}
|
|
|
|
if len(resp3.Allocs) != 1 || resp3.Allocs[0].ClientStatus != structs.AllocClientStatusRunning {
|
|
|
|
t.Fatalf("bad: %#v", resp3.Allocs[0])
|
|
|
|
}
|
2015-08-23 02:17:49 +00:00
|
|
|
}
|
|
|
|
|
2015-08-26 01:12:51 +00:00
|
|
|
func TestClientEndpoint_UpdateAlloc(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, func(c *Config) {
|
2018-04-10 20:30:15 +00:00
|
|
|
// Disabling scheduling in this test so that we can
|
|
|
|
// ensure that the state store doesn't accumulate more evals
|
|
|
|
// than what we expect the unit test to add
|
|
|
|
c.NumSchedulers = 0
|
|
|
|
})
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
defer cleanupS1()
|
2015-08-26 01:12:51 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
2018-01-20 02:48:37 +00:00
|
|
|
require := require.New(t)
|
2015-08-26 01:12:51 +00:00
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
2015-09-14 01:18:40 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
2015-08-26 01:12:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
2015-09-07 03:31:32 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
|
2015-08-26 01:12:51 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-01-17 22:24:57 +00:00
|
|
|
state := s1.fsm.State()
|
|
|
|
// Inject mock job
|
|
|
|
job := mock.Job()
|
2018-04-10 19:00:07 +00:00
|
|
|
job.ID = "mytestjob"
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertJob(structs.MsgTypeTestSetup, 101, job)
|
2018-01-20 02:48:37 +00:00
|
|
|
require.Nil(err)
|
2018-01-17 22:24:57 +00:00
|
|
|
|
2018-01-16 14:55:35 +00:00
|
|
|
// Inject fake allocations
|
2015-08-26 01:12:51 +00:00
|
|
|
alloc := mock.Alloc()
|
2018-01-17 22:24:57 +00:00
|
|
|
alloc.JobID = job.ID
|
2015-08-26 01:12:51 +00:00
|
|
|
alloc.NodeID = node.ID
|
2018-01-20 02:48:37 +00:00
|
|
|
err = state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
|
|
|
require.Nil(err)
|
2018-01-17 22:24:57 +00:00
|
|
|
alloc.TaskGroup = job.TaskGroups[0].Name
|
2018-04-09 19:05:31 +00:00
|
|
|
|
|
|
|
alloc2 := mock.Alloc()
|
|
|
|
alloc2.JobID = job.ID
|
|
|
|
alloc2.NodeID = node.ID
|
|
|
|
err = state.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID))
|
2018-01-20 02:48:37 +00:00
|
|
|
require.Nil(err)
|
2018-04-09 19:05:31 +00:00
|
|
|
alloc2.TaskGroup = job.TaskGroups[0].Name
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc, alloc2})
|
2018-04-09 19:05:31 +00:00
|
|
|
require.Nil(err)
|
|
|
|
|
|
|
|
// Attempt updates of more than one alloc for the same job
|
|
|
|
clientAlloc1 := new(structs.Allocation)
|
|
|
|
*clientAlloc1 = *alloc
|
|
|
|
clientAlloc1.ClientStatus = structs.AllocClientStatusFailed
|
2018-01-16 14:55:35 +00:00
|
|
|
|
2018-04-09 19:05:31 +00:00
|
|
|
clientAlloc2 := new(structs.Allocation)
|
|
|
|
*clientAlloc2 = *alloc2
|
|
|
|
clientAlloc2.ClientStatus = structs.AllocClientStatusFailed
|
2015-08-26 01:12:51 +00:00
|
|
|
|
|
|
|
// Update the alloc
|
|
|
|
update := &structs.AllocUpdateRequest{
|
2018-04-09 19:05:31 +00:00
|
|
|
Alloc: []*structs.Allocation{clientAlloc1, clientAlloc2},
|
2015-09-14 01:18:40 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
2015-08-26 01:12:51 +00:00
|
|
|
}
|
|
|
|
var resp2 structs.NodeAllocsResponse
|
2016-02-22 02:51:34 +00:00
|
|
|
start := time.Now()
|
2018-01-17 22:24:57 +00:00
|
|
|
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2)
|
2018-01-23 17:45:13 +00:00
|
|
|
require.Nil(err)
|
2020-02-03 16:59:00 +00:00
|
|
|
require.NotEqual(uint64(0), resp2.Index)
|
2018-01-17 22:24:57 +00:00
|
|
|
|
2016-02-22 02:51:34 +00:00
|
|
|
if diff := time.Since(start); diff < batchUpdateInterval {
|
|
|
|
t.Fatalf("too fast: %v", diff)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the alloc
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := state.AllocByID(ws, alloc.ID)
|
2018-01-23 17:45:13 +00:00
|
|
|
require.Nil(err)
|
|
|
|
require.Equal(structs.AllocClientStatusFailed, out.ClientStatus)
|
|
|
|
require.True(out.ModifyTime > 0)
|
2017-10-25 18:06:25 +00:00
|
|
|
|
2018-04-09 19:05:31 +00:00
|
|
|
// Assert that exactly one eval with TriggeredBy EvalTriggerRetryFailedAlloc exists
|
2018-01-17 22:24:57 +00:00
|
|
|
evaluations, err := state.EvalsByJob(ws, job.Namespace, job.ID)
|
2018-01-23 17:45:13 +00:00
|
|
|
require.Nil(err)
|
|
|
|
require.True(len(evaluations) != 0)
|
2018-04-09 19:05:31 +00:00
|
|
|
foundCount := 0
|
2018-01-20 19:02:58 +00:00
|
|
|
for _, resultEval := range evaluations {
|
2018-04-10 20:30:15 +00:00
|
|
|
if resultEval.TriggeredBy == structs.EvalTriggerRetryFailedAlloc && resultEval.WaitUntil.IsZero() {
|
2018-04-09 19:05:31 +00:00
|
|
|
foundCount++
|
2018-01-20 19:02:58 +00:00
|
|
|
}
|
|
|
|
}
|
2018-04-09 19:05:31 +00:00
|
|
|
require.Equal(1, foundCount, "Should create exactly one eval for failed allocs")
|
2018-01-20 19:02:58 +00:00
|
|
|
|
2016-02-22 02:51:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestClientEndpoint_BatchUpdate(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-02-22 02:51:34 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Inject fake evaluations
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
state := s1.fsm.State()
|
2016-07-25 21:11:32 +00:00
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
|
2016-02-22 02:51:34 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt update
|
|
|
|
clientAlloc := new(structs.Allocation)
|
|
|
|
*clientAlloc = *alloc
|
|
|
|
clientAlloc.ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
|
|
|
|
// Call to do the batch update
|
2018-03-06 22:37:37 +00:00
|
|
|
bf := structs.NewBatchFuture()
|
2018-01-03 22:59:52 +00:00
|
|
|
endpoint := s1.staticEndpoints.Node
|
2018-01-16 14:55:35 +00:00
|
|
|
endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc}, nil)
|
2016-02-22 02:51:34 +00:00
|
|
|
if err := bf.Wait(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if bf.Index() == 0 {
|
|
|
|
t.Fatalf("Bad index: %d", bf.Index())
|
|
|
|
}
|
2015-08-26 01:12:51 +00:00
|
|
|
|
|
|
|
// Lookup the alloc
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := state.AllocByID(ws, alloc.ID)
|
2015-08-26 01:12:51 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out.ClientStatus != structs.AllocClientStatusFailed {
|
|
|
|
t.Fatalf("Bad: %#v", out)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
|
2017-07-23 22:04:38 +00:00
|
|
|
t.Parallel()
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-08-22 20:57:27 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
node := mock.Node()
|
|
|
|
reg := &structs.NodeRegisterRequest{
|
|
|
|
Node: node,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the response
|
|
|
|
var resp structs.GenericResponse
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Swap the servers Vault Client
|
|
|
|
tvc := &TestVaultClient{}
|
|
|
|
s1.vault = tvc
|
|
|
|
|
|
|
|
// Inject fake allocation and vault accessor
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
state := s1.fsm.State()
|
|
|
|
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
|
2020-10-19 13:30:15 +00:00
|
|
|
if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}); err != nil {
|
2016-08-22 20:57:27 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
va := mock.VaultAccessor()
|
|
|
|
va.NodeID = node.ID
|
|
|
|
va.AllocID = alloc.ID
|
|
|
|
if err := state.UpsertVaultAccessor(101, []*structs.VaultAccessor{va}); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-01-16 14:55:35 +00:00
|
|
|
// Inject mock job
|
|
|
|
job := mock.Job()
|
|
|
|
job.ID = alloc.JobID
|
2020-10-19 13:30:15 +00:00
|
|
|
err := state.UpsertJob(structs.MsgTypeTestSetup, 101, job)
|
2018-01-16 14:55:35 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
// Attempt update
|
|
|
|
clientAlloc := new(structs.Allocation)
|
|
|
|
*clientAlloc = *alloc
|
|
|
|
clientAlloc.ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
|
|
|
|
// Update the alloc
|
|
|
|
update := &structs.AllocUpdateRequest{
|
|
|
|
Alloc: []*structs.Allocation{clientAlloc},
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
var resp2 structs.NodeAllocsResponse
|
|
|
|
start := time.Now()
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if resp2.Index == 0 {
|
|
|
|
t.Fatalf("Bad index: %d", resp2.Index)
|
|
|
|
}
|
|
|
|
if diff := time.Since(start); diff < batchUpdateInterval {
|
|
|
|
t.Fatalf("too fast: %v", diff)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the alloc
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := state.AllocByID(ws, alloc.ID)
|
2016-08-22 20:57:27 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out.ClientStatus != structs.AllocClientStatusFailed {
|
|
|
|
t.Fatalf("Bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
if l := len(tvc.RevokedTokens); l != 1 {
|
|
|
|
t.Fatalf("Deregister revoked %d tokens; want 1", l)
|
|
|
|
}
|
|
|
|
}
func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// Inject fake evaluations
	alloc := mock.Alloc()
	state := s1.fsm.State()
	state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Inject a fake system job.
	job := mock.SystemJob()
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 3, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create some evaluations
	ids, index, err := s1.staticEndpoints.Node.createNodeEvals(alloc.NodeID, 1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index == 0 {
		t.Fatalf("bad: %d", index)
	}
	if len(ids) != 2 {
		t.Fatalf("bad: %s", ids)
	}

	// Lookup the evaluations
	ws := memdb.NewWatchSet()
	evalByType := make(map[string]*structs.Evaluation, 2)
	for _, id := range ids {
		eval, err := state.EvalByID(ws, id)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if eval == nil {
			t.Fatalf("expected eval")
		}

		if old, ok := evalByType[eval.Type]; ok {
			t.Fatalf("multiple evals of the same type: %v and %v", old, eval)
		}

		evalByType[eval.Type] = eval
	}

	if len(evalByType) != 2 {
		t.Fatalf("Expected a service and system job; got %#v", evalByType)
	}

	// Ensure the evals are correct.
	for schedType, eval := range evalByType {
		expPriority := alloc.Job.Priority
		expJobID := alloc.JobID
		if schedType == "system" {
			expPriority = job.Priority
			expJobID = job.ID
		}

		t.Logf("checking eval: %v", pretty.Sprint(eval))
		require.Equal(t, index, eval.CreateIndex)
		require.Equal(t, structs.EvalTriggerNodeUpdate, eval.TriggeredBy)
		require.Equal(t, alloc.NodeID, eval.NodeID)
		require.Equal(t, uint64(1), eval.NodeModifyIndex)
		switch eval.Status {
		case structs.EvalStatusPending, structs.EvalStatusComplete:
			// success
		default:
			t.Fatalf("expected pending or complete, found %v", eval.Status)
		}
		require.Equal(t, expPriority, eval.Priority)
		require.Equal(t, expJobID, eval.JobID)
		require.NotZero(t, eval.CreateTime)
		require.NotZero(t, eval.ModifyTime)
	}
}

// TestClientEndpoint_CreateNodeEvals_MultipleNSes asserts that evals are made
// for all jobs across namespaces
func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	state := s1.fsm.State()

	idx := uint64(3)
	ns1 := mock.Namespace()
	err := state.UpsertNamespaces(idx, []*structs.Namespace{ns1})
	require.NoError(t, err)
	idx++

	node := mock.Node()
	err = state.UpsertNode(structs.MsgTypeTestSetup, idx, node)
	require.NoError(t, err)
	idx++

	// Inject a fake system job.
	defaultJob := mock.SystemJob()
	err = state.UpsertJob(structs.MsgTypeTestSetup, idx, defaultJob)
	require.NoError(t, err)
	idx++

	nsJob := mock.SystemJob()
	nsJob.ID = defaultJob.ID
	nsJob.Namespace = ns1.Name
	err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nsJob)
	require.NoError(t, err)
	idx++

	// Create some evaluations
	evalIDs, index, err := s1.staticEndpoints.Node.createNodeEvals(node.ID, 1)
	require.NoError(t, err)
	require.NotZero(t, index)
	require.Len(t, evalIDs, 2)

	byNS := map[string]*structs.Evaluation{}
	for _, evalID := range evalIDs {
		eval, err := state.EvalByID(nil, evalID)
		require.NoError(t, err)
		byNS[eval.Namespace] = eval
	}

	require.Len(t, byNS, 2)

	defaultNSEval := byNS[defaultJob.Namespace]
	require.NotNil(t, defaultNSEval)
	require.Equal(t, defaultJob.ID, defaultNSEval.JobID)
	require.Equal(t, defaultJob.Namespace, defaultNSEval.Namespace)

	otherNSEval := byNS[nsJob.Namespace]
	require.NotNil(t, otherNSEval)
	require.Equal(t, nsJob.ID, otherNSEval.JobID)
	require.Equal(t, nsJob.Namespace, otherNSEval.Namespace)
}

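// TestClientEndpoint_Evaluate asserts that Node.Evaluate creates a single
// node-update evaluation for the node's allocation and populates its fields.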
func TestClientEndpoint_Evaluate(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Inject fake evaluations
	alloc := mock.Alloc()
	node := mock.Node()
	node.ID = alloc.NodeID
	state := s1.fsm.State()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 1, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID))
	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Re-evaluate
	req := &structs.NodeEvaluateRequest{
		NodeID:       alloc.NodeID,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Ensure exactly one evaluation was created
	ids := resp.EvalIDs
	if len(ids) != 1 {
		t.Fatalf("bad: %s", ids)
	}

	// Lookup the evaluation
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, ids[0])
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.Index {
		t.Fatalf("index mis-match")
	}

	if eval.Priority != alloc.Job.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != alloc.Job.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != alloc.JobID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.NodeID != alloc.NodeID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.NodeModifyIndex != 1 {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.CreateTime == 0 {
		t.Fatalf("CreateTime is unset: %#v", eval)
	}
	if eval.ModifyTime == 0 {
		t.Fatalf("ModifyTime is unset: %#v", eval)
	}
}

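// TestClientEndpoint_Evaluate_ACL checks that Node.Evaluate requires a token
// with node write permission: missing and read-only tokens are rejected, while
// a write token and the root token are allowed.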
func TestClientEndpoint_Evaluate_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	assert := assert.New(t)

	// Create the node with an alloc
	alloc := mock.Alloc()
	node := mock.Node()
	node.ID = alloc.NodeID
	state := s1.fsm.State()

	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary")
	assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}), "UpsertAllocs")

	// Create the policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))

	// Re-evaluate without a token and expect failure
	req := &structs.NodeEvaluateRequest{
		NodeID:       alloc.NodeID,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	{
		var resp structs.NodeUpdateResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
		assert.NotNil(err, "RPC")
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a valid token
	req.AuthToken = validToken.SecretID
	{
		var resp structs.NodeUpdateResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
	}

	// Try with an invalid token
	req.AuthToken = invalidToken.SecretID
	{
		var resp structs.NodeUpdateResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
		assert.NotNil(err, "RPC")
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a root token
	req.AuthToken = root.SecretID
	{
		var resp structs.NodeUpdateResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
	}
}

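// TestClientEndpoint_ListNodes registers a node and verifies that Node.List
// returns it, includes HostVolumes in the stub, omits node resources by
// default, and supports prefix lookups.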
func TestClientEndpoint_ListNodes(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{
		"foo": {
			Name:     "foo",
			Path:     "/",
			ReadOnly: true,
		},
	}
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Lookup the node
	get := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.NodeListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}

	require.Len(t, resp2.Nodes, 1)
	require.Equal(t, node.ID, resp2.Nodes[0].ID)

	// #7344 - Assert HostVolumes are included in stub
	require.Equal(t, node.HostVolumes, resp2.Nodes[0].HostVolumes)

	// #9055 - Assert Resources are *not* included by default
	require.Nil(t, resp2.Nodes[0].NodeResources)
	require.Nil(t, resp2.Nodes[0].ReservedResources)

	// Lookup the node with prefix
	get = &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{Region: "global", Prefix: node.ID[:4]},
	}
	var resp3 structs.NodeListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp3); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp3.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp3.Index, resp2.Index)
	}

	if len(resp3.Nodes) != 1 {
		t.Fatalf("bad: %#v", resp3.Nodes)
	}
	if resp3.Nodes[0].ID != node.ID {
		t.Fatalf("bad: %#v", resp3.Nodes[0])
	}
}

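// TestClientEndpoint_ListNodes_Fields verifies that requesting the Resources
// stub field causes Node.List to include node and reserved resources.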
func TestClientEndpoint_ListNodes_Fields(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Lookup the node with fields
	get := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{Region: "global"},
		Fields: &structs.NodeStubFields{
			Resources: true,
		},
	}
	var resp2 structs.NodeListResponse
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2))
	require.Equal(t, resp.Index, resp2.Index)
	require.Len(t, resp2.Nodes, 1)
	require.Equal(t, node.ID, resp2.Nodes[0].ID)
	require.NotNil(t, resp2.Nodes[0].NodeResources)
	require.NotNil(t, resp2.Nodes[0].ReservedResources)
}

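// TestClientEndpoint_ListNodes_ACL checks Node.List against missing, read,
// deny, and root tokens.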
func TestClientEndpoint_ListNodes_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	assert := assert.New(t)

	// Create the node
	node := mock.Node()
	state := s1.fsm.State()
	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")

	// Create the namespace policy and tokens
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))

	// Lookup the node without a token and expect failure
	req := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	{
		var resp structs.NodeListResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
		assert.NotNil(err, "RPC")
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a valid token
	req.AuthToken = validToken.SecretID
	{
		var resp structs.NodeListResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
		assert.Equal(node.ID, resp.Nodes[0].ID)
	}

	// Try with an invalid token
	req.AuthToken = invalidToken.SecretID
	{
		var resp structs.NodeListResponse
		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
		assert.NotNil(err, "RPC")
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a root token
	req.AuthToken = root.SecretID
	{
		var resp structs.NodeListResponse
		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
		assert.Equal(node.ID, resp.Nodes[0].ID)
	}
}

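// TestClientEndpoint_ListNodes_Blocking verifies that blocking queries on
// Node.List are unblocked by node upserts, drain updates, status updates,
// and node deletes.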
func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Disable drainer to prevent drain from completing during test
	s1.nodeDrainer.SetEnabled(false, nil)

	// Create the node
	node := mock.Node()

	// Node upsert triggers watches
	errCh := make(chan error, 1)
	timer := time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	})
	defer timer.Stop()

	req := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 1,
		},
	}
	start := time.Now()
	var resp structs.NodeListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 2 {
		t.Fatalf("Bad index: %d %d", resp.Index, 2)
	}
	if len(resp.Nodes) != 1 || resp.Nodes[0].ID != node.ID {
		t.Fatalf("bad: %#v", resp.Nodes)
	}

	// Node drain updates trigger watches.
	time.AfterFunc(100*time.Millisecond, func() {
		s := &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Second,
			},
		}
		errCh <- state.UpdateNodeDrain(structs.MsgTypeTestSetup, 3, node.ID, s, false, 0, nil, nil, "")
	})

	req.MinQueryIndex = 2
	var resp2 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 3 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 3)
	}
	if len(resp2.Nodes) != 1 || !resp2.Nodes[0].Drain {
		t.Fatalf("bad: %#v", resp2.Nodes)
	}

	// Node status update triggers watches
	time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.UpdateNodeStatus(structs.MsgTypeTestSetup, 40, node.ID, structs.NodeStatusDown, 0, nil)
	})

	req.MinQueryIndex = 38
	var resp3 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp3); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp3)
	}
	if resp3.Index != 40 {
		t.Fatalf("Bad index: %d %d", resp3.Index, 40)
	}
	if len(resp3.Nodes) != 1 || resp3.Nodes[0].Status != structs.NodeStatusDown {
		t.Fatalf("bad: %#v", resp3.Nodes)
	}

	// Node delete triggers watches.
	time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.DeleteNode(structs.MsgTypeTestSetup, 50, []string{node.ID})
	})

	req.MinQueryIndex = 45
	var resp4 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp4); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp4)
	}
	if resp4.Index != 50 {
		t.Fatalf("Bad index: %d %d", resp4.Index, 50)
	}
	if len(resp4.Nodes) != 0 {
		t.Fatalf("bad: %#v", resp4.Nodes)
	}
}

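// TestClientEndpoint_DeriveVaultToken_Bad covers the error paths of
// Node.DeriveVaultToken: a bad SecretID, an alloc not running on the node, a
// job with no Vault policies, and a terminal allocation.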
func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an alloc
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: uuid.Generate(),
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "SecretID mismatch") {
		t.Fatalf("Expected SecretID mismatch: %v", resp.Error)
	}

	// Put the correct SecretID
	req.SecretID = node.SecretID

	// Now we should get an error about the allocation not running on the node
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "not running on Node") {
		t.Fatalf("Expected not running on node error: %v", resp.Error)
	}

	// Update to be running on the node
	alloc.NodeID = node.ID
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now we should get an error about the job not needing any Vault secrets
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "does not require") {
		t.Fatalf("Expected no policies error: %v", resp.Error)
	}

	// Update to be terminal
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 5, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now we should get an error about the allocation being terminal
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "terminal") {
		t.Fatalf("Expected terminal allocation error: %v", resp.Error)
	}
}

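// TestClientEndpoint_DeriveVaultToken exercises the happy path for deriving a
// wrapped Vault token and checks that a VaultAccessor is written to the state
// store.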
func TestClientEndpoint_DeriveVaultToken(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable Vault and allow unauthenticated requests
	tr := true
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &tr

	// Replace the Vault Client on the server
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an allocation that requires Vault policies
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Return a secret for the task
	token := uuid.Generate()
	accessor := uuid.Generate()
	ttl := 10
	secret := &vapi.Secret{
		WrapInfo: &vapi.SecretWrapInfo{
			Token:           token,
			WrappedAccessor: accessor,
			TTL:             ttl,
		},
	}
	tvc.SetCreateTokenSecret(alloc.ID, task.Name, secret)

	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: node.SecretID,
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error != nil {
		t.Fatalf("bad: %v", resp.Error)
	}

	// Check the state store and ensure that we created a VaultAccessor
	ws := memdb.NewWatchSet()
	va, err := state.VaultAccessor(ws, accessor)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if va == nil {
		t.Fatalf("bad: %v", va)
	}

	if va.CreateIndex == 0 {
		t.Fatalf("bad: %v", va)
	}

	va.CreateIndex = 0
	expected := &structs.VaultAccessor{
		AllocID:     alloc.ID,
		Task:        task.Name,
		NodeID:      alloc.NodeID,
		Accessor:    accessor,
		CreationTTL: ttl,
	}

	if !reflect.DeepEqual(expected, va) {
		t.Fatalf("Got %#v; want %#v", va, expected)
	}
}

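// TestClientEndpoint_DeriveVaultToken_VaultError asserts that a recoverable
// error from the Vault client is propagated back to the caller as a
// recoverable response error.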
func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable Vault and allow unauthenticated requests
	tr := true
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &tr

	// Replace the Vault Client on the server
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an allocation that requires Vault policies
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Return an error when creating the token
	tvc.SetCreateTokenError(alloc.ID, task.Name,
		structs.NewRecoverableError(fmt.Errorf("recover"), true))

	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: node.SecretID,
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !resp.Error.IsRecoverable() {
		t.Fatalf("bad: %+v", resp.Error)
	}
}

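// TestClientEndpoint_taskUsesConnect checks taskUsesConnect against a Connect
// proxy task, a non-Connect task, and a nil task.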
func TestClientEndpoint_taskUsesConnect(t *testing.T) {
	t.Parallel()

	try := func(t *testing.T, task *structs.Task, exp bool) {
		result := taskUsesConnect(task)
		require.Equal(t, exp, result)
	}

	t.Run("task uses connect", func(t *testing.T) {
		try(t, &structs.Task{
			// see nomad.newConnectSidecarTask for how this works
			Name: "connect-proxy-myservice",
			Kind: "connect-proxy:myservice",
		}, true)
	})

	t.Run("task does not use connect", func(t *testing.T) {
		try(t, &structs.Task{
			Name: "mytask",
			Kind: "incorrect:mytask",
		}, false)
	})

	t.Run("task does not exist", func(t *testing.T) {
		try(t, nil, false)
	})
}

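// TestClientEndpoint_tasksNotUsingConnect checks that connectTasks splits the
// requested task names into non-Connect tasks and Connect (proxy or native)
// tasks.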
func TestClientEndpoint_tasksNotUsingConnect(t *testing.T) {
	t.Parallel()

	taskGroup := &structs.TaskGroup{
		Name: "testgroup",
		Tasks: []*structs.Task{{
			Name: "connect-proxy-service1",
			Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
		}, {
			Name: "incorrect-task3",
			Kind: "incorrect:task3",
		}, {
			Name: "connect-proxy-service4",
			Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service4"),
		}, {
			Name: "incorrect-task5",
			Kind: "incorrect:task5",
		}, {
			Name: "task6",
			Kind: structs.NewTaskKind(structs.ConnectNativePrefix, "service6"),
		}},
	}

	requestingTasks := []string{
		"connect-proxy-service1", // yes
		"task2",                  // does not exist
		"task3",                  // no
		"connect-proxy-service4", // yes
		"task5",                  // no
		"task6",                  // yes, native
	}

	notConnect, usingConnect := connectTasks(taskGroup, requestingTasks)

	notConnectExp := []string{"task2", "task3", "task5"}
	usingConnectExp := []connectTask{
		{TaskName: "connect-proxy-service1", TaskKind: "connect-proxy:service1"},
		{TaskName: "connect-proxy-service4", TaskKind: "connect-proxy:service4"},
		{TaskName: "task6", TaskKind: "connect-native:service6"},
	}

	require.Equal(t, notConnectExp, notConnect)
	require.Equal(t, usingConnectExp, usingConnect)
}

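// mutateConnectJob runs the jobConnectHook over the job so the Connect sidecar
// task is injected before the job is used in the tests below.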
func mutateConnectJob(t *testing.T, job *structs.Job) {
	var jch jobConnectHook
	_, warnings, err := jch.Mutate(job)
	require.Empty(t, warnings)
	require.NoError(t, err)
}

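// TestClientEndpoint_DeriveSIToken exercises the happy path for deriving a
// Consul Service Identity token and checks the SI token accessor stored for
// the node.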
func TestClientEndpoint_DeriveSIToken(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	s1, cleanupS1 := TestServer(t, nil) // already sets consul mocks
	defer cleanupS1()

	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Set allow unauthenticated (no operator token required)
	s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true)

	// Create the node
	node := mock.Node()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	r.NoError(err)

	// Create an alloc with a typical connect service (sidecar) defined
	alloc := mock.ConnectAlloc()
	alloc.NodeID = node.ID
	mutateConnectJob(t, alloc.Job) // appends sidecar task
	sidecarTask := alloc.Job.TaskGroups[0].Tasks[1]

	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
	r.NoError(err)

	request := &structs.DeriveSITokenRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		AllocID:      alloc.ID,
		Tasks:        []string{sidecarTask.Name},
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	var response structs.DeriveSITokenResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.DeriveSIToken", request, &response)
	r.NoError(err)
	r.Nil(response.Error)

	// Check the state store and ensure we created a Consul SI Token Accessor
	ws := memdb.NewWatchSet()
	accessors, err := state.SITokenAccessorsByNode(ws, node.ID)
	r.NoError(err)
	r.Equal(1, len(accessors))                                  // only asked for one
	r.Equal("connect-proxy-testconnect", accessors[0].TaskName) // set by the mock
	r.Equal(node.ID, accessors[0].NodeID)                       // should match
	r.Equal(alloc.ID, accessors[0].AllocID)                     // should match
	r.True(helper.IsUUID(accessors[0].AccessorID))              // should be set
	r.Greater(accessors[0].CreateIndex, uint64(3))              // more than 3rd
}

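// TestClientEndpoint_DeriveSIToken_ConsulError asserts that a recoverable
// Consul ACL error is returned to the caller as a recoverable response error.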
func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Set allow unauthenticated (no operator token required)
	s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true)

	// Create the node
	node := mock.Node()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	r.NoError(err)

	// Create an alloc with a typical connect service (sidecar) defined
	alloc := mock.ConnectAlloc()
	alloc.NodeID = node.ID
	mutateConnectJob(t, alloc.Job) // appends sidecar task
	sidecarTask := alloc.Job.TaskGroups[0].Tasks[1]

	// rejigger the server to use a broken mock consul
	mockACLsAPI := consul.NewMockACLsAPI(s1.logger)
	mockACLsAPI.SetError(structs.NewRecoverableError(errors.New("consul recoverable error"), true))
	m := NewConsulACLsAPI(mockACLsAPI, s1.logger, nil)
	s1.consulACLs = m

	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
	r.NoError(err)

	request := &structs.DeriveSITokenRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		AllocID:      alloc.ID,
		Tasks:        []string{sidecarTask.Name},
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	var response structs.DeriveSITokenResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.DeriveSIToken", request, &response)
	r.NoError(err)
	r.NotNil(response.Error)               // error should be set
	r.True(response.Error.IsRecoverable()) // and is recoverable
}

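// TestClientEndpoint_EmitEvents submits a node event via Node.EmitEvents and
// checks that it is appended to the node's event list in the state store.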
func TestClientEndpoint_EmitEvents(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// create a node that we can register our event to
	node := mock.Node()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	require.Nil(err)

	nodeEvent := &structs.NodeEvent{
		Message:   "Registration failed",
		Subsystem: "Server",
		Timestamp: time.Now(),
	}

	nodeEvents := map[string][]*structs.NodeEvent{node.ID: {nodeEvent}}
	req := structs.EmitNodeEventsRequest{
		NodeEvents:   nodeEvents,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	var resp structs.GenericResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.EmitEvents", &req, &resp)
	require.Nil(err)
	require.NotEqual(uint64(0), resp.Index)

	// Check for the node in the FSM
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.Nil(err)
	require.False(len(out.Events) < 2)
}

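// TestClientEndpoint_ShouldCreateNodeEval checks which node updates warrant a
// new node evaluation: spurious changes (secret ID, links, modify index) do
// not, while datacenter, attribute, meta, and driver changes do.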
func TestClientEndpoint_ShouldCreateNodeEval(t *testing.T) {
	t.Run("spurious changes don't require eval", func(t *testing.T) {
		n1 := mock.Node()
		n2 := n1.Copy()
		n2.SecretID = uuid.Generate()
		n2.Links["vault"] = "links don't get interpolated"
		n2.ModifyIndex++

		require.False(t, shouldCreateNodeEval(n1, n2))
	})

	positiveCases := []struct {
		name     string
		updateFn func(n *structs.Node)
	}{
		{
			"data center changes",
			func(n *structs.Node) { n.Datacenter += "u" },
		},
		{
			"attribute change",
			func(n *structs.Node) { n.Attributes["test.attribute"] = "something" },
		},
		{
			"meta change",
			func(n *structs.Node) { n.Meta["test.meta"] = "something" },
		},
		{
			"drivers health changed",
			func(n *structs.Node) { n.Drivers["exec"].Detected = false },
		},
		{
			"new drivers",
			func(n *structs.Node) {
				n.Drivers["newdriver"] = &structs.DriverInfo{
					Detected: true,
					Healthy:  true,
				}
			},
		},
	}

	for _, c := range positiveCases {
		t.Run(c.name, func(t *testing.T) {
			n1 := mock.Node()
			n2 := n1.Copy()
			c.updateFn(n2)

			require.Truef(t, shouldCreateNodeEval(n1, n2), "node changed but without node eval: %v", pretty.Diff(n1, n2))
		})
	}
}