// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package nomad

import (
	"context"
	"fmt"
	"testing"
	"time"

	log "github.com/hashicorp/go-hclog"
	memdb "github.com/hashicorp/go-memdb"
	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/shoenig/test/must"
	"github.com/shoenig/test/wait"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/drainer"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
)

// allocClientStateSimulator simulates the updates in state from the client.
// Service allocations that are new on the server get marked with healthy
// deployments, and service allocations that are DesiredStatus=stop on the
// server get updated with a terminal client status.
func allocClientStateSimulator(t *testing.T, errCh chan<- error, ctx context.Context,
	srv *Server, nodeID string, logger log.Logger) {

	codec := rpcClient(t, srv)
	store := srv.State()
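
	// Watch the node's allocations in a blocking-query loop; nindex tracks the
	// last index seen so each query only returns newer changes.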
	nindex := uint64(1)
	for {
		allocs, index, err := getNodeAllocs(ctx, store, nodeID, nindex)
		if err != nil {
			if err == context.Canceled {
				return
			}

			errCh <- fmt.Errorf("failed to get node allocs: %v", err)
			return
		}
		nindex = index

		// For each service alloc, simulate the client-side status transition
		var updates []*structs.Allocation
		now := time.Now()
		for _, alloc := range allocs {
			if alloc.Job.Type != structs.JobTypeService {
				continue
			}

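			// Mimic a live client: newly-running service allocs report a
			// healthy deployment, while allocs the server wants stopped or
			// evicted report a terminal client status.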
			switch alloc.DesiredStatus {
			case structs.AllocDesiredStatusRun:
				if alloc.DeploymentStatus.HasHealth() {
					continue // only update to healthy once
				}
				newAlloc := alloc.Copy()
				newAlloc.DeploymentStatus = &structs.AllocDeploymentStatus{
					Healthy:   pointer.Of(true),
					Timestamp: now,
				}
				updates = append(updates, newAlloc)
				logger.Trace("marking deployment health for alloc", "alloc_id", alloc.ID)

			case structs.AllocDesiredStatusStop, structs.AllocDesiredStatusEvict:
				if alloc.ClientStatus == structs.AllocClientStatusComplete {
					continue // only update to complete once
				}
				newAlloc := alloc.Copy()
				newAlloc.ClientStatus = structs.AllocClientStatusComplete
				updates = append(updates, newAlloc)
				logger.Trace("marking alloc complete", "alloc_id", alloc.ID)
			}

		}

		if len(updates) == 0 {
			continue
		}

		// Send the update
		req := &structs.AllocUpdateRequest{
			Alloc:        updates,
			WriteRequest: structs.WriteRequest{Region: "global"},
		}
		var resp structs.GenericResponse
		if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", req, &resp); err != nil {
			if ctx.Err() == context.Canceled {
				return
			}
			errCh <- err
		}
	}
}

// checkAllocPromoter is a small helper to return an error or nil from an error
// chan like the one given to the allocClientStateSimulator goroutine.
func checkAllocPromoter(errCh chan error) error {
	select {
	case err := <-errCh:
		return err
	default:
		return nil
	}
}

func getNodeAllocs(ctx context.Context, store *state.StateStore, nodeID string, index uint64) ([]*structs.Allocation, uint64, error) {
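	// Block until the allocs for this node change past the given index, or
	// until the context is canceled.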
	resp, index, err := store.BlockingQuery(getNodeAllocsImpl(nodeID), index, ctx)
	if err != nil {
		return nil, 0, err
	}
	if err := ctx.Err(); err != nil {
		return nil, 0, err
	}

	return resp.([]*structs.Allocation), index, nil
}

func getNodeAllocsImpl(nodeID string) func(ws memdb.WatchSet, store *state.StateStore) (interface{}, uint64, error) {
	return func(ws memdb.WatchSet, store *state.StateStore) (interface{}, uint64, error) {
		// Capture all the allocations
		allocs, err := store.AllocsByNode(ws, nodeID)
		if err != nil {
			return nil, 0, err
		}

		// Use the last index that affected the allocs table
		index, err := store.Index("allocs")
		if err != nil {
			return nil, index, err
		}

		return allocs, index, nil
	}
}

func TestDrainer_Simple_ServiceOnly(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create a node
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Create a job that runs on that node
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.JobRegisterResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Wait for the two allocations to be placed
	waitForPlacedAllocs(t, store, n1.ID, 2)

	// Create the second node
	n2 := mock.Node()
	nodeReg = &structs.NodeRegisterRequest{
		Node:         n2,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Drain the first node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Minute,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Setup client simulator
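	// (errCh is buffered with one slot per simulator goroutine so an error
	// report does not block)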
	errCh := make(chan error, 2)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go allocClientStateSimulator(t, errCh, ctx, srv, n1.ID, srv.logger)
	go allocClientStateSimulator(t, errCh, ctx, srv, n2.ID, srv.logger)

	// Wait for the allocs to be replaced
	waitForAllocsStop(t, store, n1.ID, nil)
	waitForPlacedAllocs(t, store, n2.ID, 2)

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, errCh, 3, "")
}

func TestDrainer_Simple_ServiceOnly_Deadline(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create a node
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Create a job that runs on it
	job := mock.Job()
	job.Update = *structs.DefaultUpdateStrategy
	job.Update.Stagger = 30 * time.Second
	job.TaskGroups[0].Count = 2
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.JobRegisterResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Wait for the two allocations to be placed
	waitForPlacedAllocs(t, store, n1.ID, 2)

	// Drain the node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 1 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Wait for the allocs to be stopped (but not replaced)
	waitForAllocsStop(t, store, n1.ID, nil)

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, nil, 3, drainer.NodeDrainEventDetailDeadlined)
}

func TestDrainer_DrainEmptyNode(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create an empty node
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Drain the node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Minute,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, nil, 3, "")
}

func TestDrainer_AllTypes_Deadline(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create a node
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Create a service job that runs on it
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.JobRegisterResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Create a system job
	sysjob := mock.SystemJob()
	req = &structs.JobRegisterRequest{
		Job: sysjob,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Create a batch job
	bjob := mock.BatchJob()
	bjob.TaskGroups[0].Count = 2
	req = &structs.JobRegisterRequest{
		Job: bjob,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Wait for all the allocations to be placed
	waitForPlacedAllocs(t, store, n1.ID, 5)

	// Create a second node
	n2 := mock.Node()
	nodeReg = &structs.NodeRegisterRequest{
		Node:         n2,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Drain the first node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 2 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Setup client simulator
	errCh := make(chan error, 2)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go allocClientStateSimulator(t, errCh, ctx, srv, n1.ID, srv.logger)
	go allocClientStateSimulator(t, errCh, ctx, srv, n2.ID, srv.logger)

	// Wait for allocs to be replaced
	finalAllocs := waitForAllocsStop(t, store, n1.ID, nil)
	waitForPlacedAllocs(t, store, n2.ID, 5)

	// Assert that the service finished before the batch and system
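	// (a lower final ModifyIndex means the alloc reached its stopped state at
	// an earlier Raft index)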
	var serviceMax, batchMax uint64 = 0, 0
	for _, alloc := range finalAllocs {
		if alloc.Job.Type == structs.JobTypeService && alloc.ModifyIndex > serviceMax {
			serviceMax = alloc.ModifyIndex
		} else if alloc.Job.Type == structs.JobTypeBatch && alloc.ModifyIndex > batchMax {
			batchMax = alloc.ModifyIndex
		}
	}
	must.Less(t, batchMax, serviceMax)

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, nil, 3, drainer.NodeDrainEventDetailDeadlined)
}

// Test that drain is unset when batch jobs naturally finish
func TestDrainer_AllTypes_NoDeadline(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create two nodes, registering the second later
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Create a service job
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.JobRegisterResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Create a system job
	sysjob := mock.SystemJob()
	req = &structs.JobRegisterRequest{
		Job: sysjob,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Create a batch job
	bjob := mock.BatchJob()
	bjob.TaskGroups[0].Count = 2
	req = &structs.JobRegisterRequest{
		Job: bjob,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)

	// Wait for all the allocations to be placed
	waitForPlacedAllocs(t, store, n1.ID, 5)

	// Create a second node
	n2 := mock.Node()
	nodeReg = &structs.NodeRegisterRequest{
		Node:         n2,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Drain the first node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 0 * time.Second, // Infinite
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Setup client simulator
	errCh := make(chan error, 2)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go allocClientStateSimulator(t, errCh, ctx, srv, n1.ID, srv.logger)
	go allocClientStateSimulator(t, errCh, ctx, srv, n2.ID, srv.logger)

	// Wait for the service allocs (only) to be stopped on the draining node
	must.Wait(t, wait.InitialSuccess(wait.ErrorFunc(func() error {
		allocs, err := store.AllocsByJob(nil, job.Namespace, job.ID, false)
		must.NoError(t, err)
		for _, alloc := range allocs {
			if alloc.NodeID != n1.ID {
				continue
			}
			if alloc.DesiredStatus != structs.AllocDesiredStatusStop {
				return fmt.Errorf("got desired status %v", alloc.DesiredStatus)
			}
		}
		return checkAllocPromoter(errCh)
	}),
		wait.Timeout(10*time.Second),
		wait.Gap(100*time.Millisecond),
	))

	// Mark the batch allocations as finished
	allocs, err := store.AllocsByJob(nil, job.Namespace, bjob.ID, false)
	must.NoError(t, err)

	var updates []*structs.Allocation
	for _, alloc := range allocs {
		newAlloc := alloc.Copy()
		newAlloc.ClientStatus = structs.AllocClientStatusComplete
		updates = append(updates, newAlloc)
	}

	batchDoneReq := &structs.AllocUpdateRequest{
		Alloc:        updates,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", batchDoneReq, &resp)
	must.NoError(t, err)

	// Wait for the service allocations to be replaced
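	// (2 migrated service allocs plus 1 system alloc; the completed batch
	// allocs are not rescheduled)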
	waitForPlacedAllocs(t, store, n2.ID, 3)

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, errCh, 3, "")
}

func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create a node
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Create a service job that runs on just one
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.JobRegisterResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)
	job.CreateIndex = resp.JobModifyIndex

	// Create a system job
	sysjob := mock.SystemJob()
	req = &structs.JobRegisterRequest{
		Job: sysjob,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)
	sysjob.CreateIndex = resp.JobModifyIndex

	// Create a batch job
	bjob := mock.BatchJob()
	bjob.TaskGroups[0].Count = 2
	req = &structs.JobRegisterRequest{
		Job: bjob,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
	must.Positive(t, resp.Index)
	bjob.CreateIndex = resp.JobModifyIndex

	// Wait for the allocations to be placed
	waitForPlacedAllocs(t, store, n1.ID, 5)

	// Create some old terminal allocs for each job that point at a non-existent
	// node to simulate it being on a GC'd node.
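	// The drain should still complete even though these allocs reference a
	// node that no longer exists.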
	var badAllocs []*structs.Allocation
	for _, job := range []*structs.Job{job, sysjob, bjob} {
		alloc := mock.Alloc()
		alloc.Namespace = job.Namespace
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = uuid.Generate()
		alloc.TaskGroup = job.TaskGroups[0].Name
		alloc.DesiredStatus = structs.AllocDesiredStatusStop
		alloc.ClientStatus = structs.AllocClientStatusComplete
		badAllocs = append(badAllocs, alloc)
	}
	must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1, badAllocs))

	// Create the second node
	n2 := mock.Node()
	nodeReg = &structs.NodeRegisterRequest{
		Node:         n2,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Drain the first node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 2 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Setup client simulator
	errCh := make(chan error, 2)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go allocClientStateSimulator(t, errCh, ctx, srv, n1.ID, srv.logger)
	go allocClientStateSimulator(t, errCh, ctx, srv, n2.ID, srv.logger)

	// Wait for the allocs to be replaced
	waitForAllocsStop(t, store, n1.ID, errCh)
	waitForPlacedAllocs(t, store, n2.ID, 5)

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, errCh, 3, drainer.NodeDrainEventDetailDeadlined)
}

// TestDrainer_MultipleNSes_ServiceOnly asserts that allocs are migrated from a
// draining node even when they belong to jobs in different namespaces that
// share the same job ID.
func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) {
	ci.Parallel(t)

	srv, cleanupSrv := TestServer(t, nil)
	defer cleanupSrv()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	store := srv.State()

	// Create a node
	n1 := mock.Node()
	nodeReg := &structs.NodeRegisterRequest{
		Node:         n1,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nodeResp structs.NodeUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	ns1, ns2 := mock.Namespace(), mock.Namespace()
	nses := []*structs.Namespace{ns1, ns2}
	nsReg := &structs.NamespaceUpsertRequest{
		Namespaces:   nses,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nsResp structs.GenericResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Namespace.UpsertNamespaces", nsReg, &nsResp))

	for _, ns := range nses {
		// Create a job for each namespace
		job := mock.Job()
		job.ID = "example"
		job.Name = "example"
		job.Namespace = ns.Name
		job.TaskGroups[0].Count = 1
		req := &structs.JobRegisterRequest{
			Job: job,
			WriteRequest: structs.WriteRequest{
				Region:    "global",
				Namespace: job.Namespace,
			},
		}

		// Fetch the response
		var resp structs.JobRegisterResponse
		must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
		must.Positive(t, resp.Index)
	}

	// Wait for the two allocations to be placed
	waitForPlacedAllocs(t, store, n1.ID, 2)

	// Create the second node
	n2 := mock.Node()
	nodeReg = &structs.NodeRegisterRequest{
		Node:         n2,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

	// Drain the first node
	drainReq := &structs.NodeUpdateDrainRequest{
		NodeID: n1.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Minute,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var drainResp structs.NodeDrainUpdateResponse
	must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

	// Setup client simulator
	errCh := make(chan error, 2)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go allocClientStateSimulator(t, errCh, ctx, srv, n1.ID, srv.logger)
	go allocClientStateSimulator(t, errCh, ctx, srv, n2.ID, srv.logger)

	// Wait for the allocs to be replaced
	waitForAllocsStop(t, store, n1.ID, errCh)
	waitForPlacedAllocs(t, store, n2.ID, 2)

	// Wait for the node drain to be marked complete with the events we expect
	waitForNodeDrainComplete(t, store, n1.ID, errCh, 3, "")
}

// Test that transitions to force drain work.
func TestDrainer_Batch_TransitionToForce(t *testing.T) {
	ci.Parallel(t)

	for _, inf := range []bool{true, false} {
		name := "Infinite"
		if !inf {
			name = "Deadline"
		}
		t.Run(name, func(t *testing.T) {
			srv, cleanupSrv := TestServer(t, nil)
			defer cleanupSrv()
			codec := rpcClient(t, srv)
			testutil.WaitForLeader(t, srv.RPC)
			store := srv.State()

			// Create a node
			n1 := mock.Node()
			nodeReg := &structs.NodeRegisterRequest{
				Node:         n1,
				WriteRequest: structs.WriteRequest{Region: "global"},
			}
			var nodeResp structs.NodeUpdateResponse
			must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", nodeReg, &nodeResp))

			// Create a batch job
			bjob := mock.BatchJob()
			bjob.TaskGroups[0].Count = 2
			req := &structs.JobRegisterRequest{
				Job: bjob,
				WriteRequest: structs.WriteRequest{
					Region:    "global",
					Namespace: bjob.Namespace,
				},
			}

			// Fetch the response
			var resp structs.JobRegisterResponse
			must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
			must.Positive(t, resp.Index)

			// Wait for the allocations to be placed
			waitForPlacedAllocs(t, store, n1.ID, 2)

			// Pick the deadline
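			// (zero means drain with no deadline; otherwise use a deadline
			// long enough that it cannot expire before we force the drain)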
			deadline := 0 * time.Second
			if !inf {
				deadline = 10 * time.Second
			}

			// Drain the node
			drainReq := &structs.NodeUpdateDrainRequest{
				NodeID: n1.ID,
				DrainStrategy: &structs.DrainStrategy{
					DrainSpec: structs.DrainSpec{
						Deadline: deadline,
					},
				},
				WriteRequest: structs.WriteRequest{Region: "global"},
			}
			var drainResp structs.NodeDrainUpdateResponse
			must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", drainReq, &drainResp))

			// Setup client simulator
			errCh := make(chan error, 1)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			go allocClientStateSimulator(t, errCh, ctx, srv, n1.ID, srv.logger)

			// Make sure the batch job isn't affected
			must.Wait(t, wait.ContinualSuccess(wait.ErrorFunc(func() error {
				if err := checkAllocPromoter(errCh); err != nil {
					return fmt.Errorf("check alloc promoter error: %v", err)
				}

				allocs, err := store.AllocsByNode(nil, n1.ID)
				must.NoError(t, err)
				for _, alloc := range allocs {
					if alloc.DesiredStatus != structs.AllocDesiredStatusRun {
						return fmt.Errorf("got status %v", alloc.DesiredStatus)
					}
				}
				if len(allocs) != 2 {
					return fmt.Errorf("expected 2 allocs but got %d", len(allocs))
				}
				return nil
			}),
				wait.Timeout(500*time.Millisecond),
				wait.Gap(50*time.Millisecond),
			))

			// Force drain the node
			drainReq = &structs.NodeUpdateDrainRequest{
				NodeID: n1.ID,
				DrainStrategy: &structs.DrainStrategy{
					DrainSpec: structs.DrainSpec{
						Deadline: -1 * time.Second, // Force
					},
				},
				WriteRequest: structs.WriteRequest{Region: "global"},
			}
			must.NoError(t, msgpackrpc.CallWithCodec(
				codec, "Node.UpdateDrain", drainReq, &drainResp))

			// Make sure the batch job is migrated
			waitForAllocsStop(t, store, n1.ID, errCh)

			// Wait for the node drain to be marked complete with the events we expect
			waitForNodeDrainComplete(t, store, n1.ID, errCh, 4,
				drainer.NodeDrainEventDetailDeadlined)
		})
	}
}

// waitForNodeDrainComplete is a test helper that verifies the node drain has
// been removed and that the expected node events have been written.
func waitForNodeDrainComplete(t *testing.T, store *state.StateStore, nodeID string,
	errCh chan error, expectEvents int, expectDetail string) {
	t.Helper()

	var node *structs.Node

	must.Wait(t, wait.InitialSuccess(wait.ErrorFunc(func() error {
		if err := checkAllocPromoter(errCh); err != nil {
			return err
		}
		node, _ = store.NodeByID(nil, nodeID)
		if node.DrainStrategy != nil {
			return fmt.Errorf("has drain strategy still set")
		}
		// the test sometimes gets a duplicate node drain complete event
		if len(node.Events) < expectEvents {
			return fmt.Errorf(
				"did not get enough events (expected %d): %v", expectEvents, node.Events)
		}
		return nil
	}),
		wait.Timeout(10*time.Second),
		wait.Gap(50*time.Millisecond),
	))

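	// The most recent event must be the drain-complete event, with the
	// expected detail key attached when one is given.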
	must.Eq(t, drainer.NodeDrainEventComplete, node.Events[expectEvents-1].Message)
	if expectDetail != "" {
		must.MapContainsKey(t, node.Events[expectEvents-1].Details, expectDetail,
			must.Sprintf("%#v", node.Events[expectEvents-1].Details),
		)
	}
}

func waitForPlacedAllocs(t *testing.T, store *state.StateStore, nodeID string, count int) {
	t.Helper()
	must.Wait(t, wait.InitialSuccess(
		wait.BoolFunc(func() bool {
			allocs, err := store.AllocsByNode(nil, nodeID)
			must.NoError(t, err)
			return len(allocs) == count
		}),
		wait.Timeout(10*time.Second),
		wait.Gap(50*time.Millisecond),
	))
}

// waitForAllocsStop waits for all allocs on the node to be stopped
func waitForAllocsStop(t *testing.T, store *state.StateStore, nodeID string, errCh chan error) []*structs.Allocation {
	t.Helper()
	var finalAllocs []*structs.Allocation
	must.Wait(t, wait.InitialSuccess(
		wait.ErrorFunc(func() error {
			if err := checkAllocPromoter(errCh); err != nil {
				return err
			}

			var err error
			finalAllocs, err = store.AllocsByNode(nil, nodeID)
			must.NoError(t, err)
			for _, alloc := range finalAllocs {
				if alloc.DesiredStatus != structs.AllocDesiredStatusStop {
					return fmt.Errorf("expected stop but got %s", alloc.DesiredStatus)
				}
			}
			return nil
		}),
		wait.Timeout(10*time.Second),
		wait.Gap(50*time.Millisecond),
	))

	return finalAllocs
}