package api

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/nomad/api/internal/testutil"
	"github.com/stretchr/testify/require"
)
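
// TestNodes_List verifies that listing nodes against a single dev-mode agent
// returns exactly one node along with populated QueryMeta.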
func TestNodes_List(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	var qm *QueryMeta
	var out []*NodeListStub
	var err error

	testutil.WaitForResult(func() (bool, error) {
		out, qm, err = nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Check that we got valid QueryMeta.
	assertQueryMeta(t, qm)
}
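
// TestNodes_PrefixList verifies that a registered node can be looked up by a
// four-character ID prefix.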
func TestNodes_PrefixList(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	var qm *QueryMeta
	var out []*NodeListStub
	var err error

	// Get the node ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Find node based on four character prefix
	out, qm, err = nodes.PrefixList(nodeID[:4])
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n := len(out); n != 1 {
		t.Fatalf("expected 1 node, got: %d ", n)
	}

	// Check that we got valid QueryMeta.
	assertQueryMeta(t, qm)
}

// TestNodes_List_Resources asserts that ?resources=true includes allocated and
// reserved resources in the response.
func TestNodes_List_Resources(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	var out []*NodeListStub
	var err error

	testutil.WaitForResult(func() (bool, error) {
		out, _, err = nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// By default resources should *not* be included
	require.Nil(t, out[0].NodeResources)
	require.Nil(t, out[0].ReservedResources)

	qo := &QueryOptions{
		Params: map[string]string{"resources": "true"},
	}
	out, _, err = nodes.List(qo)
	require.NoError(t, err)
	require.NotNil(t, out[0].NodeResources)
	require.NotNil(t, out[0].ReservedResources)
}
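
// TestNodes_Info verifies that fetching a node by ID returns its datacenter,
// a recent StatusUpdatedAt, and at least the node registration event, and
// that looking up an unknown ID returns a "not found" error.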
func TestNodes_Info(t *testing.T) {
	t.Parallel()
	startTime := time.Now().Unix()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Retrieving a nonexistent node returns an error
	_, _, err := nodes.Info("12345678-abcd-efab-cdef-123456789abc", nil)
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("expected not found error, got: %#v", err)
	}

	// Get the node ID
	var nodeID, dc string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		dc = out[0].Datacenter
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Querying for an existing node returns properly
	result, qm, err := nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)

	// Check that the result is what we expect
	if result.ID != nodeID || result.Datacenter != dc {
		t.Fatalf("expected %s (%s), got: %s (%s)",
			nodeID, dc,
			result.ID, result.Datacenter)
	}

	// Check that the StatusUpdatedAt field is being populated correctly
	if result.StatusUpdatedAt < startTime {
		t.Fatalf("start time: %v, status updated: %v", startTime, result.StatusUpdatedAt)
	}

	if len(result.Events) < 1 {
		t.Fatalf("Expected at minimum the node register event to be populated: %+v", result)
	}
}
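
// TestNodes_ToggleDrain verifies that enabling a drain via UpdateDrain marks
// the node ineligible for scheduling and that disabling it clears the drain
// strategy and restores eligibility.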
func TestNodes_ToggleDrain(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Wait for node registration and get the ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Check for drain mode
	out, _, err := nodes.Info(nodeID, nil)
	require.Nil(err)
	if out.Drain {
		t.Fatalf("drain mode should be off")
	}

	// Toggle it on
	spec := &DrainSpec{
		Deadline: 10 * time.Second,
	}
	drainOut, err := nodes.UpdateDrain(nodeID, spec, false, nil)
	require.Nil(err)
	assertWriteMeta(t, &drainOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	require.Nil(err)
	if out.SchedulingEligibility != NodeSchedulingIneligible {
		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
	}

	// Toggle off again
	drainOut, err = nodes.UpdateDrain(nodeID, nil, true, nil)
	require.Nil(err)
	assertWriteMeta(t, &drainOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	require.Nil(err)
	if out.Drain {
		t.Fatalf("drain mode should be off")
	}
	if out.DrainStrategy != nil {
		t.Fatalf("drain strategy should be unset")
	}
	if out.SchedulingEligibility != NodeSchedulingEligible {
		t.Fatalf("should be eligible")
	}
}
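
// TestNodes_ToggleEligibility verifies that ToggleEligibility flips a node's
// scheduling eligibility off and back on, leaving the drain strategy unset.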
func TestNodes_ToggleEligibility(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Wait for node registration and get the ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Check for eligibility
	out, _, err := nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if out.SchedulingEligibility != NodeSchedulingEligible {
		t.Fatalf("node should be eligible")
	}

	// Toggle it off
	eligOut, err := nodes.ToggleEligibility(nodeID, false, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, &eligOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if out.SchedulingEligibility != NodeSchedulingIneligible {
		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
	}

	// Toggle on
	eligOut, err = nodes.ToggleEligibility(nodeID, true, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, &eligOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if out.SchedulingEligibility != NodeSchedulingEligible {
		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingEligible)
	}
	if out.DrainStrategy != nil {
		t.Fatalf("drain strategy should be unset")
	}
}
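
// TestNodes_Allocations verifies that listing allocations for an unknown node
// returns an empty list without an error.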
func TestNodes_Allocations(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	nodes := c.Nodes()

	// Looking up by a nonexistent node returns nothing. We
	// don't check the index here because it's possible the node
	// has already registered, in which case we will get a non-
	// zero result anyways.
	allocs, _, err := nodes.Allocations("nope", nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n := len(allocs); n != 0 {
		t.Fatalf("expected 0 allocs, got: %d", n)
	}
}
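
// TestNodes_ForceEvaluate verifies that force-evaluating an unknown node fails
// with a "not found" error while force-evaluating a registered node succeeds.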
func TestNodes_ForceEvaluate(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Force-eval on a nonexistent node fails
	_, _, err := nodes.ForceEvaluate("12345678-abcd-efab-cdef-123456789abc", nil)
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("expected not found error, got: %#v", err)
	}

	// Wait for node registration and get the ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Try force-eval again. We don't check the WriteMeta because
	// there are no allocations to process, so we would get an index
	// of zero. Same goes for the eval ID.
	_, _, err = nodes.ForceEvaluate(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}
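
// TestNodes_Sort verifies that NodeIndexSort orders node stubs by CreateIndex
// in descending order.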
func TestNodes_Sort(t *testing.T) {
	t.Parallel()
	nodes := []*NodeListStub{
		{CreateIndex: 2},
		{CreateIndex: 1},
		{CreateIndex: 5},
	}
	sort.Sort(NodeIndexSort(nodes))

	expect := []*NodeListStub{
		{CreateIndex: 5},
		{CreateIndex: 2},
		{CreateIndex: 1},
	}
	if !reflect.DeepEqual(nodes, expect) {
		t.Fatalf("\n\n%#v\n\n%#v", nodes, expect)
	}
}

// Unit test monitorDrainMultiplex when an error occurs
func TestNodes_MonitorDrain_Multiplex_Bad(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	ctx := context.Background()
	multiplexCtx, cancel := context.WithCancel(ctx)

	// monitorDrainMultiplex doesn't require anything on *Nodes, so we
	// don't need to use a full Client
	var nodeClient *Nodes

	outCh := make(chan *MonitorMessage, 8)
	nodeCh := make(chan *MonitorMessage, 1)
	allocCh := make(chan *MonitorMessage, 8)
	exitedCh := make(chan struct{})
	go func() {
		defer close(exitedCh)
		nodeClient.monitorDrainMultiplex(ctx, cancel, outCh, nodeCh, allocCh)
	}()

	// Fake an alloc update
	msg := Messagef(0, "alloc update")
	allocCh <- msg
	require.Equal(msg, <-outCh)

	// Fake a node update
	msg = Messagef(0, "node update")
	nodeCh <- msg
	require.Equal(msg, <-outCh)

	// Fake an error that should shut everything down
	msg = Messagef(MonitorMsgLevelError, "fake error")
	nodeCh <- msg
	require.Equal(msg, <-outCh)

	_, ok := <-exitedCh
	require.False(ok)

	_, ok = <-outCh
	require.False(ok)

	// Exiting should also cancel the context that would be passed to the
	// node & alloc watchers
	select {
	case <-multiplexCtx.Done():
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("context wasn't canceled")
	}
}

// Unit test monitorDrainMultiplex when the drain finishes
func TestNodes_MonitorDrain_Multiplex_Good(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	ctx := context.Background()
	multiplexCtx, cancel := context.WithCancel(ctx)

	// monitorDrainMultiplex doesn't require anything on *Nodes, so we
	// don't need to use a full Client
	var nodeClient *Nodes

	outCh := make(chan *MonitorMessage, 8)
	nodeCh := make(chan *MonitorMessage, 1)
	allocCh := make(chan *MonitorMessage, 8)
	exitedCh := make(chan struct{})
	go func() {
		defer close(exitedCh)
		nodeClient.monitorDrainMultiplex(ctx, cancel, outCh, nodeCh, allocCh)
	}()

	// Fake a node updating and finishing
	msg := Messagef(MonitorMsgLevelInfo, "node update")
	nodeCh <- msg
	close(nodeCh)
	require.Equal(msg, <-outCh)

	// Nothing else should have exited yet
	select {
	case msg, ok := <-outCh:
		if ok {
			t.Fatalf("unexpected output: %q", msg)
		}
		t.Fatalf("out channel closed unexpectedly")
	case <-exitedCh:
		t.Fatalf("multiplexer exited unexpectedly")
	case <-multiplexCtx.Done():
		t.Fatalf("multiplexer context canceled unexpectedly")
	case <-time.After(10 * time.Millisecond):
		t.Logf("multiplexer still running as expected")
	}

	// Fake an alloc update coming in after the node monitor has finished
	msg = Messagef(0, "alloc update")
	allocCh <- msg
	require.Equal(msg, <-outCh)

	// Closing the allocCh should cause everything to exit
	close(allocCh)

	_, ok := <-exitedCh
	require.False(ok)

	_, ok = <-outCh
	require.False(ok)

	// Exiting should also cancel the context that would be passed to the
	// node & alloc watchers
	select {
	case <-multiplexCtx.Done():
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("context wasn't canceled")
	}
}
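
// TestNodes_DrainStrategy_Equal covers DrainStrategy.Equal for nil receivers
// and for each field that participates in the comparison.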
func TestNodes_DrainStrategy_Equal(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// nil
	var d *DrainStrategy
	require.True(d.Equal(nil))

	o := &DrainStrategy{}
	require.False(d.Equal(o))
	require.False(o.Equal(d))

	d = &DrainStrategy{}
	require.True(d.Equal(o))

	// ForceDeadline
	d.ForceDeadline = time.Now()
	require.False(d.Equal(o))

	o.ForceDeadline = d.ForceDeadline
	require.True(d.Equal(o))

	// Deadline
	d.Deadline = 1
	require.False(d.Equal(o))

	o.Deadline = 1
	require.True(d.Equal(o))

	// IgnoreSystemJobs
	d.IgnoreSystemJobs = true
	require.False(d.Equal(o))

	o.IgnoreSystemJobs = true
	require.True(d.Equal(o))
}
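
// TestNodes_Purge verifies that purging an unknown node fails with a "not
// found" error and that purging a registered node returns a response with a
// non-zero index.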
func TestNodes_Purge(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()

	// Purge on a nonexistent node fails.
	_, _, err := c.Nodes().Purge("12345678-abcd-efab-cdef-123456789abc", nil)
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("expected not found error, got: %#v", err)
	}

	// Wait for node registration and get the ID so we can attempt to purge a
	// node that exists.
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := c.Nodes().List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Perform the node purge and check the response objects.
	out, meta, err := c.Nodes().Purge(nodeID, nil)
	require.Nil(err)
	require.NotNil(out)

	// We can't use assertQueryMeta here, as the RPC response does not populate
	// the known leader field.
	require.Greater(meta.LastIndex, uint64(0))
}
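
// TestNodeStatValueFormatting verifies StatValue.String output for bool,
// string, float, and int values, with and without denominators and units.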
func TestNodeStatValueFormatting(t *testing.T) {
	t.Parallel()

	cases := []struct {
		expected string
		value    StatValue
	}{
		{
			"true",
			StatValue{BoolVal: boolToPtr(true)},
		},
		{
			"false",
			StatValue{BoolVal: boolToPtr(false)},
		},
		{
			"myvalue",
			StatValue{StringVal: stringToPtr("myvalue")},
		},
		{
			"2.718",
			StatValue{
				FloatNumeratorVal: float64ToPtr(2.718),
			},
		},
		{
			"2.718 / 3.14",
			StatValue{
				FloatNumeratorVal:   float64ToPtr(2.718),
				FloatDenominatorVal: float64ToPtr(3.14),
			},
		},
		{
			"2.718 MHz",
			StatValue{
				FloatNumeratorVal: float64ToPtr(2.718),
				Unit:              "MHz",
			},
		},
		{
			"2.718 / 3.14 MHz",
			StatValue{
				FloatNumeratorVal:   float64ToPtr(2.718),
				FloatDenominatorVal: float64ToPtr(3.14),
				Unit:                "MHz",
			},
		},
		{
			"2",
			StatValue{
				IntNumeratorVal: int64ToPtr(2),
			},
		},
		{
			"2 / 3",
			StatValue{
				IntNumeratorVal:   int64ToPtr(2),
				IntDenominatorVal: int64ToPtr(3),
			},
		},
		{
			"2 MHz",
			StatValue{
				IntNumeratorVal: int64ToPtr(2),
				Unit:            "MHz",
			},
		},
		{
			"2 / 3 MHz",
			StatValue{
				IntNumeratorVal:   int64ToPtr(2),
				IntDenominatorVal: int64ToPtr(3),
				Unit:              "MHz",
			},
		},
	}

	for i, c := range cases {
		t.Run(fmt.Sprintf("case %d %v", i, c.expected), func(t *testing.T) {
			formatted := c.value.String()
			require.Equal(t, c.expected, formatted)
		})
	}
}