restored Node.Sanitize() for RPC endpoints
multiple other updates from code review
This commit is contained in:
parent 04081a983f
commit 770c9cecb5
@@ -155,13 +155,12 @@ func TestEventStream_PayloadValue(t *testing.T) {
 	require.NotEmpty(t, n.ID)
 
-	// perform a raw decoding and look for:
-	// - "ID", to make sure that raw decoding is correct
-	// - "SecretID", to make sure it's not present
+	// - "ID" to make sure that raw decoding is working correctly
+	// - "SecretID" to make sure it's not present
 	raw := make(map[string]map[string]interface{}, 0)
 	cfg := &mapstructure.DecoderConfig{
 		Result: &raw,
 	}
 
 	dec, err := mapstructure.NewDecoder(cfg)
 	require.NoError(t, err)
 	require.NoError(t, dec.Decode(e.Payload))
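The assertions that follow the raw decode are outside this hunk. A minimal sketch of the kind of check the comments above describe, using a hypothetical helper name and map layout rather than the test's actual code:

package api_test // hypothetical placement, for illustration only

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// requireSanitizedPayload sketches the checks described above: "ID" survives
// the raw decode while "SecretID" never appears in the decoded map.
func requireSanitizedPayload(t *testing.T, raw map[string]map[string]interface{}) {
	for _, fields := range raw {
		require.NotEmpty(t, fields["ID"])          // raw decoding produced the ID
		require.NotContains(t, fields, "SecretID") // confidential field is absent
	}
}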
@@ -202,7 +202,9 @@ func TestNodes_NoSecretID(t *testing.T) {
 			t.Fatalf("err: %s", err)
 		})
 
-		// Query the node, ensure that .SecretID was not returned by the HTTP server
+		// perform a raw http call and make sure that:
+		// - "ID" to make sure that raw decoding is working correctly
+		// - "SecretID" to make sure it's not present
 		resp := make(map[string]interface{})
 		_, err := c.query("/v1/node/"+nodeID, &resp, nil)
 		require.NoError(t, err)
@@ -13,7 +13,7 @@ import (
 	"github.com/hashicorp/nomad/command/agent/monitor"
 	"github.com/hashicorp/nomad/command/agent/pprof"
 	"github.com/hashicorp/nomad/helper"
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/nomad/structs"
 
 	metrics "github.com/armon/go-metrics"
@@ -124,7 +124,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) {
 	frames := make(chan *sframer.StreamFrame, streamFramesBuffer)
 	errCh := make(chan error)
 	var buf bytes.Buffer
-	frameCodec := codec.NewEncoder(&buf, handlers.JsonHandle)
+	frameCodec := codec.NewEncoder(&buf, jsonhandles.JsonHandle)
 
 	framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024)
 	framer.Run()
@@ -15,7 +15,7 @@ import (
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/helper/uuid"
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/nomad/structs"
 	nstructs "github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/plugins/drivers"
@@ -281,7 +281,7 @@ func newExecStream(decoder *codec.Decoder, encoder *codec.Encoder) drivers.ExecT
 
 		buf:        buf,
 		encoder:    encoder,
-		frameCodec: codec.NewEncoder(buf, handlers.JsonHandle),
+		frameCodec: codec.NewEncoder(buf, jsonhandles.JsonHandle),
 	}
 }
 
@@ -24,7 +24,7 @@ import (
 	sframer "github.com/hashicorp/nomad/client/lib/streamframer"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/helper"
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
@@ -239,7 +239,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) {
 	frames := make(chan *sframer.StreamFrame, streamFramesBuffer)
 	errCh := make(chan error)
 	var buf bytes.Buffer
-	frameCodec := codec.NewEncoder(&buf, handlers.JsonHandle)
+	frameCodec := codec.NewEncoder(&buf, jsonhandles.JsonHandle)
 
 	// Create the framer
 	framer := sframer.NewStreamFramer(frames, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
@@ -470,7 +470,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) {
 
 	var streamErr error
 	buf := new(bytes.Buffer)
-	frameCodec := codec.NewEncoder(buf, handlers.JsonHandle)
+	frameCodec := codec.NewEncoder(buf, jsonhandles.JsonHandle)
 OUTER:
 	for {
 		select {
@@ -24,7 +24,7 @@ import (
 
 	"github.com/hashicorp/nomad/helper/noxssrw"
 	"github.com/hashicorp/nomad/helper/tlsutil"
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
@@ -496,13 +496,13 @@ func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Reque
 		if obj != nil {
 			var buf bytes.Buffer
 			if prettyPrint {
-				enc := codec.NewEncoder(&buf, handlers.JsonHandlePretty)
+				enc := codec.NewEncoder(&buf, jsonhandles.JsonHandlePretty)
 				err = enc.Encode(obj)
 				if err == nil {
 					buf.Write([]byte("\n"))
 				}
 			} else {
-				enc := codec.NewEncoder(&buf, handlers.JsonHandleWithExtensions)
+				enc := codec.NewEncoder(&buf, jsonhandles.JsonHandleWithExtensions)
 				err = enc.Encode(obj)
 			}
 			if err != nil {
@@ -30,8 +30,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/hashicorp/nomad/nomad/json/handlers"
-	nomadjson "github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 )
 
 // makeHTTPServer returns a test server whose logs will be written to
@@ -315,11 +314,11 @@ func testPrettyPrint(pretty string, prettyFmt bool, t *testing.T) {
 	var expected bytes.Buffer
 	var err error
 	if prettyFmt {
-		enc := codec.NewEncoder(&expected, nomadjson.JsonHandlePretty)
+		enc := codec.NewEncoder(&expected, jsonhandles.JsonHandlePretty)
 		err = enc.Encode(r)
 		expected.WriteByte('\n')
 	} else {
-		enc := codec.NewEncoder(&expected, nomadjson.JsonHandleWithExtensions)
+		enc := codec.NewEncoder(&expected, jsonhandles.JsonHandleWithExtensions)
 		err = enc.Encode(r)
 	}
 	if err != nil {
@@ -1249,13 +1248,13 @@ func Test_decodeBody(t *testing.T) {
 // BenchmarkHTTPServer_JSONEncodingWithExtensions benchmarks the performance of
 // encoding JSON objects using extensions
 func BenchmarkHTTPServer_JSONEncodingWithExtensions(b *testing.B) {
-	benchmarkJsonEncoding(b, handlers.JsonHandleWithExtensions)
+	benchmarkJsonEncoding(b, jsonhandles.JsonHandleWithExtensions)
 }
 
 // BenchmarkHTTPServer_JSONEncodingWithoutExtensions benchmarks the performance of
 // encoding JSON objects using extensions
 func BenchmarkHTTPServer_JSONEncodingWithoutExtensions(b *testing.B) {
-	benchmarkJsonEncoding(b, handlers.JsonHandle)
+	benchmarkJsonEncoding(b, jsonhandles.JsonHandle)
 }
 
 func benchmarkJsonEncoding(b *testing.B, handle *codec.JsonHandle) {
@@ -62,9 +62,4 @@ a few ways that this can be accomplished:
 This conversion can be performed in-situ in the agent HTTP handler, as long as the conversion doesn't need to
 appear in other handlers.
 Otherwise, it is possible to register an extension on the JSON encoding used by the HTTP agent; these extensions
-can be put in `nomad/json/extensions.go`.
-
-Note, for simple transformations to encoding (like renaming or neglecting fields), we can use field tags on the structs
-from `nomad/structs`. Our msgpack [encoding](http://ugorji.net/blog/go-codec-primer#drop-in-replacement-for-encoding-json-json-key-in-struct-tag-supported)
-only uses the [`codec` tag](https://github.com/hashicorp/nomad/blob/v1.0.0/nomad/structs/structs.go#L10557-L10558).
-Therefore, the `json` tag is available for customizing API output when encoding `structs` objects. See `structs.Node.SecretID`, for example.
+can be put in `nomad/json/extensions.go`.
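The field-tag mechanism described in the note above can be illustrated with a small, self-contained sketch; the struct is hypothetical and plain encoding/json stands in for the agent's go-codec JsonHandle. The `codec` tag keeps the field visible to the internal msgpack encoding, while the `json` tag hides it from json-tag-aware output. (This commit moves away from relying on that tag for `Node.SecretID` and restores the explicit `Sanitize()` copy instead.)

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical struct: SecretID stays visible to codec-tag encoders but is
// dropped by anything that honors the json tag.
type example struct {
	ID       string `codec:"ID" json:"ID"`
	SecretID string `codec:"SecretID" json:"-"`
}

func main() {
	out, _ := json.Marshal(example{ID: "node-1", SecretID: "s3cret"})
	fmt.Println(string(out)) // prints {"ID":"node-1"}; SecretID is omitted
}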
@@ -11,7 +11,7 @@ import (
 	"github.com/zclconf/go-cty/cty"
 
 	"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/plugins/drivers"
 	"github.com/hashicorp/nomad/plugins/shared/hclspec"
 )
@@ -122,7 +122,7 @@ func JsonConfigToInterface(t *testing.T, config string) interface{} {
 	t.Helper()
 
 	// Decode from json
-	dec := codec.NewDecoderBytes([]byte(config), handlers.JsonHandle)
+	dec := codec.NewDecoderBytes([]byte(config), jsonhandles.JsonHandle)
 
 	var m map[string]interface{}
 	err := dec.Decode(&m)
@@ -10,7 +10,7 @@ import (
 	"github.com/hashicorp/hcl/v2/hcldec"
 	hjson "github.com/hashicorp/hcl/v2/json"
 
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 
 	"github.com/zclconf/go-cty/cty"
 	"github.com/zclconf/go-cty/cty/function"
@@ -28,7 +28,7 @@ func ParseHclInterface(val interface{}, spec hcldec.Spec, vars map[string]cty.Va
 
 	// Encode to json
 	var buf bytes.Buffer
-	enc := codec.NewEncoder(&buf, handlers.JsonHandle)
+	enc := codec.NewEncoder(&buf, jsonhandles.JsonHandle)
 	err := enc.Encode(val)
 	if err != nil {
 		// Convert to a hcl diagnostics message
@@ -17,7 +17,7 @@ import (
 	"github.com/hashicorp/nomad/command/agent/monitor"
 	"github.com/hashicorp/nomad/command/agent/pprof"
 	"github.com/hashicorp/nomad/helper"
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/nomad/structs"
 
 	"github.com/hashicorp/go-msgpack/codec"
@@ -187,7 +187,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) {
 	frames := make(chan *sframer.StreamFrame, 32)
 	errCh := make(chan error)
 	var buf bytes.Buffer
-	frameCodec := codec.NewEncoder(&buf, handlers.JsonHandle)
+	frameCodec := codec.NewEncoder(&buf, jsonhandles.JsonHandle)
 
 	framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024)
 	framer.Run()
@@ -1,30 +0,0 @@
-package json
-
-import (
-	"reflect"
-
-	"github.com/hashicorp/nomad/nomad/structs"
-)
-
-// init register all extensions used by the API HTTP server when encoding JSON
-func init() {
-	// TODO: this could be simplified by looking up the base type in the case of a pointer type
-	registerExtension(reflect.TypeOf(structs.Node{}), nodeExt)
-	registerExtension(reflect.TypeOf(&structs.Node{}), nodeExt)
-}
-
-// nodeExt adds the legacy field .Drain back to encoded Node objects
-func nodeExt(v interface{}) interface{} {
-	node := v.(*structs.Node)
-	if node == nil {
-		return nil
-	}
-	type NodeAlias structs.Node
-	return &struct {
-		*NodeAlias
-		Drain bool
-	}{
-		NodeAlias: (*NodeAlias)(node),
-		Drain:     node.DrainStrategy != nil,
-	}
-}
@@ -1,4 +1,4 @@
-package json
+package jsonhandles
 
 import (
 	"reflect"
@@ -9,16 +9,6 @@ import (
 // extendFunc is a mapping from one struct to another, to change the shape of the encoded JSON
 type extendFunc func(interface{}) interface{}
-
-var (
-	// extendedTypes is a mapping of extended types to their extension function
-	extendedTypes = map[reflect.Type]extendFunc{}
-)
-
-// registerExtension registers an extension function against a particular type
-func registerExtension(tpe reflect.Type, ext extendFunc) {
-	extendedTypes[tpe] = ext
-}
 
 // nomadJsonEncodingExtensions is a catch-all go-msgpack extension
 // it looks up the types in the list of registered extension functions and applies it
 type nomadJsonEncodingExtensions struct{}
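The extension's method bodies are not part of this hunk. A minimal sketch of how the catch-all dispatch could look, assuming go-codec's InterfaceExt shape and this file's existing `extendedTypes` map and `reflect` import (illustrative, not the file's actual code):

// ConvertExt (sketch): look up the concrete type and apply its extension,
// passing anything unregistered through unchanged.
func (nomadJsonEncodingExtensions) ConvertExt(v interface{}) interface{} {
	if extend, ok := extendedTypes[reflect.TypeOf(v)]; ok {
		return extend(v)
	}
	return v
}

// UpdateExt (sketch): only needed when decoding, so it can be a no-op here.
func (nomadJsonEncodingExtensions) UpdateExt(_ interface{}, _ interface{}) {}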
@@ -0,0 +1,33 @@
+package jsonhandles
+
+import (
+	"reflect"
+
+	"github.com/hashicorp/nomad/nomad/structs"
+)
+
+var (
+	// extendedTypes is a mapping of extended types to their extension function
+	// TODO: the duplicates could be simplified by looking up the base type in the case of a pointer type in ConvertExt
+	extendedTypes = map[reflect.Type]extendFunc{
+		reflect.TypeOf(structs.Node{}):  nodeExt,
+		reflect.TypeOf(&structs.Node{}): nodeExt,
+	}
+)
+
+// nodeExt ensures the node is sanitized and adds the legacy field .Drain back to encoded Node objects
+func nodeExt(v interface{}) interface{} {
+	node := v.(*structs.Node).Sanitize()
+	// transform to a struct with inlined Node fields plus the Drain field
+	// - using defined type (not an alias!) EmbeddedNode gives us free conversion to a distinct type
+	// - distinct type prevents this encoding extension from being called recursively/infinitely on the embedding
+	// - pointers mean the conversion function doesn't have to make a copy during conversion
+	type EmbeddedNode structs.Node
+	return &struct {
+		*EmbeddedNode
+		Drain bool
+	}{
+		EmbeddedNode: (*EmbeddedNode)(node),
+		Drain:        node != nil && node.DrainStrategy != nil,
+	}
+}
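The "defined type, not an alias" comment above is what keeps the extension from recursing; a short hypothetical illustration of the difference (not part of the commit):

package illustration // hypothetical package, for illustration only

import "github.com/hashicorp/nomad/nomad/structs"

// A defined type has its own reflect.Type, so the extension registered for
// structs.Node does not match it and encoding the wrapper cannot recurse.
type EmbeddedNode structs.Node

// A type alias shares structs.Node's reflect.Type; embedding it would make the
// encoder invoke nodeExt again on the wrapper, recursing without end.
type NodeAlias = structs.Node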
@@ -1,9 +1,7 @@
-package handlers
+package jsonhandles
 
 import (
 	"github.com/hashicorp/go-msgpack/codec"
-
-	"github.com/hashicorp/nomad/nomad/json"
 )
 
 var (
@@ -14,10 +12,10 @@ var (
 	JsonHandle = &codec.JsonHandle{
 		HTMLCharsAsIs: true,
 	}
-	JsonHandleWithExtensions = json.NomadJsonEncodingExtensions(&codec.JsonHandle{
+	JsonHandleWithExtensions = NomadJsonEncodingExtensions(&codec.JsonHandle{
 		HTMLCharsAsIs: true,
 	})
-	JsonHandlePretty = json.NomadJsonEncodingExtensions(&codec.JsonHandle{
+	JsonHandlePretty = NomadJsonEncodingExtensions(&codec.JsonHandle{
 		HTMLCharsAsIs: true,
 		Indent:        4,
 	})
@@ -801,6 +801,7 @@ func (n *Node) GetNode(args *structs.NodeSpecificRequest,
 
 	// Setup the output
 	if out != nil {
+		out = out.Sanitize()
 		reply.Node = out
 		reply.Index = out.ModifyIndex
 	} else {
|
@ -1314,6 +1314,7 @@ func TestClientEndpoint_GetNode(t *testing.T) {
|
|||
|
||||
// Update the status updated at value
|
||||
node.StatusUpdatedAt = resp2.Node.StatusUpdatedAt
|
||||
node.SecretID = ""
|
||||
node.Events = resp2.Node.Events
|
||||
if !reflect.DeepEqual(node, resp2.Node) {
|
||||
t.Fatalf("bad: %#v \n %#v", node, resp2.Node)
|
||||
|
|
|
@@ -647,10 +647,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
 	} else if node.Status != structs.NodeStatusReady {
 		return false, "node is not ready for placements", nil
 	} else if node.SchedulingEligibility == structs.NodeSchedulingIneligible {
-		return false, "node is not eligible for draining", nil
-	} else if node.DrainStrategy != nil {
-		// Deprecate in favor of scheduling eligibility and remove post-0.8
-		return false, "node is draining", nil
+		return false, "node is not eligible", nil
 	}
 
 	// Get the existing allocations that are non-terminal
@@ -80,6 +80,7 @@ func eventFromChange(change memdb.Change) (structs.Event, bool) {
 			return structs.Event{}, false
 		}
 
+		before = before.Sanitize()
 		return structs.Event{
 			Topic: structs.TopicNode,
 			Key:   before.ID,
@@ -175,6 +176,7 @@ func eventFromChange(change memdb.Change) (structs.Event, bool) {
 			return structs.Event{}, false
 		}
 
+		after = after.Sanitize()
 		return structs.Event{
 			Topic: structs.TopicNode,
 			Key:   after.ID,
@@ -8,7 +8,7 @@ import (
 
 	"github.com/hashicorp/go-msgpack/codec"
 
-	"github.com/hashicorp/nomad/nomad/json/handlers"
+	"github.com/hashicorp/nomad/nomad/jsonhandles"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
@@ -75,7 +75,7 @@ func (n *JsonStream) Send(v interface{}) error {
 	}
 
 	var buf bytes.Buffer
-	enc := codec.NewEncoder(&buf, handlers.JsonHandleWithExtensions)
+	enc := codec.NewEncoder(&buf, jsonhandles.JsonHandleWithExtensions)
 	err := enc.Encode(v)
 	if err != nil {
 		return fmt.Errorf("error marshaling json for stream: %w", err)
@@ -1907,6 +1907,20 @@ type Node struct {
 	ModifyIndex uint64
 }
 
+// Sanitize returns a copy of the Node omitting confidential fields
+// It only returns a copy if the Node contains the confidential fields
+func (n *Node) Sanitize() *Node {
+	if n == nil {
+		return nil
+	}
+	if n.SecretID == "" {
+		return n
+	}
+	clean := n.Copy()
+	clean.SecretID = ""
+	return clean
+}
+
 // Ready returns true if the node is ready for running allocations
 func (n *Node) Ready() bool {
 	return n.Status == NodeStatusReady && n.DrainStrategy == nil && n.SchedulingEligibility == NodeSchedulingEligible
@@ -1917,6 +1931,16 @@ func (n *Node) Canonicalize() {
 		return
 	}
 
+	// Ensure SchedulingEligibility is set whenever draining so the plan applier and other scheduling logic only need
+	// to check SchedulingEligibility when determining whether a placement is feasible on a node.
+	if n.SchedulingEligibility == "" {
+		if n.DrainStrategy != nil {
+			n.SchedulingEligibility = NodeSchedulingIneligible
+		} else {
+			n.SchedulingEligibility = NodeSchedulingEligible
+		}
+	}
+
 	// COMPAT remove in 1.0
 	// In v0.12.0 we introduced a separate node specific network resource struct
 	// so we need to covert any pre 0.12 clients to the correct struct
@@ -1940,14 +1964,6 @@ func (n *Node) Canonicalize() {
 			}
 		}
 	}
-
-	if n.SchedulingEligibility == "" {
-		if n.DrainStrategy != nil {
-			n.SchedulingEligibility = NodeSchedulingIneligible
-		} else {
-			n.SchedulingEligibility = NodeSchedulingEligible
-		}
-	}
 }
 
 func (n *Node) Copy() *Node {
@@ -5601,6 +5601,31 @@ func TestNode_Copy(t *testing.T) {
 	require.Equal(node.Drivers, node2.Drivers)
 }
 
+func TestNode_Sanitize(t *testing.T) {
+	require := require.New(t)
+
+	testCases := []*Node{
+		nil,
+		{
+			ID:       uuid.Generate(),
+			SecretID: "",
+		},
+		{
+			ID:       uuid.Generate(),
+			SecretID: uuid.Generate(),
+		},
+	}
+	for _, tc := range testCases {
+		sanitized := tc.Sanitize()
+		if tc == nil {
+			require.Nil(sanitized)
+		} else {
+			require.NotNil(sanitized)
+			require.Empty(sanitized.SecretID)
+		}
+	}
+}
+
 func TestSpread_Validate(t *testing.T) {
 	type tc struct {
 		spread *Spread
@@ -252,13 +252,7 @@ func readyNodesInDCs(state State, dcs []string) ([]*structs.Node, map[string]int
 
 		// Filter on datacenter and status
 		node := raw.(*structs.Node)
-		if node.Status != structs.NodeStatusReady {
-			continue
-		}
-		if node.DrainStrategy != nil {
-			continue
-		}
-		if node.SchedulingEligibility != structs.NodeSchedulingEligible {
+		if !node.Ready() {
 			continue
 		}
 		if _, ok := dcMap[node.Datacenter]; !ok {