scheduler: move proposed alloc logic to Context

This commit is contained in:
Armon Dadgar 2015-08-16 10:28:58 -07:00
parent 0dff5a77a2
commit f4e9ef8d1e
3 changed files with 129 additions and 19 deletions

View File

@ -22,6 +22,11 @@ type Context interface {
// Reset is invoked after making a placement
Reset()
// ProposedAllocs returns the proposed allocations for a node
// which is the existing allocations, removing evictions, and
// adding any planned placements.
ProposedAllocs(nodeID string) ([]*structs.Allocation, error)
}
// EvalContext is a Context used during an Evaluation
@ -66,3 +71,25 @@ func (e *EvalContext) SetState(s State) {
// Reset prepares the context for the next placement by discarding the
// metrics accumulated during the previous one.
func (e *EvalContext) Reset() {
	e.metrics = &structs.AllocMetric{}
}
// ProposedAllocs returns the set of allocations that would exist on the
// given node if the current plan were applied: the node's existing
// allocations, minus any planned evictions, plus any planned placements.
// The returned slice is never nil.
func (e *EvalContext) ProposedAllocs(nodeID string) ([]*structs.Allocation, error) {
	// Start from what the node is currently running.
	current, err := e.state.AllocsByNode(nodeID)
	if err != nil {
		return nil, err
	}

	// Drop any allocations the plan intends to evict from this node.
	out := current
	if evictions := e.plan.NodeEvict[nodeID]; len(evictions) > 0 {
		out = structs.RemoveAllocs(current, evictions)
	}

	// Layer on the placements the plan intends to make here.
	out = append(out, e.plan.NodeAllocation[nodeID]...)

	// Callers expect a non-nil slice even when the node ends up empty.
	if out == nil {
		out = make([]*structs.Allocation, 0)
	}
	return out, nil
}

View File

@ -5,6 +5,7 @@ import (
"os"
"testing"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
)
@ -24,3 +25,84 @@ func testContext(t *testing.T) (*state.StateStore, *EvalContext) {
ctx := NewEvalContext(state, plan, logger)
return state, ctx
}
// TestEvalContext_ProposedAlloc verifies that ProposedAllocs combines the
// existing allocations on a node with the plan's evictions and placements:
// node 0 has its only allocation evicted (0 proposed), while node 1 keeps
// its existing allocation and gains a planned placement (2 proposed).
func TestEvalContext_ProposedAlloc(t *testing.T) {
	state, ctx := testContext(t)

	// Two identical nodes sized to exactly fit a 2048/2048 allocation.
	newNode := func() *RankedNode {
		return &RankedNode{
			Node: &structs.Node{
				// Perfect fit
				ID: mock.GenerateUUID(),
				Resources: &structs.Resources{
					CPU:      2048,
					MemoryMB: 2048,
				},
			},
		}
	}
	nodes := []*RankedNode{newNode(), newNode()}

	// Seed the state store with one existing allocation per node.
	newAlloc := func(nodeID string, cpu, mem int) *structs.Allocation {
		return &structs.Allocation{
			ID:     mock.GenerateUUID(),
			EvalID: mock.GenerateUUID(),
			NodeID: nodeID,
			JobID:  mock.GenerateUUID(),
			Resources: &structs.Resources{
				CPU:      cpu,
				MemoryMB: mem,
			},
			Status: structs.AllocStatusPending,
		}
	}
	alloc1 := newAlloc(nodes[0].Node.ID, 2048, 2048)
	alloc2 := newAlloc(nodes[1].Node.ID, 1024, 1024)
	noErr(t, state.UpdateAllocations(1000, nil, []*structs.Allocation{alloc1, alloc2}))

	// Plan to evict alloc1 from node 0 and place a new allocation on node 1.
	plan := ctx.Plan()
	plan.NodeEvict[nodes[0].Node.ID] = []string{alloc1.ID}
	plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
		&structs.Allocation{
			Resources: &structs.Resources{
				CPU:      1024,
				MemoryMB: 1024,
			},
		},
	}

	// Node 0: existing alloc evicted, nothing placed. Node 1: existing
	// alloc kept plus one planned placement.
	for i, expected := range []int{0, 2} {
		proposed, err := ctx.ProposedAllocs(nodes[i].Node.ID)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(proposed) != expected {
			t.Fatalf("bad: %#v", proposed)
		}
	}
}

View File

@ -12,6 +12,10 @@ import (
// RankedNode pairs a candidate node with the score assigned to it by the
// ranking iterators.
type RankedNode struct {
	// Node is the candidate node being ranked.
	Node *structs.Node
	// Score is the accumulated fitness score for this node.
	Score float64

	// Proposed is used to cache the proposed allocations on the
	// node. This can be shared between iterators that require it.
	// (NOTE(review): comment previously said "Allocs"; fixed to match
	// the field name.)
	Proposed []*structs.Allocation
}
func (r *RankedNode) GoString() string {
@ -134,9 +138,6 @@ func (iter *BinPackIterator) SetPriority(p int) {
}
func (iter *BinPackIterator) Next() *RankedNode {
ctx := iter.ctx
state := ctx.State()
plan := ctx.Plan()
for {
// Get the next potential option
option := iter.source.Next()
@ -145,22 +146,21 @@ func (iter *BinPackIterator) Next() *RankedNode {
}
nodeID := option.Node.ID
// Get the existing allocations
existingAlloc, err := state.AllocsByNode(nodeID)
if err != nil {
iter.ctx.Logger().Printf("[ERR] sched.binpack: failed to get allocations for '%s': %v",
nodeID, err)
continue
// Get the proposed allocations
var proposed []*structs.Allocation
if option.Proposed != nil {
proposed = option.Proposed
} else {
p, err := iter.ctx.ProposedAllocs(nodeID)
if err != nil {
iter.ctx.Logger().Printf("[ERR] sched.binpack: failed to get proposed allocations for '%s': %v",
nodeID, err)
continue
}
proposed = p
option.Proposed = p
}
// Determine the proposed allocation by first removing allocations
// that are planned evictions and adding the new allocations.
proposed := existingAlloc
if evict := plan.NodeEvict[nodeID]; len(evict) > 0 {
proposed = structs.RemoveAllocs(existingAlloc, evict)
}
proposed = append(proposed, plan.NodeAllocation[nodeID]...)
// Add the resources we are trying to fit
proposed = append(proposed, &structs.Allocation{Resources: iter.resources})
@ -177,8 +177,9 @@ func (iter *BinPackIterator) Next() *RankedNode {
// carefully.
// Score the fit normally otherwise
option.Score = structs.ScoreFit(option.Node, util)
iter.ctx.Metrics().ScoreNode(option.Node, "binpack", option.Score)
fitness := structs.ScoreFit(option.Node, util)
option.Score += fitness
iter.ctx.Metrics().ScoreNode(option.Node, "binpack", fitness)
return option
}
}