scheduler: working on bin pack

Armon Dadgar 2015-08-13 11:54:59 -07:00
parent 861a5e2097
commit df21ab3d10
8 changed files with 181 additions and 98 deletions

View File

@ -171,5 +171,6 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
proposed = append(proposed, plan.NodeAllocation[nodeID]...)
// Check if these allocations fit
- return structs.AllocsFit(node, proposed)
+ fit, _, err := structs.AllocsFit(node, proposed)
+ return fit, err
}
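evaluateNodePlan only needs the yes/no verdict, so the extra return value is dropped at this call site. AllocsFit now also hands back the aggregate utilization it computes along the way, and the bin-packing scorer added later in this commit consumes that value directly. A minimal consumer-side sketch of the new contract (hypothetical wrapper, not from the diff):

package scratch // illustration only, outside the commit

import "github.com/hashicorp/nomad/nomad/structs"

// nodeFits mirrors the call site above: an error means the check itself failed,
// fit == false means the allocations simply do not fit, and used carries the
// aggregate utilization whenever the check completes (it is nil on error).
func nodeFits(node *structs.Node, proposed []*structs.Allocation) (bool, error) {
	fit, used, err := structs.AllocsFit(node, proposed)
	_ = used // ignored here; the scheduler's bin-packer scores nodes with it
	return fit, err
}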

View File

@ -1,5 +1,7 @@
package structs
import "math"
// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []string) []*Allocation {
@ -39,7 +41,7 @@ func PortsOvercommited(r *Resources) bool {
}
// AllocsFit checks if a given set of allocations will fit on a node
- func AllocsFit(node *Node, allocs []*Allocation) (bool, error) {
+ func AllocsFit(node *Node, allocs []*Allocation) (bool, *Resources, error) {
// Compute the utilization from zero
used := new(Resources)
for _, net := range node.Resources.Networks {
@ -52,28 +54,65 @@ func AllocsFit(node *Node, allocs []*Allocation) (bool, error) {
// Add the reserved resources of the node
if node.Reserved != nil {
if err := used.Add(node.Reserved); err != nil {
- return false, err
+ return false, nil, err
}
}
// For each alloc, add the resources
for _, alloc := range allocs {
if err := used.Add(alloc.Resources); err != nil {
- return false, err
+ return false, nil, err
}
}
// Check that the node resources are a super set of those
// that are being allocated
if !node.Resources.Superset(used) {
- return false, nil
+ return false, used, nil
}
// Ensure ports are not over commited
if PortsOvercommited(used) {
- return false, nil
+ return false, used, nil
}
// Allocations fit!
- return true, nil
+ return true, used, nil
}
// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
// Determine the node availability
nodeCpu := node.Resources.CPU
if node.Reserved != nil {
nodeCpu -= node.Reserved.CPU
}
nodeMem := float64(node.Resources.MemoryMB)
if node.Reserved != nil {
nodeMem -= float64(node.Reserved.MemoryMB)
}
// Compute the free percentage
freePctCpu := 1 - (util.CPU / nodeCpu)
freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
// Total will be "maximized" the smaller the value is.
// At 100% utilization, the total is 2, while at 0% util it is 20.
total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
// Invert so that the "maximized" total represents a high-value
// score. Because the floor is 20, we simply use that as an anchor.
// This means at a perfect fit, we return 18 as the score.
score := 20.0 - total
// Bound the score, just in case
// If the score is over 18, that means we've overfit the node.
if score > 18.0 {
score = 18.0
} else if score < 0 {
score = 0
}
return score
}
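The comments above pin the scoring curve at its extremes, and the arithmetic is quick to spot-check. When the CPU and memory free fractions are equal, the total is 2 * 10^free, so a perfect fit (0% free) scores 20 - 2 = 18, a completely idle node (100% free) scores 20 - 20 = 0, and the half-utilized mid-case exercised by the tests below lands near 13.7, inside the (10, 16) bounds they assert. A throwaway sketch, not part of the commit:

package main // stand-alone illustration of the BestFit v3 curve

import (
	"fmt"
	"math"
)

func main() {
	// Assume CPU and memory are free in equal proportion, so both terms of
	// the total are identical: total = 2 * 10^freePct.
	for _, freePct := range []float64{0, 0.5, 1} {
		total := math.Pow(10, freePct) + math.Pow(10, freePct)
		fmt.Printf("%.0f%% free -> score %.2f\n", freePct*100, 20.0-total)
	}
	// Prints 18.00 (perfect fit), 13.68 (half used), 0.00 (idle).
}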

View File

@ -87,7 +87,7 @@ func TestAllocsFit(t *testing.T) {
}
// Should fit one allocation
- fit, err := AllocsFit(n, []*Allocation{a1})
+ fit, used, err := AllocsFit(n, []*Allocation{a1})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -95,12 +95,71 @@ func TestAllocsFit(t *testing.T) {
t.Fatalf("Bad")
}
// Sanity check the used resources
if used.CPU != 2.0 {
t.Fatalf("bad: %#v", used)
}
if used.MemoryMB != 2048 {
t.Fatalf("bad: %#v", used)
}
// Should not fit second allocation
- fit, err = AllocsFit(n, []*Allocation{a1, a1})
+ fit, used, err = AllocsFit(n, []*Allocation{a1, a1})
if err != nil {
t.Fatalf("err: %v", err)
}
if fit {
t.Fatalf("Bad")
}
// Sanity check the used resources
if used.CPU != 3.0 {
t.Fatalf("bad: %#v", used)
}
if used.MemoryMB != 3072 {
t.Fatalf("bad: %#v", used)
}
}
func TestScoreFit(t *testing.T) {
node := &Node{}
node.Resources = &Resources{
CPU: 4096,
MemoryMB: 8192,
}
node.Reserved = &Resources{
CPU: 2048,
MemoryMB: 4096,
}
// Test a perfect fit
util := &Resources{
CPU: 2048,
MemoryMB: 4096,
}
score := ScoreFit(node, util)
if score != 18.0 {
t.Fatalf("bad: %v", score)
}
// Test the worst fit
util = &Resources{
CPU: 0,
MemoryMB: 0,
}
score = ScoreFit(node, util)
if score != 0.0 {
t.Fatalf("bad: %v", score)
}
// Test a mid-case scenario
util = &Resources{
CPU: 1024,
MemoryMB: 2048,
}
score = ScoreFit(node, util)
if score < 10.0 || score > 16.0 {
t.Fatalf("bad: %v", score)
}
}

View File

@ -1,19 +1,37 @@
package scheduler
import (
"log"
"github.com/hashicorp/nomad/nomad/structs"
)
// Context is used to track contextual information used for placement
type Context interface {
// State is used to inspect the current global state
State() State
// Plan returns the current plan
Plan() *structs.Plan
// Logger provides a way to log
Logger() *log.Logger
}
// EvalContext is a Context used during an Evaluation
type EvalContext struct {
state State
plan *structs.Plan
logger *log.Logger
}
// NewEvalContext constructs a new EvalContext
- func NewEvalContext(s State) *EvalContext {
- ctx := &EvalContext{}
+ func NewEvalContext(s State, p *structs.Plan, log *log.Logger) *EvalContext {
+ ctx := &EvalContext{
+ state: s,
+ plan: p,
+ logger: log,
+ }
return ctx
}
@ -21,6 +39,14 @@ func (e *EvalContext) State() State {
return e.state
}
func (e *EvalContext) Plan() *structs.Plan {
return e.plan
}
func (e *EvalContext) Logger() *log.Logger {
return e.logger
}
func (e *EvalContext) SetState(s State) {
e.state = s
}
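Exposing the in-flight Plan (and a Logger) through the Context is what lets ranking iterators account for allocations and evictions that are planned but not yet written to state, and report failures instead of swallowing them. A hypothetical helper, assumed to live in package scheduler, in the spirit of what BinPackIterator.Next does below:

// proposedAllocs is illustrative only and not part of the commit: it folds the
// uncommitted plan into the allocations the state store reports for a node.
func proposedAllocs(ctx Context, nodeID string, existing []*structs.Allocation) []*structs.Allocation {
	plan := ctx.Plan()
	proposed := existing
	if evict := plan.NodeEvict[nodeID]; len(evict) > 0 {
		proposed = structs.RemoveAllocs(existing, evict)
	}
	return append(proposed, plan.NodeAllocation[nodeID]...)
}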

View File

@ -1,10 +1,12 @@
package scheduler
import (
"log"
"os"
"testing"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
)
func testContext(t *testing.T) (*state.StateStore, *EvalContext) {
@ -12,7 +14,10 @@ func testContext(t *testing.T) (*state.StateStore, *EvalContext) {
if err != nil {
t.Fatalf("err: %v", err)
}
+ plan := new(structs.Plan)
- ctx := NewEvalContext(state)
+ logger := log.New(os.Stderr, "", log.LstdFlags)
+ ctx := NewEvalContext(state, plan, logger)
return state, ctx
}

View File

@ -1,10 +1,6 @@
package scheduler
- import (
- "math"
- "github.com/hashicorp/nomad/nomad/structs"
- )
+ import "github.com/hashicorp/nomad/nomad/structs"
// Rank is used to provide a score and various ranking metadata
// along with a node when iterating. This state can be modified as
@ -102,50 +98,46 @@ func NewBinPackIterator(ctx Context, source RankIterator, resources *structs.Res
}
func (iter *BinPackIterator) Next() *RankedNode {
ctx := iter.ctx
state := ctx.State()
plan := ctx.Plan()
for {
// Get the next potential option
option := iter.source.Next()
if option == nil {
return nil
}
nodeID := option.Node.ID
// TODO: Evaluate the bin packing
// Get the existing allocations
existingAlloc, err := state.AllocsByNode(nodeID)
if err != nil {
iter.ctx.Logger().Printf("[ERR] sched.binpack: failed to get allocations for '%s': %v",
nodeID, err)
continue
}
// Determine the proposed allocation by first removing allocations
// that are planned evictions and adding the new allocations.
proposed := existingAlloc
if evict := plan.NodeEvict[nodeID]; len(evict) > 0 {
proposed = structs.RemoveAllocs(existingAlloc, evict)
}
proposed = append(proposed, plan.NodeAllocation[nodeID]...)
// Add the resources we are trying to fit
proposed = append(proposed, &structs.Allocation{Resources: iter.resources})
// Check if these allocations fit, use a negative score
// to indicate an impossible choice
fit, util, _ := structs.AllocsFit(option.Node, proposed)
if !fit {
option.Score = -1
return option
}
// Score the fit normally otherwise
option.Score = structs.ScoreFit(option.Node, util)
return option
}
}
- // scoreFit is used to score the fit based on the Google work published here:
- // http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
- // This is equivalent to their BestFit v3
- func scoreFit(node *structs.Node, util *structs.Resources) float64 {
- // Determine the node availability
- nodeCpu := node.Resources.CPU
- if node.Reserved != nil {
- nodeCpu -= node.Reserved.CPU
- }
- nodeMem := float64(node.Resources.MemoryMB)
- if node.Reserved != nil {
- nodeMem -= float64(node.Reserved.MemoryMB)
- }
- // Compute the free percentage
- freePctCpu := 1 - (util.CPU / nodeCpu)
- freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
- // Total will be "maximized" the smaller the value is.
- // At 100% utilization, the total is 2, while at 0% util it is 20.
- total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
- // Invert so that the "maximized" total represents a high-value
- // score. Because the floor is 20, we simply use that as an anchor.
- // This means at a perfect fit, we return 18 as the score.
- score := 20.0 - total
- // Bound the score, just in case
- // If the score is over 18, that means we've overfit the node.
- if score > 18.0 {
- score = 18.0
- } else if score < 0 {
- score = 0
- }
- return score
- }
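The rewritten Next above returns infeasible nodes with a score of -1 rather than silently skipping them, so whatever selects among ranked nodes has to treat a negative score as "does not fit". A hypothetical consumer, assuming RankIterator exposes the same Next() *RankedNode shape used here (the selection logic itself is not part of this commit):

// selectBest is illustrative only: scan a rank iterator and keep the
// best-scoring feasible node, skipping the -1 bin-packing sentinel.
func selectBest(source RankIterator) *RankedNode {
	var best *RankedNode
	for option := source.Next(); option != nil; option = source.Next() {
		if option.Score < 0 {
			continue // the proposed allocations do not fit on this node
		}
		if best == nil || option.Score > best.Score {
			best = option
		}
	}
	return best
}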

View File

@ -33,45 +33,3 @@ func TestFeasibleRankIterator(t *testing.T) {
func TestBinPackIterator(t *testing.T) {
}
- func TestScoreFit(t *testing.T) {
- node := mock.Node()
- node.Resources = &structs.Resources{
- CPU: 4096,
- MemoryMB: 8192,
- }
- node.Reserved = &structs.Resources{
- CPU: 2048,
- MemoryMB: 4096,
- }
- // Test a perfect fit
- util := &structs.Resources{
- CPU: 2048,
- MemoryMB: 4096,
- }
- score := scoreFit(node, util)
- if score != 18.0 {
- t.Fatalf("bad: %v", score)
- }
- // Test the worst fit
- util = &structs.Resources{
- CPU: 0,
- MemoryMB: 0,
- }
- score = scoreFit(node, util)
- if score != 0.0 {
- t.Fatalf("bad: %v", score)
- }
- // Test a mid-case scenario
- util = &structs.Resources{
- CPU: 1024,
- MemoryMB: 2048,
- }
- score = scoreFit(node, util)
- if score < 10.0 || score > 16.0 {
- t.Fatalf("bad: %v", score)
- }
- }

View File

@ -58,6 +58,9 @@ type State interface {
// AllocsByJob returns the allocations by JobID
AllocsByJob(jobID string) ([]*structs.Allocation, error)
// AllocsByNode returns all the allocations by node
AllocsByNode(node string) ([]*structs.Allocation, error)
// GetNodeByID is used to lookup a node by ID
GetNodeByID(nodeID string) (*structs.Node, error)