Merge branch 'master' of github.com:hashicorp/nomad

Diptanu Choudhury 2016-04-10 01:59:04 -07:00
commit 7290883a3a
18 changed files with 368 additions and 173 deletions


@@ -22,6 +22,7 @@ IMPROVEMENTS:
* client: `artifact` block now supports downloading paths relative to the
task's directory [GH-944]
* discovery: Support script based health checks [GH-986]
+ * driver/docker: Support for `tty` and `interactive` options [GH-1059]
BUG FIXES:
* core: Fix issue where in-place updated allocation double counted resources


@@ -73,7 +73,8 @@ type DockerDriverConfig struct {
Labels map[string]string `mapstructure:"-"` // Labels to set when the container starts up
Auth []DockerDriverAuth `mapstructure:"auth"` // Authentication credentials for a private Docker registry
SSL bool `mapstructure:"ssl"` // Flag indicating repository is served via https
- TTY bool `mapstructure:"tty"`
+ TTY bool `mapstructure:"tty"` // Allocate a Pseudo-TTY
+ Interactive bool `mapstructure:"interactive"` // Keep STDIN open even if not attached
}
func (c *DockerDriverConfig) Init() error {
@@ -235,10 +236,11 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task,
d.taskEnv.SetTaskLocalDir(filepath.Join("/", allocdir.TaskLocal))
config := &docker.Config{
- Image: driverConfig.ImageName,
- Hostname: driverConfig.Hostname,
- User: task.User,
- Tty: driverConfig.TTY,
+ Image: driverConfig.ImageName,
+ Hostname: driverConfig.Hostname,
+ User: task.User,
+ Tty: driverConfig.TTY,
+ OpenStdin: driverConfig.Interactive,
}
hostConfig := &docker.HostConfig{
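An aside for readers tracing the plumbing: the new fields flow from the job file into the driver config via the `mapstructure` tags shown above. A minimal, self-contained sketch of that decode step — the trimmed struct and map literal here are illustrative stand-ins, not the driver's actual types:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Trimmed-down, hypothetical stand-in for DockerDriverConfig.
type dockerConfig struct {
	TTY         bool `mapstructure:"tty"`
	Interactive bool `mapstructure:"interactive"`
}

func main() {
	// Raw task config as the job parser would hand it to the driver.
	raw := map[string]interface{}{"tty": true, "interactive": true}

	var cfg dockerConfig
	if err := mapstructure.WeakDecode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("Tty=%v OpenStdin=%v\n", cfg.TTY, cfg.Interactive)
}
```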


@@ -127,29 +127,10 @@ func (a *Agent) serverConfig() (*nomad.Config, error) {
}
if addr := a.config.Addresses.RPC; addr != "" {
conf.RPCAddr.IP = net.ParseIP(addr)
- } else if device := a.config.Interfaces.RPC; device != "" {
- ip, err := ipOfDevice(device)
- if err != nil {
- return nil, err
- }
- conf.RPCAddr.IP = ip
- }
- if addr := a.config.Addresses.Serf; addr != "" {
- conf.SerfConfig.MemberlistConfig.BindAddr = addr
- } else if device := a.config.Interfaces.Serf; device != "" {
- ip, err := ipOfDevice(device)
- if err != nil {
- return nil, err
- }
- conf.SerfConfig.MemberlistConfig.BindAddr = ip.String()
- }
- if device := a.config.Interfaces.HTTP; device != "" && a.config.Addresses.HTTP == "" {
- ip, err := ipOfDevice(device)
- if err != nil {
- return nil, err
- }
- a.config.Addresses.HTTP = ip.String()
+ if addr := a.config.Addresses.Serf; addr != "" {
+ conf.SerfConfig.MemberlistConfig.BindAddr = addr
}
// Set up the ports
@@ -232,18 +213,11 @@ func (a *Agent) clientConfig() (*clientconfig.Config, error) {
// Setting the proper HTTP Addr
httpAddr := fmt.Sprintf("%s:%d", a.config.BindAddr, a.config.Ports.HTTP)
- if a.config.Addresses.HTTP != "" && a.config.AdvertiseAddrs.HTTP == "" && a.config.Interfaces.HTTP == "" {
+ if a.config.Addresses.HTTP != "" && a.config.AdvertiseAddrs.HTTP == "" {
httpAddr = fmt.Sprintf("%s:%d", a.config.Addresses.HTTP, a.config.Ports.HTTP)
if _, err := net.ResolveTCPAddr("tcp", httpAddr); err != nil {
return nil, fmt.Errorf("error resolving http addr: %v:", err)
}
- } else if a.config.Interfaces.HTTP != "" && a.config.AdvertiseAddrs.HTTP == "" {
- ip, err := ipOfDevice(a.config.Interfaces.HTTP)
- if err != nil {
- return nil, fmt.Errorf("error finding ip address from interface %q: %v", a.config.Interfaces.HTTP, err)
- }
- a.config.Addresses.HTTP = ip.String()
- httpAddr = fmt.Sprintf("%s:%d", ip.String(), a.config.Ports.HTTP)
} else if a.config.AdvertiseAddrs.HTTP != "" {
addr, err := net.ResolveTCPAddr("tcp", a.config.AdvertiseAddrs.HTTP)
if err != nil {


@@ -46,10 +46,6 @@ type Config struct {
// Addresses is used to override the network addresses we bind to.
Addresses *Addresses `mapstructure:"addresses"`
- // Interfaces is used to override the network addresses we bind to by
- // providing device names
- Interfaces *Interfaces `mapstructure:"interfaces"`
// AdvertiseAddrs is used to control the addresses we advertise.
AdvertiseAddrs *AdvertiseAddrs `mapstructure:"advertise"`
@@ -259,14 +255,6 @@ type Addresses struct {
Serf string `mapstructure:"serf"`
}
- // Interfaces provides an alternative to the Addresses configuration. We pick an
- // ip configured on the device specified and use that to bind.
- type Interfaces struct {
- HTTP string `mapstructure:"http"`
- RPC string `mapstructure:"rpc"`
- Serf string `mapstructure:"serf"`
- }
// AdvertiseAddrs is used to control the addresses we advertise out for
// different network services. Not all network services support an
// advertise address. All are optional and default to BindAddr.
@@ -378,7 +366,6 @@ func DefaultConfig() *Config {
Serf: 4648,
},
Addresses: &Addresses{},
- Interfaces: &Interfaces{},
AdvertiseAddrs: &AdvertiseAddrs{},
Atlas: &AtlasConfig{},
Client: &ClientConfig{
@@ -509,14 +496,6 @@ func (c *Config) Merge(b *Config) *Config {
result.Addresses = result.Addresses.Merge(b.Addresses)
}
- // Apply the interfaces config
- if result.Interfaces == nil && b.Interfaces != nil {
- interfaces := *b.Interfaces
- result.Interfaces = &interfaces
- } else if b.Interfaces != nil {
- result.Interfaces = result.Interfaces.Merge(b.Interfaces)
- }
// Apply the advertise addrs config
if result.AdvertiseAddrs == nil && b.AdvertiseAddrs != nil {
advertise := *b.AdvertiseAddrs
@@ -696,22 +675,6 @@ func (a *Addresses) Merge(b *Addresses) *Addresses {
return &result
}
- // Merge is used to merge two interfaces configs together.
- func (i *Interfaces) Merge(b *Interfaces) *Interfaces {
- result := *i
- if b.HTTP != "" {
- result.HTTP = b.HTTP
- }
- if b.RPC != "" {
- result.RPC = b.RPC
- }
- if b.Serf != "" {
- result.Serf = b.Serf
- }
- return &result
- }
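These Merge helpers all share the same override semantics: non-empty fields in the argument win. A standalone sketch using a local copy of the (now removed) type, to make the behavior concrete:

```go
package main

import "fmt"

// Local copy of the removed Interfaces type and its Merge, for illustration.
type Interfaces struct {
	HTTP, RPC, Serf string
}

func (i *Interfaces) Merge(b *Interfaces) *Interfaces {
	result := *i
	if b.HTTP != "" {
		result.HTTP = b.HTTP
	}
	if b.RPC != "" {
		result.RPC = b.RPC
	}
	if b.Serf != "" {
		result.Serf = b.Serf
	}
	return &result
}

func main() {
	base := &Interfaces{HTTP: "eth0", RPC: "eth0"}
	override := &Interfaces{RPC: "eth1"}
	// Non-empty fields in the argument win; everything else is kept.
	fmt.Printf("%+v\n", *base.Merge(override)) // {HTTP:eth0 RPC:eth1 Serf:}
}
```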
// Merge merges two advertise addrs configs together.
func (a *AdvertiseAddrs) Merge(b *AdvertiseAddrs) *AdvertiseAddrs {
result := *a


@@ -130,13 +130,6 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
}
}
- // Parse interfaces
- if o := list.Filter("interfaces"); len(o.Items) > 0 {
- if err := parseInterfaces(&result.Interfaces, o); err != nil {
- return multierror.Prefix(err, "interfaces ->")
- }
- }
// Parse advertise
if o := list.Filter("advertise"); len(o.Items) > 0 {
if err := parseAdvertise(&result.AdvertiseAddrs, o); err != nil {
@@ -253,38 +246,6 @@ func parseAddresses(result **Addresses, list *ast.ObjectList) error {
return nil
}
- func parseInterfaces(result **Interfaces, list *ast.ObjectList) error {
- list = list.Elem()
- if len(list.Items) > 1 {
- return fmt.Errorf("only one 'interfaces' block allowed")
- }
- // Get our interfaces object
- listVal := list.Items[0].Val
- // Check for the invalid keys
- valid := []string{
- "http",
- "rpc",
- "serf",
- }
- if err := checkHCLKeys(listVal, valid); err != nil {
- return err
- }
- var m map[string]interface{}
- if err := hcl.DecodeObject(&m, listVal); err != nil {
- return err
- }
- var interfaces Interfaces
- if err := mapstructure.WeakDecode(m, &interfaces); err != nil {
- return err
- }
- *result = &interfaces
- return nil
- }
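The deleted parser follows the agent's usual two-step pipeline: HCL into a generic map, then `mapstructure` into the typed struct. A self-contained sketch of the same round trip, using `hcl.Decode` on a string for brevity instead of the AST walk above:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
	"github.com/mitchellh/mapstructure"
)

// Local copy of the removed Interfaces shape.
type Interfaces struct {
	HTTP string `mapstructure:"http"`
	RPC  string `mapstructure:"rpc"`
	Serf string `mapstructure:"serf"`
}

func main() {
	src := `
http = "eth0"
rpc  = "eth0"
serf = "eth1"
`
	// Step 1: HCL -> generic map (hcl.DecodeObject does this for an AST node).
	var m map[string]interface{}
	if err := hcl.Decode(&m, src); err != nil {
		panic(err)
	}
	// Step 2: generic map -> typed struct.
	var ifaces Interfaces
	if err := mapstructure.WeakDecode(m, &ifaces); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", ifaces)
}
```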
func parseAdvertise(result **AdvertiseAddrs, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {


@@ -1,6 +1,7 @@
package command
import (
+ "fmt"
"math/rand"
"time"
@@ -29,6 +30,12 @@ func (f *FSCommand) Run(args []string) int {
func getRandomJobAlloc(client *api.Client, jobID string) (string, error) {
var runningAllocs []*api.AllocationListStub
allocs, _, err := client.Jobs().Allocations(jobID, nil)
+ // Check that the job actually has allocations
+ if len(allocs) == 0 {
+ return "", fmt.Errorf("job %q doesn't exist or it has no allocations", jobID)
+ }
for _, v := range allocs {
if v.ClientStatus == "running" {
runningAllocs = append(runningAllocs, v)
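The hunk cuts off before the selection step. Given the `math/rand` import added above and the function's name, the remainder presumably picks one running allocation at random — roughly this (hypothetical stand-in types, not the real `api` structs):

```go
package main

import (
	"fmt"
	"math/rand"
)

type allocStub struct{ ID, ClientStatus string }

// pickRandomRunning filters to running allocations, then picks one uniformly.
func pickRandomRunning(allocs []*allocStub) (string, error) {
	var running []*allocStub
	for _, a := range allocs {
		if a.ClientStatus == "running" {
			running = append(running, a)
		}
	}
	if len(running) == 0 {
		return "", fmt.Errorf("no running allocations")
	}
	return running[rand.Intn(len(running))].ID, nil
}

func main() {
	allocs := []*allocStub{
		{ID: "a1", ClientStatus: "failed"},
		{ID: "a2", ClientStatus: "running"},
		{ID: "a3", ClientStatus: "running"},
	}
	id, err := pickRandomRunning(allocs)
	fmt.Println(id, err)
}
```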


@@ -71,6 +71,7 @@ func (f *FSCatCommand) Run(args []string) int {
allocID, err = getRandomJobAlloc(client, args[0])
if err != nil {
f.Ui.Error(fmt.Sprintf("Error querying API: %v", err))
+ return 1
}
}


@@ -76,6 +76,7 @@ func (f *FSStatCommand) Run(args []string) int {
allocID, err = getRandomJobAlloc(client, args[0])
if err != nil {
f.Ui.Error(fmt.Sprintf("Error querying API: %v", err))
+ return 1
}
}


@@ -28,19 +28,35 @@ func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Schedule
}
// Process is used to implement the scheduler.Scheduler interface
- func (s *CoreScheduler) Process(eval *structs.Evaluation) error {
+ func (c *CoreScheduler) Process(eval *structs.Evaluation) error {
switch eval.JobID {
case structs.CoreJobEvalGC:
- return s.evalGC(eval)
+ return c.evalGC(eval)
case structs.CoreJobNodeGC:
- return s.nodeGC(eval)
+ return c.nodeGC(eval)
case structs.CoreJobJobGC:
- return s.jobGC(eval)
+ return c.jobGC(eval)
+ case structs.CoreJobForceGC:
+ return c.forceGC(eval)
default:
return fmt.Errorf("core scheduler cannot handle job '%s'", eval.JobID)
}
}
+ // forceGC is used to garbage collect all eligible objects.
+ func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error {
+ if err := c.jobGC(eval); err != nil {
+ return err
+ }
+ if err := c.evalGC(eval); err != nil {
+ return err
+ }
+ // Node GC must occur after the others to ensure the allocations are
+ // cleared.
+ return c.nodeGC(eval)
+ }
// jobGC is used to garbage collect eligible jobs.
func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
// Get all the jobs eligible for garbage collection.
@@ -50,7 +66,7 @@ func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
}
var oldThreshold uint64
- if eval.TriggeredBy == structs.EvalTriggerForceGC {
+ if eval.JobID == structs.CoreJobForceGC {
// The GC was forced, so set the threshold to its maximum so everything
// will GC.
oldThreshold = math.MaxUint64
@@ -60,9 +76,9 @@ func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
tt := c.srv.fsm.TimeTable()
cutoff := time.Now().UTC().Add(-1 * c.srv.config.JobGCThreshold)
oldThreshold = tt.NearestIndex(cutoff)
- c.srv.logger.Printf("[DEBUG] sched.core: job GC: scanning before index %d (%v)",
- oldThreshold, c.srv.config.JobGCThreshold)
}
+ c.srv.logger.Printf("[DEBUG] sched.core: job GC: scanning before index %d (%v)",
+ oldThreshold, c.srv.config.JobGCThreshold)
// Collect the allocations, evaluations and jobs to GC
var gcAlloc, gcEval, gcJob []string
@@ -137,7 +153,7 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
}
var oldThreshold uint64
- if eval.TriggeredBy == structs.EvalTriggerForceGC {
+ if eval.JobID == structs.CoreJobForceGC {
// The GC was forced, so set the threshold to its maximum so everything
// will GC.
oldThreshold = math.MaxUint64
@@ -149,9 +165,9 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
tt := c.srv.fsm.TimeTable()
cutoff := time.Now().UTC().Add(-1 * c.srv.config.EvalGCThreshold)
oldThreshold = tt.NearestIndex(cutoff)
- c.srv.logger.Printf("[DEBUG] sched.core: eval GC: scanning before index %d (%v)",
- oldThreshold, c.srv.config.EvalGCThreshold)
}
+ c.srv.logger.Printf("[DEBUG] sched.core: eval GC: scanning before index %d (%v)",
+ oldThreshold, c.srv.config.EvalGCThreshold)
// Collect the allocations and evaluations to GC
var gcAlloc, gcEval []string
@@ -163,12 +179,22 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
return err
}
- // If the eval is from a "batch" job we don't want to garbage collect
- // its allocations. If there is a long running batch job and its
+ // If the eval is from a running "batch" job we don't want to garbage
+ // collect its allocations. If there is a long running batch job and its
// terminal allocations get GC'd the scheduler would re-run the
// allocations.
- if len(allocs) != 0 && eval.Type == structs.JobTypeBatch {
- continue
+ if eval.Type == structs.JobTypeBatch {
+ // Check if the job is running
+ job, err := c.snap.JobByID(eval.JobID)
+ if err != nil {
+ return err
+ }
+ // If the job has been deregistered, we want to garbage collect the
+ // allocations and evaluations.
+ if job != nil && len(allocs) != 0 {
+ continue
+ }
}
if gc {
@@ -257,7 +283,7 @@ func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error {
}
var oldThreshold uint64
- if eval.TriggeredBy == structs.EvalTriggerForceGC {
+ if eval.JobID == structs.CoreJobForceGC {
// The GC was forced, so set the threshold to its maximum so everything
// will GC.
oldThreshold = math.MaxUint64
@@ -269,9 +295,9 @@ func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error {
tt := c.srv.fsm.TimeTable()
cutoff := time.Now().UTC().Add(-1 * c.srv.config.NodeGCThreshold)
oldThreshold = tt.NearestIndex(cutoff)
- c.srv.logger.Printf("[DEBUG] sched.core: node GC: scanning before index %d (%v)",
- oldThreshold, c.srv.config.NodeGCThreshold)
}
+ c.srv.logger.Printf("[DEBUG] sched.core: node GC: scanning before index %d (%v)",
+ oldThreshold, c.srv.config.NodeGCThreshold)
// Collect the nodes to GC
var gcNode []string


@@ -113,7 +113,77 @@ func TestCoreScheduler_EvalGC_Batch_NoAllocs(t *testing.T) {
}
}
- func TestCoreScheduler_EvalGC_Batch_Allocs(t *testing.T) {
+ func TestCoreScheduler_EvalGC_Batch_Allocs_WithJob(t *testing.T) {
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// Insert job.
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert "dead" eval
eval := mock.Eval()
eval.Type = structs.JobTypeBatch
eval.Status = structs.EvalStatusFailed
eval.JobID = job.ID
if err := state.UpsertEvals(1001, []*structs.Evaluation{eval}); err != nil {
t.Fatalf("err: %v", err)
}
// Insert "dead" alloc
alloc := mock.Alloc()
alloc.EvalID = eval.ID
alloc.JobID = job.ID
alloc.DesiredStatus = structs.AllocDesiredStatusFailed
err = state.UpsertAllocs(1002, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Update the time tables to make this work
tt := s1.fsm.TimeTable()
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobEvalGC)
gc.ModifyIndex = 2000
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Shouldn't be gone because there are associated allocs.
out, err := state.EvalByID(eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outA, err := state.AllocByID(alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outA == nil {
t.Fatalf("bad: %v", outA)
}
}
func TestCoreScheduler_EvalGC_Batch_Allocs_NoJob(t *testing.T) {
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -156,22 +226,14 @@ func TestCoreScheduler_EvalGC_Batch_Allocs(t *testing.T) {
t.Fatalf("err: %v", err)
}
- // Shouldn't be gone because there are associated allocs.
+ // Should be gone because the job is deregistered.
out, err := state.EvalByID(eval.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
- if out == nil {
+ if out != nil {
t.Fatalf("bad: %v", out)
}
- outA, err := state.AllocByID(alloc.ID)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if outA == nil {
- t.Fatalf("bad: %v", outA)
- }
}
func TestCoreScheduler_EvalGC_Force(t *testing.T) {
@@ -205,7 +267,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) {
core := NewCoreScheduler(s1, snap)
// Attempt the GC
- gc := s1.forceCoreJobEval(structs.CoreJobEvalGC)
+ gc := s1.coreJobEval(structs.CoreJobForceGC)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
@@ -294,7 +356,7 @@ func TestCoreScheduler_NodeGC_Force(t *testing.T) {
core := NewCoreScheduler(s1, snap)
// Attempt the GC
- gc := s1.forceCoreJobEval(structs.CoreJobNodeGC)
+ gc := s1.coreJobEval(structs.CoreJobForceGC)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
@@ -502,7 +564,7 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) {
core := NewCoreScheduler(s1, snap)
// Attempt the GC
- gc := s1.forceCoreJobEval(structs.CoreJobJobGC)
+ gc := s1.coreJobEval(structs.CoreJobForceGC)
err = core.Process(gc)
if err != nil {
t.Fatalf("test(%s) err: %v", test.test, err)


@@ -270,14 +270,6 @@ func (s *Server) coreJobEval(job string) *structs.Evaluation {
}
}
- // forceCoreJobEval returns an evaluation for a core job that will ignore GC
- // cutoffs.
- func (s *Server) forceCoreJobEval(job string) *structs.Evaluation {
- eval := s.coreJobEval(job)
- eval.TriggeredBy = structs.EvalTriggerForceGC
- return eval
- }
// reapFailedEvaluations is used to reap evaluations that
// have reached their delivery limit and should be failed
func (s *Server) reapFailedEvaluations(stopCh chan struct{}) {


@@ -2435,7 +2435,6 @@ const (
EvalTriggerPeriodicJob = "periodic-job"
EvalTriggerNodeUpdate = "node-update"
EvalTriggerScheduled = "scheduled"
- EvalTriggerForceGC = "force-gc"
EvalTriggerRollingUpdate = "rolling-update"
)
@@ -2456,6 +2455,9 @@ const (
// evaluations and allocations are terminal. If so, we delete these out of
// the system.
CoreJobJobGC = "job-gc"
+ // CoreJobForceGC is used to force garbage collection of all GCable objects.
+ CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result


@@ -16,8 +16,6 @@ func (s *System) GarbageCollect(args *structs.GenericRequest, reply *structs.Gen
return err
}
- s.srv.evalBroker.Enqueue(s.srv.forceCoreJobEval(structs.CoreJobEvalGC))
- s.srv.evalBroker.Enqueue(s.srv.forceCoreJobEval(structs.CoreJobNodeGC))
- s.srv.evalBroker.Enqueue(s.srv.forceCoreJobEval(structs.CoreJobJobGC))
+ s.srv.evalBroker.Enqueue(s.srv.coreJobEval(structs.CoreJobForceGC))
return nil
}
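End to end, a forced GC is now a single enqueued core-job eval. Assuming this RPC is exposed over the agent's HTTP API as `PUT /v1/system/gc` (the endpoint path here is an assumption — verify against your Nomad version), triggering it from a client could look like:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Assumed endpoint and default agent address (127.0.0.1:4646).
	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:4646/v1/system/gc", nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```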

vendor/github.com/hashicorp/go-getter/appveyor.yml (generated, vendored, new file)

@@ -0,0 +1,16 @@
version: "build-{branch}-{build}"

image: Visual Studio 2015

clone_folder: c:\gopath\github.com\hashicorp\go-getter

environment:
  GOPATH: c:\gopath
install:
- cmd: >-
    echo %Path%
    go version
    go env
    go get -d -v -t ./...
build_script:
- cmd: go test -v ./...

vendor/github.com/hashicorp/go-getter/get_file_unix.go (generated, vendored, new file)

@@ -0,0 +1,93 @@
// +build !windows
package getter
import (
"fmt"
"io"
"net/url"
"os"
"path/filepath"
)
func (g *FileGetter) Get(dst string, u *url.URL) error {
// The source path must exist and be a directory to be usable.
if fi, err := os.Stat(u.Path); err != nil {
return fmt.Errorf("source path error: %s", err)
} else if !fi.IsDir() {
return fmt.Errorf("source path must be a directory")
}
fi, err := os.Lstat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
// If the destination already exists, it must be a symlink
if err == nil {
mode := fi.Mode()
if mode&os.ModeSymlink == 0 {
return fmt.Errorf("destination exists and is not a symlink")
}
// Remove the destination
if err := os.Remove(dst); err != nil {
return err
}
}
// Create all the parent directories
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
return err
}
return os.Symlink(u.Path, dst)
}
func (g *FileGetter) GetFile(dst string, u *url.URL) error {
// The source path must exist and be a file to be usable.
if fi, err := os.Stat(u.Path); err != nil {
return fmt.Errorf("source path error: %s", err)
} else if fi.IsDir() {
return fmt.Errorf("source path must be a file")
}
_, err := os.Lstat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
// If the destination already exists, it must be a symlink
if err == nil {
// Remove the destination
if err := os.Remove(dst); err != nil {
return err
}
}
// Create all the parent directories
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
return err
}
// If we're not copying, just symlink and we're done
if !g.Copy {
return os.Symlink(u.Path, dst)
}
// Copy
srcF, err := os.Open(u.Path)
if err != nil {
return err
}
defer srcF.Close()
dstF, err := os.Create(dst)
if err != nil {
return err
}
defer dstF.Close()
_, err = io.Copy(dstF, srcF)
return err
}
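A small usage sketch for the getter above, relying only on what the vendored source shows (`Get` takes a destination path and a parsed `file://` URL; the `Copy` field gates symlinking versus copying):

```go
package main

import (
	"log"
	"net/url"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// file:// source; with Copy false, Get symlinks the directory.
	u, err := url.Parse("file:///tmp/src-dir")
	if err != nil {
		log.Fatal(err)
	}
	g := &getter.FileGetter{Copy: false}
	if err := g.Get("/tmp/dst-dir", u); err != nil {
		log.Fatal(err)
	}
	log.Println("linked /tmp/dst-dir -> /tmp/src-dir")
}
```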

vendor/github.com/hashicorp/go-getter/get_file_windows.go (generated, vendored, new file)

@@ -0,0 +1,110 @@
// +build windows
package getter
import (
"fmt"
"io"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
)
func (g *FileGetter) Get(dst string, u *url.URL) error {
// The source path must exist and be a directory to be usable.
if fi, err := os.Stat(u.Path); err != nil {
return fmt.Errorf("source path error: %s", err)
} else if !fi.IsDir() {
return fmt.Errorf("source path must be a directory")
}
fi, err := os.Lstat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
// If the destination already exists, it must be a symlink
if err == nil {
mode := fi.Mode()
if mode&os.ModeSymlink == 0 {
return fmt.Errorf("destination exists and is not a symlink")
}
// Remove the destination
if err := os.Remove(dst); err != nil {
return err
}
}
// Create all the parent directories
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
return err
}
sourcePath := toBackslash(u.Path)
// Use mklink to create a junction point
output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
}
return nil
}
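A brief note on the design choice here, hedged since the source doesn't say: `mklink /J` creates an NTFS junction rather than a symlink, presumably because real directory symlinks on Windows normally require elevated privileges while junctions do not. Junctions only apply to directories, which is consistent with `GetFile` below still using `os.Symlink` for single files.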
func (g *FileGetter) GetFile(dst string, u *url.URL) error {
// The source path must exist and be a file to be usable.
if fi, err := os.Stat(u.Path); err != nil {
return fmt.Errorf("source path error: %s", err)
} else if fi.IsDir() {
return fmt.Errorf("source path must be a file")
}
_, err := os.Lstat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
// If the destination already exists, it must be a symlink
if err == nil {
// Remove the destination
if err := os.Remove(dst); err != nil {
return err
}
}
// Create all the parent directories
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
return err
}
// If we're not copying, just symlink and we're done
if !g.Copy {
return os.Symlink(u.Path, dst)
}
// Copy
srcF, err := os.Open(u.Path)
if err != nil {
return err
}
defer srcF.Close()
dstF, err := os.Create(dst)
if err != nil {
return err
}
defer dstF.Close()
_, err = io.Copy(dstF, srcF)
return err
}
// toBackslash returns the result of replacing each slash character
// in path with a backslash ('\') character. Multiple separators are
// replaced by multiple backslashes.
func toBackslash(path string) string {
return strings.Replace(path, "/", "\\", -1)
}
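A quick standalone check of `toBackslash` (hypothetical snippet, not part of the vendored file):

```go
package main

import (
	"fmt"
	"strings"
)

// Same replacement as the vendored toBackslash, reproduced so this runs alone.
func toBackslash(path string) string {
	return strings.Replace(path, "/", "\\", -1)
}

func main() {
	fmt.Println(toBackslash("c:/gopath/src")) // c:\gopath\src
}
```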


@@ -144,22 +144,6 @@ nodes, unless otherwise specified:
server nodes from the same datacenter if possible. Used only on server
nodes.
- * <a id="interfaces">`interfaces`</a>: Provides an alternative to the
- `addresses` configuration. Operators can provide network device names to which
- Nomad binds individual network services. Nomad looks for the first IPv4
- address configured for the device and uses it, and if no IPv4 address is
- present then it looks for an IPv6 address. The value is a map of device names of
- network interfaces and supports the following keys:
- <br>
- * `http`: The device name the HTTP server is bound to. Applies to both clients and servers.
- * `rpc`: The device name to bind the internal RPC interfaces to. Should be exposed
- only to other cluster members if possible. Used only on server nodes, but
- must be accessible from all agents.
- * `serf`: The device name used to bind the gossip layer to. Both a TCP and UDP
- listener will be exposed on this address. Should be restricted to only
- server nodes from the same datacenter if possible. Used only on server
- nodes.
* `advertise`: Controls the advertise address for individual network services.
This can be used to advertise a different address to the peers of a server
node to support more complex network configurations such as NAT. This
@@ -297,10 +281,6 @@ configured on server nodes.
used to register the client with the server nodes and advertise the
available resources so that the agent can receive work. If a port is not specified
in the array of server addresses, the default port `4647` will be used.
- * <a id="node_id">`node_id`</a>: This is the value used to uniquely identify
- the local agent's node registration with the servers. This can be any
- arbitrary string but must be unique to the cluster. By default, if not
- specified, a randomly-generated UUID will be used.
* <a id="node_class">`node_class`</a>: A string used to logically group client
nodes by class. This can be used during job placement as a filter. This
option is not required and has no default.


@@ -97,6 +97,12 @@ The following options are available for use in the job specification.
* `auth` - (Optional) Provide authentication for a private registry (see below).
+ * `tty` - (Optional) `true` or `false` (default). Allocate a pseudo-TTY for the
+ container.
+ * `interactive` - (Optional) `true` or `false` (default). Keep STDIN open on
+ the container.
### Container Name
Nomad creates a container after pulling an image. Containers are named