Fixed merge conflict
commit 4527410e33

@@ -11,7 +11,7 @@ go:
   - tip
 
 env:
-  - TRAVIS_RUN=true DOCKER_VERSION=1.9.1
+  - DOCKER_VERSION=1.9.1
 
 matrix:
   allow_failures:

@@ -3,6 +3,7 @@
 BACKWARDS INCOMPATIBILITIES:
   * core: Improved restart policy with more user configuration [GH-594]
   * core/cli: Print short identifiers [GH-675]
+  * core/consul: Validate service name doesn't include period [GH-770]
   * core/jobspec: Variables/constraints interpreted using ${} notation [GH-675]
   * client: Environment variable containing address for each allocated port
     [GH-704]

@@ -26,6 +27,7 @@ IMPROVEMENTS:
   * core/client: Client pulls minimum set of required allocations [GH-731]
   * core/jobspec: Default task resources and validation [GH-739]
   * cli: Output of agent-info is sorted [GH-617]
   * cli: Eval monitor detects zero wait condition [GH-776]
+  * cli: Ability to navigate allocation directories [GH-709]
   * client: Handle updates to tasks Restart Policy and KillTimeout [GH-751]
   * client: Create a tmp/ directory inside each task directory [GH-757]

@@ -34,6 +36,8 @@ IMPROVEMENTS:
   * drivers: Interpret Nomad variables in environment variables/args [GH-653]
   * driver/rkt: Add support for CPU/Memory isolation [GH-610]
   * driver/rkt: Add support for mounting alloc/task directory [GH-645]
+  * driver/docker: Support for .dockercfg based auth for private registries
+    [GH-773]
 
 BUG FIXES:
   * core: Node drain could only be partially applied [GH-750]

@@ -42,6 +46,7 @@ BUG FIXES:
   * client: Handle non-200 codes when parsing AWS metadata [GH-614]
   * client: Cleanup of the allocation directory [GH-755]
   * client: Unmounted of shared alloc dir when client is rebooted [GH-755]
+  * client/consul: Service name changes handled properly [GH-766]
   * driver/rkt: handle broader format of rkt version outputs [GH-745]
   * driver/qemu: failed to load image and kvm accelerator fixes [GH-656]
 

@@ -57,7 +57,7 @@ Developing Nomad
 
 If you wish to work on Nomad itself or any of its built-in systems,
 you will first need [Go](https://www.golang.org) installed on your
-machine (version 1.4+ is *required*).
+machine (version 1.5+ is *required*).
 
 **Developing with Vagrant**
 There is an included Vagrantfile that can help bootstrap the process. The

Vagrantfile (vendored): 2 changes
@@ -62,6 +62,8 @@ sudo usermod -aG docker vagrant
 
 # Setup Nomad for development
 cd /opt/gopath/src/github.com/hashicorp/nomad && make updatedeps
+# Install gox
+go get github.com/mitchellh/gox
 
 # CD into the nomad working directory when we login to the VM
 grep "cd /opt/gopath/src/github.com/hashicorp/nomad" ~/.profile || echo "cd /opt/gopath/src/github.com/hashicorp/nomad" >> ~/.profile

@@ -364,8 +364,6 @@ func (r *AllocRunner) Run() {
             continue
         }
 
-        // Merge in the task resources
-        task.Resources = alloc.TaskResources[task.Name]
         tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.alloc,
             task, r.consulService)
         r.tasks[task.Name] = tr

@@ -392,22 +390,6 @@ OUTER:
         r.taskLock.RLock()
         for _, task := range tg.Tasks {
             tr := r.tasks[task.Name]
-
-            // Merge in the task resources
-            task.Resources = update.TaskResources[task.Name]
-        FOUND:
-            for _, updateGroup := range update.Job.TaskGroups {
-                if tg.Name != updateGroup.Name {
-                    continue
-                }
-                for _, updateTask := range updateGroup.Tasks {
-                    if updateTask.Name != task.Name {
-                        continue
-                    }
-                    task.Services = updateTask.Services
-                    break FOUND
-                }
-            }
             tr.Update(update)
         }
         r.taskLock.RUnlock()

@@ -76,6 +76,7 @@ type ConsulService struct {
 
     trackedTasks   map[string]*trackedTask
     serviceStates  map[string]string
+    allocToService map[string][]string
     trackedTskLock sync.Mutex
 }
 

@@ -130,12 +131,13 @@ func NewConsulService(config *consulServiceConfig) (*ConsulService, error) {
     }
 
     consulService := ConsulService{
-        client:        &consulApiClient{client: c},
-        logger:        config.logger,
-        node:          config.node,
-        trackedTasks:  make(map[string]*trackedTask),
-        serviceStates: make(map[string]string),
-        shutdownCh:    make(chan struct{}),
+        client:         &consulApiClient{client: c},
+        logger:         config.logger,
+        node:           config.node,
+        trackedTasks:   make(map[string]*trackedTask),
+        serviceStates:  make(map[string]string),
+        allocToService: make(map[string][]string),
+        shutdownCh:     make(chan struct{}),
     }
 
     return &consulService, nil

@@ -148,8 +150,18 @@ func (c *ConsulService) Register(task *structs.Task, alloc *structs.Allocation)
     c.trackedTskLock.Lock()
     tt := &trackedTask{task: task, alloc: alloc}
     c.trackedTasks[fmt.Sprintf("%s-%s", alloc.ID, task.Name)] = tt
+
+    // Delete any previously registered service as the same alloc is being
+    // re-registered.
+    for _, service := range c.allocToService[alloc.ID] {
+        delete(c.serviceStates, service)
+    }
     c.trackedTskLock.Unlock()
+
     for _, service := range task.Services {
+        // Track the services this alloc is registering.
+        c.allocToService[alloc.ID] = append(c.allocToService[alloc.ID], service.Name)
+
         c.logger.Printf("[INFO] consul: registering service %s with consul.", service.Name)
         if err := c.registerService(service, task, alloc); err != nil {
             mErr.Errors = append(mErr.Errors, err)

@@ -165,6 +177,7 @@ func (c *ConsulService) Deregister(task *structs.Task, alloc *structs.Allocation
     var mErr multierror.Error
     c.trackedTskLock.Lock()
     delete(c.trackedTasks, fmt.Sprintf("%s-%s", alloc.ID, task.Name))
+    delete(c.allocToService, alloc.ID)
     c.trackedTskLock.Unlock()
     for _, service := range task.Services {
         serviceID := alloc.Services[service.Name]

@@ -229,14 +242,14 @@ func (c *ConsulService) performSync() {
         // Add new services which Consul agent isn't aware of
         knownServices[serviceID] = struct{}{}
         if _, ok := consulServices[serviceID]; !ok {
-            c.printLogMessage("[INFO] consul: registering service %s with consul.", service.Name)
+            c.printLogMessage("[INFO] consul: perform sync, registering service %s with consul.", service.Name)
             c.registerService(service, trackedTask.task, trackedTask.alloc)
             continue
         }
 
         // If a service has changed, re-register it with Consul agent
         if service.Hash() != c.serviceStates[serviceID] {
-            c.printLogMessage("[INFO] consul: reregistering service %s with consul.", service.Name)
+            c.printLogMessage("[INFO] consul: perform sync hash change, reregistering service %s with consul.", service.Name)
             c.registerService(service, trackedTask.task, trackedTask.alloc)
             continue
         }

@@ -268,7 +281,7 @@ func (c *ConsulService) performSync() {
     for _, consulService := range consulServices {
         if _, ok := knownServices[consulService.ID]; !ok {
             delete(c.serviceStates, consulService.ID)
-            c.printLogMessage("[INFO] consul: deregistering service %v with consul", consulService.Service)
+            c.printLogMessage("[INFO] consul: perform sync, deregistering service %v with consul", consulService.Service)
             c.deregisterService(consulService.ID)
         }
     }

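The new allocToService map lets Register clear the state recorded for an earlier version of the same allocation, so the periodic sync re-registers services whose definitions changed instead of treating them as already known. A minimal, self-contained sketch of that bookkeeping with plain maps and simplified names (not the actual ConsulService type):

```go
package main

import "fmt"

// tracker keeps a simplified version of the two pieces of state above:
// the last-seen hash per service and the services owned by each alloc.
type tracker struct {
	serviceStates  map[string]string   // service name -> last registered hash
	allocToService map[string][]string // alloc ID -> service names it registered
}

func newTracker() *tracker {
	return &tracker{
		serviceStates:  make(map[string]string),
		allocToService: make(map[string][]string),
	}
}

// register records services for an alloc, first dropping state from any
// previous registration of the same alloc so changed services look new
// to the next sync pass.
func (t *tracker) register(allocID string, services map[string]string) {
	for _, name := range t.allocToService[allocID] {
		delete(t.serviceStates, name)
	}
	t.allocToService[allocID] = nil

	for name, hash := range services {
		t.allocToService[allocID] = append(t.allocToService[allocID], name)
		t.serviceStates[name] = hash
	}
}

// deregister forgets everything recorded for an alloc.
func (t *tracker) deregister(allocID string) {
	for _, name := range t.allocToService[allocID] {
		delete(t.serviceStates, name)
	}
	delete(t.allocToService, allocID)
}

func main() {
	t := newTracker()
	t.register("alloc-1", map[string]string{"web": "hash-v1"})
	t.register("alloc-1", map[string]string{"web": "hash-v2"}) // re-registration
	fmt.Println(t.serviceStates["web"])                        // hash-v2
	t.deregister("alloc-1")
	fmt.Println(len(t.serviceStates)) // 0
}
```
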
@@ -5,6 +5,7 @@ import (
     "fmt"
     "log"
     "net"
+    "os"
     "path/filepath"
     "strconv"
     "strings"

@@ -441,6 +442,17 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
         }
     }
 
+    if authConfig := d.config.Read("docker.auth.config"); authConfig != "" {
+        if f, err := os.Open(authConfig); err == nil {
+            defer f.Close()
+            if authConfigurations, err := docker.NewAuthConfigurations(f); err == nil {
+                if authConfiguration, ok := authConfigurations.Configs[repo]; ok {
+                    authOptions = authConfiguration
+                }
+            }
+        }
+    }
+
     err = client.PullImage(pullOptions, authOptions)
     if err != nil {
         d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s", repo, tag, err)

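The added block reads the file named by the `docker.auth.config` option and picks credentials matching the image's repository before pulling. A standalone sketch of that lookup, assuming the `github.com/fsouza/go-dockerclient` package the driver already imports and a hypothetical auth file path and repository name:

```go
package main

import (
	"fmt"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Hypothetical path; the driver takes it from the docker.auth.config option.
	f, err := os.Open("/etc/nomad/dockercfg.json")
	if err != nil {
		fmt.Println("no auth file:", err)
		return
	}
	defer f.Close()

	// NewAuthConfigurations parses the dockercfg-format JSON into a map of
	// per-registry credentials.
	auths, err := docker.NewAuthConfigurations(f)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	// Pick credentials for a hypothetical private repository; fall back to
	// anonymous (zero value) auth when no entry matches, as the driver does.
	var authOptions docker.AuthConfiguration
	if auth, ok := auths.Configs["registry.example.com/team/app"]; ok {
		authOptions = auth
	}
	fmt.Printf("pulling with username %q\n", authOptions.Username)
}
```
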
@@ -10,9 +10,11 @@ import (
     "sync"
     "time"
 
+    "github.com/hashicorp/go-multierror"
     "github.com/hashicorp/nomad/client/config"
     "github.com/hashicorp/nomad/client/driver"
     "github.com/hashicorp/nomad/nomad/structs"
+    "github.com/mitchellh/hashstructure"
 
     cstructs "github.com/hashicorp/nomad/client/driver/structs"
 )

@@ -54,6 +56,9 @@ func NewTaskRunner(logger *log.Logger, config *config.Config,
     alloc *structs.Allocation, task *structs.Task,
     consulService *ConsulService) *TaskRunner {
 
+    // Merge in the task resources
+    task.Resources = alloc.TaskResources[task.Name]
+
     // Build the restart tracker.
     tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
     if tg == nil {

@@ -319,21 +324,24 @@ func (r *TaskRunner) handleUpdate(update *structs.Allocation) error {
     }
 
     // Extract the task.
-    var task *structs.Task
+    var updatedTask *structs.Task
     for _, t := range tg.Tasks {
         if t.Name == r.task.Name {
-            task = t
+            updatedTask = t
         }
     }
-    if task == nil {
+    if updatedTask == nil {
         return fmt.Errorf("task group %q doesn't contain task %q", tg.Name, r.task.Name)
     }
-    r.task = task
 
+    // Merge in the task resources
+    updatedTask.Resources = update.TaskResources[updatedTask.Name]
+
     // Update will update resources and store the new kill timeout.
+    var mErr multierror.Error
     if r.handle != nil {
-        if err := r.handle.Update(task); err != nil {
-            r.logger.Printf("[ERR] client: failed to update task '%s' for alloc '%s': %v", r.task.Name, r.alloc.ID, err)
+        if err := r.handle.Update(updatedTask); err != nil {
+            mErr.Errors = append(mErr.Errors, fmt.Errorf("updating task resources failed: %v", err))
         }
     }
 

@@ -342,14 +350,26 @@ func (r *TaskRunner) handleUpdate(update *structs.Allocation) error {
         r.restartTracker.SetPolicy(tg.RestartPolicy)
     }
 
-    /* TODO
-    // Re-register the task to consul and store the updated alloc.
-    r.consulService.Deregister(r.task, r.alloc)
-    r.alloc = update
-    r.consulService.Register(r.task, r.alloc)
-    */
+    // Hash services returns the hash of the task's services
+    hashServices := func(task *structs.Task) uint64 {
+        h, err := hashstructure.Hash(task.Services, nil)
+        if err != nil {
+            mErr.Errors = append(mErr.Errors, fmt.Errorf("hashing services failed %#v: %v", task.Services, err))
+        }
+        return h
+    }
 
-    return nil
+    // Re-register the task to consul if any of the services have changed.
+    if hashServices(updatedTask) != hashServices(r.task) {
+        if err := r.consulService.Register(updatedTask, update); err != nil {
+            mErr.Errors = append(mErr.Errors, fmt.Errorf("updating services with consul failed: %v", err))
+        }
+    }
+
+    // Store the updated alloc.
+    r.alloc = update
+    r.task = updatedTask
+    return mErr.ErrorOrNil()
 }
 
 // Helper function for converting a WaitResult into a TaskTerminated event.

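handleUpdate now re-registers with Consul only when the task's services actually changed, comparing hashes of the two service lists. A minimal sketch of that change-detection idea, assuming `github.com/mitchellh/hashstructure` and a simplified stand-in for the service struct:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// service is a simplified stand-in for structs.Service.
type service struct {
	Name      string
	PortLabel string
}

// hashServices returns a single hash over a slice of services, so two
// versions of a task can be compared with one integer comparison.
func hashServices(services []service) uint64 {
	h, err := hashstructure.Hash(services, nil)
	if err != nil {
		// In the runner this error is collected into a multierror instead.
		panic(err)
	}
	return h
}

func main() {
	old := []service{{Name: "web", PortLabel: "http"}}
	updated := []service{{Name: "web", PortLabel: "https"}}

	if hashServices(updated) != hashServices(old) {
		fmt.Println("services changed: re-register with Consul")
	} else {
		fmt.Println("services unchanged: skip re-registration")
	}
}
```
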
command/fs.go (new file): 19 lines
@@ -0,0 +1,19 @@
+package command
+
+import "github.com/mitchellh/cli"
+
+type FSCommand struct {
+    Meta
+}
+
+func (f *FSCommand) Help() string {
+    return "This command is accessed by using one of the subcommands below."
+}
+
+func (f *FSCommand) Synopsis() string {
+    return "Inspect the contents of an allocation directory"
+}
+
+func (f *FSCommand) Run(args []string) int {
+    return cli.RunResultHelp
+}

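The new command/fs.go defines a parent command whose Run simply returns cli.RunResultHelp, so `nomad fs` prints the help listing for its subcommands. A small sketch of how such a factory wires into a `github.com/mitchellh/cli` application (the application and command names here are illustrative, not the real Nomad wiring):

```go
package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/cli"
)

// fsCommand mirrors FSCommand above: it exists only so that "fs" groups
// subcommands such as "fs ls" and "fs cat" under a single help entry.
type fsCommand struct{}

func (f *fsCommand) Help() string {
	return "This command is accessed by using one of the subcommands below."
}

func (f *fsCommand) Synopsis() string {
	return "Inspect the contents of an allocation directory"
}

func (f *fsCommand) Run(args []string) int {
	// Returning cli.RunResultHelp makes the CLI print the help text,
	// including any registered "fs ..." subcommands.
	return cli.RunResultHelp
}

func main() {
	c := cli.NewCLI("nomad-sketch", "0.0.1")
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		"fs": func() (cli.Command, error) { return &fsCommand{}, nil },
		// Subcommands would be registered as "fs ls", "fs cat", "fs stat".
	}

	code, err := c.Run()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	os.Exit(code)
}
```
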
@@ -29,7 +29,7 @@ func (f *FSCatCommand) Help() string {
 }
 
 func (f *FSCatCommand) Synopsis() string {
-    return "displays a file at a given location"
+    return "Cat a file in an allocation directory"
 }
 
 func (f *FSCatCommand) Run(args []string) int {

@@ -28,7 +28,7 @@ Usage: nomad fs ls <alloc-id> <path>
 }
 
 func (f *FSListCommand) Synopsis() string {
-    return "Lists list of files of an allocation directory"
+    return "List files in an allocation directory"
 }
 
 func (f *FSListCommand) Run(args []string) int {

@@ -27,7 +27,7 @@ Usage: nomad fs stat <alloc-id> <path>
 }
 
 func (f *FSStatCommand) Synopsis() string {
-    return "Stats an entry in an allocation directory"
+    return "Stat an entry in an allocation directory"
 }
 
 func (f *FSStatCommand) Run(args []string) int {

@@ -299,12 +299,14 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
 
         // Monitor the next eval in the chain, if present
         if eval.NextEval != "" {
-            m.ui.Info(fmt.Sprintf(
-                "Monitoring next evaluation %q in %s",
-                eval.NextEval, eval.Wait))
+            if eval.Wait.Nanoseconds() != 0 {
+                m.ui.Info(fmt.Sprintf(
+                    "Monitoring next evaluation %q in %s",
+                    limit(eval.NextEval, m.length), eval.Wait))
 
-            // Skip some unnecessary polling
-            time.Sleep(eval.Wait)
+                // Skip some unnecessary polling
+                time.Sleep(eval.Wait)
+            }
 
             // Reset the state and monitor the new eval
             m.state = newEvalState()

@@ -57,8 +57,8 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
                 Meta: meta,
             }, nil
         },
-        "executor": func() (cli.Command, error) {
-            return &command.ExecutorPluginCommand{
+        "fs": func() (cli.Command, error) {
+            return &command.FSCommand{
                 Meta: meta,
             }, nil
         },

@@ -1414,6 +1414,13 @@ func (s *Service) InitFields(job string, taskGroup string, task string) {
 // Validate checks if the Check definition is valid
 func (s *Service) Validate() error {
     var mErr multierror.Error
+
+    // Ensure the name does not have a period in it.
+    // RFC-2782: https://tools.ietf.org/html/rfc2782
+    if strings.Contains(s.Name, ".") {
+        mErr.Errors = append(mErr.Errors, fmt.Errorf("service name can't contain periods: %q", s.Name))
+    }
+
     for _, c := range s.Checks {
         if err := c.Validate(); err != nil {
             mErr.Errors = append(mErr.Errors, err)

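Validate collects problems with go-multierror rather than returning at the first failure, which is why the period check appends to mErr instead of returning immediately. A minimal sketch of that accumulation pattern, assuming `github.com/hashicorp/go-multierror` and a hypothetical extra rule alongside the period check:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/go-multierror"
)

// validateServiceName mimics the period check above plus one illustrative
// extra rule, collecting every violation instead of stopping at the first.
func validateServiceName(name string) error {
	var mErr multierror.Error
	if strings.Contains(name, ".") {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name can't contain periods: %q", name))
	}
	if name == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name is required"))
	}
	// ErrorOrNil returns nil when nothing was appended.
	return mErr.ErrorOrNil()
}

func main() {
	fmt.Println(validateServiceName("redis-cache")) // <nil>
	fmt.Println(validateServiceName("redis.cache")) // reports the period error
}
```
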
@@ -485,6 +485,14 @@ func TestInvalidServiceCheck(t *testing.T) {
     if err := s.Validate(); err == nil {
         t.Fatalf("Service should be invalid")
     }
+
+    s = Service{
+        Name:      "service.name",
+        PortLabel: "bar",
+    }
+    if err := s.Validate(); err == nil {
+        t.Fatalf("Service should be invalid: %v", err)
+    }
 }
 
 func TestDistinctCheckID(t *testing.T) {

@@ -11,7 +11,7 @@ import (
 const (
     // TravisRunEnv is an environment variable that is set if being run by
     // Travis.
-    TravisRunEnv = "TRAVIS_RUN"
+    TravisRunEnv = "CI"
 )
 
 type testFn func() (bool, error)

@@ -247,6 +247,9 @@ The `docker` driver has the following host-level configuration options:
   to customize this if you use a non-standard socket (http or another
   location).
 
+* `docker.auth.config` - Allows an operator to specify a json file which is in
+  the dockercfg format containing authentication information for private registry.
+
 * `docker.tls.cert` - Path to the server's certificate file (`.pem`). Specify
   this along with `docker.tls.key` and `docker.tls.ca` to use a TLS client to
   connect to the docker daemon. `docker.endpoint` must also be specified or

@@ -338,7 +338,7 @@ be specified using the `?region=` query parameter.
 {
   "EvalIDs": ["d092fdc0-e1fd-2536-67d8-43af8ca798ac"],
   "EvalCreateIndex": 35,
-  "NodeModifyIndex": 34,
+  "NodeModifyIndex": 34
 }
 ```
 

@@ -378,7 +378,7 @@ be specified using the `?region=` query parameter.
 {
   "EvalID": "d092fdc0-e1fd-2536-67d8-43af8ca798ac",
   "EvalCreateIndex": 35,
-  "NodeModifyIndex": 34,
+  "NodeModifyIndex": 34
 }
 ```
 

@@ -140,9 +140,9 @@ we should see both nodes in the `ready` state:
 
 ```
 $ nomad node-status
-ID                                    DC   Name   Class   Drain  Status
-f7780117-2cae-8ee9-4b36-f34dd796ab02  dc1  nomad  <none>  false  ready
-ffb5b55a-6059-9ec7-6108-23a2bbba95da  dc1  nomad  <none>  false  ready
+ID        Datacenter  Name   Class   Drain  Status
+fca62612  dc1         nomad  <none>  false  ready
+c887deef  dc1         nomad  <none>  false  ready
 ```
 
 We now have a simple three node cluster running. The only difference

@@ -159,13 +159,13 @@ Then, use the [`run` command](/docs/commands/run.html) to submit the job:
 
 ```
 $ nomad run example.nomad
-==> Monitoring evaluation "77e5075f-2a1b-9cce-d14e-fe98cca9e17f"
+==> Monitoring evaluation "8e0a7cf9"
     Evaluation triggered by job "example"
-    Allocation "711edd85-f183-99ea-910a-6445b23d79e4" created: node "ffb5b55a-6059-9ec7-6108-23a2bbba95da", group "cache"
-    Allocation "98218a8a-627c-308f-8941-acdbffe1940c" created: node "f7780117-2cae-8ee9-4b36-f34dd796ab02", group "cache"
-    Allocation "e8957a7f-6fff-f61f-2878-57715c26725d" created: node "f7780117-2cae-8ee9-4b36-f34dd796ab02", group "cache"
+    Allocation "501154ac" created: node "c887deef", group "cache"
+    Allocation "7e2b3900" created: node "fca62612", group "cache"
+    Allocation "9c66fcaf" created: node "c887deef", group "cache"
     Evaluation status changed: "pending" -> "complete"
-==> Evaluation "77e5075f-2a1b-9cce-d14e-fe98cca9e17f" finished with status "complete"
+==> Evaluation "8e0a7cf9" finished with status "complete"
 ```
 
 We can see in the output that the scheduler assigned two of the

|
@ -181,17 +181,19 @@ Name = example
|
|||
Type = service
|
||||
Priority = 50
|
||||
Datacenters = dc1
|
||||
Status = <none>
|
||||
Status = running
|
||||
Periodic = false
|
||||
|
||||
==> Evaluations
|
||||
ID Priority TriggeredBy Status
|
||||
77e5075f-2a1b-9cce-d14e-fe98cca9e17f 50 job-register complete
|
||||
ID Priority Triggered By Status
|
||||
54dd2ae3 50 job-register complete
|
||||
|
||||
==> Allocations
|
||||
ID EvalID NodeID TaskGroup Desired Status
|
||||
711edd85-f183-99ea-910a-6445b23d79e4 77e5075f-2a1b-9cce-d14e-fe98cca9e17f ffb5b55a-6059-9ec7-6108-23a2bbba95da cache run running
|
||||
98218a8a-627c-308f-8941-acdbffe1940c 77e5075f-2a1b-9cce-d14e-fe98cca9e17f f7780117-2cae-8ee9-4b36-f34dd796ab02 cache run running
|
||||
e8957a7f-6fff-f61f-2878-57715c26725d 77e5075f-2a1b-9cce-d14e-fe98cca9e17f f7780117-2cae-8ee9-4b36-f34dd796ab02 cache run running
|
||||
ID Eval ID Node ID Task Group Desired Status
|
||||
102225ab 54dd2ae3 56b590e6 cache run running
|
||||
f327d2b1 54dd2ae3 e4235508 cache run running
|
||||
f91137f8 54dd2ae3 56b590e6 cache run running
|
||||
|
||||
```
|
||||
|
||||
We can see that all our tasks have been allocated and are running.
|
||||
|
|
|
@@ -50,6 +50,7 @@ Available commands are:
     alloc-status    Display allocation status information and metadata
     client-config   View or modify client configuration details
     eval-monitor    Monitor an evaluation interactively
+    fs              Inspect the contents of an allocation directory
     init            Create an example job file
     node-drain      Toggle drain mode on a given node
    node-status     Display status information about nodes

@@ -46,11 +46,11 @@ We can register our example job now:
 
 ```
 $ nomad run example.nomad
-==> Monitoring evaluation "3d823c52-929a-fa8b-c50d-1ac4d00cf6b7"
+==> Monitoring evaluation "26cfc69e"
     Evaluation triggered by job "example"
-    Allocation "85b839d7-f67a-72a4-5a13-104020ae4807" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
+    Allocation "8ba85cef" created: node "171a583b", group "cache"
     Evaluation status changed: "pending" -> "complete"
-==> Evaluation "3d823c52-929a-fa8b-c50d-1ac4d00cf6b7" finished with status "complete"
+==> Evaluation "26cfc69e" finished with status "complete"
 ```
 
 Anytime a job is updated, Nomad creates an evaluation to determine what

@@ -67,15 +67,16 @@ Name = example
 Type        = service
 Priority    = 50
 Datacenters = dc1
-Status      = <none>
+Status      = running
 Periodic    = false
 
 ==> Evaluations
-ID                                    Priority  TriggeredBy   Status
-3d823c52-929a-fa8b-c50d-1ac4d00cf6b7  50        job-register  complete
+ID        Priority  Triggered By  Status
+26cfc69e  50        job-register  complete
 
 ==> Allocations
-ID                                    EvalID                                NodeID                                TaskGroup  Desired  Status
-85b839d7-f67a-72a4-5a13-104020ae4807  3d823c52-929a-fa8b-c50d-1ac4d00cf6b7  2512929f-5b7c-a959-dfd9-bf8a8eb022a6  cache      run      running
+ID        Eval ID   Node ID   Task Group  Desired  Status
+8ba85cef  26cfc69e  171a583b  cache       run      running
 ```
 
 Here we can see that our evaluation that was created has completed, and that

|
@ -100,13 +101,13 @@ push the updated version of the job:
|
|||
|
||||
```
|
||||
$ nomad run example.nomad
|
||||
==> Monitoring evaluation "ec199c63-2022-f5c7-328d-1cf85e61bf66"
|
||||
==> Monitoring evaluation "127a49d0"
|
||||
Evaluation triggered by job "example"
|
||||
Allocation "21551679-5224-cb6b-80a2-d0b091612d2e" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
|
||||
Allocation "b1be1410-a01c-20ad-80ff-96750ec0f1da" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
|
||||
Allocation "ed32a35d-8086-3f04-e299-4432e562cbf2" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
|
||||
Allocation "8ab24eef" created: node "171a583b", group "cache"
|
||||
Allocation "f6c29874" created: node "171a583b", group "cache"
|
||||
Allocation "8ba85cef" modified: node "171a583b", group "cache"
|
||||
Evaluation status changed: "pending" -> "complete"
|
||||
==> Evaluation "ec199c63-2022-f5c7-328d-1cf85e61bf66" finished with status "complete"
|
||||
==> Evaluation "127a49d0" finished with status "complete"
|
||||
```
|
||||
|
||||
Because we set the count of the task group to three, Nomad created two
|
||||
|
@ -132,13 +133,23 @@ specification now:
|
|||
|
||||
```
|
||||
$ nomad run example.nomad
|
||||
==> Monitoring evaluation "d34d37f4-19b1-f4c0-b2da-c949e6ade82d"
|
||||
==> Monitoring evaluation "ebcc3e14"
|
||||
Evaluation triggered by job "example"
|
||||
Allocation "5614feb0-212d-21e5-ccfb-56a394fc41d5" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
|
||||
Allocation "bf7e3ad5-b217-14fe-f3f8-2b83af9dbb42" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
|
||||
Allocation "e3978af2-f61e-c601-7aa1-90aea9b23cf6" created: node "2512929f-5b7c-a959-dfd9-bf8a8eb022a6", group "cache"
|
||||
Allocation "9a3743f4" created: node "171a583b", group "cache"
|
||||
Evaluation status changed: "pending" -> "complete"
|
||||
==> Evaluation "d34d37f4-19b1-f4c0-b2da-c949e6ade82d" finished with status "complete"
|
||||
==> Evaluation "ebcc3e14" finished with status "complete"
|
||||
==> Monitoring next evaluation "b508d8f0-7f21-8d66-ec59-7f5b2573435a" in 0
|
||||
==> Monitoring evaluation "b508d8f0"
|
||||
Evaluation triggered by job "example"
|
||||
Allocation "926e5876" created: node "171a583b", group "cache"
|
||||
Evaluation status changed: "pending" -> "complete"
|
||||
==> Evaluation "b508d8f0" finished with status "complete"
|
||||
==> Monitoring next evaluation "ea78c05a-a15f-92ae-8c3d-59f4a1edd091" in 10s
|
||||
==> Monitoring evaluation "ea78c05a"
|
||||
Evaluation triggered by job "example"
|
||||
Allocation "3c8589d5" created: node "171a583b", group "cache"
|
||||
Evaluation status changed: "pending" -> "complete"
|
||||
==> Evaluation "ea78c05a" finished with status "complete"
|
||||
```
|
||||
|
||||
We can see that Nomad handled the update in three phases, only updating a single task
|
||||
|
@ -152,10 +163,10 @@ is stopping the job. This is done with the [`stop` command](/docs/commands/stop.
|
|||
|
||||
```
|
||||
$ nomad stop example
|
||||
==> Monitoring evaluation "bb407de4-02cb-f009-d986-646d6c11366d"
|
||||
==> Monitoring evaluation "fd03c9f8"
|
||||
Evaluation triggered by job "example"
|
||||
Evaluation status changed: "pending" -> "complete"
|
||||
==> Evaluation "bb407de4-02cb-f009-d986-646d6c11366d" finished with status "complete"
|
||||
==> Evaluation "fd03c9f8" finished with status "complete"
|
||||
```
|
||||
|
||||
When we stop a job, it creates an evaluation which is used to stop all
|
||||
|
@ -164,7 +175,7 @@ If we try to query the job status, we can see it is no longer registered:
|
|||
|
||||
```
|
||||
$ nomad status example
|
||||
Error querying job: Unexpected response code: 404 (job not found)
|
||||
No job(s) with prefix or id "example" found
|
||||
```
|
||||
|
||||
If we wanted to start the job again, we could simply `run` it again.
|
||||
|
|
|
@@ -83,8 +83,8 @@ $ vagrant ssh
 ...
 
 $ nomad node-status
-ID                                    DC   Name   Class   Drain  Status
-72d3af97-144f-1e5f-94e5-df1516fe4add  dc1  nomad  <none>  false  ready
+ID        Datacenter  Name   Class   Drain  Status
+171a583b  dc1         nomad  <none>  false  ready
 ```
 
 The output shows our Node ID, which is a randomly generated UUID,

|
|||
|
||||
```text
|
||||
$ nomad server-members
|
||||
Name Addr Port Status Proto Build DC Region
|
||||
nomad.global 127.0.0.1 4648 alive 2 0.1.0dev dc1 global
|
||||
Name Address Port Status Protocol Build Datacenter Region
|
||||
nomad.global 127.0.0.1 4648 alive 2 0.3.0dev dc1 global
|
||||
```
|
||||
|
||||
The output shows our own agent, the address it is running on, its
|
||||
|
|