Merge branch 'master' into autopilot

commit 12ff22ea70

.gitignore (vendored) | 1
@@ -52,7 +52,6 @@ nomad_linux_amd64
nomad_darwin_amd64
TODO.md
codecgen-*.generated.go
*.generated.go

.terraform
*.tfstate*

.travis.yml | 20

@@ -6,16 +6,12 @@ language: go
go:
  - 1.9.x

addons:
  chrome: stable

git:
  depth: 300

branches:
  only:
    - master

matrix:
  include:

matrix:
  include:
    - os: linux

@@ -31,18 +27,20 @@ matrix:
      env: RUN_STATIC_CHECKS=1 SKIP_NOMAD_TESTS=1
    - os: osx
      osx_image: xcode9.1

    cache:
      directories:
        - ui/node_modules
  allow_failures:
    - os: osx
  fast_finish: true

before_install:
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]] && [[ -z "$SKIP_NOMAD_TESTS" ]]; then sudo -E bash ./scripts/travis-mac-priv.sh ; fi
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ -z "$SKIP_NOMAD_TESTS" ]]; then sudo -E bash ./scripts/travis-linux.sh ; fi
  - if [[ "$RUN_UI_TESTS" ]]; then curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version 1.0.1 ; fi
  - if [[ "$RUN_UI_TESTS" ]]; then export PATH="$HOME/.yarn/bin:$PATH" ; fi

install:
  - if [[ -z "$SKIP_NOMAD_TESTS" ]]; then make deps ; fi
  - if [[ "$RUN_STATIC_CHECKS" ]]; then make lint-deps ; fi
  - if [[ "$RUN_UI_TESTS" ]]; then . $HOME/.nvm/nvm.sh && cd ui && nvm use && cd .. ; fi

script:
  - sudo -E "PATH=$PATH" make travis
CHANGELOG.md | 29

@@ -1,4 +1,26 @@
## 0.7.1 (Unreleased)
## 0.8 (Unreleased)

__BACKWARDS INCOMPATIBILITIES:__
 * discovery: Prevent absolute URLs in check paths. The documentation indicated
   that absolute URLs are not allowed, but it was not enforced. Absolute URLs
   in HTTP check paths will now fail to validate. [[GH-3685](https://github.com/hashicorp/nomad/issues/3685)]

IMPROVEMENTS:
 * discovery: Allow `check_restart` to be specified in the `service` stanza.
   [[GH-3718](https://github.com/hashicorp/nomad/issues/3718)]
 * driver/lxc: Add volumes config to LXC driver [GH-3687]

BUG FIXES:
 * core: Fix search endpoint forwarding for multi-region clusters [[GH-3680](https://github.com/hashicorp/nomad/issues/3680)]
 * core: Fix an issue in which batch jobs with queued placements and lost
   allocations could result in improper placement counts [[GH-3717](https://github.com/hashicorp/nomad/issues/3717)]
 * client: Migrated ephemeral_disk's maintain directory permissions [[GH-3723](https://github.com/hashicorp/nomad/issues/3723)]
 * client/vault: Recognize renewing non-renewable Vault lease as fatal [[GH-3727](https://github.com/hashicorp/nomad/issues/3727)]
 * config: Revert minimum CPU limit back to 20 from 100.
 * ui: Fix ui on non-leaders when ACLs are enabled [[GH-3722](https://github.com/hashicorp/nomad/issues/3722)]
 * ui: Fix requests using client-side certificates in Firefox. [[GH-3728](https://github.com/hashicorp/nomad/pull/3728)]

## 0.7.1 (December 19, 2017)

__BACKWARDS INCOMPATIBILITIES:__
 * client: The format of service IDs in Consul has changed. If you rely upon
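Note: the backwards-incompatibility entry above says HTTP check paths may no longer be absolute URLs. As a rough illustration of the distinction only (not Nomad's actual validation code, and the helper name below is made up), a path such as "/v1/health" has no scheme or host, while "http://example.com/health" does:

```go
package main

import (
	"fmt"
	"net/url"
)

// isRelativeCheckPath reports whether a check path looks like a plain URL
// path rather than an absolute URL. Illustrative sketch only.
func isRelativeCheckPath(p string) bool {
	u, err := url.Parse(p)
	if err != nil {
		return false
	}
	return u.Scheme == "" && u.Host == ""
}

func main() {
	fmt.Println(isRelativeCheckPath("/v1/health"))                // true: still allowed
	fmt.Println(isRelativeCheckPath("http://example.com/health")) // false: now rejected
}
```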
@@ -7,6 +29,8 @@ __BACKWARDS INCOMPATIBILITIES:__
 * config: Nomad no longer parses Atlas configuration stanzas. Atlas has been
   deprecated since earlier this year. If you have an Atlas stanza in your
   config file it will have to be removed.
 * config: Default minimum CPU configuration has been changed to 100 from 20. Jobs
   using the old minimum value of 20 will have to be updated.
 * telemetry: Hostname is now emitted via a tag rather than within the key name.
   To maintain old behavior during an upgrade path specify
   `backwards_compatible_metrics` in the telemetry configuration.

@@ -52,6 +76,9 @@ BUG FIXES:

 * core: Fix issue in which restoring periodic jobs could fail when a leader
   election occurs [[GH-3646](https://github.com/hashicorp/nomad/issues/3646)]
 * core: Fix race condition in which rapid reprocessing of a blocked evaluation
   may lead to the scheduler not seeing the results of the previous scheduling
   event [[GH-3669](https://github.com/hashicorp/nomad/issues/3669)]
 * core: Fixed an issue where the leader server could get into a state where it
   was no longer performing the periodic leader loop duties after a barrier
   timeout error [[GH-3402](https://github.com/hashicorp/nomad/issues/3402)]
@@ -208,7 +208,7 @@ dev: GOOS=$(shell go env GOOS)
dev: GOARCH=$(shell go env GOARCH)
dev: GOPATH=$(shell go env GOPATH)
dev: DEV_TARGET=pkg/$(GOOS)_$(GOARCH)$(if $(HAS_LXC),-lxc)/nomad
dev: vendorfmt ## Build for the current development platform
dev: vendorfmt changelogfmt ## Build for the current development platform
    @echo "==> Removing old development build..."
    @rm -f $(PROJECT_ROOT)/$(DEV_TARGET)
    @rm -f $(PROJECT_ROOT)/bin/nomad

@@ -283,8 +283,8 @@ static-assets: ## Compile the static routes to serve alongside the API
.PHONY: test-ui
test-ui: ## Run Nomad UI test suite
    @echo "--> Installing JavaScript assets"
    @cd ui && npm rebuild node-sass
    @cd ui && yarn install
    @cd ui && npm install phantomjs-prebuilt
    @echo "--> Running ember tests"
    @cd ui && phantomjs --version
    @cd ui && npm test

@@ -49,7 +49,7 @@ func DefaultResources() *Resources {
// IN nomad/structs/structs.go and should be kept in sync.
func MinResources() *Resources {
    return &Resources{
        CPU:      helper.IntToPtr(100),
        CPU:      helper.IntToPtr(20),
        MemoryMB: helper.IntToPtr(10),
        IOPS:     helper.IntToPtr(0),
    }

api/tasks.go | 14

@@ -128,15 +128,15 @@ func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
        return nc
    }

    if nc.Limit == 0 {
    if o.Limit > 0 {
        nc.Limit = o.Limit
    }

    if nc.Grace == nil {
    if o.Grace != nil {
        nc.Grace = o.Grace
    }

    if nc.IgnoreWarnings {
    if o.IgnoreWarnings {
        nc.IgnoreWarnings = o.IgnoreWarnings
    }

@@ -185,13 +185,11 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
        s.AddressMode = "auto"
    }

    s.CheckRestart.Canonicalize()

    // Canonicalize CheckRestart on Checks and merge Service.CheckRestart
    // into each check.
    for _, c := range s.Checks {
        c.CheckRestart.Canonicalize()
        c.CheckRestart = c.CheckRestart.Merge(s.CheckRestart)
    for i, check := range s.Checks {
        s.Checks[i].CheckRestart = s.CheckRestart.Merge(check.CheckRestart)
        s.Checks[i].CheckRestart.Canonicalize()
    }
}
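Note: with the Canonicalize change above, a service-level check_restart block acts as a default that each check-level block can override, which the new TestService_CheckRestart test below exercises. A minimal sketch of that behaviour using the api package types shown in this diff (the job, group, and task names here are illustrative, and the exact merged values depend on CheckRestart.Merge above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// Service-level check_restart acts as the default for every check.
	svc := &api.Service{
		CheckRestart: &api.CheckRestart{
			Limit: 3,
			Grace: helper.TimeToPtr(10 * time.Second),
		},
		Checks: []api.ServiceCheck{
			{Name: "uses-service-default"},
			{Name: "overrides-limit", CheckRestart: &api.CheckRestart{Limit: 5}},
		},
	}

	job := &api.Job{Name: helper.StringToPtr("example")}
	tg := &api.TaskGroup{Name: helper.StringToPtr("group")}
	task := &api.Task{Name: "task"}

	// After canonicalization every check carries a merged CheckRestart.
	svc.Canonicalize(task, tg, job)

	for _, c := range svc.Checks {
		fmt.Println(c.Name, c.CheckRestart.Limit, *c.CheckRestart.Grace)
	}
}
```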
@@ -3,6 +3,7 @@ package api
import (
    "reflect"
    "testing"
    "time"

    "github.com/hashicorp/nomad/helper"
    "github.com/stretchr/testify/assert"

@@ -266,3 +267,51 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) {
    tg.Canonicalize(job)
    assert.Nil(t, tg.Update)
}

// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
    job := &Job{Name: helper.StringToPtr("job")}
    tg := &TaskGroup{Name: helper.StringToPtr("group")}
    task := &Task{Name: "task"}
    service := &Service{
        CheckRestart: &CheckRestart{
            Limit:          11,
            Grace:          helper.TimeToPtr(11 * time.Second),
            IgnoreWarnings: true,
        },
        Checks: []ServiceCheck{
            {
                Name: "all-set",
                CheckRestart: &CheckRestart{
                    Limit:          22,
                    Grace:          helper.TimeToPtr(22 * time.Second),
                    IgnoreWarnings: true,
                },
            },
            {
                Name: "some-set",
                CheckRestart: &CheckRestart{
                    Limit: 33,
                    Grace: helper.TimeToPtr(33 * time.Second),
                },
            },
            {
                Name: "unset",
            },
        },
    }

    service.Canonicalize(task, tg, job)
    assert.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
    assert.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
    assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)

    assert.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
    assert.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
    assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)

    assert.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
    assert.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
    assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}
@@ -9,6 +9,7 @@ import (
    "os"
    "path/filepath"
    "sync"
    "syscall"
    "time"

    "github.com/hashicorp/consul/lib"

@@ -452,6 +453,9 @@ func (p *remotePrevAlloc) streamAllocDir(ctx context.Context, resp io.ReadCloser
    tr := tar.NewReader(resp)
    defer resp.Close()

    // Cache effective uid as we only run Chown if we're root
    euid := syscall.Geteuid()

    canceled := func() bool {
        select {
        case <-ctx.Done():

@@ -495,7 +499,15 @@ func (p *remotePrevAlloc) streamAllocDir(ctx context.Context, resp io.ReadCloser

        // If the header is for a directory we create the directory
        if hdr.Typeflag == tar.TypeDir {
            os.MkdirAll(filepath.Join(dest, hdr.Name), os.FileMode(hdr.Mode))
            name := filepath.Join(dest, hdr.Name)
            os.MkdirAll(name, os.FileMode(hdr.Mode))

            // Can't change owner if not root or on Windows.
            if euid == 0 {
                if err := os.Chown(name, hdr.Uid, hdr.Gid); err != nil {
                    return fmt.Errorf("error chowning directory %v", err)
                }
            }
            continue
        }
        // If the header is for a symlink we create the symlink

@@ -517,9 +529,13 @@ func (p *remotePrevAlloc) streamAllocDir(ctx context.Context, resp io.ReadCloser
            f.Close()
            return fmt.Errorf("error chmoding file %v", err)
        }
        if err := f.Chown(hdr.Uid, hdr.Gid); err != nil {
            f.Close()
            return fmt.Errorf("error chowning file %v", err)

        // Can't change owner if not root or on Windows.
        if euid == 0 {
            if err := f.Chown(hdr.Uid, hdr.Gid); err != nil {
                f.Close()
                return fmt.Errorf("error chowning file %v", err)
            }
        }

        // We write in chunks so that we can test if the client
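Note: the streamAllocDir changes above only attempt a chown when the effective UID is root, since unprivileged clients (and Windows) cannot change ownership. A standalone sketch of the same guard, illustrative only and not the Nomad code (the helper name is invented; the diff uses syscall.Geteuid where this uses the equivalent os.Geteuid):

```go
package main

import (
	"fmt"
	"os"
)

// chownIfRoot changes ownership only when running as root; otherwise it is a
// no-op, mirroring the euid check added to streamAllocDir above.
func chownIfRoot(path string, uid, gid int) error {
	if os.Geteuid() != 0 {
		return nil // not root: silently skip, ownership cannot be changed
	}
	if err := os.Chown(path, uid, gid); err != nil {
		return fmt.Errorf("error chowning %s: %v", path, err)
	}
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "chown-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// uid/gid 1:1 are arbitrary example values, as in the test below.
	fmt.Println(chownIfRoot(dir, 1, 1))
}
```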
@@ -10,11 +10,13 @@ import (
    "os"
    "path/filepath"
    "strings"
    "syscall"
    "testing"
    "time"

    "github.com/hashicorp/nomad/client/allocdir"
    "github.com/hashicorp/nomad/client/config"
    "github.com/hashicorp/nomad/client/testutil"
    "github.com/hashicorp/nomad/nomad/mock"
)

@@ -73,6 +75,7 @@ func TestPrevAlloc_LocalPrevAlloc(t *testing.T) {
// TestPrevAlloc_StreamAllocDir_Ok asserts that streaming a tar to an alloc dir
// works.
func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
    testutil.RequireRoot(t)
    t.Parallel()
    dir, err := ioutil.TempDir("", "")
    if err != nil {

@@ -80,18 +83,29 @@ func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
    }
    defer os.RemoveAll(dir)

    if err := os.Mkdir(filepath.Join(dir, "foo"), 0777); err != nil {
    // Create foo/
    fooDir := filepath.Join(dir, "foo")
    if err := os.Mkdir(fooDir, 0777); err != nil {
        t.Fatalf("err: %v", err)
    }
    dirInfo, err := os.Stat(filepath.Join(dir, "foo"))

    // Change ownership of foo/ to test #3702 (any non-root user is fine)
    const uid, gid = 1, 1
    if err := os.Chown(fooDir, uid, gid); err != nil {
        t.Fatalf("err : %v", err)
    }

    dirInfo, err := os.Stat(fooDir)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    f, err := os.Create(filepath.Join(dir, "foo", "bar"))

    // Create foo/bar
    f, err := os.Create(filepath.Join(fooDir, "bar"))
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if _, err := f.WriteString("foo"); err != nil {
    if _, err := f.WriteString("123"); err != nil {
        t.Fatalf("err: %v", err)
    }
    if err := f.Chmod(0644); err != nil {

@@ -102,6 +116,8 @@ func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
        t.Fatalf("err: %v", err)
    }
    f.Close()

    // Create foo/baz -> bar symlink
    if err := os.Symlink("bar", filepath.Join(dir, "foo", "baz")); err != nil {
        t.Fatalf("err: %v", err)
    }

@@ -181,6 +197,11 @@ func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
    if fi.Mode() != dirInfo.Mode() {
        t.Fatalf("mode: %v", fi.Mode())
    }
    stat := fi.Sys().(*syscall.Stat_t)
    if stat.Uid != uid || stat.Gid != gid {
        t.Fatalf("foo/ has incorrect ownership: expected %d:%d found %d:%d",
            uid, gid, stat.Uid, stat.Gid)
    }

    fi1, err := os.Stat(filepath.Join(dir1, "bar"))
    if err != nil {
@@ -24,13 +24,6 @@ var basicResources = &structs.Resources{
    CPU:      250,
    MemoryMB: 256,
    DiskMB:   20,
    Networks: []*structs.NetworkResource{
        {
            IP:            "0.0.0.0",
            ReservedPorts: []structs.Port{{Label: "main", Value: 12345}},
            DynamicPorts:  []structs.Port{{Label: "HTTP", Value: 43330}},
        },
    },
}

func init() {

@@ -69,7 +69,7 @@ func TestExecutor_IsolationAndConstraints(t *testing.T) {

    execCmd.FSIsolation = true
    execCmd.ResourceLimits = true
    execCmd.User = dstructs.DefaultUnpriviledgedUser
    execCmd.User = dstructs.DefaultUnprivilegedUser

    executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))

@@ -116,7 +116,7 @@ func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool,
    // Only enable if we are root and cgroups are mounted when running on linux systems.
    if runtime.GOOS == "linux" && (syscall.Geteuid() != 0 || !cgroupsMounted(node)) {
        if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
            d.logger.Printf("[DEBUG] driver.java: root priviledges and mounted cgroups required on linux, disabling")
            d.logger.Printf("[DEBUG] driver.java: root privileges and mounted cgroups required on linux, disabling")
        }
        delete(node.Attributes, "driver.java")
        d.fingerprintSuccess = helper.BoolToPtr(false)
@@ -31,6 +31,11 @@ const (
    // Config.Options map.
    lxcConfigOption = "driver.lxc.enable"

    // lxcVolumesConfigOption is the key for enabling the use of
    // custom bind volumes to arbitrary host paths
    lxcVolumesConfigOption  = "lxc.volumes.enabled"
    lxcVolumesConfigDefault = true

    // containerMonitorIntv is the interval at which the driver checks if the
    // container is still alive
    containerMonitorIntv = 2 * time.Second

@@ -69,6 +74,7 @@ type LxcDriverConfig struct {
    TemplateArgs []string `mapstructure:"template_args"`
    LogLevel     string   `mapstructure:"log_level"`
    Verbosity    string
    Volumes      []string `mapstructure:"volumes"`
}

// NewLxcDriver returns a new instance of the LXC driver

@@ -137,6 +143,10 @@ func (d *LxcDriver) Validate(config map[string]interface{}) error {
                Type:     fields.TypeString,
                Required: false,
            },
            "volumes": {
                Type:     fields.TypeArray,
                Required: false,
            },
        },
    }

@@ -144,6 +154,21 @@ func (d *LxcDriver) Validate(config map[string]interface{}) error {
        return err
    }

    volumes, _ := fd.GetOk("volumes")
    for _, volDesc := range volumes.([]interface{}) {
        volStr := volDesc.(string)
        paths := strings.Split(volStr, ":")
        if len(paths) != 2 {
            return fmt.Errorf("invalid volume bind mount entry: '%s'", volStr)
        }
        if len(paths[0]) == 0 || len(paths[1]) == 0 {
            return fmt.Errorf("invalid volume bind mount entry: '%s'", volStr)
        }
        if paths[1][0] == '/' {
            return fmt.Errorf("unsupported absolute container mount point: '%s'", paths[1])
        }
    }

    return nil
}
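Note: the Validate change above accepts volume entries of the form host_path:container_path, where the container path must be relative. A standalone sketch of that check, illustrative only and not the driver code itself (the function name is invented; the sample inputs come from the new tests further down):

```go
package main

import (
	"fmt"
	"strings"
)

// validateVolumeEntry mirrors the rules added to LxcDriver.Validate above:
// exactly two non-empty parts separated by ":", and the container side must
// not be an absolute path.
func validateVolumeEntry(volStr string) error {
	paths := strings.Split(volStr, ":")
	if len(paths) != 2 || len(paths[0]) == 0 || len(paths[1]) == 0 {
		return fmt.Errorf("invalid volume bind mount entry: '%s'", volStr)
	}
	if strings.HasPrefix(paths[1], "/") {
		return fmt.Errorf("unsupported absolute container mount point: '%s'", paths[1])
	}
	return nil
}

func main() {
	for _, v := range []string{"/tmp/:mnt/tmp", "abc:def:ghi", ":def", "foo:/var"} {
		fmt.Printf("%q -> %v\n", v, validateVolumeEntry(v))
	}
}
```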
@@ -170,6 +195,12 @@ func (d *LxcDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, e
    }
    node.Attributes["driver.lxc.version"] = version
    node.Attributes["driver.lxc"] = "1"

    // Advertise if this node supports lxc volumes
    if d.config.ReadBoolDefault(lxcVolumesConfigOption, lxcVolumesConfigDefault) {
        node.Attributes["driver."+lxcVolumesConfigOption] = "1"
    }

    return true, nil
}

@@ -250,6 +281,25 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
        fmt.Sprintf("%s alloc none rw,bind,create=dir", ctx.TaskDir.SharedAllocDir),
        fmt.Sprintf("%s secrets none rw,bind,create=dir", ctx.TaskDir.SecretsDir),
    }

    volumesEnabled := d.config.ReadBoolDefault(lxcVolumesConfigOption, lxcVolumesConfigDefault)

    for _, volDesc := range driverConfig.Volumes {
        // the format was checked in Validate()
        paths := strings.Split(volDesc, ":")

        if filepath.IsAbs(paths[0]) {
            if !volumesEnabled {
                return nil, fmt.Errorf("absolute bind-mount volume in config but '%v' is false", lxcVolumesConfigOption)
            }
        } else {
            // Relative source paths are treated as relative to alloc dir
            paths[0] = filepath.Join(ctx.TaskDir.Dir, paths[0])
        }

        mounts = append(mounts, fmt.Sprintf("%s %s none rw,bind,create=dir", paths[0], paths[1]))
    }

    for _, mnt := range mounts {
        if err := c.SetConfigItem("lxc.mount.entry", mnt); err != nil {
            return nil, fmt.Errorf("error setting bind mount %q error: %v", mnt, err)
@@ -3,13 +3,17 @@
package driver

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "testing"
    "time"

    "github.com/hashicorp/nomad/client/config"
    ctestutil "github.com/hashicorp/nomad/client/testutil"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/testutil"
    lxc "gopkg.in/lxc/go-lxc.v2"

@@ -61,17 +65,32 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
    if !lxcPresent(t) {
        t.Skip("lxc not present")
    }
    ctestutil.RequireRoot(t)

    task := &structs.Task{
        Name:   "foo",
        Driver: "lxc",
        Config: map[string]interface{}{
            "template": "/usr/share/lxc/templates/lxc-busybox",
            "volumes":  []string{"/tmp/:mnt/tmp"},
        },
        KillTimeout: 10 * time.Second,
        Resources:   structs.DefaultResources(),
    }

    testFileContents := []byte("this should be visible under /mnt/tmp")
    tmpFile, err := ioutil.TempFile("/tmp", "testlxcdriver_start_wait")
    if err != nil {
        t.Fatalf("error writing temp file: %v", err)
    }
    defer os.Remove(tmpFile.Name())
    if _, err := tmpFile.Write(testFileContents); err != nil {
        t.Fatalf("error writing temp file: %v", err)
    }
    if err := tmpFile.Close(); err != nil {
        t.Fatalf("error closing temp file: %v", err)
    }

    ctx := testDriverContexts(t, task)
    defer ctx.AllocDir.Destroy()
    d := NewLxcDriver(ctx.DriverCtx)

@@ -104,7 +123,7 @@ func TestLxcDriver_Start_Wait(t *testing.T) {

    // Look for mounted directories in their proper location
    containerName := fmt.Sprintf("%s-%s", task.Name, ctx.DriverCtx.allocID)
    for _, mnt := range []string{"alloc", "local", "secrets"} {
    for _, mnt := range []string{"alloc", "local", "secrets", "mnt/tmp"} {
        fullpath := filepath.Join(lxcHandle.lxcPath, containerName, "rootfs", mnt)
        stat, err := os.Stat(fullpath)
        if err != nil {

@@ -115,6 +134,16 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
        }
    }

    // Test that /mnt/tmp/$tempFile exists in the container:
    mountedContents, err := exec.Command("lxc-attach", "-n", containerName, "--", "cat", filepath.Join("/mnt/", tmpFile.Name())).Output()
    if err != nil {
        t.Fatalf("err reading temp file in bind mount: %v", err)
    }

    if !bytes.Equal(mountedContents, testFileContents) {
        t.Fatalf("contents of temp bind mounted file did not match, was '%s'", mountedContents)
    }

    // Destroy the container
    if err := sresp.Handle.Kill(); err != nil {
        t.Fatalf("err: %v", err)

@@ -137,6 +166,7 @@ func TestLxcDriver_Open_Wait(t *testing.T) {
    if !lxcPresent(t) {
        t.Skip("lxc not present")
    }
    ctestutil.RequireRoot(t)

    task := &structs.Task{
        Name: "foo",

@@ -197,3 +227,98 @@ func TestLxcDriver_Open_Wait(t *testing.T) {
func lxcPresent(t *testing.T) bool {
    return lxc.Version() != ""
}

func TestLxcDriver_Volumes_ConfigValidation(t *testing.T) {
    if !testutil.IsTravis() {
        t.Parallel()
    }
    if !lxcPresent(t) {
        t.Skip("lxc not present")
    }
    ctestutil.RequireRoot(t)

    brokenVolumeConfigs := [][]string{
        {
            "foo:/var",
        },
        {
            ":",
        },
        {
            "abc:",
        },
        {
            ":def",
        },
        {
            "abc:def:ghi",
        },
    }

    for _, bc := range brokenVolumeConfigs {
        if err := testVolumeConfig(t, bc); err == nil {
            t.Fatalf("error expected in validate for config %+v", bc)
        }
    }
    if err := testVolumeConfig(t, []string{"abc:def"}); err != nil {
        t.Fatalf("error in validate for syntactically valid config abc:def")
    }
}

func testVolumeConfig(t *testing.T, volConfig []string) error {
    task := &structs.Task{
        Name:        "voltest",
        Driver:      "lxc",
        KillTimeout: 10 * time.Second,
        Resources:   structs.DefaultResources(),
        Config: map[string]interface{}{
            "template": "busybox",
        },
    }
    task.Config["volumes"] = volConfig

    ctx := testDriverContexts(t, task)
    defer ctx.AllocDir.Destroy()

    driver := NewLxcDriver(ctx.DriverCtx)

    err := driver.Validate(task.Config)
    return err

}

func TestLxcDriver_Start_NoVolumes(t *testing.T) {
    if !testutil.IsTravis() {
        t.Parallel()
    }
    if !lxcPresent(t) {
        t.Skip("lxc not present")
    }
    ctestutil.RequireRoot(t)

    task := &structs.Task{
        Name:   "foo",
        Driver: "lxc",
        Config: map[string]interface{}{
            "template": "/usr/share/lxc/templates/lxc-busybox",
            "volumes":  []string{"/tmp/:mnt/tmp"},
        },
        KillTimeout: 10 * time.Second,
        Resources:   structs.DefaultResources(),
    }

    ctx := testDriverContexts(t, task)
    defer ctx.AllocDir.Destroy()

    ctx.DriverCtx.config.Options = map[string]string{lxcVolumesConfigOption: "false"}

    d := NewLxcDriver(ctx.DriverCtx)

    if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
        t.Fatalf("prestart err: %v", err)
    }
    _, err := d.Start(ctx.ExecCtx, task)
    if err == nil {
        t.Fatalf("expected error in start, got nil.")
    }
}
@@ -127,6 +127,7 @@ func TestQemuDriver_GracefulShutdown(t *testing.T) {
        t.Parallel()
    }
    ctestutils.QemuCompatible(t)
    ctestutils.RequireRoot(t)
    task := &structs.Task{
        Name:   "linux",
        Driver: "qemu",

@@ -7,7 +7,7 @@ import (

const (
    // The default user that the executor uses to run tasks
    DefaultUnpriviledgedUser = "nobody"
    DefaultUnprivilegedUser = "nobody"

    // CheckBufSize is the size of the check output result
    CheckBufSize = 4 * 1024

@@ -178,7 +178,7 @@ func GetAbsolutePath(bin string) (string, error) {
// dstructs.DefaultUnprivilegedUser if none was given.
func getExecutorUser(task *structs.Task) string {
    if task.User == "" {
        return dstructs.DefaultUnpriviledgedUser
        return dstructs.DefaultUnprivilegedUser
    }
    return task.User
}

@@ -7,6 +7,13 @@ import (
    "testing"
)

// RequireRoot skips tests unless running on a Unix as root.
func RequireRoot(t *testing.T) {
    if syscall.Geteuid() != 0 {
        t.Skip("Must run as root on Unix")
    }
}

func ExecCompatible(t *testing.T) {
    if runtime.GOOS != "linux" || syscall.Geteuid() != 0 {
        t.Skip("Test only available running as root on linux")

@@ -428,6 +428,7 @@ func (c *vaultClient) renew(req *vaultClientRenewalRequest) error {
    fatal := false
    if renewalErr != nil &&
        (strings.Contains(renewalErr.Error(), "lease not found or lease is not renewable") ||
            strings.Contains(renewalErr.Error(), "lease is not renewable") ||
            strings.Contains(renewalErr.Error(), "token not found") ||
            strings.Contains(renewalErr.Error(), "permission denied")) {
        fatal = true
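Note: the renew change above treats certain Vault error strings as fatal so the client stops retrying renewal of leases that can never succeed. A minimal sketch of that classification, illustrative only (the real substring list lives in vaultClient.renew above):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isFatalRenewalError reports whether a renewal error can never be fixed by
// retrying, mirroring the substrings checked in the hunk above.
func isFatalRenewalError(err error) bool {
	if err == nil {
		return false
	}
	for _, msg := range []string{
		"lease not found or lease is not renewable",
		"lease is not renewable",
		"token not found",
		"permission denied",
	} {
		if strings.Contains(err.Error(), msg) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isFatalRenewalError(errors.New("Error making API request: lease is not renewable"))) // true
	fmt.Println(isFatalRenewalError(errors.New("connection refused")))                                // false
}
```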
@ -3,6 +3,7 @@ package vaultclient
|
|||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -197,3 +198,85 @@ func TestVaultClient_Heap(t *testing.T) {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
func TestVaultClient_RenewNonRenewableLease(t *testing.T) {
|
||||
t.Parallel()
|
||||
v := testutil.NewTestVault(t)
|
||||
defer v.Stop()
|
||||
|
||||
logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
|
||||
v.Config.ConnectionRetryIntv = 100 * time.Millisecond
|
||||
v.Config.TaskTokenTTL = "4s"
|
||||
c, err := NewVaultClient(v.Config, logger, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build vault client: %v", err)
|
||||
}
|
||||
|
||||
c.Start()
|
||||
defer c.Stop()
|
||||
|
||||
// Sleep a little while to ensure that the renewal loop is active
|
||||
time.Sleep(time.Duration(testutil.TestMultiplier()) * time.Second)
|
||||
|
||||
tcr := &vaultapi.TokenCreateRequest{
|
||||
Policies: []string{"foo", "bar"},
|
||||
TTL: "2s",
|
||||
DisplayName: "derived-for-task",
|
||||
Renewable: new(bool),
|
||||
}
|
||||
|
||||
c.client.SetToken(v.Config.Token)
|
||||
|
||||
if err := c.client.SetAddress(v.Config.Addr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
secret, err := c.client.Auth().Token().Create(tcr)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create vault token: %v", err)
|
||||
}
|
||||
|
||||
if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" {
|
||||
t.Fatal("failed to derive a wrapped vault token")
|
||||
}
|
||||
|
||||
_, err = c.RenewToken(secret.Auth.ClientToken, secret.Auth.LeaseDuration)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), "lease is not renewable") {
|
||||
t.Fatalf("expected \"%s\" in error message, got \"%v\"", "lease is not renewable", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVaultClient_RenewNonExistentLease(t *testing.T) {
|
||||
t.Parallel()
|
||||
v := testutil.NewTestVault(t)
|
||||
defer v.Stop()
|
||||
|
||||
logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
|
||||
v.Config.ConnectionRetryIntv = 100 * time.Millisecond
|
||||
v.Config.TaskTokenTTL = "4s"
|
||||
c, err := NewVaultClient(v.Config, logger, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build vault client: %v", err)
|
||||
}
|
||||
|
||||
c.Start()
|
||||
defer c.Stop()
|
||||
|
||||
// Sleep a little while to ensure that the renewal loop is active
|
||||
time.Sleep(time.Duration(testutil.TestMultiplier()) * time.Second)
|
||||
|
||||
c.client.SetToken(v.Config.Token)
|
||||
|
||||
if err := c.client.SetAddress(v.Config.Addr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = c.RenewToken(c.client.Token(), 10)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), "lease not found") {
|
||||
t.Fatalf("expected \"%s\" in error message, got \"%v\"", "lease not found", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,6 +1,7 @@
package agent

import (
    "fmt"
    "io/ioutil"
    "net"
    "os"

@@ -9,6 +10,7 @@ import (
    "testing"
    "time"

    "github.com/hashicorp/consul/lib/freeport"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/nomad/structs/config"
)

@@ -541,7 +543,8 @@ func TestConfig_Listener(t *testing.T) {
    }

    // Works with valid inputs
    ln, err := config.Listener("tcp", "127.0.0.1", 24000)
    ports := freeport.GetT(t, 2)
    ln, err := config.Listener("tcp", "127.0.0.1", ports[0])
    if err != nil {
        t.Fatalf("err: %s", err)
    }

@@ -550,20 +553,22 @@ func TestConfig_Listener(t *testing.T) {
    if net := ln.Addr().Network(); net != "tcp" {
        t.Fatalf("expected tcp, got: %q", net)
    }
    if addr := ln.Addr().String(); addr != "127.0.0.1:24000" {
        t.Fatalf("expected 127.0.0.1:4646, got: %q", addr)
    want := fmt.Sprintf("127.0.0.1:%d", ports[0])
    if addr := ln.Addr().String(); addr != want {
        t.Fatalf("expected %q, got: %q", want, addr)
    }

    // Falls back to default bind address if non provided
    config.BindAddr = "0.0.0.0"
    ln, err = config.Listener("tcp4", "", 24000)
    ln, err = config.Listener("tcp4", "", ports[1])
    if err != nil {
        t.Fatalf("err: %s", err)
    }
    ln.Close()

    if addr := ln.Addr().String(); addr != "0.0.0.0:24000" {
        t.Fatalf("expected 0.0.0.0:24000, got: %q", addr)
    want = fmt.Sprintf("0.0.0.0:%d", ports[1])
    if addr := ln.Addr().String(); addr != want {
        t.Fatalf("expected %q, got: %q", want, addr)
    }
}
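Note: the TestConfig_Listener changes above replace the hardcoded port 24000 with ports reserved through consul's freeport helper, so parallel test runs do not collide. A small sketch of the same pattern, assuming only the github.com/hashicorp/consul/lib/freeport package already used above (the test and package names here are placeholders):

```go
package config_test

import (
	"fmt"
	"net"
	"testing"

	"github.com/hashicorp/consul/lib/freeport"
)

func TestListenerPorts(t *testing.T) {
	// Reserve two ports that are free on this machine instead of
	// hardcoding them, so tests running in parallel don't collide.
	ports := freeport.GetT(t, 2)

	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ports[0]))
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer ln.Close()

	want := fmt.Sprintf("127.0.0.1:%d", ports[0])
	if got := ln.Addr().String(); got != want {
		t.Fatalf("expected %q, got %q", want, got)
	}
	_ = ports[1] // second port reserved for a later listener, as in the real test
}
```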
@@ -663,7 +663,7 @@ func (c *ServiceClient) checkRegs(ops *operations, allocID, serviceID string, se

        ip, port, err := getAddress(addrMode, portLabel, task.Resources.Networks, net)
        if err != nil {
            return nil, fmt.Errorf("unable to get address for check %q: %v", check.Name, err)
            return nil, fmt.Errorf("error getting address for check %q: %v", check.Name, err)
        }

        checkReg, err := createCheckReg(serviceID, checkID, check, ip, port)

@@ -1036,6 +1036,11 @@ func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host
    chkReg.Timeout = check.Timeout.String()
    chkReg.Interval = check.Interval.String()

    // Require an address for http or tcp checks
    if port == 0 && check.RequiresPort() {
        return nil, fmt.Errorf("%s checks require an address", check.Type)
    }

    switch check.Type {
    case structs.ServiceCheckHTTP:
        proto := check.Protocol

@@ -1089,9 +1094,15 @@ func isOldNomadService(id string) bool {
    return strings.HasPrefix(id, prefix)
}

// getAddress returns the ip and port to use for a service or check. An error
// is returned if an ip and port cannot be determined.
// getAddress returns the IP and port to use for a service or check. If no port
// label is specified (an empty value), zero values are returned because no
// address could be resolved.
func getAddress(addrMode, portLabel string, networks structs.Networks, driverNet *cstructs.DriverNetwork) (string, int, error) {
    // No port label specified, no address can be assembled
    if portLabel == "" {
        return "", 0, nil
    }

    switch addrMode {
    case structs.AddressModeAuto:
        if driverNet.Advertise() {

@@ -87,8 +87,17 @@ func TestConsul_Integration(t *testing.T) {
    task.Config = map[string]interface{}{
        "run_for": "1h",
    }

    // Choose a port that shouldn't be in use
    task.Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "http", Value: 3}}
    netResource := &structs.NetworkResource{
        Device:        "eth0",
        IP:            "127.0.0.1",
        MBits:         50,
        ReservedPorts: []structs.Port{{Label: "http", Value: 3}},
    }
    alloc.Resources.Networks[0] = netResource
    alloc.TaskResources["web"].Networks[0] = netResource
    task.Resources.Networks[0] = netResource
    task.Services = []*structs.Service{
        {
            Name: "httpd",

@@ -96,13 +105,12 @@ func TestConsul_Integration(t *testing.T) {
            Tags: []string{"nomad", "test", "http"},
            Checks: []*structs.ServiceCheck{
                {
                    Name:      "httpd-http-check",
                    Type:      "http",
                    Path:      "/",
                    Protocol:  "http",
                    PortLabel: "http",
                    Interval:  9000 * time.Hour,
                    Timeout:   1, // fail as fast as possible
                    Name:     "httpd-http-check",
                    Type:     "http",
                    Path:     "/",
                    Protocol: "http",
                    Interval: 9000 * time.Hour,
                    Timeout:  1, // fail as fast as possible
                },
                {
                    Name: "httpd-script-check",
@@ -1566,8 +1566,13 @@ func TestGetAddress(t *testing.T) {
        {
            Name:        "InvalidMode",
            Mode:        "invalid-mode",
            PortLabel:   "80",
            ErrContains: "invalid address mode",
        },
        {
            Name: "EmptyIsOk",
            Mode: structs.AddressModeHost,
        },
    }

    for _, tc := range cases {

@@ -11,6 +11,7 @@ import (
    "net/http/pprof"
    "os"
    "strconv"
    "strings"
    "time"

    "github.com/NYTimes/gziphandler"

@@ -284,17 +285,22 @@ func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Reque
        if err != nil {
            s.logger.Printf("[ERR] http: Request %v, error: %v", reqURL, err)
            code := 500
            errMsg := err.Error()
            if http, ok := err.(HTTPCodedError); ok {
                code = http.Code()
            } else {
                switch err.Error() {
                case structs.ErrPermissionDenied.Error(), structs.ErrTokenNotFound.Error():
                // RPC errors get wrapped, so manually unwrap by only looking at their suffix
                if strings.HasSuffix(errMsg, structs.ErrPermissionDenied.Error()) {
                    errMsg = structs.ErrPermissionDenied.Error()
                    code = 403
                } else if strings.HasSuffix(errMsg, structs.ErrTokenNotFound.Error()) {
                    errMsg = structs.ErrTokenNotFound.Error()
                    code = 403
                }
            }

            resp.WriteHeader(code)
            resp.Write([]byte(err.Error()))
            resp.Write([]byte(errMsg))
            return
        }
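Note: the wrap change above matches permission errors by suffix because errors returned through remote RPC forwarding arrive wrapped, as the test below shows with its "rpc error: " prefix. A minimal sketch of that unwrapping idea, illustrative only and using a stand-in error value rather than Nomad's structs package:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// errPermissionDenied stands in for structs.ErrPermissionDenied.
var errPermissionDenied = errors.New("Permission denied")

// httpCodeFor maps an error to an HTTP status code, unwrapping RPC-forwarded
// errors by matching on the suffix of the message, as wrap() does above.
func httpCodeFor(err error) (int, string) {
	msg := err.Error()
	if strings.HasSuffix(msg, errPermissionDenied.Error()) {
		return 403, errPermissionDenied.Error()
	}
	return 500, msg
}

func main() {
	// Locally generated error.
	fmt.Println(httpCodeFor(errPermissionDenied))
	// The same error after being forwarded over RPC to another server.
	fmt.Println(httpCodeFor(fmt.Errorf("rpc error: %v", errPermissionDenied)))
}
```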
@@ -225,15 +225,28 @@ func TestPermissionDenied(t *testing.T) {
    })
    defer s.Shutdown()

    resp := httptest.NewRecorder()
    handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
        return nil, structs.ErrPermissionDenied
    {
        resp := httptest.NewRecorder()
        handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
            return nil, structs.ErrPermissionDenied
        }

        req, _ := http.NewRequest("GET", "/v1/job/foo", nil)
        s.Server.wrap(handler)(resp, req)
        assert.Equal(t, resp.Code, 403)
    }

    urlStr := "/v1/job/foo"
    req, _ := http.NewRequest("GET", urlStr, nil)
    s.Server.wrap(handler)(resp, req)
    assert.Equal(t, resp.Code, 403)
    // When remote RPC is used the errors have "rpc error: " prepended
    {
        resp := httptest.NewRecorder()
        handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
            return nil, fmt.Errorf("rpc error: %v", structs.ErrPermissionDenied)
        }

        req, _ := http.NewRequest("GET", "/v1/job/foo", nil)
        s.Server.wrap(handler)(resp, req)
        assert.Equal(t, resp.Code, 403)
    }
}

func TestTokenNotFound(t *testing.T) {
@@ -1212,6 +1212,10 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
                            Name:      "serviceA",
                            Tags:      []string{"1", "2"},
                            PortLabel: "foo",
                            CheckRestart: &api.CheckRestart{
                                Limit: 4,
                                Grace: helper.TimeToPtr(11 * time.Second),
                            },
                            Checks: []api.ServiceCheck{
                                {
                                    Id: "hello",

@@ -1228,10 +1232,17 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
                                    InitialStatus: "ok",
                                    CheckRestart: &api.CheckRestart{
                                        Limit:          3,
                                        Grace:          helper.TimeToPtr(10 * time.Second),
                                        IgnoreWarnings: true,
                                    },
                                },
                                {
                                    Id:        "check2id",
                                    Name:      "check2",
                                    Type:      "tcp",
                                    PortLabel: "foo",
                                    Interval:  4 * time.Second,
                                    Timeout:   2 * time.Second,
                                },
                            },
                        },
                    },

@@ -1425,10 +1436,21 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
                                    InitialStatus: "ok",
                                    CheckRestart: &structs.CheckRestart{
                                        Limit:          3,
                                        Grace:          10 * time.Second,
                                        Grace:          11 * time.Second,
                                        IgnoreWarnings: true,
                                    },
                                },
                                {
                                    Name:      "check2",
                                    Type:      "tcp",
                                    PortLabel: "foo",
                                    Interval:  4 * time.Second,
                                    Timeout:   2 * time.Second,
                                    CheckRestart: &structs.CheckRestart{
                                        Limit: 4,
                                        Grace: 11 * time.Second,
                                    },
                                },
                            },
                        },
                    },
demo/tls/GNUmakefile (new file) | 56

SHELL = bash

.PHONY: all
all: \
    ca.pem ca-key.pem ca.csr \
    client.pem client-key.pem client.csr \
    dev.pem dev-key.pem dev.csr \
    server.pem server-key.pem server.csr \
    user.pem user-key.pem user.csr user.pfx

.PHONY: bootstrap
bootstrap: ## Install dependencies
    @echo "==> Updating cfssl..."
    go get -u github.com/cloudflare/cfssl/cmd/...

clean: ## Remove generated files
    @echo "==> Removing generated files..."
    rm -f \
        ca.pem ca-key.pem ca.csr \
        client.pem client-key.pem client.csr \
        dev.pem dev-key.pem dev.csr \
        server.pem server-key.pem server.csr \
        user.pem user-key.pem user.csr user.pfx

# Generate Nomad certificate authority
ca.pem ca-key.pem ca.csr:
    @echo "==> Removing generated files..."
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# Generate Nomad server certificate
server.pem server-key.pem server.csr:
    @echo "==> Generating Nomad server certificate..."
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=cfssl.json \
        -hostname="server.global.nomad,localhost,127.0.0.1" csr.json \
        | cfssljson -bare server

# Generate Nomad client node certificate
client.pem client-key.pem client.csr:
    @echo "==> Generating Nomad client node certificate..."
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=cfssl.json \
        -hostname="client.global.nomad,localhost,127.0.0.1" csr.json \
        | cfssljson -bare client

# Generate Nomad combined server and client node certificate
dev.pem dev-key.pem dev.csr:
    @echo "==> Generating Nomad server and client node certificate..."
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=cfssl.json \
        -hostname="server.global.nomad,client.global.nomad,localhost,127.0.0.1" csr.json \
        | cfssljson -bare dev

# Generate certificates for users (CLI and browsers)
user.pem user-key.pem user.csr user.pfx:
    @echo "==> Generating Nomad user certificates..."
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=cfssl-user.json \
        csr.json | cfssljson -bare user
    openssl pkcs12 -export -inkey user-key.pem -in user.pem -out user.pfx -password pass:
demo/tls/README.md (new file) | 57

Demo TLS Configuration
======================

**Do _NOT_ use in production. For testing purposes only.**

See [Securing Nomad](https://www.nomadproject.io/guides/securing-nomad.html)
for a full guide.

This directory contains sample TLS certificates and configuration to ease
testing of TLS related features. There is a makefile to generate certificates,
and pre-generated certificates are available for use.

## Files

| Generated? | File          | Description |
| ---------- | ------------- | ----------- |
| ◻️ | `GNUmakefile` | Makefile to generate certificates |
| ◻️ | `tls-*.hcl`   | Nomad TLS configurations |
| ◻️ | `cfssl*.json` | cfssl configuration files |
| ◻️ | `csr*.json`   | cfssl certificate generation configurations |
| ☑️ | `ca*.pem`     | Certificate Authority certificate and key |
| ☑️ | `client*.pem` | Nomad client node certificate and key |
| ☑️ | `dev*.pem`    | Nomad certificate and key for dev agents |
| ☑️ | `server*.pem` | Nomad server certificate and key |
| ☑️ | `user*.pem`   | Nomad user (CLI) certificate and key |
| ☑️ | `user.pfx`    | Nomad browser PKCS #12 certificate and key *(blank password)* |

## Usage

### Agent

To run a TLS-enabled Nomad agent include the `tls.hcl` configuration file with
either the `-dev` flag or your own configuration file. If you're not running
the `nomad agent` command from *this* directory you will have to edit the paths
in `tls.hcl`.

```sh
# Run the dev agent with TLS enabled
nomad agent -dev -config=tls-dev.hcl

# Run a *server* agent with your configuration and TLS enabled
nomad agent -config=path/to/custom.hcl -config=tls-server.hcl

# Run a *client* agent with your configuration and TLS enabled
nomad agent -config=path/to/custom.hcl -config=tls-client.hcl
```

### Browser

To access the Nomad Web UI when TLS is enabled you will need to import two
certificate files into your browser:

- `ca.pem` must be imported as a Certificate Authority
- `user.pfx` must be imported as a Client certificate. The password is blank.

When you access the UI via https://localhost:4646/ you will be prompted to
select the user certificate you imported.
demo/tls/ca-csr.json (new file) | 19

{
  "CN": "example.nomad",
  "hosts": [
    "example.nomad"
  ],
  "key": {
    "algo": "ecdsa",
    "size": 256
  },
  "names": [
    {
      "C": "US",
      "ST": "CA",
      "L": "San Francisco",
      "OU": "Nomad Demo"
    }
  ]
}

demo/tls/ca-key.pem (new file) | 5

-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKsrq20VeBrZ0VOqMJSvvU6E+w7RAbUR7D5RkZSgNKJQoAoGCCqGSM49
AwEHoUQDQgAEn/hg7ktoFRazpDTMTkN1mEJoCo/wJOlI7XD98WE1wr6U/4q0Wh9F
YuNyfCb2rK2nSrLKra/1R+z3Q+trXJt2cQ==
-----END EC PRIVATE KEY-----

demo/tls/ca.csr (new file) | 9

-----BEGIN CERTIFICATE REQUEST-----
MIIBRjCB7AIBADBfMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcT
DVNhbiBGcmFuY2lzY28xEzARBgNVBAsTCk5vbWFkIERlbW8xFjAUBgNVBAMTDWV4
YW1wbGUubm9tYWQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASf+GDuS2gVFrOk
NMxOQ3WYQmgKj/Ak6UjtcP3xYTXCvpT/irRaH0Vi43J8JvasradKssqtr/VH7PdD
62tcm3ZxoCswKQYJKoZIhvcNAQkOMRwwGjAYBgNVHREEETAPgg1leGFtcGxlLm5v
bWFkMAoGCCqGSM49BAMCA0kAMEYCIQDP+rv/peK1JGFzXOzdLmfjjEg2vOFWGccz
iAy63lDurgIhAIF//KajKrghaC1JXmsrqnVHuP40KZLOcAv54Q4PgH1h
-----END CERTIFICATE REQUEST-----

demo/tls/ca.pem (new file) | 13

-----BEGIN CERTIFICATE-----
MIICAzCCAaigAwIBAgIUN0nEio761fu7oRc04wRmlxxY3gowCgYIKoZIzj0EAwIw
XzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp
c2NvMRMwEQYDVQQLEwpOb21hZCBEZW1vMRYwFAYDVQQDEw1leGFtcGxlLm5vbWFk
MB4XDTE4MDEwOTE4MDgwMFoXDTIzMDEwODE4MDgwMFowXzELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQLEwpO
b21hZCBEZW1vMRYwFAYDVQQDEw1leGFtcGxlLm5vbWFkMFkwEwYHKoZIzj0CAQYI
KoZIzj0DAQcDQgAEn/hg7ktoFRazpDTMTkN1mEJoCo/wJOlI7XD98WE1wr6U/4q0
Wh9FYuNyfCb2rK2nSrLKra/1R+z3Q+trXJt2caNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKaOK4q82ysmZ7dYMhjbZyphHxx3
MAoGCCqGSM49BAMCA0kAMEYCIQCLoeQKyg1PsyMzETrw3pBA3H3wXU81peHT1t74
R63a2gIhALIeUT188aOaLtUMgPaWd7wE14BDhSpLp602jVGCNFkH
-----END CERTIFICATE-----

demo/tls/cfssl-user.json (new file) | 12

{
  "signing": {
    "default": {
      "expiry": "87600h",
      "usages": [
        "signing",
        "key encipherment",
        "client auth"
      ]
    }
  }
}

demo/tls/cfssl.json (new file) | 13

{
  "signing": {
    "default": {
      "expiry": "87600h",
      "usages": [
        "signing",
        "key encipherment",
        "server auth",
        "client auth"
      ]
    }
  }
}

demo/tls/client-key.pem (new file) | 5

-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIGCce4MNcD+MHx1hQWOARCLQWCPJVhWzrAiI1QV7ftYKoAoGCCqGSM49
AwEHoUQDQgAEDotF3nv9Stt9Zp5sBv3BNk4936BFBH6eyGAIULRlqSJQUrbc97cf
hcdwrVU0hDJcM98Bpd0R3OhqU7j86rc0FQ==
-----END EC PRIVATE KEY-----

demo/tls/client.csr (new file) | 9

-----BEGIN CERTIFICATE REQUEST-----
MIIBRDCB6wIBADBHMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcT
DVNhbiBGcmFuY2lzY28xEzARBgNVBAsTCk5vbWFkIERlbW8wWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAAQOi0Xee/1K231mnmwG/cE2Tj3foEUEfp7IYAhQtGWpIlBS
ttz3tx+Fx3CtVTSEMlwz3wGl3RHc6GpTuPzqtzQVoEIwQAYJKoZIhvcNAQkOMTMw
MTAvBgNVHREEKDAmghNjbGllbnQuZ2xvYmFsLm5vbWFkgglsb2NhbGhvc3SHBH8A
AAEwCgYIKoZIzj0EAwIDSAAwRQIgRr+uu2A1NPkhso3QFWuq9IFf8eCkU6yzkmJI
9R7JZRQCIQDTj2mN3OqJAl1LsMRc2rmD1J7Fp+GvnGmSDT4fcdQ9zA==
-----END CERTIFICATE REQUEST-----

demo/tls/client.pem (new file) | 15

-----BEGIN CERTIFICATE-----
MIICWjCCAgCgAwIBAgIUDYX/mI1EZQPtc/6kc7Kv2epWDwQwCgYIKoZIzj0EAwIw
XzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp
c2NvMRMwEQYDVQQLEwpOb21hZCBEZW1vMRYwFAYDVQQDEw1leGFtcGxlLm5vbWFk
MB4XDTE4MDEwOTE4MDgwMFoXDTI4MDEwNzE4MDgwMFowRzELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQLEwpO
b21hZCBEZW1vMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEDotF3nv9Stt9Zp5s
Bv3BNk4936BFBH6eyGAIULRlqSJQUrbc97cfhcdwrVU0hDJcM98Bpd0R3OhqU7j8
6rc0FaOBsTCBrjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFO2ys/83g7JgjwZf5KY4
nOQojbV1MB8GA1UdIwQYMBaAFKaOK4q82ysmZ7dYMhjbZyphHxx3MC8GA1UdEQQo
MCaCE2NsaWVudC5nbG9iYWwubm9tYWSCCWxvY2FsaG9zdIcEfwAAATAKBggqhkjO
PQQDAgNIADBFAiEAu+R+nZv0QXbo5c+vEA+b8wryMWqK9TSkMZmh/BwMriwCIHIJ
o/vUarVvgFLy+9ZITDYgtQxMWGLjm8brPyDiXNEA
-----END CERTIFICATE-----

demo/tls/csr.json (new file) | 10

{
  "names": [
    {
      "C": "US",
      "ST": "CA",
      "L": "San Francisco",
      "OU": "Nomad Demo"
    }
  ]
}

demo/tls/dev-key.pem (new file) | 5

-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIJ/MkDicoe6ohduiDoGOwqGXlk2V13fZBwKRB8Ns+2hkoAoGCCqGSM49
AwEHoUQDQgAEmjMddkSmrwZ5qamlGgn0NpbV09qvhAFmaBtawpGXa3LlPzvauHfm
lRcSEzHzkS1M6NT5eAKjJG8yojGHR78cXQ==
-----END EC PRIVATE KEY-----

demo/tls/dev.csr (new file) | 10

-----BEGIN CERTIFICATE REQUEST-----
MIIBWTCCAQACAQAwRzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRMwEQYDVQQLEwpOb21hZCBEZW1vMFkwEwYHKoZIzj0C
AQYIKoZIzj0DAQcDQgAEmjMddkSmrwZ5qamlGgn0NpbV09qvhAFmaBtawpGXa3Ll
PzvauHfmlRcSEzHzkS1M6NT5eAKjJG8yojGHR78cXaBXMFUGCSqGSIb3DQEJDjFI
MEYwRAYDVR0RBD0wO4ITc2VydmVyLmdsb2JhbC5ub21hZIITY2xpZW50Lmdsb2Jh
bC5ub21hZIIJbG9jYWxob3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIEPHMv5p
xoNybtEQVprQrq5ymLX3rm1ZMkjH0EiJjk/AAiAsM2DTQtK8LnL0YKVbbmBNBX5g
1JQeTRt/kW7yKq0OeA==
-----END CERTIFICATE REQUEST-----

demo/tls/dev.pem (new file) | 16

-----BEGIN CERTIFICATE-----
MIICbjCCAhWgAwIBAgIUc5S8QB/Kai23mJkU23YD4hoO7zkwCgYIKoZIzj0EAwIw
XzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp
c2NvMRMwEQYDVQQLEwpOb21hZCBEZW1vMRYwFAYDVQQDEw1leGFtcGxlLm5vbWFk
MB4XDTE4MDEwOTE4MDgwMFoXDTI4MDEwNzE4MDgwMFowRzELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQLEwpO
b21hZCBEZW1vMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmjMddkSmrwZ5qaml
Ggn0NpbV09qvhAFmaBtawpGXa3LlPzvauHfmlRcSEzHzkS1M6NT5eAKjJG8yojGH
R78cXaOBxjCBwzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFBng/OMDB+a/pXc07ZYb
I6OODU5ZMB8GA1UdIwQYMBaAFKaOK4q82ysmZ7dYMhjbZyphHxx3MEQGA1UdEQQ9
MDuCE3NlcnZlci5nbG9iYWwubm9tYWSCE2NsaWVudC5nbG9iYWwubm9tYWSCCWxv
Y2FsaG9zdIcEfwAAATAKBggqhkjOPQQDAgNHADBEAiAKiyqdAvtQewpuEXLU2VuP
Ifdn+7XK82AoTjOW/BbB0gIgNLusqAft2j7mqDT/LNpUTsl6E7O068METh4I9JlT
nEQ=
-----END CERTIFICATE-----

demo/tls/server-key.pem (new file) | 5

-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIP5t9f7rjG4tWmGaDkfIul+OiMEcCOp4aK9oOGQPFcv3oAoGCCqGSM49
AwEHoUQDQgAErP0oL1Eo7dnxsUbaM0O1zTa2XLQTQrt8sfYQKuSxq5f1w3GxgUYJ
wHEpQRK34cNfvZZ1piAde/wBK8rAKCzhoQ==
-----END EC PRIVATE KEY-----

demo/tls/server.csr (new file) | 9

-----BEGIN CERTIFICATE REQUEST-----
MIIBRTCB6wIBADBHMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcT
DVNhbiBGcmFuY2lzY28xEzARBgNVBAsTCk5vbWFkIERlbW8wWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAASs/SgvUSjt2fGxRtozQ7XNNrZctBNCu3yx9hAq5LGrl/XD
cbGBRgnAcSlBErfhw1+9lnWmIB17/AErysAoLOGhoEIwQAYJKoZIhvcNAQkOMTMw
MTAvBgNVHREEKDAmghNzZXJ2ZXIuZ2xvYmFsLm5vbWFkgglsb2NhbGhvc3SHBH8A
AAEwCgYIKoZIzj0EAwIDSQAwRgIhAMpGeIRtFaCxn2Yp8EqRgRT3OnECUv6Mi4+d
Hwn42L2UAiEAzISsF4+Dkemn6KRrOXTv7Anam8fTeoAdqokWV3j4ELQ=
-----END CERTIFICATE REQUEST-----

demo/tls/server.pem (new file) | 15

-----BEGIN CERTIFICATE-----
MIICWjCCAgCgAwIBAgIUJSWExbHzjFPPc/1Eiod55vk+11IwCgYIKoZIzj0EAwIw
XzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp
c2NvMRMwEQYDVQQLEwpOb21hZCBEZW1vMRYwFAYDVQQDEw1leGFtcGxlLm5vbWFk
MB4XDTE4MDEwOTE4MDgwMFoXDTI4MDEwNzE4MDgwMFowRzELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQLEwpO
b21hZCBEZW1vMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAErP0oL1Eo7dnxsUba
M0O1zTa2XLQTQrt8sfYQKuSxq5f1w3GxgUYJwHEpQRK34cNfvZZ1piAde/wBK8rA
KCzhoaOBsTCBrjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFLK3byFY7RGvoyYtJ9sM
DUKbriNRMB8GA1UdIwQYMBaAFKaOK4q82ysmZ7dYMhjbZyphHxx3MC8GA1UdEQQo
MCaCE3NlcnZlci5nbG9iYWwubm9tYWSCCWxvY2FsaG9zdIcEfwAAATAKBggqhkjO
PQQDAgNIADBFAiB7aohsv0AOs7dnL9zrUNoeU6/B90+BntrRtk8+NHTpnQIhAL7W
EpQ9vbAxQ/FouOPC5lLd94yYkMbbUmoke3H2vKkd
-----END CERTIFICATE-----

demo/tls/tls-client.hcl (new file) | 11

tls {
  http = true
  rpc  = true

  ca_file   = "ca.pem"
  cert_file = "client.pem"
  key_file  = "client-key.pem"

  verify_server_hostname = true
  verify_https_client    = true
}

demo/tls/tls-dev.hcl (new file) | 11

tls {
  http = true
  rpc  = true

  ca_file   = "ca.pem"
  cert_file = "dev.pem"
  key_file  = "dev-key.pem"

  verify_server_hostname = true
  verify_https_client    = true
}

demo/tls/tls-server.hcl (new file) | 11

tls {
  http = true
  rpc  = true

  ca_file   = "ca.pem"
  cert_file = "server.pem"
  key_file  = "server-key.pem"

  verify_server_hostname = true
  verify_https_client    = true
}

demo/tls/user-key.pem (new file) | 5

-----BEGIN EC PRIVATE KEY-----
MHcCAQEEILshv6hNINiqJk7iPOBr1rL519YdPah78vK/uTrJm+eYoAoGCCqGSM49
AwEHoUQDQgAES0uuEUedpQxKop5YTUgtywlx7vWJ5dN5PTa2MRoccEhKTVTg1IxW
S8OJxffyTIYXxAtTiDA4JVStchBf1rl2LQ==
-----END EC PRIVATE KEY-----

demo/tls/user.csr (new file) | 8

-----BEGIN CERTIFICATE REQUEST-----
MIIBATCBqQIBADBHMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcT
DVNhbiBGcmFuY2lzY28xEzARBgNVBAsTCk5vbWFkIERlbW8wWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAARLS64RR52lDEqinlhNSC3LCXHu9Ynl03k9NrYxGhxwSEpN
VODUjFZLw4nF9/JMhhfEC1OIMDglVK1yEF/WuXYtoAAwCgYIKoZIzj0EAwIDRwAw
RAIgL01k8EVmO9UBLTa5VDTzPmmOBJuB2GAL7KIUc20BVnQCIFNUx7+KblsI6E5Q
qOIZN1QUMPCGedKufHQvZJ9iX5S3
-----END CERTIFICATE REQUEST-----

demo/tls/user.pem (new file) | 14

-----BEGIN CERTIFICATE-----
MIICHjCCAcOgAwIBAgIUeB9kcy9/5oLhHCm0PmBiBe6pybwwCgYIKoZIzj0EAwIw
XzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp
c2NvMRMwEQYDVQQLEwpOb21hZCBEZW1vMRYwFAYDVQQDEw1leGFtcGxlLm5vbWFk
MB4XDTE4MDEwOTE4MDgwMFoXDTI4MDEwNzE4MDgwMFowRzELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQLEwpO
b21hZCBEZW1vMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAES0uuEUedpQxKop5Y
TUgtywlx7vWJ5dN5PTa2MRoccEhKTVTg1IxWS8OJxffyTIYXxAtTiDA4JVStchBf
1rl2LaN1MHMwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwG
A1UdEwEB/wQCMAAwHQYDVR0OBBYEFIjrKUYag+vlAh5h1eJwhsdekvgGMB8GA1Ud
IwQYMBaAFKaOK4q82ysmZ7dYMhjbZyphHxx3MAoGCCqGSM49BAMCA0kAMEYCIQC6
AZ/eZTHXKOU1sxLTRsK3FHn88DKBqXhHJG/2rbMWEwIhALCC5fi/lTP1lB/EDm1E
j4gRnSu3V03XWZhK6QcdQhr1
-----END CERTIFICATE-----

demo/tls/user.pfx (new file, stored with Git LFS) | BIN
Binary file not shown.
helper/testlog/testlog.go (new file, 46 lines)
@@ -0,0 +1,46 @@
// Package testlog creates a *log.Logger backed by *testing.T to ease logging
// in tests. This allows logs from components being tested to only be printed
// if the test fails (or the verbose flag is specified).
package testlog

import (
	"io"
	"log"
)

// LogPrinter is the methods of testing.T (or testing.B) needed by the test
// logger.
type LogPrinter interface {
	Logf(format string, args ...interface{})
}

// writer implements io.Writer on top of a Logger.
type writer struct {
	t LogPrinter
}

// Write to an underlying Logger. Never returns an error.
func (w *writer) Write(p []byte) (n int, err error) {
	w.t.Logf(string(p))
	return len(p), nil
}

// NewWriter creates a new io.Writer backed by a Logger.
func NewWriter(t LogPrinter) io.Writer {
	return &writer{t}
}

// New returns a new test logger. See https://golang.org/pkg/log/#New
func New(t LogPrinter, prefix string, flag int) *log.Logger {
	return log.New(&writer{t}, prefix, flag)
}

// WithPrefix returns a new test logger with the Lmicroseconds flag set.
func WithPrefix(t LogPrinter, prefix string) *log.Logger {
	return New(t, prefix, log.Lmicroseconds)
}

// Logger returns a test logger with a "TEST" prefix and the Lmicroseconds flag.
func Logger(t LogPrinter) *log.Logger {
	return WithPrefix(t, "TEST ")
}
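A minimal sketch of how the new testlog package would be used from a test; only the testlog calls come from the package above, and the component constructor mentioned in the comments is hypothetical:

package mypackage // hypothetical package under test

import (
	"testing"

	"github.com/hashicorp/nomad/helper/testlog"
)

func TestUsesTestLogger(t *testing.T) {
	// Output goes through t.Logf, so it is only shown when the test fails
	// or when `go test -v` is used.
	logger := testlog.Logger(t)
	logger.Printf("[DEBUG] starting the component under test")

	// A hypothetical component that takes a *log.Logger could be wired up as:
	// c := NewComponent(testlog.WithPrefix(t, "component: "))
	// defer c.Shutdown()
}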
@@ -912,6 +912,7 @@ func parseServices(jobName string, taskGroupName string, task *api.Task, service
		"port",
		"check",
		"address_mode",
		"check_restart",
	}
	if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
		return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx))
@@ -631,6 +631,42 @@ func TestParse(t *testing.T) {
			},
			false,
		},
		{
			"service-check-restart.hcl",
			&api.Job{
				ID:   helper.StringToPtr("service_check_restart"),
				Name: helper.StringToPtr("service_check_restart"),
				Type: helper.StringToPtr("service"),
				TaskGroups: []*api.TaskGroup{
					{
						Name: helper.StringToPtr("group"),
						Tasks: []*api.Task{
							{
								Name: "task",
								Services: []*api.Service{
									{
										Name: "http-service",
										CheckRestart: &api.CheckRestart{
											Limit:          3,
											Grace:          helper.TimeToPtr(10 * time.Second),
											IgnoreWarnings: true,
										},
										Checks: []api.ServiceCheck{
											{
												Name:      "random-check",
												Type:      "tcp",
												PortLabel: "9001",
											},
										},
									},
								},
							},
						},
					},
				},
			},
			false,
		},
	}

	for _, tc := range cases {
jobspec/test-fixtures/service-check-restart.hcl (new file, 21 lines)
@@ -0,0 +1,21 @@
job "service_check_restart" {
  type = "service"
  group "group" {
    task "task" {
      service {
        name = "http-service"
        check_restart {
          limit = 3
          grace = "10s"
          ignore_warnings = true
        }
        check {
          name = "random-check"
          type = "tcp"
          port = "9001"
        }
      }
    }
  }
}
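This fixture parses into the api.CheckRestart and api.ServiceCheck values asserted in TestParse above. As a hedged sketch, the same service built directly against the Go API would look like this (field names and values are copied from the fixture and the test; nothing else is implied):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

// demoService mirrors the HCL fixture above using the Go API types that
// TestParse expects it to produce.
func demoService() *api.Service {
	return &api.Service{
		Name: "http-service",
		CheckRestart: &api.CheckRestart{
			Limit:          3,
			Grace:          helper.TimeToPtr(10 * time.Second),
			IgnoreWarnings: true,
		},
		Checks: []api.ServiceCheck{
			{
				Name:      "random-check",
				Type:      "tcp",
				PortLabel: "9001",
			},
		},
	}
}

func main() {
	s := demoService()
	fmt.Printf("service %q restarts after %d failed checks\n", s.Name, s.CheckRestart.Limit)
}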
@ -286,6 +286,73 @@ func TestEvalEndpoint_Dequeue_WaitIndex(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) {
|
||||
// test enqueueing an eval, updating a plan result for the same eval and de-queueing the eval
|
||||
t.Parallel()
|
||||
s1 := testServer(t, func(c *Config) {
|
||||
c.NumSchedulers = 0 // Prevent automatic dequeue
|
||||
})
|
||||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
job := alloc.Job
|
||||
alloc.Job = nil
|
||||
|
||||
state := s1.fsm.State()
|
||||
|
||||
if err := state.UpsertJob(999, job); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
eval := mock.Eval()
|
||||
eval.JobID = job.ID
|
||||
|
||||
// Create an eval
|
||||
if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
s1.evalBroker.Enqueue(eval)
|
||||
|
||||
// Create a plan result and apply it with a later index
|
||||
res := structs.ApplyPlanResultsRequest{
|
||||
AllocUpdateRequest: structs.AllocUpdateRequest{
|
||||
Alloc: []*structs.Allocation{alloc},
|
||||
Job: job,
|
||||
},
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
assert := assert.New(t)
|
||||
err := state.UpsertPlanResults(1000, &res)
|
||||
assert.Nil(err)
|
||||
|
||||
// Dequeue the eval
|
||||
get := &structs.EvalDequeueRequest{
|
||||
Schedulers: defaultSched,
|
||||
SchedulerVersion: scheduler.SchedulerVersion,
|
||||
WriteRequest: structs.WriteRequest{Region: "global"},
|
||||
}
|
||||
var resp structs.EvalDequeueResponse
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Eval.Dequeue", get, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Ensure outstanding
|
||||
token, ok := s1.evalBroker.Outstanding(eval.ID)
|
||||
if !ok {
|
||||
t.Fatalf("should be outstanding")
|
||||
}
|
||||
if token != resp.Token {
|
||||
t.Fatalf("bad token: %#v %#v", token, resp.Token)
|
||||
}
|
||||
|
||||
if resp.WaitIndex != 1000 {
|
||||
t.Fatalf("bad wait index; got %d; want %d", resp.WaitIndex, 1000)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1 := testServer(t, func(c *Config) {
|
||||
|
|
|
@@ -1123,7 +1123,7 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
	return nil
}

// reconcileSummaries re-calculates the queued allocations for every job that we
// reconcileQueuedAllocations re-calculates the queued allocations for every job that we
// created a Job Summary during the snapshot restore
func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
	// Get all the jobs

@@ -1161,7 +1161,7 @@ func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
		Status:       structs.EvalStatusPending,
		AnnotatePlan: true,
	}

	snap.UpsertEvals(100, []*structs.Evaluation{eval})
	// Create the scheduler and run it
	sched, err := scheduler.NewScheduler(eval.Type, n.logger, snap, planner)
	if err != nil {
@ -945,9 +945,14 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) {
|
|||
fsm := testFSM(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
|
||||
// Need to remove mock dynamic port from alloc as it won't be computed
|
||||
// in this test
|
||||
alloc.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 0
|
||||
|
||||
fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
|
||||
job := alloc.Job
|
||||
resources := alloc.Resources
|
||||
origResources := alloc.Resources
|
||||
alloc.Resources = nil
|
||||
req := structs.AllocUpdateRequest{
|
||||
Job: job,
|
||||
|
@ -974,10 +979,10 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) {
|
|||
alloc.AllocModifyIndex = out.AllocModifyIndex
|
||||
|
||||
// Resources should be recomputed
|
||||
resources.DiskMB = alloc.Job.TaskGroups[0].EphemeralDisk.SizeMB
|
||||
alloc.Resources = resources
|
||||
origResources.DiskMB = alloc.Job.TaskGroups[0].EphemeralDisk.SizeMB
|
||||
alloc.Resources = origResources
|
||||
if !reflect.DeepEqual(alloc, out) {
|
||||
t.Fatalf("bad: %#v %#v", alloc, out)
|
||||
t.Fatalf("not equal: % #v", pretty.Diff(alloc, out))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1214,6 +1219,10 @@ func TestFSM_ApplyPlanResults(t *testing.T) {
|
|||
|
||||
alloc.DeploymentID = d.ID
|
||||
|
||||
eval := mock.Eval()
|
||||
eval.JobID = job.ID
|
||||
fsm.State().UpsertEvals(1, []*structs.Evaluation{eval})
|
||||
|
||||
fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
|
||||
req := structs.ApplyPlanResultsRequest{
|
||||
AllocUpdateRequest: structs.AllocUpdateRequest{
|
||||
|
@ -1221,6 +1230,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) {
|
|||
Alloc: []*structs.Allocation{alloc},
|
||||
},
|
||||
Deployment: d,
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
buf, err := structs.Encode(structs.ApplyPlanResultsRequestType, req)
|
||||
if err != nil {
|
||||
|
@ -1234,32 +1244,32 @@ func TestFSM_ApplyPlanResults(t *testing.T) {
|
|||
|
||||
// Verify the allocation is registered
|
||||
ws := memdb.NewWatchSet()
|
||||
assert := assert.New(t)
|
||||
out, err := fsm.State().AllocByID(ws, alloc.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Nil(err)
|
||||
alloc.CreateIndex = out.CreateIndex
|
||||
alloc.ModifyIndex = out.ModifyIndex
|
||||
alloc.AllocModifyIndex = out.AllocModifyIndex
|
||||
|
||||
// Job should be re-attached
|
||||
alloc.Job = job
|
||||
if !reflect.DeepEqual(alloc, out) {
|
||||
t.Fatalf("bad: %#v %#v", alloc, out)
|
||||
}
|
||||
assert.Equal(alloc, out)
|
||||
|
||||
dout, err := fsm.State().DeploymentByID(ws, d.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if tg, ok := dout.TaskGroups[alloc.TaskGroup]; !ok || tg.PlacedAllocs != 1 {
|
||||
t.Fatalf("err: %v %v", tg, err)
|
||||
}
|
||||
assert.Nil(err)
|
||||
tg, ok := dout.TaskGroups[alloc.TaskGroup]
|
||||
assert.True(ok)
|
||||
assert.NotNil(tg)
|
||||
assert.Equal(1, tg.PlacedAllocs)
|
||||
|
||||
// Ensure that the original job is used
|
||||
evictAlloc := alloc.Copy()
|
||||
job = mock.Job()
|
||||
job.Priority = 123
|
||||
eval = mock.Eval()
|
||||
eval.JobID = job.ID
|
||||
|
||||
fsm.State().UpsertEvals(2, []*structs.Evaluation{eval})
|
||||
|
||||
evictAlloc.Job = nil
|
||||
evictAlloc.DesiredStatus = structs.AllocDesiredStatusEvict
|
||||
|
@ -1268,28 +1278,28 @@ func TestFSM_ApplyPlanResults(t *testing.T) {
|
|||
Job: job,
|
||||
Alloc: []*structs.Allocation{evictAlloc},
|
||||
},
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
buf, err = structs.Encode(structs.ApplyPlanResultsRequestType, req2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Nil(err)
|
||||
|
||||
resp = fsm.Apply(makeLog(buf))
|
||||
if resp != nil {
|
||||
t.Fatalf("resp: %v", resp)
|
||||
}
|
||||
log := makeLog(buf)
|
||||
//set the index to something other than 1
|
||||
log.Index = 25
|
||||
resp = fsm.Apply(log)
|
||||
assert.Nil(resp)
|
||||
|
||||
// Verify we are evicted
|
||||
out, err = fsm.State().AllocByID(ws, alloc.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if out.DesiredStatus != structs.AllocDesiredStatusEvict {
|
||||
t.Fatalf("alloc found!")
|
||||
}
|
||||
if out.Job == nil || out.Job.Priority == 123 {
|
||||
t.Fatalf("bad job")
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.Equal(structs.AllocDesiredStatusEvict, out.DesiredStatus)
|
||||
assert.NotNil(out.Job)
|
||||
assert.NotEqual(123, out.Job.Priority)
|
||||
|
||||
evalOut, err := fsm.State().EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.Equal(log.Index, evalOut.ModifyIndex)
|
||||
|
||||
}
|
||||
|
||||
func TestFSM_DeploymentStatusUpdate(t *testing.T) {
|
||||
|
|
|
@@ -1088,6 +1088,8 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse)
		AnnotatePlan: true,
	}

	snap.UpsertEvals(100, []*structs.Evaluation{eval})

	// Create an in-memory Planner that returns no errors and stores the
	// submitted plan and created evals.
	planner := &scheduler.Harness{
@@ -292,7 +292,7 @@ func Alloc() *structs.Allocation {
				IP:            "192.168.0.100",
				ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
				MBits:         50,
				DynamicPorts:  []structs.Port{{Label: "http"}},
				DynamicPorts:  []structs.Port{{Label: "http", Value: 9876}},
			},
		},
	},
@@ -135,6 +135,7 @@ func (s *Server) applyPlan(plan *structs.Plan, result *structs.PlanResult, snap
		},
		Deployment:        result.Deployment,
		DeploymentUpdates: result.DeploymentUpdates,
		EvalID:            plan.EvalID,
	}
	for _, updateList := range result.NodeUpdate {
		req.Alloc = append(req.Alloc, updateList...)
@@ -10,6 +10,7 @@ import (
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/hashicorp/raft"
	"github.com/stretchr/testify/assert"
)

const (
@@ -65,7 +66,7 @@ func TestPlanApply_applyPlan(t *testing.T) {
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Register ndoe
	// Register node
	node := mock.Node()
	testRegisterNode(t, s1, node)
@@ -91,6 +92,13 @@ func TestPlanApply_applyPlan(t *testing.T) {
	// Register alloc, deployment and deployment update
	alloc := mock.Alloc()
	s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))
	// Create an eval
	eval := mock.Eval()
	eval.JobID = alloc.JobID
	if err := s1.State().UpsertEvals(1, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	planRes := &structs.PlanResult{
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: {alloc},
@ -110,73 +118,55 @@ func TestPlanApply_applyPlan(t *testing.T) {
|
|||
Job: alloc.Job,
|
||||
Deployment: dnew,
|
||||
DeploymentUpdates: updates,
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
|
||||
// Apply the plan
|
||||
future, err := s1.applyPlan(plan, planRes, snap)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert := assert.New(t)
|
||||
assert.Nil(err)
|
||||
|
||||
// Verify our optimistic snapshot is updated
|
||||
ws := memdb.NewWatchSet()
|
||||
if out, err := snap.AllocByID(ws, alloc.ID); err != nil || out == nil {
|
||||
t.Fatalf("bad: %v %v", out, err)
|
||||
}
|
||||
allocOut, err := snap.AllocByID(ws, alloc.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(allocOut)
|
||||
|
||||
if out, err := snap.DeploymentByID(ws, plan.Deployment.ID); err != nil || out == nil {
|
||||
t.Fatalf("bad: %v %v", out, err)
|
||||
}
|
||||
deploymentOut, err := snap.DeploymentByID(ws, plan.Deployment.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(deploymentOut)
|
||||
|
||||
// Check plan does apply cleanly
|
||||
index, err := planWaitFuture(future)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index == 0 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotEqual(0, index)
|
||||
|
||||
// Lookup the allocation
|
||||
fsmState := s1.fsm.State()
|
||||
out, err := fsmState.AllocByID(ws, alloc.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if out == nil {
|
||||
t.Fatalf("missing alloc")
|
||||
}
|
||||
|
||||
if out.CreateTime <= 0 {
|
||||
t.Fatalf("invalid create time %v", out.CreateTime)
|
||||
}
|
||||
if out.ModifyTime <= 0 {
|
||||
t.Fatalf("invalid modify time %v", out.CreateTime)
|
||||
}
|
||||
if out.CreateTime != out.ModifyTime {
|
||||
t.Fatalf("create time %v modify time %v must be equal", out.CreateTime, out.ModifyTime)
|
||||
}
|
||||
allocOut, err = fsmState.AllocByID(ws, alloc.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(allocOut)
|
||||
assert.True(allocOut.CreateTime > 0)
|
||||
assert.True(allocOut.ModifyTime > 0)
|
||||
assert.Equal(allocOut.CreateTime, allocOut.ModifyTime)
|
||||
|
||||
// Lookup the new deployment
|
||||
dout, err := fsmState.DeploymentByID(ws, plan.Deployment.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if dout == nil {
|
||||
t.Fatalf("missing deployment")
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotNil(dout)
|
||||
|
||||
// Lookup the updated deployment
|
||||
dout2, err := fsmState.DeploymentByID(ws, oldDeployment.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if dout2 == nil {
|
||||
t.Fatalf("missing deployment")
|
||||
}
|
||||
if dout2.Status != desiredStatus || dout2.StatusDescription != desiredStatusDescription {
|
||||
t.Fatalf("bad status: %#v", dout2)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotNil(dout2)
|
||||
assert.Equal(desiredStatus, dout2.Status)
|
||||
assert.Equal(desiredStatusDescription, dout2.StatusDescription)
|
||||
|
||||
// Lookup updated eval
|
||||
evalOut, err := fsmState.EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(evalOut)
|
||||
assert.Equal(index, evalOut.ModifyIndex)
|
||||
|
||||
// Evict alloc, Register alloc2
|
||||
allocEvict := new(structs.Allocation)
|
||||
|
@ -197,60 +187,43 @@ func TestPlanApply_applyPlan(t *testing.T) {
|
|||
|
||||
// Snapshot the state
|
||||
snap, err = s1.State().Snapshot()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Nil(err)
|
||||
|
||||
// Apply the plan
|
||||
plan = &structs.Plan{
|
||||
Job: job,
|
||||
Job: job,
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
future, err = s1.applyPlan(plan, planRes, snap)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Nil(err)
|
||||
|
||||
// Check that our optimistic view is updated
|
||||
if out, _ := snap.AllocByID(ws, allocEvict.ID); out.DesiredStatus != structs.AllocDesiredStatusEvict {
|
||||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
out, _ := snap.AllocByID(ws, allocEvict.ID)
|
||||
assert.Equal(structs.AllocDesiredStatusEvict, out.DesiredStatus)
|
||||
|
||||
// Verify plan applies cleanly
|
||||
index, err = planWaitFuture(future)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index == 0 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotEqual(0, index)
|
||||
|
||||
// Lookup the allocation
|
||||
out, err = s1.fsm.State().AllocByID(ws, alloc.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if out.DesiredStatus != structs.AllocDesiredStatusEvict {
|
||||
t.Fatalf("should be evicted alloc: %#v", out)
|
||||
}
|
||||
if out.Job == nil {
|
||||
t.Fatalf("missing job")
|
||||
}
|
||||
|
||||
if out.ModifyTime <= 0 {
|
||||
t.Fatalf("must have valid modify time but was %v", out.ModifyTime)
|
||||
}
|
||||
allocOut, err = s1.fsm.State().AllocByID(ws, alloc.ID)
|
||||
assert.Nil(err)
|
||||
assert.Equal(structs.AllocDesiredStatusEvict, allocOut.DesiredStatus)
|
||||
assert.NotNil(allocOut.Job)
|
||||
assert.True(allocOut.ModifyTime > 0)
|
||||
|
||||
// Lookup the allocation
|
||||
out, err = s1.fsm.State().AllocByID(ws, alloc2.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if out == nil {
|
||||
t.Fatalf("missing alloc")
|
||||
}
|
||||
if out.Job == nil {
|
||||
t.Fatalf("missing job")
|
||||
}
|
||||
allocOut, err = s1.fsm.State().AllocByID(ws, alloc2.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(allocOut)
|
||||
assert.NotNil(allocOut.Job)
|
||||
|
||||
// Lookup updated eval
|
||||
evalOut, err = fsmState.EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(evalOut)
|
||||
assert.Equal(index, evalOut.ModifyIndex)
|
||||
}
|
||||
|
||||
func TestPlanApply_EvalPlan_Simple(t *testing.T) {
|
||||
|
|
|
@@ -2,7 +2,9 @@ package nomad

import (
	"strings"
	"time"

	metrics "github.com/armon/go-metrics"
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/nomad/state"

@@ -114,6 +116,11 @@ func roundUUIDDownIfOdd(prefix string, context structs.Context) string {
// PrefixSearch is used to list matches for a given prefix, and returns
// matching jobs, evaluations, allocations, and/or nodes.
func (s *Search) PrefixSearch(args *structs.SearchRequest, reply *structs.SearchResponse) error {
	if done, err := s.srv.forward("Search.PrefixSearch", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "search", "prefix_search"}, time.Now())

	aclObj, err := s.srv.ResolveToken(args.AuthToken)
	if err != nil {
		return err
@ -712,3 +712,47 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) {
|
|||
assert.Equal(1, len(resp.Matches[structs.Jobs]))
|
||||
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
|
||||
}
|
||||
|
||||
func TestSearch_PrefixSearch_MultiRegion(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
jobName := "exampleexample"
|
||||
|
||||
t.Parallel()
|
||||
s1 := testServer(t, func(c *Config) {
|
||||
c.NumSchedulers = 0
|
||||
c.Region = "foo"
|
||||
})
|
||||
defer s1.Shutdown()
|
||||
|
||||
s2 := testServer(t, func(c *Config) {
|
||||
c.NumSchedulers = 0
|
||||
c.Region = "bar"
|
||||
})
|
||||
defer s2.Shutdown()
|
||||
|
||||
testJoin(t, s1, s2)
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
|
||||
job := registerAndVerifyJob(s1, t, jobName, 0)
|
||||
|
||||
req := &structs.SearchRequest{
|
||||
Prefix: "",
|
||||
Context: structs.Jobs,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Region: "foo",
|
||||
Namespace: job.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
codec := rpcClient(t, s2)
|
||||
|
||||
var resp structs.SearchResponse
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
assert.Equal(1, len(resp.Matches[structs.Jobs]))
|
||||
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
|
||||
assert.Equal(uint64(jobIndex), resp.Index)
|
||||
}
|
||||
|
|
|
@@ -154,7 +154,7 @@ func (s *Server) maybeBootstrap() {
		if err := s.connPool.RPC(s.config.Region, server.Addr, server.MajorVersion,
			"Status.Peers", req, &peers); err != nil {
			nextRetry := (1 << attempt) * peerRetryBase
			s.logger.Printf("[ERR] consul: Failed to confirm peer status for %s: %v. Retrying in "+
			s.logger.Printf("[ERR] nomad: Failed to confirm peer status for %s: %v. Retrying in "+
				"%v...", server.Name, err, nextRetry.String())
			time.Sleep(nextRetry)
		} else {
@@ -196,6 +196,16 @@ func (s *StateStore) UpsertPlanResults(index uint64, results *structs.ApplyPlanR
		return err
	}

	// COMPAT: Nomad versions before 0.7.1 did not include the eval ID when
	// applying the plan. Thus while we are upgrading, we ignore updating the
	// modify index of evaluations from older plans.
	if results.EvalID != "" {
		// Update the modify index of the eval id
		if err := s.updateEvalModifyIndex(txn, index, results.EvalID); err != nil {
			return err
		}
	}

	txn.Commit()
	return nil
}

@@ -1486,6 +1496,34 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct
	return nil
}

// updateEvalModifyIndex is used to update the modify index of an evaluation that has been
// through a scheduler pass. This is done as part of plan apply. It ensures that when subsequent
// scheduler workers process a re-queued evaluation they see any partial updates from the plan apply.
func (s *StateStore) updateEvalModifyIndex(txn *memdb.Txn, index uint64, evalID string) error {
	// Lookup the evaluation
	existing, err := txn.First("evals", "id", evalID)
	if err != nil {
		return fmt.Errorf("eval lookup failed: %v", err)
	}
	if existing == nil {
		err := fmt.Errorf("unable to find eval id %q", evalID)
		s.logger.Printf("[ERR] state_store: %v", err)
		return err
	}
	eval := existing.(*structs.Evaluation).Copy()
	// Update the indexes
	eval.ModifyIndex = index

	// Insert the eval
	if err := txn.Insert("evals", eval); err != nil {
		return fmt.Errorf("eval insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"evals", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}
	return nil
}

// DeleteEval is used to delete an evaluation
func (s *StateStore) DeleteEval(index uint64, evals []string, allocs []string) error {
	txn := s.db.Txn(true)
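A hedged sketch of a test that exercises this path end to end, using the same helpers the surrounding state store tests already rely on (testStateStore, mock, and assert are assumed to be available as elsewhere in this package; the index values are illustrative): applying a plan result that carries an eval ID should bump that evaluation's ModifyIndex to the apply index.

package state

import (
	"testing"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/assert"
)

func TestUpsertPlanResults_UpdatesEvalModifyIndex(t *testing.T) {
	state := testStateStore(t) // same helper the surrounding tests use (assumed)
	assert := assert.New(t)

	// Seed a job and an evaluation for it.
	job := mock.Job()
	assert.Nil(state.UpsertJob(999, job))

	eval := mock.Eval()
	eval.JobID = job.ID
	assert.Nil(state.UpsertEvals(1, []*structs.Evaluation{eval}))

	// Apply a plan result at a later index that references the eval.
	alloc := mock.Alloc()
	alloc.Job = nil
	res := structs.ApplyPlanResultsRequest{
		AllocUpdateRequest: structs.AllocUpdateRequest{
			Alloc: []*structs.Allocation{alloc},
			Job:   job,
		},
		EvalID: eval.ID,
	}
	assert.Nil(state.UpsertPlanResults(1000, &res))

	// The evaluation's modify index now matches the apply index.
	ws := memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	assert.Nil(err)
	assert.NotNil(out)
	assert.EqualValues(1000, out.ModifyIndex)
}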
@ -100,40 +100,43 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
eval := mock.Eval()
|
||||
eval.JobID = job.ID
|
||||
|
||||
// Create an eval
|
||||
if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Create a plan result
|
||||
res := structs.ApplyPlanResultsRequest{
|
||||
AllocUpdateRequest: structs.AllocUpdateRequest{
|
||||
Alloc: []*structs.Allocation{alloc},
|
||||
Job: job,
|
||||
},
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
|
||||
assert := assert.New(t)
|
||||
err := state.UpsertPlanResults(1000, &res)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Nil(err)
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
out, err := state.AllocByID(ws, alloc.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(alloc, out) {
|
||||
t.Fatalf("bad: %#v %#v", alloc, out)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.Equal(alloc, out)
|
||||
|
||||
index, err := state.Index("allocs")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if index != 1000 {
|
||||
t.Fatalf("bad: %d", index)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.EqualValues(1000, index)
|
||||
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
evalOut, err := state.EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(evalOut)
|
||||
assert.EqualValues(1000, evalOut.ModifyIndex)
|
||||
}
|
||||
|
||||
// This test checks that the deployment is created and allocations count towards
|
||||
|
@ -154,6 +157,14 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
eval := mock.Eval()
|
||||
eval.JobID = job.ID
|
||||
|
||||
// Create an eval
|
||||
if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Create a plan result
|
||||
res := structs.ApplyPlanResultsRequest{
|
||||
AllocUpdateRequest: structs.AllocUpdateRequest{
|
||||
|
@ -161,6 +172,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) {
|
|||
Job: job,
|
||||
},
|
||||
Deployment: d,
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
|
||||
err := state.UpsertPlanResults(1000, &res)
|
||||
|
@ -169,31 +181,24 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) {
|
|||
}
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
assert := assert.New(t)
|
||||
out, err := state.AllocByID(ws, alloc.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(alloc, out) {
|
||||
t.Fatalf("bad: %#v %#v", alloc, out)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.Equal(alloc, out)
|
||||
|
||||
dout, err := state.DeploymentByID(ws, d.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if dout == nil {
|
||||
t.Fatalf("bad: nil deployment")
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotNil(dout)
|
||||
|
||||
tg, ok := dout.TaskGroups[alloc.TaskGroup]
|
||||
if !ok {
|
||||
t.Fatalf("bad: nil deployment state")
|
||||
}
|
||||
if tg == nil || tg.PlacedAllocs != 2 {
|
||||
t.Fatalf("bad: %v", dout)
|
||||
}
|
||||
assert.True(ok)
|
||||
assert.NotNil(tg)
|
||||
assert.Equal(2, tg.PlacedAllocs)
|
||||
|
||||
evalOut, err := state.EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(evalOut)
|
||||
assert.EqualValues(1000, evalOut.ModifyIndex)
|
||||
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
|
@ -215,6 +220,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) {
|
|||
Job: job,
|
||||
},
|
||||
Deployment: d2,
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
|
||||
err = state.UpsertPlanResults(1001, &res)
|
||||
|
@ -223,21 +229,18 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) {
|
|||
}
|
||||
|
||||
dout, err = state.DeploymentByID(ws, d2.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if dout == nil {
|
||||
t.Fatalf("bad: nil deployment")
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotNil(dout)
|
||||
|
||||
tg, ok = dout.TaskGroups[alloc.TaskGroup]
|
||||
if !ok {
|
||||
t.Fatalf("bad: nil deployment state")
|
||||
}
|
||||
if tg == nil || tg.PlacedAllocs != 2 {
|
||||
t.Fatalf("bad: %v", dout)
|
||||
}
|
||||
assert.True(ok)
|
||||
assert.NotNil(tg)
|
||||
assert.Equal(2, tg.PlacedAllocs)
|
||||
|
||||
evalOut, err = state.EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(evalOut)
|
||||
assert.EqualValues(1001, evalOut.ModifyIndex)
|
||||
}
|
||||
|
||||
// This test checks that deployment updates are applied correctly
|
||||
|
@ -258,6 +261,13 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
eval := mock.Eval()
|
||||
eval.JobID = job.ID
|
||||
|
||||
// Create an eval
|
||||
if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
alloc := mock.Alloc()
|
||||
alloc.Job = nil
|
||||
|
||||
|
@ -280,41 +290,37 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) {
|
|||
},
|
||||
Deployment: dnew,
|
||||
DeploymentUpdates: []*structs.DeploymentStatusUpdate{update},
|
||||
EvalID: eval.ID,
|
||||
}
|
||||
|
||||
err := state.UpsertPlanResults(1000, &res)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
assert := assert.New(t)
|
||||
ws := memdb.NewWatchSet()
|
||||
|
||||
// Check the deployments are correctly updated.
|
||||
dout, err := state.DeploymentByID(ws, dnew.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if dout == nil {
|
||||
t.Fatalf("bad: nil deployment")
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotNil(dout)
|
||||
|
||||
tg, ok := dout.TaskGroups[alloc.TaskGroup]
|
||||
if !ok {
|
||||
t.Fatalf("bad: nil deployment state")
|
||||
}
|
||||
if tg == nil || tg.PlacedAllocs != 1 {
|
||||
t.Fatalf("bad: %v", dout)
|
||||
}
|
||||
assert.True(ok)
|
||||
assert.NotNil(tg)
|
||||
assert.Equal(1, tg.PlacedAllocs)
|
||||
|
||||
doutstandingout, err := state.DeploymentByID(ws, doutstanding.ID)
|
||||
if err != nil || doutstandingout == nil {
|
||||
t.Fatalf("bad: %v %v", err, doutstandingout)
|
||||
}
|
||||
if doutstandingout.Status != update.Status || doutstandingout.StatusDescription != update.StatusDescription || doutstandingout.ModifyIndex != 1000 {
|
||||
t.Fatalf("bad: %v", doutstandingout)
|
||||
}
|
||||
assert.Nil(err)
|
||||
assert.NotNil(doutstandingout)
|
||||
assert.Equal(update.Status, doutstandingout.Status)
|
||||
assert.Equal(update.StatusDescription, doutstandingout.StatusDescription)
|
||||
assert.EqualValues(1000, doutstandingout.ModifyIndex)
|
||||
|
||||
evalOut, err := state.EvalByID(ws, eval.ID)
|
||||
assert.Nil(err)
|
||||
assert.NotNil(evalOut)
|
||||
assert.EqualValues(1000, evalOut.ModifyIndex)
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
|
|
@@ -12,6 +12,7 @@ import (
	"fmt"
	"io"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"reflect"

@@ -515,6 +516,14 @@ type ApplyPlanResultsRequest struct {
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// EvalID is the eval ID of the plan being applied. The modify index of the
	// evaluation is updated as part of applying the plan to ensure that subsequent
	// scheduling events for the same job will wait for the index that last produced
	// state changes. This is necessary for blocked evaluations since they can be
	// processed many times, potentially making state updates, without the state of
	// the evaluation itself being updated.
	EvalID string
}

// AllocUpdateRequest is used to submit changes to allocations, either
@@ -1062,7 +1071,7 @@ type Node struct {

	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting priviledged activities.
	// conducting privileged activities.
	SecretID string

	// Datacenter for this node

@@ -1246,7 +1255,7 @@ func DefaultResources() *Resources {
// api/resources.go and should be kept in sync.
func MinResources() *Resources {
	return &Resources{
		CPU:      100,
		CPU:      20,
		MemoryMB: 10,
		IOPS:     0,
	}
@@ -2930,6 +2939,13 @@ func (sc *ServiceCheck) validate() error {
		if sc.Path == "" {
			return fmt.Errorf("http type must have a valid http path")
		}
		url, err := url.Parse(sc.Path)
		if err != nil {
			return fmt.Errorf("http type must have a valid http path")
		}
		if url.IsAbs() {
			return fmt.Errorf("http type must have a relative http path")
		}

	case ServiceCheckScript:
		if sc.Command == "" {
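The new validation hinges on url.IsAbs: a check path may only be the path portion of a URL, and anything carrying a scheme is rejected. A small standard-library-only sketch of the distinction (the paths are illustrative):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A relative path is accepted by the check validation above.
	rel, _ := url.Parse("/v1/agent/health")
	fmt.Println(rel.IsAbs()) // false

	// An absolute URL is now rejected with "must have a relative http path".
	abs, _ := url.Parse("https://example.com/v1/agent/health")
	fmt.Println(abs.IsAbs()) // true
}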
@@ -3124,8 +3140,8 @@ func (s *Service) Validate() error {
	}

	for _, c := range s.Checks {
		if s.PortLabel == "" && c.RequiresPort() {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name))
		if s.PortLabel == "" && c.PortLabel == "" && c.RequiresPort() {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but neither check nor service %+q have a port", c.Name, s.Name))
			continue
		}
@@ -3570,8 +3586,16 @@ func validateServices(t *Task) error {
		}
	}

	// Iterate over a sorted list of keys to make error listings stable
	keys := make([]string, 0, len(servicePorts))
	for p := range servicePorts {
		keys = append(keys, p)
	}
	sort.Strings(keys)

	// Ensure all ports referenced in services exist.
	for servicePort, services := range servicePorts {
	for _, servicePort := range keys {
		services := servicePorts[servicePort]
		_, ok := portLabels[servicePort]
		if !ok {
			names := make([]string, 0, len(services))
@@ -3620,7 +3644,7 @@ type Template struct {
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artificat
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered

@@ -3790,7 +3814,9 @@ func (ts *TaskState) Copy() *TaskState {
	return copy
}

// Successful returns whether a task finished successfully.
// Successful returns whether a task finished successfully. This doesn't really
// have meaning on a non-batch allocation because a service and system
// allocation should not finish.
func (ts *TaskState) Successful() bool {
	l := len(ts.Events)
	if ts.State != TaskStateDead || l == 0 {
@@ -4996,9 +5022,25 @@ func (a *Allocation) Terminated() bool {
}

// RanSuccessfully returns whether the client has run the allocation and all
// tasks finished successfully
// tasks finished successfully. Critically this function returns whether the
// allocation has run to completion and not just that the alloc has converged to
// its desired state. That is to say that a batch allocation must have finished
// with exit code 0 on all task groups. This doesn't really have meaning on a
// non-batch allocation because a service and system allocation should not
// finish.
func (a *Allocation) RanSuccessfully() bool {
	return a.ClientStatus == AllocClientStatusComplete
	// Handle the case the client hasn't started the allocation.
	if len(a.TaskStates) == 0 {
		return false
	}

	// Check to see if all the tasks finished successfully in the allocation
	allSuccess := true
	for _, state := range a.TaskStates {
		allSuccess = allSuccess && state.Successful()
	}

	return allSuccess
}

// ShouldMigrate returns if the allocation needs data migration
@ -1230,60 +1230,129 @@ func TestTask_Validate_Service_Check(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
check2 := ServiceCheck{
|
||||
Name: "check-name-2",
|
||||
Type: ServiceCheckHTTP,
|
||||
Interval: 10 * time.Second,
|
||||
Timeout: 2 * time.Second,
|
||||
Path: "/foo/bar",
|
||||
}
|
||||
|
||||
err = check2.validate()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
check2.Path = ""
|
||||
err = check2.validate()
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "valid http path") {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
check2.Path = "http://www.example.com"
|
||||
err = check2.validate()
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "relative http path") {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
|
||||
// inherit address mode but do inherit ports.
|
||||
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
|
||||
task := &Task{
|
||||
Resources: &Resources{
|
||||
Networks: []*NetworkResource{
|
||||
{
|
||||
DynamicPorts: []Port{
|
||||
{
|
||||
Label: "http",
|
||||
Value: 9999,
|
||||
getTask := func(s *Service) *Task {
|
||||
return &Task{
|
||||
Resources: &Resources{
|
||||
Networks: []*NetworkResource{
|
||||
{
|
||||
DynamicPorts: []Port{
|
||||
{
|
||||
Label: "http",
|
||||
Value: 9999,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Services: []*Service{
|
||||
{
|
||||
Services: []*Service{s},
|
||||
}
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
Service *Service
|
||||
ErrContains string
|
||||
}{
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "invalid-driver",
|
||||
PortLabel: "80",
|
||||
AddressMode: "host",
|
||||
},
|
||||
{
|
||||
Name: "http-driver",
|
||||
ErrContains: `port label "80" referenced`,
|
||||
},
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "http-driver-fail-1",
|
||||
PortLabel: "80",
|
||||
AddressMode: "driver",
|
||||
Checks: []*ServiceCheck{
|
||||
{
|
||||
// Should fail
|
||||
Name: "invalid-check-1",
|
||||
Type: "tcp",
|
||||
Interval: time.Second,
|
||||
Timeout: time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
ErrContains: `check "invalid-check-1" cannot use a numeric port`,
|
||||
},
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "http-driver-fail-2",
|
||||
PortLabel: "80",
|
||||
AddressMode: "driver",
|
||||
Checks: []*ServiceCheck{
|
||||
{
|
||||
// Should fail
|
||||
Name: "invalid-check-2",
|
||||
Type: "tcp",
|
||||
PortLabel: "80",
|
||||
Interval: time.Second,
|
||||
Timeout: time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
ErrContains: `check "invalid-check-2" cannot use a numeric port`,
|
||||
},
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "http-driver-fail-3",
|
||||
PortLabel: "80",
|
||||
AddressMode: "driver",
|
||||
Checks: []*ServiceCheck{
|
||||
{
|
||||
// Should fail
|
||||
Name: "invalid-check-3",
|
||||
Type: "tcp",
|
||||
PortLabel: "missing-port-label",
|
||||
Interval: time.Second,
|
||||
Timeout: time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
ErrContains: `port label "missing-port-label" referenced`,
|
||||
},
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "http-driver-passes",
|
||||
PortLabel: "80",
|
||||
AddressMode: "driver",
|
||||
Checks: []*ServiceCheck{
|
||||
{
|
||||
// Should pass
|
||||
Name: "valid-script-check",
|
||||
Type: "script",
|
||||
Command: "ok",
|
||||
|
@ -1291,7 +1360,6 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
|
|||
Timeout: time.Second,
|
||||
},
|
||||
{
|
||||
// Should pass
|
||||
Name: "valid-host-check",
|
||||
Type: "tcp",
|
||||
PortLabel: "http",
|
||||
|
@ -1299,7 +1367,6 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
|
|||
Timeout: time.Second,
|
||||
},
|
||||
{
|
||||
// Should pass
|
||||
Name: "valid-driver-check",
|
||||
Type: "tcp",
|
||||
AddressMode: "driver",
|
||||
|
@ -1309,23 +1376,65 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := validateServices(task)
|
||||
if err == nil {
|
||||
t.Fatalf("expected errors but task validated successfully")
|
||||
}
|
||||
errs := err.(*multierror.Error).Errors
|
||||
if expected := 4; len(errs) != expected {
|
||||
for i, err := range errs {
|
||||
t.Logf("errs[%d] -> %s", i, err)
|
||||
}
|
||||
t.Fatalf("expected %d errors but found %d", expected, len(errs))
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "empty-address-3673-passes-1",
|
||||
Checks: []*ServiceCheck{
|
||||
{
|
||||
Name: "valid-port-label",
|
||||
Type: "tcp",
|
||||
PortLabel: "http",
|
||||
Interval: time.Second,
|
||||
Timeout: time.Second,
|
||||
},
|
||||
{
|
||||
Name: "empty-is-ok",
|
||||
Type: "script",
|
||||
Command: "ok",
|
||||
Interval: time.Second,
|
||||
Timeout: time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "empty-address-3673-passes-2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: &Service{
|
||||
Name: "empty-address-3673-fails",
|
||||
Checks: []*ServiceCheck{
|
||||
{
|
||||
Name: "empty-is-not-ok",
|
||||
Type: "tcp",
|
||||
Interval: time.Second,
|
||||
Timeout: time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
ErrContains: `invalid: check requires a port but neither check nor service`,
|
||||
},
|
||||
}
|
||||
|
||||
assert.Contains(t, errs[0].Error(), `check "invalid-check-1" cannot use a numeric port`)
|
||||
assert.Contains(t, errs[1].Error(), `check "invalid-check-2" cannot use a numeric port`)
|
||||
assert.Contains(t, errs[2].Error(), `port label "80" referenced`)
|
||||
assert.Contains(t, errs[3].Error(), `port label "missing-port-label" referenced`)
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
task := getTask(tc.Service)
|
||||
t.Run(tc.Service.Name, func(t *testing.T) {
|
||||
err := validateServices(task)
|
||||
if err == nil && tc.ErrContains == "" {
|
||||
// Ok!
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatalf("no error returned. expected: %s", tc.ErrContains)
|
||||
}
|
||||
if !strings.Contains(err.Error(), tc.ErrContains) {
|
||||
t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
|
||||
|
|
|
@ -341,6 +341,7 @@ func TestWorker_SubmitPlan(t *testing.T) {
|
|||
eval1 := mock.Eval()
|
||||
eval1.JobID = job.ID
|
||||
s1.fsm.State().UpsertJob(1000, job)
|
||||
s1.fsm.State().UpsertEvals(1000, []*structs.Evaluation{eval1})
|
||||
|
||||
// Create the register request
|
||||
s1.evalBroker.Enqueue(eval1)
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestServiceSched_JobRegister(t *testing.T) {
|
||||
|
@ -35,8 +36,11 @@ func TestServiceSched_JobRegister(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -118,7 +122,9 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
if err := h.Process(NewServiceScheduler, eval); err != nil {
|
||||
|
@ -149,7 +155,9 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
h1 := NewHarnessWithState(t, h.State)
|
||||
if err := h1.Process(NewServiceScheduler, eval); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -206,8 +214,11 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -275,8 +286,11 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -364,8 +378,11 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -456,7 +473,9 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -548,7 +567,9 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T)
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
assert.Nil(h.Process(NewServiceScheduler, eval), "Process")
|
||||
|
@ -602,7 +623,9 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
AnnotatePlan: true,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -679,7 +702,9 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -720,8 +745,11 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -802,8 +830,11 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -899,8 +930,9 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -1016,8 +1048,11 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -1245,7 +1280,9 @@ func TestServiceSched_JobModify(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1329,7 +1366,9 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1436,7 +1475,9 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1530,7 +1571,9 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1635,7 +1678,9 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1736,7 +1781,9 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1844,7 +1891,9 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1958,7 +2007,9 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2044,7 +2095,9 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobDeregister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2111,7 +2164,9 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobDeregister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2201,7 +2256,9 @@ func TestServiceSched_NodeDown(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2267,7 +2324,9 @@ func TestServiceSched_NodeUpdate(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2318,7 +2377,9 @@ func TestServiceSched_NodeDrain(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2419,8 +2480,11 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -2493,7 +2557,9 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2550,7 +2616,9 @@ func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2608,7 +2676,9 @@ func TestServiceSched_RetryLimit(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -2664,7 +2734,9 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
|
@ -2719,7 +2791,9 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
|
@ -2752,6 +2826,94 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) {
    h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestBatchSched_Run_LostAlloc(t *testing.T) {
    h := NewHarness(t)

    // Create a node
    node := mock.Node()
    noErr(t, h.State.UpsertNode(h.NextIndex(), node))

    // Create a job
    job := mock.Job()
    job.ID = "my-job"
    job.Type = structs.JobTypeBatch
    job.TaskGroups[0].Count = 3
    noErr(t, h.State.UpsertJob(h.NextIndex(), job))

    // Desired = 3
    // Mark one as lost and then schedule
    // [(0, run, running), (1, run, running), (1, stop, lost)]

    // Create two running allocations
    var allocs []*structs.Allocation
    for i := 0; i <= 1; i++ {
        alloc := mock.Alloc()
        alloc.Job = job
        alloc.JobID = job.ID
        alloc.NodeID = node.ID
        alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
        alloc.ClientStatus = structs.AllocClientStatusRunning
        allocs = append(allocs, alloc)
    }

    // Create a failed alloc
    alloc := mock.Alloc()
    alloc.Job = job
    alloc.JobID = job.ID
    alloc.NodeID = node.ID
    alloc.Name = "my-job.web[1]"
    alloc.DesiredStatus = structs.AllocDesiredStatusStop
    alloc.ClientStatus = structs.AllocClientStatusComplete
    allocs = append(allocs, alloc)
    noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

    // Create a mock evaluation to register the job
    eval := &structs.Evaluation{
        Namespace: structs.DefaultNamespace,
        ID: uuid.Generate(),
        Priority: job.Priority,
        TriggeredBy: structs.EvalTriggerJobRegister,
        JobID: job.ID,
        Status: structs.EvalStatusPending,
    }
    noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

    // Process the evaluation
    err := h.Process(NewBatchScheduler, eval)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Ensure a plan
    if len(h.Plans) != 1 {
        t.Fatalf("bad: %#v", h.Plans)
    }

    // Lookup the allocations by JobID
    ws := memdb.NewWatchSet()
    out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
    noErr(t, err)

    // Ensure a replacement alloc was placed.
    if len(out) != 4 {
        t.Fatalf("bad: %#v", out)
    }

    // Assert that we have the correct number of each alloc name
    expected := map[string]int{
        "my-job.web[0]": 1,
        "my-job.web[1]": 2,
        "my-job.web[2]": 1,
    }
    actual := make(map[string]int, 3)
    for _, alloc := range out {
        actual[alloc.Name] += 1
    }
    require.Equal(t, actual, expected)

    h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
    h := NewHarness(t)

@ -2781,7 +2943,9 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
|
@ -2841,7 +3005,9 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
|
@ -2904,7 +3070,9 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
|
@ -2985,7 +3153,9 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
|
@ -3039,8 +3209,11 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -3093,6 +3266,16 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) {
|
|||
alloc.NodeID = node.ID
|
||||
alloc.Name = "my-job.web[0]"
|
||||
alloc.ClientStatus = structs.AllocClientStatusComplete
|
||||
alloc.TaskStates = make(map[string]*structs.TaskState)
|
||||
alloc.TaskStates["web"] = &structs.TaskState{
|
||||
State: structs.TaskStateDead,
|
||||
Events: []*structs.TaskEvent{
|
||||
{
|
||||
Type: structs.TaskTerminated,
|
||||
ExitCode: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
||||
|
||||
// Create a mock evaluation to register the job
|
||||
|
@ -3102,8 +3285,11 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -3154,8 +3340,11 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewBatchScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -3197,7 +3386,9 @@ func TestGenericSched_ChainedAlloc(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
if err := h.Process(NewServiceScheduler, eval); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -3226,7 +3417,10 @@ func TestGenericSched_ChainedAlloc(t *testing.T) {
|
|||
Priority: job1.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job1.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
|
||||
|
||||
// Process the evaluation
|
||||
if err := h1.Process(NewServiceScheduler, eval1); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -3287,8 +3481,11 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: alloc.Job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -3344,8 +3541,11 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobDeregister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -3413,8 +3613,11 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
|
|
@ -32,7 +32,9 @@ func TestSystemSched_JobRegister(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -105,7 +107,9 @@ func TestSystemeSched_JobRegister_StickyAllocs(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
if err := h.Process(NewSystemScheduler, eval); err != nil {
|
||||
|
@ -134,7 +138,9 @@ func TestSystemeSched_JobRegister_StickyAllocs(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
h1 := NewHarnessWithState(t, h.State)
|
||||
if err := h1.Process(NewSystemScheduler, eval); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -181,7 +187,9 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
if err := h.Process(NewSystemScheduler, eval); err != nil {
|
||||
|
@ -207,7 +215,9 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
|
|||
Priority: job1.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job1.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
if err := h1.Process(NewSystemScheduler, eval1); err != nil {
|
||||
|
@ -241,8 +251,9 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
|
|||
Priority: svcJob.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: svcJob.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -260,8 +271,9 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
if err := h.Process(NewSystemScheduler, eval1); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -307,7 +319,9 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
AnnotatePlan: true,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -405,8 +419,9 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -472,8 +487,9 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -542,7 +558,9 @@ func TestSystemSched_JobModify(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -633,8 +651,9 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
if err != nil {
|
||||
|
@ -728,7 +747,9 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -822,7 +843,9 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobDeregister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -894,7 +917,9 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
|
|||
Priority: 50,
|
||||
TriggeredBy: structs.EvalTriggerJobDeregister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -956,7 +981,9 @@ func TestSystemSched_NodeDown(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -1021,7 +1048,9 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewServiceScheduler, eval)
|
||||
|
@ -1080,7 +1109,9 @@ func TestSystemSched_NodeDrain(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -1143,7 +1174,9 @@ func TestSystemSched_NodeUpdate(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -1180,7 +1213,9 @@ func TestSystemSched_RetryLimit(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -1230,7 +1265,9 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -1264,7 +1301,9 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
|
|||
Priority: job.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
// Process the evaluation
|
||||
if err := h.Process(NewSystemScheduler, eval); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -1299,7 +1338,9 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
|
|||
Priority: job1.Priority,
|
||||
TriggeredBy: structs.EvalTriggerJobRegister,
|
||||
JobID: job1.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
|
||||
// Process the evaluation
|
||||
if err := h1.Process(NewSystemScheduler, eval1); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -1389,7 +1430,9 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
@ -1460,7 +1503,9 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
|
|||
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
||||
JobID: job.ID,
|
||||
NodeID: node.ID,
|
||||
Status: structs.EvalStatusPending,
|
||||
}
|
||||
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
||||
|
||||
// Process the evaluation
|
||||
err := h.Process(NewSystemScheduler, eval)
|
||||
|
|
|
@ -122,6 +122,7 @@ func (h *Harness) SubmitPlan(plan *structs.Plan) (*structs.PlanResult, State, er
        },
        Deployment: plan.Deployment,
        DeploymentUpdates: plan.DeploymentUpdates,
        EvalID: plan.EvalID,
    }

    // Apply the full plan

@ -9,7 +9,7 @@ trap 'kill ${PING_LOOP_PID}' EXIT HUP INT QUIT TERM

if [ "$RUN_STATIC_CHECKS" ]; then
    make check
    if [ "$TRAVIS_OS_NAME" == "linux" ]; then
    if [ "$TRAVIS_OS_NAME" == "linux" ]; then
        make checkscripts
    fi
fi

@ -2,7 +2,7 @@

set -o errexit

VERSION=1.0.0
VERSION=1.0.2
DOWNLOAD=https://releases.hashicorp.com/consul/${VERSION}/consul_${VERSION}_linux_amd64.zip

function install_consul() {

@ -3,7 +3,7 @@
set -o errexit

VERSION=1.27.0
DOWNLOAD=https://github.com/coreos/rkt/releases/download/v${VERSION}/rkt-v${VERSION}.tar.gz
DOWNLOAD=https://github.com/rkt/rkt/releases/download/v${VERSION}/rkt-v${VERSION}.tar.gz

function install_rkt() {
    if [[ -e /usr/local/bin/rkt ]] ; then

@ -2,7 +2,7 @@

set -o errexit

VERSION=0.7.0
VERSION=0.9.1
DOWNLOAD=https://releases.hashicorp.com/vault/${VERSION}/vault_${VERSION}_linux_amd64.zip

function install_vault() {

@ -43,7 +43,7 @@ a custom AMI:

```bash
region = "us-east-1"
ami = "ami-6ce26316"
ami = "ami-d42d74ae"
instance_type = "t2.medium"
key_name = "KEY_NAME"
server_count = "3"

terraform/aws/env/us-east/terraform.tfvars (vendored, 2 changes)

@ -1,5 +1,5 @@
region = "us-east-1"
ami = "ami-6ce26316"
ami = "ami-d42d74ae"
instance_type = "t2.medium"
key_name = "KEY_NAME"
server_count = "3"

@ -6,17 +6,17 @@ cd /ops

CONFIGDIR=/ops/shared/config

CONSULVERSION=1.0.0
CONSULVERSION=1.0.2
CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
CONSULCONFIGDIR=/etc/consul.d
CONSULDIR=/opt/consul

VAULTVERSION=0.8.3
VAULTVERSION=0.9.1
VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip
VAULTCONFIGDIR=/etc/vault.d
VAULTDIR=/opt/vault

NOMADVERSION=0.7.0
NOMADVERSION=0.7.1
NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
NOMADCONFIGDIR=/etc/nomad.d
NOMADDIR=/opt/nomad

ui/.prettierrc (new file, 3 lines)

@ -0,0 +1,3 @@
printWidth: 100
singleQuote: true
trailingComma: es5

@ -1,21 +0,0 @@
---
language: node_js
node_js:
  - "6"

sudo: false

cache:
  directories:
    - $HOME/.npm

before_install:
  - npm config set spin false
  - npm install -g phantomjs-prebuilt
  - phantomjs --version

install:
  - npm install

script:
  - npm test

@ -1,15 +1,14 @@
import Ember from 'ember';
import { inject as service } from '@ember/service';
import { computed, get } from '@ember/object';
import RESTAdapter from 'ember-data/adapters/rest';
import codesForError from '../utils/codes-for-error';

const { get, computed, inject } = Ember;

export const namespace = 'v1';

export default RESTAdapter.extend({
  namespace,

  token: inject.service(),
  token: service(),

  headers: computed('token.secret', function() {
    const token = this.get('token.secret');

@ -1,10 +1,10 @@
import Ember from 'ember';
import { inject as service } from '@ember/service';
import RSVP from 'rsvp';
import { assign } from '@ember/polyfills';
import ApplicationAdapter from './application';

const { RSVP, inject, assign } = Ember;

export default ApplicationAdapter.extend({
  system: inject.service(),
  system: service(),

  shouldReloadAll: () => true,

@ -1,10 +1,8 @@
import Ember from 'ember';
import { inject as service } from '@ember/service';
import { default as ApplicationAdapter, namespace } from './application';

const { inject } = Ember;

export default ApplicationAdapter.extend({
  store: inject.service(),
  store: service(),

  namespace: namespace + '/acl',

@ -1,16 +1,14 @@
import Ember from 'ember';
import Application from '@ember/application';
import Resolver from './resolver';
import loadInitializers from 'ember-load-initializers';
import config from './config/environment';

let App;

Ember.MODEL_FACTORY_INJECTIONS = true;

App = Ember.Application.extend({
App = Application.extend({
  modulePrefix: config.modulePrefix,
  podModulePrefix: config.podModulePrefix,
  Resolver
  Resolver,
});

loadInitializers(App, config.modulePrefix);

@ -1,10 +1,10 @@
|
|||
import Ember from 'ember';
|
||||
import { inject as service } from '@ember/service';
|
||||
import Component from '@ember/component';
|
||||
import { run } from '@ember/runloop';
|
||||
import { lazyClick } from '../helpers/lazy-click';
|
||||
|
||||
const { Component, inject, run } = Ember;
|
||||
|
||||
export default Component.extend({
|
||||
store: inject.service(),
|
||||
store: service(),
|
||||
|
||||
tagName: 'tr',
|
||||
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
import Ember from 'ember';
|
||||
import { computed } from '@ember/object';
|
||||
import DistributionBar from './distribution-bar';
|
||||
|
||||
const { computed } = Ember;
|
||||
|
||||
export default DistributionBar.extend({
|
||||
layoutName: 'components/distribution-bar',
|
||||
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
import Ember from 'ember';
|
||||
|
||||
const { Component } = Ember;
|
||||
import Component from '@ember/component';
|
||||
|
||||
export default Component.extend({
|
||||
tagName: '',
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
import Ember from 'ember';
|
||||
import Component from '@ember/component';
|
||||
import { lazyClick } from '../helpers/lazy-click';
|
||||
|
||||
const { Component } = Ember;
|
||||
|
||||
export default Component.extend({
|
||||
tagName: 'tr',
|
||||
classNames: ['client-node-row', 'is-interactive'],
|
||||
|
|
|
@ -1,10 +1,13 @@
import Ember from 'ember';
import Component from '@ember/component';
import { computed } from '@ember/object';
import { run } from '@ember/runloop';
import { assign } from '@ember/polyfills';
import { guidFor } from '@ember/object/internals';
import d3 from 'npm:d3-selection';
import 'npm:d3-transition';
import WindowResizable from '../mixins/window-resizable';
import styleStringProperty from '../utils/properties/style-string';

const { Component, computed, run, assign, guidFor } = Ember;
const sumAggregate = (total, val) => total + val;

export default Component.extend(WindowResizable, {

@ -96,7 +99,7 @@ export default Component.extend(WindowResizable, {
    });

    slices = slices.merge(slicesEnter);
    slices.attr('class', d => d.className || `slice-${filteredData.indexOf(d)}`);
    slices.attr('class', d => d.className || `slice-${_data.indexOf(d)}`);

    const setWidth = d => `${width * d.percent - (d.index === sliceCount - 1 || d.index === 0 ? 1 : 2)}px`
    const setOffset = d => `${width * d.offset + (d.index === 0 ? 0 : 1)}px`

ui/app/components/freestyle/sg-boxed-section.js (new file, 27 lines)
|
@ -0,0 +1,27 @@
|
|||
import Component from '@ember/component';
|
||||
import { computed } from '@ember/object';
|
||||
|
||||
export default Component.extend({
|
||||
variants: computed(() => [
|
||||
{
|
||||
key: 'Normal',
|
||||
title: 'Normal',
|
||||
slug: '',
|
||||
},
|
||||
{
|
||||
key: 'Info',
|
||||
title: 'Info',
|
||||
slug: 'is-info',
|
||||
},
|
||||
{
|
||||
key: 'Warning',
|
||||
title: 'Warning',
|
||||
slug: 'is-warning',
|
||||
},
|
||||
{
|
||||
key: 'Danger',
|
||||
title: 'Danger',
|
||||
slug: 'is-danger',
|
||||
},
|
||||
]),
|
||||
});
|
ui/app/components/freestyle/sg-colors.js (new file, 97 lines)
|
@ -0,0 +1,97 @@
|
|||
import Component from '@ember/component';
|
||||
import { computed } from '@ember/object';
|
||||
|
||||
export default Component.extend({
|
||||
nomadTheme: computed(() => [
|
||||
{
|
||||
name: 'Primary',
|
||||
base: '#25ba81',
|
||||
},
|
||||
{
|
||||
name: 'Primary Dark',
|
||||
base: '#1d9467',
|
||||
},
|
||||
{
|
||||
name: 'Text',
|
||||
base: '#0a0a0a',
|
||||
},
|
||||
{
|
||||
name: 'Link',
|
||||
base: '#1563ff',
|
||||
},
|
||||
{
|
||||
name: 'Gray',
|
||||
base: '#bbc4d1',
|
||||
},
|
||||
{
|
||||
name: 'Off-white',
|
||||
base: '#f5f5f5',
|
||||
},
|
||||
]),
|
||||
|
||||
productColors: computed(() => [
|
||||
{
|
||||
name: 'Consul Pink',
|
||||
base: '#ff0087',
|
||||
},
|
||||
{
|
||||
name: 'Consul Pink Dark',
|
||||
base: '#c62a71',
|
||||
},
|
||||
{
|
||||
name: 'Packer Blue',
|
||||
base: '#1daeff',
|
||||
},
|
||||
{
|
||||
name: 'Packer Blue Dark',
|
||||
base: '#1d94dd',
|
||||
},
|
||||
{
|
||||
name: 'Terraform Purple',
|
||||
base: '#5c4ee5',
|
||||
},
|
||||
{
|
||||
name: 'Terraform Purple Dark',
|
||||
base: '#4040b2',
|
||||
},
|
||||
{
|
||||
name: 'Vagrant Blue',
|
||||
base: '#1563ff',
|
||||
},
|
||||
{
|
||||
name: 'Vagrant Blue Dark',
|
||||
base: '#104eb2',
|
||||
},
|
||||
{
|
||||
name: 'Nomad Green',
|
||||
base: '#25ba81',
|
||||
},
|
||||
{
|
||||
name: 'Nomad Green Dark',
|
||||
base: '#1d9467',
|
||||
},
|
||||
{
|
||||
name: 'Nomad Green Darker',
|
||||
base: '#16704d',
|
||||
},
|
||||
]),
|
||||
|
||||
emotiveColors: computed(() => [
|
||||
{
|
||||
name: 'Success',
|
||||
base: '#23d160',
|
||||
},
|
||||
{
|
||||
name: 'Warning',
|
||||
base: '#fa8e23',
|
||||
},
|
||||
{
|
||||
name: 'Danger',
|
||||
base: '#c84034',
|
||||
},
|
||||
{
|
||||
name: 'Info',
|
||||
base: '#1563ff',
|
||||
},
|
||||
]),
|
||||
});
|
ui/app/components/freestyle/sg-distribution-bar-jumbo.js (new file, 13 lines)
|
@ -0,0 +1,13 @@
|
|||
import Component from '@ember/component';
|
||||
import { computed } from '@ember/object';
|
||||
|
||||
export default Component.extend({
|
||||
distributionBarData: computed(() => {
|
||||
return [
|
||||
{ label: 'one', value: 10 },
|
||||
{ label: 'two', value: 20 },
|
||||
{ label: 'three', value: 0 },
|
||||
{ label: 'four', value: 35 },
|
||||
];
|
||||
}),
|
||||
});
|
ui/app/components/freestyle/sg-distribution-bar.js (new file, 43 lines)
|
@ -0,0 +1,43 @@
|
|||
import Component from '@ember/component';
|
||||
import { computed } from '@ember/object';
|
||||
|
||||
export default Component.extend({
|
||||
timerTicks: 0,
|
||||
|
||||
startTimer: function() {
|
||||
this.set(
|
||||
'timer',
|
||||
setInterval(() => {
|
||||
this.incrementProperty('timerTicks');
|
||||
}, 500)
|
||||
);
|
||||
}.on('init'),
|
||||
|
||||
willDestroy() {
|
||||
clearInterval(this.get('timer'));
|
||||
},
|
||||
|
||||
distributionBarData: computed(() => {
|
||||
return [
|
||||
{ label: 'one', value: 10 },
|
||||
{ label: 'two', value: 20 },
|
||||
{ label: 'three', value: 30 },
|
||||
];
|
||||
}),
|
||||
|
||||
distributionBarDataWithClasses: computed(() => {
|
||||
return [
|
||||
{ label: 'Queued', value: 10, className: 'queued' },
|
||||
{ label: 'Complete', value: 20, className: 'complete' },
|
||||
{ label: 'Failed', value: 30, className: 'failed' },
|
||||
];
|
||||
}),
|
||||
|
||||
distributionBarDataRotating: computed('timerTicks', () => {
|
||||
return [
|
||||
{ label: 'one', value: Math.round(Math.random() * 50) },
|
||||
{ label: 'two', value: Math.round(Math.random() * 50) },
|
||||
{ label: 'three', value: Math.round(Math.random() * 50) },
|
||||
];
|
||||
}),
|
||||
});
|
|
@ -1,9 +1,9 @@
|
|||
import Ember from 'ember';
|
||||
|
||||
const { Component, inject, computed } = Ember;
|
||||
import { inject as service } from '@ember/service';
|
||||
import Component from '@ember/component';
|
||||
import { computed } from '@ember/object';
|
||||
|
||||
export default Component.extend({
|
||||
system: inject.service(),
|
||||
system: service(),
|
||||
|
||||
sortedNamespaces: computed('system.namespaces.@each.name', function() {
|
||||
const namespaces = this.get('system.namespaces').toArray() || [];
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
import Ember from 'ember';
|
||||
|
||||
const { Component } = Ember;
|
||||
import Component from '@ember/component';
|
||||
|
||||
export default Component.extend({
|
||||
classNames: ['job-deployment', 'boxed-section'],
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
import Ember from 'ember';
|
||||
|
||||
const { Component } = Ember;
|
||||
import Component from '@ember/component';
|
||||
|
||||
export default Component.extend({
|
||||
tagName: '',
|
||||
|
|
Some files were not shown because too many files have changed in this diff.