sync

parent b85f15ead1
commit e5ec915ac3

@@ -60,3 +60,25 @@ rkt-*
./idea
*.iml

+# UI rules
+
+# compiled output
+/ui/dist
+/ui/tmp
+
+# dependencies
+/ui/node_modules
+/ui/bower_components
+
+# misc
+/ui/.sass-cache
+/ui/connect.lock
+/ui/coverage/*
+/ui/libpeerconnection.log
+/ui/npm-debug.log*
+/ui/testem.log
+.ignore
+
+# generated routes file
+command/agent/bindata_assetfs.go
.travis.yml
@@ -9,10 +9,22 @@ language: go
go:
  - 1.9.x

+git:
+  depth: 300
+
branches:
  only:
    - master

matrix:
  include:
-   - env:
+   - env: RUN_UI_TESTS=1 SKIP_NOMAD_TESTS=1

+cache:
+  directories:
+    - ui/node_modules
+
before_install:
  - sudo apt-get update
  - sudo apt-get install -y liblxc1 lxc-dev lxc shellcheck
GNUmakefile
@@ -47,7 +47,7 @@ ALL_TARGETS += freebsd_amd64
endif

pkg/darwin_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for darwin/amd64
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -63,7 +63,7 @@ pkg/freebsd_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for freebsd/amd64
		-o "$@"

pkg/linux_386/nomad: $(SOURCE_FILES) ## Build Nomad for linux/386
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=linux GOARCH=386 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -71,7 +71,7 @@ pkg/linux_386/nomad: $(SOURCE_FILES) ## Build Nomad for linux/386
		-o "$@"

pkg/linux_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for linux/amd64
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=linux GOARCH=amd64 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -79,7 +79,7 @@ pkg/linux_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for linux/amd64
		-o "$@"

pkg/linux_arm/nomad: $(SOURCE_FILES) ## Build Nomad for linux/arm
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=linux GOARCH=arm CC=arm-linux-gnueabihf-gcc-5 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -87,7 +87,7 @@ pkg/linux_arm/nomad: $(SOURCE_FILES) ## Build Nomad for linux/arm
		-o "$@"

pkg/linux_arm64/nomad: $(SOURCE_FILES) ## Build Nomad for linux/arm64
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=linux GOARCH=arm64 CC=aarch64-linux-gnu-gcc-5 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -100,7 +100,7 @@ pkg/linux_arm64/nomad: $(SOURCE_FILES) ## Build Nomad for linux/arm64
# CC=i686-w64-mingw32-gcc
# CXX=i686-w64-mingw32-g++
pkg/windows_386/nomad: $(SOURCE_FILES) ## Build Nomad for windows/386
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=windows GOARCH=386 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -108,7 +108,7 @@ pkg/windows_386/nomad: $(SOURCE_FILES) ## Build Nomad for windows/386
		-o "$@.exe"

pkg/windows_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for windows/amd64
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=windows GOARCH=amd64 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -116,7 +116,7 @@ pkg/windows_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for windows/amd64
		-o "$@.exe"

pkg/linux_amd64-lxc/nomad: $(SOURCE_FILES) ## Build Nomad+LXC for linux/amd64
-	@echo "==> Building $@..."
+	@echo "==> Building $@ with tags $(GO_TAGS)..."
	@CGO_ENABLED=1 GOOS=linux GOARCH=amd64 \
		go build \
		-ldflags $(GO_LDFLAGS) \

@@ -149,6 +149,8 @@ deps: ## Install build and development dependencies
	go get -u github.com/axw/gocov/gocov
	go get -u gopkg.in/matm/v1/gocov-html
	go get -u github.com/ugorji/go/codec/codecgen
+	go get -u github.com/jteeuwen/go-bindata/...
+	go get -u github.com/elazarl/go-bindata-assetfs/...
	go get -u github.com/hashicorp/vault
	go get -u github.com/a8m/tree/cmd/tree

@@ -158,7 +160,7 @@ check: ## Lint the source code
	@gometalinter \
		--deadline 10m \
		--vendor \
-		--exclude '.*\.generated\.go:\d+:' \
+		--exclude '(.*\.generated\.go:\d+:|bindata_assetfs)' \
		--disable-all \
		--sort severity \
		$(CHECKS) \

@@ -185,20 +187,30 @@ dev: check ## Build for the current development platform
	@rm -f $(GOPATH)/bin/nomad
	@$(MAKE) --no-print-directory \
		$(DEV_TARGET) \
-		GO_TAGS=nomad_test
+		GO_TAGS="nomad_test $(NOMAD_UI_TAG)"
	@mkdir -p $(PROJECT_ROOT)/bin
	@mkdir -p $(GOPATH)/bin
	@cp $(PROJECT_ROOT)/$(DEV_TARGET) $(PROJECT_ROOT)/bin/
	@cp $(PROJECT_ROOT)/$(DEV_TARGET) $(GOPATH)/bin

.PHONY: release
-release: clean check $(foreach t,$(ALL_TARGETS),pkg/$(t).zip) ## Build all release packages which can be built on this platform.
+release: GO_TAGS="ui"
+release: clean ember-dist static-assets check $(foreach t,$(ALL_TARGETS),pkg/$(t).zip) ## Build all release packages which can be built on this platform.
	@echo "==> Results:"
	@tree --dirsfirst $(PROJECT_ROOT)/pkg

.PHONY: test
-test: LOCAL_PACKAGES = $(shell go list ./... | grep -v '/vendor/')
-test: dev ## Run Nomad test suites
+test: ## Run the Nomad test suite and/or the Nomad UI test suite
+	@if [ ! $(SKIP_NOMAD_TESTS) ]; then \
+		make test-nomad; \
+	fi
+	@if [ $(RUN_UI_TESTS) ]; then \
+		make test-ui; \
+	fi
+
+.PHONY: test-nomad
+test-nomad: LOCAL_PACKAGES = $(shell go list ./... | grep -v '/vendor/')
+test-nomad: dev ## Run Nomad test suites
	@echo "==> Running Nomad test suites:"
	@NOMAD_TEST_RKT=1 \
		go test \
@@ -229,6 +241,33 @@ testcluster: ## Bring up a Linux test cluster using Vagrant. Set PROVIDER if nec
		nomad-client03 \
		$(if $(PROVIDER),--provider $(PROVIDER))

+.PHONY: static-assets
+static-assets: ## Compile the static routes to serve alongside the API
+	@echo "--> Generating static assets"
+	@go-bindata-assetfs -pkg agent -prefix ui -modtime 1480000000 -tags ui ./ui/dist/...
+	@mv bindata_assetfs.go command/agent
+
+.PHONY: test-ui
+test-ui: ## Run the Nomad UI test suite
+	@echo "--> Installing JavaScript assets"
+	@cd ui && yarn install
+	@cd ui && npm install phantomjs-prebuilt
+	@echo "--> Running ember tests"
+	@cd ui && phantomjs --version
+	@cd ui && npm test
+
+.PHONY: ember-dist
+ember-dist: ## Build the static UI assets from source
+	@echo "--> Installing JavaScript assets"
+	@cd ui && yarn install
+	@cd ui && npm rebuild node-sass
+	@echo "--> Building Ember application"
+	@cd ui && npm run build
+
+.PHONY: dev-ui
+dev-ui: ember-dist static-assets ## Build a dev binary with the UI baked in
+	@$(MAKE) NOMAD_UI_TAG="ui" dev
+
HELP_FORMAT="    \033[36m%-25s\033[0m %s\n"
.PHONY: help
help: ## Display this usage information
README.md
@@ -107,6 +107,17 @@ $ bin/nomad
...
```

+If the Nomad UI is desired in the development version, run `make dev-ui`. This will build the UI from source and compile it into the dev binary.
+
+```sh
+$ make dev-ui
+...
+$ bin/nomad
+...
+```
+
+**Note:** Building the Nomad UI from source requires Node, Yarn, and Ember CLI. These tools are already in the Vagrant VM. Read the [UI README](https://github.com/hashicorp/nomad/blob/master/ui/README.md) for more info.
+
To cross-compile Nomad, run `make release`. This will compile Nomad for multiple
platforms and place the resulting binaries into the `./pkg` directory:
@@ -21,6 +21,13 @@ Vagrant.configure(2) do |config|
    vmCfg.vm.provision "shell",
      privileged: false,
      path: './scripts/vagrant-linux-unpriv-bootstrap.sh'

+    # Expose the nomad api and ui to the host
+    vmCfg.vm.network "forwarded_port", guest: 4646, host: 4646, auto_correct: true
+
+    # Expose Ember ports to the host (one for the site, one for livereload)
+    vmCfg.vm.network :forwarded_port, guest: 4201, host: 4201, auto_correct: true
+    vmCfg.vm.network :forwarded_port, guest: 49153, host: 49153, auto_correct: true
  end

  config.vm.define "freebsd", autostart: false, primary: false do |vmCfg|

@@ -113,6 +120,10 @@ def configureLinuxProvisioners(vmCfg)
    privileged: true,
    path: './scripts/vagrant-linux-priv-rkt.sh'

+  vmCfg.vm.provision "shell",
+    privileged: false,
+    path: './scripts/vagrant-linux-priv-ui.sh'
+
  return vmCfg
end
@@ -21,12 +21,13 @@ const (
	// The Policy stanza is a shorthand for granting several of these. When capabilities are
	// combined we take the union of all capabilities. If the deny capability is present, it
	// takes precedence and overwrites all other capabilities.
-	NamespaceCapabilityDeny      = "deny"
-	NamespaceCapabilityListJobs  = "list-jobs"
-	NamespaceCapabilityReadJob   = "read-job"
-	NamespaceCapabilitySubmitJob = "submit-job"
-	NamespaceCapabilityReadLogs  = "read-logs"
-	NamespaceCapabilityReadFS    = "read-fs"
+	NamespaceCapabilityDeny             = "deny"
+	NamespaceCapabilityListJobs         = "list-jobs"
+	NamespaceCapabilityReadJob          = "read-job"
+	NamespaceCapabilitySubmitJob        = "submit-job"
+	NamespaceCapabilityReadLogs         = "read-logs"
+	NamespaceCapabilityReadFS           = "read-fs"
+	NamespaceCapabilitySentinelOverride = "sentinel-override"
)

var (

@@ -77,6 +78,9 @@ func isNamespaceCapabilityValid(cap string) bool {
	case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob,
		NamespaceCapabilitySubmitJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS:
		return true
+	// Separate the enterprise-only capabilities
+	case NamespaceCapabilitySentinelOverride:
+		return true
	default:
		return false
	}
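
The comment on these constants pins down the merge rule: combined policies take the union of their capabilities, and "deny" overrides everything else. A minimal sketch of that rule — combineCapabilities is a hypothetical helper written for illustration, not this package's actual merge code:

func combineCapabilities(groups ...[]string) []string {
    // Union all capability sets together.
    set := make(map[string]struct{})
    for _, g := range groups {
        for _, c := range g {
            set[c] = struct{}{}
        }
    }
    // The deny capability takes precedence over everything else.
    if _, denied := set[NamespaceCapabilityDeny]; denied {
        return []string{NamespaceCapabilityDeny}
    }
    out := make([]string, 0, len(set))
    for c := range set {
        out = append(out, c)
    }
    return out
}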
@@ -151,6 +151,25 @@ func TestParse(t *testing.T) {
			"Invalid namespace name",
			nil,
		},
+		{
+			`
+			namespace "default" {
+				capabilities = ["sentinel-override"]
+			}
+			`,
+			"",
+			&Policy{
+				Namespaces: []*NamespacePolicy{
+					&NamespacePolicy{
+						Name:   "default",
+						Policy: "",
+						Capabilities: []string{
+							NamespaceCapabilitySentinelOverride,
+						},
+					},
+				},
+			},
+		},
	}

	for idx, tc := range tcases {
api/jobs.go
@@ -52,30 +52,43 @@ func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *Write
	return &resp, wm, err
}

+// RegisterOptions is used to pass through job registration parameters
+type RegisterOptions struct {
+	EnforceIndex   bool
+	ModifyIndex    uint64
+	PolicyOverride bool
+}
+
// Register is used to register a new job. It returns the ID
// of the evaluation, along with any errors encountered.
func (j *Jobs) Register(job *Job, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
-	var resp JobRegisterResponse
-
-	req := &RegisterJobRequest{Job: job}
-	wm, err := j.client.write("/v1/jobs", req, &resp, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return &resp, wm, nil
+	return j.RegisterOpts(job, nil, q)
}

// EnforceRegister is used to register a job enforcing its job modify index.
func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
+	opts := RegisterOptions{EnforceIndex: true, ModifyIndex: modifyIndex}
+	return j.RegisterOpts(job, &opts, q)
+}
+
+// RegisterOpts is used to register a new job with options. It returns the ID
+// of the evaluation, along with any errors encountered.
+func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
+	// Format the request
+	req := &RegisterJobRequest{
+		Job: job,
+	}
+	if opts != nil {
+		if opts.EnforceIndex {
+			req.EnforceIndex = true
+			req.JobModifyIndex = opts.ModifyIndex
+		}
+		if opts.PolicyOverride {
+			req.PolicyOverride = true
+		}
+	}
+
	var resp JobRegisterResponse
-	req := &RegisterJobRequest{
-		Job:            job,
-		EnforceIndex:   true,
-		JobModifyIndex: modifyIndex,
-	}
	wm, err := j.client.write("/v1/jobs", req, &resp, q)
	if err != nil {
		return nil, nil, err

@@ -208,21 +221,36 @@ func (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta,
	return resp.EvalID, wm, nil
}

+// PlanOptions is used to pass through job planning parameters
+type PlanOptions struct {
+	Diff           bool
+	PolicyOverride bool
+}
+
func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) {
+	opts := PlanOptions{Diff: diff}
+	return j.PlanOpts(job, &opts, q)
+}
+
+func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) {
	if job == nil {
		return nil, nil, fmt.Errorf("must pass non-nil job")
	}

-	var resp JobPlanResponse
	// Setup the request
	req := &JobPlanRequest{
-		Job:  job,
-		Diff: diff,
+		Job: job,
	}
+	if opts != nil {
+		req.Diff = opts.Diff
+		req.PolicyOverride = opts.PolicyOverride
+	}
+
+	var resp JobPlanResponse
	wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q)
	if err != nil {
		return nil, nil, err
	}

	return &resp, wm, nil
}
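
A short usage sketch for the new options-based entry points. planThenRegister is a hypothetical caller; it assumes a reachable agent and uses api.NewClient, api.DefaultConfig, and the JobModifyIndex field of JobPlanResponse:

func planThenRegister(job *api.Job) error {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        return err
    }
    jobs := client.Jobs()

    // Plan with a diff, overriding soft-mandatory Sentinel policies.
    plan, _, err := jobs.PlanOpts(job, &api.PlanOptions{Diff: true, PolicyOverride: true}, nil)
    if err != nil {
        return err
    }

    // Register against the index the plan was computed from, so a
    // concurrent change fails the submission instead of clobbering it.
    _, _, err = jobs.RegisterOpts(job, &api.RegisterOptions{
        EnforceIndex:   true,
        ModifyIndex:    plan.JobModifyIndex,
        PolicyOverride: true,
    }, nil)
    return err
}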
@@ -794,6 +822,7 @@ type JobRegisterRequest struct {
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64
+	PolicyOverride bool

	WriteRequest
}

@@ -803,6 +832,7 @@ type RegisterJobRequest struct {
	Job            *Job
	EnforceIndex   bool   `json:",omitempty"`
	JobModifyIndex uint64 `json:",omitempty"`
+	PolicyOverride bool   `json:",omitempty"`
}

// JobRegisterResponse is used to respond to a job registration

@@ -827,8 +857,9 @@ type JobDeregisterResponse struct {
}

type JobPlanRequest struct {
-	Job  *Job
-	Diff bool
+	Job            *Job
+	Diff           bool
+	PolicyOverride bool
	WriteRequest
}
@@ -53,8 +53,8 @@ func TestNamespace_Info(t *testing.T) {

	// Trying to retrieve a namespace before it exists returns an error
	_, _, err := namespaces.Info("foo", nil)
-	assert.Nil(err)
-	assert.Contains("not found", err.Error())
+	assert.NotNil(err)
+	assert.Contains(err.Error(), "not found")

	// Register the namespace
	ns := testNamespace()
@@ -0,0 +1,79 @@
package api

import "fmt"

// SentinelPolicies is used to query the Sentinel Policy endpoints.
type SentinelPolicies struct {
	client *Client
}

// SentinelPolicies returns a new handle on the Sentinel policies.
func (c *Client) SentinelPolicies() *SentinelPolicies {
	return &SentinelPolicies{client: c}
}

// List is used to dump all of the policies.
func (a *SentinelPolicies) List(q *QueryOptions) ([]*SentinelPolicyListStub, *QueryMeta, error) {
	var resp []*SentinelPolicyListStub
	qm, err := a.client.query("/v1/sentinel/policies", &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return resp, qm, nil
}

// Upsert is used to create or update a policy
func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*WriteMeta, error) {
	if policy == nil || policy.Name == "" {
		return nil, fmt.Errorf("missing policy name")
	}
	wm, err := a.client.write("/v1/sentinel/policy/"+policy.Name, policy, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Delete is used to delete a policy
func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) {
	if policyName == "" {
		return nil, fmt.Errorf("missing policy name")
	}
	wm, err := a.client.delete("/v1/sentinel/policy/"+policyName, nil, q)
	if err != nil {
		return nil, err
	}
	return wm, nil
}

// Info is used to query a specific policy
func (a *SentinelPolicies) Info(policyName string, q *QueryOptions) (*SentinelPolicy, *QueryMeta, error) {
	if policyName == "" {
		return nil, nil, fmt.Errorf("missing policy name")
	}
	var resp SentinelPolicy
	wm, err := a.client.query("/v1/sentinel/policy/"+policyName, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, wm, nil
}

type SentinelPolicy struct {
	Name             string
	Description      string
	Scope            string
	EnforcementLevel string
	Policy           string
	CreateIndex      uint64
	ModifyIndex      uint64
}

type SentinelPolicyListStub struct {
	Name             string
	Description      string
	Scope            string
	EnforcementLevel string
	CreateIndex      uint64
	ModifyIndex      uint64
}
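
A minimal sketch of driving this client end to end. installPolicy is a made-up helper; the policy name and body are placeholders, and a reachable agent is assumed:

func installPolicy(c *api.Client) error {
    sp := &api.SentinelPolicy{
        Name:             "example-policy", // hypothetical name
        Description:      "example advisory rule",
        Scope:            "submit-job",
        EnforcementLevel: "advisory",
        Policy:           "main = rule { true }",
    }
    // Create or update, then read it back by name.
    if _, err := c.SentinelPolicies().Upsert(sp, nil); err != nil {
        return err
    }
    out, _, err := c.SentinelPolicies().Info(sp.Name, nil)
    if err != nil {
        return err
    }
    fmt.Printf("installed %s (scope %s)\n", out.Name, out.Scope)
    return nil
}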
@@ -0,0 +1,109 @@
// +build pro ent

package api

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSentinelPolicies_ListUpsert(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	ap := c.SentinelPolicies()

	// Listing when nothing exists returns empty
	result, qm, err := ap.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if qm.LastIndex != 1 {
		t.Fatalf("bad index: %d", qm.LastIndex)
	}
	if n := len(result); n != 0 {
		t.Fatalf("expected 0 policies, got: %d", n)
	}

	// Register a policy
	policy := &SentinelPolicy{
		Name:             "test",
		Description:      "test",
		EnforcementLevel: "advisory",
		Scope:            "submit-job",
		Policy:           "main = rule { true }",
	}
	wm, err := ap.Upsert(policy, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Check the list again
	result, qm, err = ap.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)
	if len(result) != 1 {
		t.Fatalf("expected policy, got: %#v", result)
	}
}

func TestSentinelPolicies_Delete(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	ap := c.SentinelPolicies()

	// Register a policy
	policy := &SentinelPolicy{
		Name:             "test",
		Description:      "test",
		EnforcementLevel: "advisory",
		Scope:            "submit-job",
		Policy:           "main = rule { true } ",
	}
	wm, err := ap.Upsert(policy, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Delete the policy
	wm, err = ap.Delete(policy.Name, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Check the list again
	result, qm, err := ap.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)
	if len(result) != 0 {
		t.Fatalf("unexpected policy, got: %#v", result)
	}
}

func TestSentinelPolicies_Info(t *testing.T) {
	t.Parallel()
	c, s, _ := makeACLClient(t, nil, nil)
	defer s.Stop()
	ap := c.SentinelPolicies()

	// Register a policy
	policy := &SentinelPolicy{
		Name:             "test",
		Description:      "test",
		EnforcementLevel: "advisory",
		Scope:            "submit-job",
		Policy:           "main = rule { true }",
	}
	wm, err := ap.Upsert(policy, nil)
	assert.Nil(t, err)
	assertWriteMeta(t, wm)

	// Query the policy
	out, qm, err := ap.Info(policy.Name, nil)
	assert.Nil(t, err)
	assertQueryMeta(t, qm)
	assert.Equal(t, policy.Name, out.Name)
}
appveyor.yml
@@ -1,20 +1,27 @@
version: "build-{branch}-{build}"
image: Visual Studio 2017
clone_folder: c:\gopath\src\github.com\hashicorp\nomad

environment:
  GOPATH: c:\gopath
  GOBIN: c:\gopath\bin

+  matrix:
+    - RUN_UI_TESTS: 1
+      SKIP_NOMAD_TESTS: 1
+    - {}
+
install:
  - cmd: set PATH=%GOBIN%;c:\go\bin;%PATH%
  - cmd: echo %Path%
  - cmd: go version
  - cmd: go env
  - ps: mkdir C:\gopath\bin
  - ps: appveyor DownloadFile "https://releases.hashicorp.com/vault/0.7.0/vault_0.7.0_windows_amd64.zip" -FileName "C:\\gopath\\bin\\vault.zip"
  - ps: Expand-Archive C:\gopath\bin\vault.zip -DestinationPath C:\gopath\bin
  - ps: appveyor DownloadFile "https://releases.hashicorp.com/consul/0.7.0/consul_0.7.0_windows_amd64.zip" -FileName "C:\\gopath\\bin\\consul.zip"
  - ps: Expand-Archive C:\gopath\bin\consul.zip -DestinationPath C:\gopath\bin
  #- cmd: go install -tags nomad_test

build_script:
  #- cmd: go test -tags nomad_test ./...
  - cmd: go install -tags nomad_test
@@ -153,6 +153,9 @@ func convertServerConfig(agentConfig *Config, logOutput io.Writer) (*nomad.Confi
	if agentConfig.ACL.ReplicationToken != "" {
		conf.ReplicationToken = agentConfig.ACL.ReplicationToken
	}
+	if agentConfig.Sentinel != nil {
+		conf.SentinelConfig = agentConfig.Sentinel
+	}

	// Set up the bind addresses
	rpcAddr, err := net.ResolveTCPAddr("tcp", agentConfig.normalizedAddrs.RPC)
@@ -154,3 +154,13 @@ tls {
  key_file = "pipe"
  verify_https_client = true
}
+sentinel {
+  import "foo" {
+    path = "foo"
+    args = ["a", "b", "c"]
+  }
+  import "bar" {
+    path = "bar"
+    args = ["x", "y", "z"]
+  }
+}
@@ -130,6 +130,9 @@ type Config struct {
	// HTTPAPIResponseHeaders allows users to configure the Nomad http agent to
	// set arbitrary headers on API responses
	HTTPAPIResponseHeaders map[string]string `mapstructure:"http_api_response_headers"`
+
+	// Sentinel holds sentinel related settings
+	Sentinel *config.SentinelConfig `mapstructure:"sentinel"`
}

// AtlasConfig is used to enable and parameterize the Atlas integration

@@ -615,6 +618,7 @@ func DefaultConfig() *Config {
			collectionInterval: 1 * time.Second,
		},
		TLSConfig: &config.TLSConfig{},
+		Sentinel:  &config.SentinelConfig{},
		Version:   version.GetVersion(),
	}
}

@@ -775,6 +779,14 @@ func (c *Config) Merge(b *Config) *Config {
		result.Vault = result.Vault.Merge(b.Vault)
	}

+	// Apply the sentinel config
+	if result.Sentinel == nil && b.Sentinel != nil {
+		server := *b.Sentinel
+		result.Sentinel = &server
+	} else if b.Sentinel != nil {
+		result.Sentinel = result.Sentinel.Merge(b.Sentinel)
+	}
+
	// Merge config files lists
	result.Files = append(result.Files, b.Files...)
@@ -97,6 +97,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
		"tls",
		"http_api_response_headers",
		"acl",
+		"sentinel",
	}
	if err := checkHCLKeys(list, valid); err != nil {
		return multierror.Prefix(err, "config:")

@@ -120,6 +121,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
	delete(m, "tls")
	delete(m, "http_api_response_headers")
	delete(m, "acl")
+	delete(m, "sentinel")

	// Decode the rest
	if err := mapstructure.WeakDecode(m, result); err != nil {

@@ -203,6 +205,13 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
		}
	}

+	// Parse Sentinel config
+	if o := list.Filter("sentinel"); len(o.Items) > 0 {
+		if err := parseSentinel(&result.Sentinel, o); err != nil {
+			return multierror.Prefix(err, "sentinel->")
+		}
+	}
+
	// Parse out http_api_response_headers fields. These are in HCL as a list so
	// we need to iterate over them and merge them.
	if headersO := list.Filter("http_api_response_headers"); len(headersO.Items) > 0 {

@@ -835,6 +844,40 @@ func parseVaultConfig(result **config.VaultConfig, list *ast.ObjectList) error {
	return nil
}

+func parseSentinel(result **config.SentinelConfig, list *ast.ObjectList) error {
+	list = list.Elem()
+	if len(list.Items) > 1 {
+		return fmt.Errorf("only one 'sentinel' block allowed")
+	}
+
+	// Get our sentinel object
+	obj := list.Items[0]
+
+	// Value should be an object
+	var listVal *ast.ObjectList
+	if ot, ok := obj.Val.(*ast.ObjectType); ok {
+		listVal = ot.List
+	} else {
+		return fmt.Errorf("sentinel value: should be an object")
+	}
+
+	// Check for invalid keys
+	valid := []string{
+		"import",
+	}
+	if err := checkHCLKeys(listVal, valid); err != nil {
+		return err
+	}
+
+	var config config.SentinelConfig
+	if err := hcl.DecodeObject(&config, listVal); err != nil {
+		return err
+	}
+
+	*result = &config
+	return nil
+}
+
func checkHCLKeys(node ast.Node, valid []string) error {
	var list *ast.ObjectList
	switch n := node.(type) {
@@ -177,6 +177,20 @@ func TestConfig_Parse(t *testing.T) {
			HTTPAPIResponseHeaders: map[string]string{
				"Access-Control-Allow-Origin": "*",
			},
+			Sentinel: &config.SentinelConfig{
+				Imports: []*config.SentinelImport{
+					&config.SentinelImport{
+						Name: "foo",
+						Path: "foo",
+						Args: []string{"a", "b", "c"},
+					},
+					&config.SentinelImport{
+						Name: "bar",
+						Path: "bar",
+						Args: []string{"x", "y", "z"},
+					},
+				},
+			},
		},
		false,
	},
@@ -34,6 +34,7 @@ func TestConfig_Merge(t *testing.T) {
		Atlas:    &AtlasConfig{},
		Vault:    &config.VaultConfig{},
		Consul:   &config.ConsulConfig{},
+		Sentinel: &config.SentinelConfig{},
	}

	c2 := &Config{

@@ -312,6 +313,15 @@ func TestConfig_Merge(t *testing.T) {
			ClientAutoJoin:     &trueValue,
			ChecksUseAdvertise: &trueValue,
		},
+		Sentinel: &config.SentinelConfig{
+			Imports: []*config.SentinelImport{
+				&config.SentinelImport{
+					Name: "foo",
+					Path: "foo",
+					Args: []string{"a", "b", "c"},
+				},
+			},
+		},
	}

	result := c0.Merge(c1)
@@ -9,10 +9,12 @@ import (
	"net"
	"net/http"
	"net/http/pprof"
+	"os"
	"strconv"
	"time"

	"github.com/NYTimes/gziphandler"
+	"github.com/elazarl/go-bindata-assetfs"
	"github.com/hashicorp/nomad/helper/tlsutil"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/ugorji/go/codec"

@@ -27,6 +29,18 @@ const (
	// this is checked to switch between the ACLToken and
	// AtlasACLToken
	scadaHTTPAddr = "SCADA"
+
+	// ErrEntOnly is the error returned if accessing an enterprise only
+	// endpoint
+	ErrEntOnly = "Nomad Enterprise only endpoint"
)

+var (
+	// Set to false by stub_asset if the ui build tag isn't enabled
+	uiEnabled = true
+
+	// Overridden if the ui build tag isn't enabled
+	stubHTML = ""
+)
+
// HTTPServer is used to wrap an Agent and expose it over an HTTP interface

@@ -186,6 +200,16 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
	s.mux.HandleFunc("/v1/system/gc", s.wrap(s.GarbageCollectRequest))
	s.mux.HandleFunc("/v1/system/reconcile/summaries", s.wrap(s.ReconcileJobSummaries))

+	if uiEnabled {
+		s.mux.Handle("/ui/", http.StripPrefix("/ui/", handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))
+	} else {
+		// Write the stubHTML
+		s.mux.HandleFunc("/ui/", func(w http.ResponseWriter, r *http.Request) {
+			w.Write([]byte(stubHTML))
+		})
+	}
+	s.mux.Handle("/", handleRootRedirect())
+
	if enableDebug {
		s.mux.HandleFunc("/debug/pprof/", pprof.Index)
		s.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)

@@ -204,6 +228,22 @@ type HTTPCodedError interface {
	Code() int
}

+type UIAssetWrapper struct {
+	FileSystem *assetfs.AssetFS
+}
+
+func (fs *UIAssetWrapper) Open(name string) (http.File, error) {
+	if file, err := fs.FileSystem.Open(name); err == nil {
+		return file, nil
+	} else {
+		// serve index.html instead of 404ing
+		if err == os.ErrNotExist {
+			return fs.FileSystem.Open("index.html")
+		}
+		return nil, err
+	}
+}
+
func CodedError(c int, s string) HTTPCodedError {
	return &codedError{s, c}
}

@@ -221,6 +261,22 @@ func (e *codedError) Code() int {
	return e.code
}

+func handleUI(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		header := w.Header()
+		header.Add("Content-Security-Policy", "default-src 'none'; connect-src 'self'; img-src 'self' data:; script-src 'self'; style-src 'self' 'unsafe-inline'; form-action 'none'; frame-ancestors 'none'")
+		h.ServeHTTP(w, req)
+	})
+}
+
+func handleRootRedirect() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		http.Redirect(w, req, "/ui/", 307)
+	})
+}
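
A quick sketch of exercising the redirect with net/http/httptest — illustrative only; it assumes the test lives in this package so the unexported constructor is visible, and imports "testing" and "net/http/httptest":

func TestRootRedirectSketch(t *testing.T) {
    // Requests to "/" should bounce to the UI with a temporary redirect.
    rec := httptest.NewRecorder()
    req := httptest.NewRequest("GET", "/", nil)
    handleRootRedirect().ServeHTTP(rec, req)
    if rec.Code != 307 {
        t.Fatalf("expected 307, got %d", rec.Code)
    }
    if loc := rec.Header().Get("Location"); loc != "/ui/" {
        t.Fatalf("expected redirect to /ui/, got %q", loc)
    }
}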

// wrap is used to wrap functions to make them more convenient
func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Request) (interface{}, error)) func(resp http.ResponseWriter, req *http.Request) {
	f := func(resp http.ResponseWriter, req *http.Request) {
@@ -2,5 +2,17 @@

package agent

+import "net/http"
+
-// registerEnterpriseHandlers is a no-op for the oss release
-func (s *HTTPServer) registerEnterpriseHandlers() {}
+// registerEnterpriseHandlers registers stubs that return 501 for the
+// enterprise-only endpoints in the oss release
+func (s *HTTPServer) registerEnterpriseHandlers() {
+	s.mux.HandleFunc("/v1/namespaces", s.wrap(s.entOnly))
+	s.mux.HandleFunc("/v1/namespace", s.wrap(s.entOnly))
+	s.mux.HandleFunc("/v1/namespace/", s.wrap(s.entOnly))
+	s.mux.HandleFunc("/v1/sentinel/policies", s.wrap(s.entOnly))
+	s.mux.HandleFunc("/v1/sentinel/policy/", s.wrap(s.entOnly))
+}
+
+func (s *HTTPServer) entOnly(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
+	return nil, CodedError(501, ErrEntOnly)
+}
@@ -124,8 +124,9 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,

	sJob := ApiJobToStructJob(args.Job)
	planReq := structs.JobPlanRequest{
-		Job:  sJob,
-		Diff: args.Diff,
+		Job:            sJob,
+		Diff:           args.Diff,
+		PolicyOverride: args.PolicyOverride,
		WriteRequest: structs.WriteRequest{
			Region: args.WriteRequest.Region,
		},

@@ -355,6 +356,7 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
		Job:            sJob,
		EnforceIndex:   args.EnforceIndex,
		JobModifyIndex: args.JobModifyIndex,
+		PolicyOverride: args.PolicyOverride,
		WriteRequest: structs.WriteRequest{
			Region:   args.WriteRequest.Region,
			SecretID: args.WriteRequest.SecretID,
@@ -0,0 +1,26 @@
// +build !ui

package agent

import (
	assetfs "github.com/elazarl/go-bindata-assetfs"
)

func init() {
	uiEnabled = false
	stubHTML = `<!DOCTYPE html>
<html>
<p>Nomad UI is not available in this binary. To get Nomad UI do one of the following:</p>
<ul>
<li><a href="https://www.nomadproject.io/downloads.html">Download an official release</a></li>
<li>Run <pre>make release</pre> to create your own release binaries.
<li>Run <pre>make dev-ui</pre> to create a development binary with the UI.
</ul>
</html>
`
}

// assetFS is a stub for building Nomad without a UI.
func assetFS() *assetfs.AssetFS {
	return nil
}
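
The !ui build tag means this stub compiles in only when the ui tag is absent; with -tags ui, the generated command/agent/bindata_assetfs.go (produced by the static-assets Makefile target) supplies the real assetFS. A hypothetical skeleton of that generated counterpart, only to show how the two halves pair up — the real file is emitted by go-bindata-assetfs and embeds ./ui/dist, and the accessor names and Prefix value below are assumptions:

// +build ui

package agent

import (
    assetfs "github.com/elazarl/go-bindata-assetfs"
)

// assetFS returns the embedded UI file system when built with -tags ui.
// Asset, AssetDir, and AssetInfo are accessors emitted by the generator
// (shown here as assumptions, not hand-written code).
func assetFS() *assetfs.AssetFS {
    return &assetfs.AssetFS{
        Asset:     Asset,
        AssetDir:  AssetDir,
        AssetInfo: AssetInfo,
        Prefix:    "ui/dist",
    }
}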
@@ -14,9 +14,10 @@ type NamespaceApplyCommand struct {

func (c *NamespaceApplyCommand) Help() string {
	helpText := `
-Usage: nomad namespace apply [options]
+Usage: nomad namespace apply [options] <namespace>

-  Apply is used to create or update a namespace.
+  Apply is used to create or update a namespace. It takes the namespace name to
+  create or update as its only argument.

General Options:

@@ -24,9 +25,6 @@ General Options:

Apply Options:

-  -name
-    The name of the namespace.
-
  -description
    An optional description for the namespace.
`

@@ -36,7 +34,6 @@ Apply Options:
func (c *NamespaceApplyCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
-			"-name":        complete.PredictAnything,
			"-description": complete.PredictAnything,
		})
}

@@ -50,24 +47,25 @@ func (c *NamespaceApplyCommand) Synopsis() string {
}

func (c *NamespaceApplyCommand) Run(args []string) int {
-	var name, description string
+	var description string

	flags := c.Meta.FlagSet("namespace apply", FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
-	flags.StringVar(&name, "name", "", "")
	flags.StringVar(&description, "description", "", "")

	if err := flags.Parse(args); err != nil {
		return 1
	}

-	// Check that we got no arguments
+	// Check that we got exactly one argument
	args = flags.Args()
-	if l := len(args); l != 0 {
+	if l := len(args); l != 1 {
		c.Ui.Error(c.Help())
		return 1
	}

+	name := args[0]
+
	// Validate we have at least a name
	if name == "" {
		c.Ui.Error("Namespace name required")

@@ -93,5 +91,6 @@ func (c *NamespaceApplyCommand) Run(args []string) int {
		return 1
	}

+	c.Ui.Output(fmt.Sprintf("Successfully applied namespace %q!", name))
	return 0
}
@@ -50,7 +50,7 @@ func TestNamespaceApplyCommand_Good(t *testing.T) {

	// Create a namespace
	name, desc := "foo", "bar"
-	if code := cmd.Run([]string{"-address=" + url, "-name=" + name, "-description=" + desc}); code != 0 {
+	if code := cmd.Run([]string{"-address=" + url, "-description=" + desc, name}); code != 0 {
		t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
	}
@@ -67,5 +67,6 @@ func (c *NamespaceDeleteCommand) Run(args []string) int {
		return 1
	}

+	c.Ui.Output(fmt.Sprintf("Successfully deleted namespace %q!", namespace))
	return 0
}
@@ -2,6 +2,7 @@ package command

import (
	"fmt"
+	"sort"
	"strings"

	"github.com/hashicorp/nomad/api"

@@ -102,6 +103,9 @@ func formatNamespaces(namespaces []*api.Namespace) string {
		return "No namespaces found"
	}

+	// Sort the output by namespace name
+	sort.Slice(namespaces, func(i, j int) bool { return namespaces[i].Name < namespaces[j].Name })
+
	rows := make([]string, len(namespaces)+1)
	rows[0] = "Name|Description"
	for i, ns := range namespaces {
@@ -68,6 +68,9 @@ Plan Options:
    Determines whether the diff between the remote job and planned job is shown.
    Defaults to true.

+  -policy-override
+    Sets the flag to force override any soft mandatory Sentinel policies.
+
  -verbose
    Increase diff verbosity.
`

@@ -81,8 +84,9 @@ func (c *PlanCommand) Synopsis() string {
func (c *PlanCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
-			"-diff":    complete.PredictNothing,
-			"-verbose": complete.PredictNothing,
+			"-diff":            complete.PredictNothing,
+			"-policy-override": complete.PredictNothing,
+			"-verbose":         complete.PredictNothing,
		})
}

@@ -91,11 +95,12 @@ func (c *PlanCommand) AutocompleteArgs() complete.Predictor {
}

func (c *PlanCommand) Run(args []string) int {
-	var diff, verbose bool
+	var diff, policyOverride, verbose bool

	flags := c.Meta.FlagSet("plan", FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.BoolVar(&diff, "diff", true, "")
+	flags.BoolVar(&policyOverride, "policy-override", false, "")
	flags.BoolVar(&verbose, "verbose", false, "")

	if err := flags.Parse(args); err != nil {

@@ -134,8 +139,17 @@ func (c *PlanCommand) Run(args []string) int {
		client.SetNamespace(*n)
	}

+	// Setup the options
+	opts := &api.PlanOptions{}
+	if diff {
+		opts.Diff = true
+	}
+	if policyOverride {
+		opts.PolicyOverride = true
+	}
+
	// Submit the job
-	resp, _, err := client.Jobs().Plan(job, diff, nil)
+	resp, _, err := client.Jobs().PlanOpts(job, opts, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error during plan: %s", err))
		return 255
@@ -77,8 +77,12 @@ Run Options:
    the evaluation ID will be printed to the screen, which can be used to
    examine the evaluation using the eval-status command.

-  -verbose
-    Display full information.
+  -output
+    Output the JSON that would be submitted to the HTTP API without submitting
+    the job.
+
+  -policy-override
+    Sets the flag to force override any soft mandatory Sentinel policies.

  -vault-token
    If set, the passed Vault token is stored in the job before sending to the

@@ -86,9 +90,8 @@ Run Options:
    the job file. This overrides the token found in $VAULT_TOKEN environment
    variable and that found in the job.

-  -output
-    Output the JSON that would be submitted to the HTTP API without submitting
-    the job.
+  -verbose
+    Display full information.
`
	return strings.TrimSpace(helpText)
}

@@ -100,11 +103,12 @@ func (c *RunCommand) Synopsis() string {
func (c *RunCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
-			"-check-index": complete.PredictNothing,
-			"-detach":      complete.PredictNothing,
-			"-verbose":     complete.PredictNothing,
-			"-vault-token": complete.PredictAnything,
-			"-output":      complete.PredictNothing,
+			"-check-index":     complete.PredictNothing,
+			"-detach":          complete.PredictNothing,
+			"-verbose":         complete.PredictNothing,
+			"-vault-token":     complete.PredictAnything,
+			"-output":          complete.PredictNothing,
+			"-policy-override": complete.PredictNothing,
		})
}

@@ -113,7 +117,7 @@ func (c *RunCommand) AutocompleteArgs() complete.Predictor {
}

func (c *RunCommand) Run(args []string) int {
-	var detach, verbose, output bool
+	var detach, verbose, output, override bool
	var checkIndexStr, vaultToken string

	flags := c.Meta.FlagSet("run", FlagSetClient)

@@ -121,6 +125,7 @@ func (c *RunCommand) Run(args []string) int {
	flags.BoolVar(&detach, "detach", false, "")
	flags.BoolVar(&verbose, "verbose", false, "")
	flags.BoolVar(&output, "output", false, "")
+	flags.BoolVar(&override, "policy-override", false, "")
	flags.StringVar(&checkIndexStr, "check-index", "", "")
	flags.StringVar(&vaultToken, "vault-token", "", "")

@@ -205,13 +210,18 @@ func (c *RunCommand) Run(args []string) int {
		return 1
	}

-	// Submit the job
-	var resp *api.JobRegisterResponse
+	// Set the register options
+	opts := &api.RegisterOptions{}
	if enforce {
-		resp, _, err = client.Jobs().EnforceRegister(job, checkIndex, nil)
-	} else {
-		resp, _, err = client.Jobs().Register(job, nil)
+		opts.EnforceIndex = true
+		opts.ModifyIndex = checkIndex
	}
+	if override {
+		opts.PolicyOverride = true
+	}
+
+	// Submit the job
+	resp, _, err := client.Jobs().RegisterOpts(job, opts, nil)
	if err != nil {
		if strings.Contains(err.Error(), api.RegisterEnforceIndexErrPrefix) {
			// Format the error specially if the error is due to index
@@ -0,0 +1,19 @@
package command

import "github.com/mitchellh/cli"

type SentinelCommand struct {
	Meta
}

func (f *SentinelCommand) Help() string {
	return "This command is accessed by using one of the subcommands below."
}

func (f *SentinelCommand) Synopsis() string {
	return "Interact with Sentinel policies"
}

func (f *SentinelCommand) Run(args []string) int {
	return cli.RunResultHelp
}
@@ -0,0 +1,127 @@
package command

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/hashicorp/nomad/api"
	"github.com/posener/complete"
)

type SentinelApplyCommand struct {
	Meta
}

func (c *SentinelApplyCommand) Help() string {
	helpText := `
Usage: nomad sentinel apply [options] <name> <file>

  Apply is used to write a new Sentinel policy or update an existing one.
  The name of the policy and file must be specified. The file will be read
  from stdin by specifying "-".

General Options:

  ` + generalOptionsUsage() + `

Apply Options:

  -description
    Sets a human readable description for the policy.

  -scope (default: submit-job)
    Sets the scope of the policy and when it should be enforced.

  -level (default: advisory)
    Sets the enforcement level of the policy. Must be one of advisory,
    soft-mandatory, hard-mandatory.
`
	return strings.TrimSpace(helpText)
}

func (c *SentinelApplyCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-description": complete.PredictAnything,
			"-scope":       complete.PredictAnything,
			"-level":       complete.PredictAnything,
		})
}

func (c *SentinelApplyCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

func (c *SentinelApplyCommand) Synopsis() string {
	return "Create a new or update existing Sentinel policies"
}

func (c *SentinelApplyCommand) Run(args []string) int {
	var description, scope, enfLevel string
	var err error
	flags := c.Meta.FlagSet("sentinel apply", FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.StringVar(&description, "description", "", "")
	flags.StringVar(&scope, "scope", "submit-job", "")
	flags.StringVar(&enfLevel, "level", "advisory", "")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check that we got exactly two arguments
	args = flags.Args()
	if l := len(args); l != 2 {
		c.Ui.Error(c.Help())
		return 1
	}

	// Get the name and file
	policyName := args[0]

	// Read the file contents
	file := args[1]
	var rawPolicy []byte
	if file == "-" {
		rawPolicy, err = ioutil.ReadAll(os.Stdin)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to read stdin: %v", err))
			return 1
		}
	} else {
		rawPolicy, err = ioutil.ReadFile(file)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to read file: %v", err))
			return 1
		}
	}

	// Construct the policy
	sp := &api.SentinelPolicy{
		Name:             policyName,
		Description:      description,
		Scope:            scope,
		EnforcementLevel: enfLevel,
		Policy:           string(rawPolicy),
	}

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// Upsert the policy
	_, err = client.SentinelPolicies().Upsert(sp, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error writing Sentinel policy: %s", err))
		return 1
	}

	c.Ui.Output(fmt.Sprintf("Successfully wrote %q Sentinel policy!", policyName))
	return 0
}
@@ -0,0 +1,12 @@
package command

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestSentinelApplyCommand_Implements(t *testing.T) {
	t.Parallel()
	var _ cli.Command = &SentinelApplyCommand{}
}
@@ -0,0 +1,75 @@
package command

import (
	"fmt"
	"strings"

	"github.com/posener/complete"
)

type SentinelDeleteCommand struct {
	Meta
}

func (c *SentinelDeleteCommand) Help() string {
	helpText := `
Usage: nomad sentinel delete [options] <name>

  Delete is used to delete an existing Sentinel policy.

General Options:

  ` + generalOptionsUsage() + `
`
	return strings.TrimSpace(helpText)
}

func (c *SentinelDeleteCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{})
}

func (c *SentinelDeleteCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

func (c *SentinelDeleteCommand) Synopsis() string {
	return "Delete an existing Sentinel policy"
}

func (c *SentinelDeleteCommand) Run(args []string) int {
	flags := c.Meta.FlagSet("sentinel delete", FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check that we got exactly one argument
	args = flags.Args()
	if l := len(args); l != 1 {
		c.Ui.Error(c.Help())
		return 1
	}

	// Get the policy name
	policyName := args[0]

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// Delete the policy
	_, err = client.SentinelPolicies().Delete(policyName, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error deleting Sentinel policy: %s", err))
		return 1
	}

	c.Ui.Output(fmt.Sprintf("Successfully deleted %q Sentinel policy!", policyName))
	return 0
}
@@ -0,0 +1,12 @@
package command

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestSentinelDeleteCommand_Implements(t *testing.T) {
	t.Parallel()
	var _ cli.Command = &SentinelDeleteCommand{}
}
@@ -0,0 +1,75 @@
package command

import (
	"fmt"
	"strings"

	"github.com/posener/complete"
)

type SentinelListCommand struct {
	Meta
}

func (c *SentinelListCommand) Help() string {
	helpText := `
Usage: nomad sentinel list [options]

  List is used to display all the installed Sentinel policies.

General Options:

  ` + generalOptionsUsage() + `
`
	return strings.TrimSpace(helpText)
}

func (c *SentinelListCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{})
}

func (c *SentinelListCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

func (c *SentinelListCommand) Synopsis() string {
	return "Display all Sentinel policies"
}

func (c *SentinelListCommand) Run(args []string) int {
	flags := c.Meta.FlagSet("sentinel list", FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// Get the list of policies
	policies, _, err := client.SentinelPolicies().List(nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error listing Sentinel policies: %s", err))
		return 1
	}

	if len(policies) == 0 {
		c.Ui.Output("No policies found")
		return 0
	}

	out := []string{}
	out = append(out, "Name|Scope|Enforcement Level|Description")
	for _, p := range policies {
		line := fmt.Sprintf("%s|%s|%s|%s", p.Name, p.Scope, p.EnforcementLevel, p.Description)
		out = append(out, line)
	}
	c.Ui.Output(formatList(out))
	return 0
}
@@ -0,0 +1,12 @@
package command

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestSentinelListCommand_Implements(t *testing.T) {
	t.Parallel()
	var _ cli.Command = &SentinelListCommand{}
}
@@ -0,0 +1,98 @@
package command

import (
	"fmt"
	"strings"

	"github.com/posener/complete"
)

type SentinelReadCommand struct {
	Meta
}

func (c *SentinelReadCommand) Help() string {
	helpText := `
Usage: nomad sentinel read [options] <name>

  Read is used to inspect a Sentinel policy.

General Options:

  ` + generalOptionsUsage() + `

Read Options:

  -raw
    Prints only the raw policy
`
	return strings.TrimSpace(helpText)
}

func (c *SentinelReadCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-raw": complete.PredictNothing,
		})
}

func (c *SentinelReadCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

func (c *SentinelReadCommand) Synopsis() string {
	return "Inspect an existing Sentinel policy"
}

func (c *SentinelReadCommand) Run(args []string) int {
	var raw bool
	flags := c.Meta.FlagSet("sentinel read", FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.BoolVar(&raw, "raw", false, "")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check that we got exactly one argument
	args = flags.Args()
	if l := len(args); l != 1 {
		c.Ui.Error(c.Help())
		return 1
	}

	// Get the policy name
	policyName := args[0]

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// Query the policy
	policy, _, err := client.SentinelPolicies().Info(policyName, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying Sentinel policy: %s", err))
		return 1
	}

	// Check for only the raw policy
	if raw {
		c.Ui.Output(policy.Policy)
		return 0
	}

	// Output the base information
	info := []string{
		fmt.Sprintf("Name|%s", policy.Name),
		fmt.Sprintf("Scope|%s", policy.Scope),
		fmt.Sprintf("Enforcement Level|%s", policy.EnforcementLevel),
		fmt.Sprintf("Description|%s", policy.Description),
	}
	c.Ui.Output(formatKV(info))
	c.Ui.Output("Policy:")
	c.Ui.Output(policy.Policy)
	return 0
}
@@ -0,0 +1,12 @@
package command

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestSentinelReadCommand_Implements(t *testing.T) {
	t.Parallel()
	var _ cli.Command = &SentinelReadCommand{}
}
commands.go
@@ -279,6 +279,31 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
			Meta: meta,
		}, nil
	},
+	"sentinel": func() (cli.Command, error) {
+		return &command.SentinelCommand{
+			Meta: meta,
+		}, nil
+	},
+	"sentinel list": func() (cli.Command, error) {
+		return &command.SentinelListCommand{
+			Meta: meta,
+		}, nil
+	},
+	"sentinel apply": func() (cli.Command, error) {
+		return &command.SentinelApplyCommand{
+			Meta: meta,
+		}, nil
+	},
+	"sentinel delete": func() (cli.Command, error) {
+		return &command.SentinelDeleteCommand{
+			Meta: meta,
+		}, nil
+	},
+	"sentinel read": func() (cli.Command, error) {
+		return &command.SentinelReadCommand{
+			Meta: meta,
+		}, nil
+	},
	"server-force-leave": func() (cli.Command, error) {
		return &command.ServerForceLeaveCommand{
			Meta: meta,
@@ -239,6 +239,12 @@ type Config struct {
	// ReplicationToken is the ACL Token Secret ID used to fetch from
	// the Authoritative Region.
	ReplicationToken string
+
+	// SentinelGCInterval is the interval that we GC unused policies.
+	SentinelGCInterval time.Duration
+
+	// SentinelConfig is this Agent's Sentinel configuration
+	SentinelConfig *config.SentinelConfig
}

// CheckVersion is used to check if the ProtocolVersion is valid

@@ -296,6 +302,7 @@ func DefaultConfig() *Config {
		RPCHoldTimeout:     5 * time.Second,
		TLSConfig:          &config.TLSConfig{},
		ReplicationBackoff: 30 * time.Second,
+		SentinelGCInterval: 30 * time.Second,
	}

	// Enable all known schedulers by default
@@ -5,6 +5,7 @@ import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

@@ -330,6 +331,46 @@ func TestFSM_RegisterJob(t *testing.T) {
	}
}

func TestFSM_RegisterJob_BadNamespace(t *testing.T) {
	t.Parallel()
	fsm := testFSM(t)

	job := mock.Job()
	job.Namespace = "foo"
	req := structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Namespace: job.Namespace,
		},
	}
	buf, err := structs.Encode(structs.JobRegisterRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := fsm.Apply(makeLog(buf))
	if resp == nil {
		t.Fatalf("no resp: %v", resp)
	}
	err, ok := resp.(error)
	if !ok {
		t.Fatalf("resp not of error type: %T %v", resp, resp)
	}
	if !strings.Contains(err.Error(), "non-existent namespace") {
		t.Fatalf("bad error: %v", err)
	}

	// Verify we are not registered
	ws := memdb.NewWatchSet()
	jobOut, err := fsm.State().JobByID(ws, req.Namespace, req.Job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if jobOut != nil {
		t.Fatalf("job found!")
	}
}

func TestFSM_DeregisterJob_Purge(t *testing.T) {
	t.Parallel()
	fsm := testFSM(t)
@@ -75,12 +75,22 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
	// Check job submission permissions
	if aclObj, err := j.srv.resolveToken(args.SecretID); err != nil {
		return err
-	} else if aclObj != nil && !aclObj.AllowNamespaceOperation(structs.DefaultNamespace, acl.NamespaceCapabilitySubmitJob) {
-		return structs.ErrPermissionDenied
+	} else if aclObj != nil {
+		if !aclObj.AllowNsOp(structs.DefaultNamespace, acl.NamespaceCapabilitySubmitJob) {
+			return structs.ErrPermissionDenied
+		}
+		// Check if override is set and we do not have permissions
+		if args.PolicyOverride {
+			if !aclObj.AllowNsOp(structs.DefaultNamespace, acl.NamespaceCapabilitySentinelOverride) {
+				j.srv.logger.Printf("[WARN] nomad.job: policy override attempted without permissions for Job %q", args.Job.ID)
+				return structs.ErrPermissionDenied
+			}
+			j.srv.logger.Printf("[WARN] nomad.job: policy override set for Job %q", args.Job.ID)
+		}
	}

	// Lookup the job
-	snap, err := j.srv.fsm.State().Snapshot()
+	snap, err := j.srv.State().Snapshot()
	if err != nil {
		return err
	}

@@ -149,6 +159,16 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
		}
	}

	// Enforce Sentinel policies
	policyWarnings, err := j.enforceSubmitJob(args.PolicyOverride, args.Job)
	if err != nil {
		return err
	}
	if policyWarnings != nil {
		reply.Warnings = structs.MergeMultierrorWarnings(warnings,
			canonicalizeWarnings, policyWarnings)
	}

	// Clear the Vault token
	args.Job.VaultToken = ""

@@ -158,7 +178,11 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
	args.Job.SetSubmitTime()

	// Commit this update via Raft
-	_, index, err := j.srv.raftApply(structs.JobRegisterRequestType, args)
+	fsmErr, index, err := j.srv.raftApply(structs.JobRegisterRequestType, args)
+	if err, ok := fsmErr.(error); ok && err != nil {
+		j.srv.logger.Printf("[ERR] nomad.job: Register failed: %v", err)
+		return err
+	}
	if err != nil {
		j.srv.logger.Printf("[ERR] nomad.job: Register failed: %v", err)
		return err
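
The change to raftApply's first return value is worth pausing on: errors produced inside the FSM's Apply (such as the bad-namespace rejection) come back as the response value, not as the ordinary error, so both must be checked. A minimal runnable sketch of the same two-path error handling, with a hypothetical stand-in for srv.raftApply:

```go
package main

import (
	"errors"
	"fmt"
)

// raftApplySketch stands in for srv.raftApply: the first return value is
// whatever the FSM returned from Apply (possibly an error), the second is
// the raft log index, and the third is a raft/transport error.
func raftApplySketch(fsmRejects bool) (interface{}, uint64, error) {
	if fsmRejects {
		return errors.New(`job "example" is in non-existent namespace "foo"`), 0, nil
	}
	return nil, 7, nil
}

func main() {
	resp, index, err := raftApplySketch(true)

	// The FSM's own error comes back as the response value and must be
	// type-asserted first; it is invisible to the plain err check below.
	if fsmErr, ok := resp.(error); ok && fsmErr != nil {
		fmt.Println("fsm rejected the write:", fsmErr)
		return
	}
	if err != nil {
		fmt.Println("raft apply failed:", err)
		return
	}
	fmt.Println("applied at index", index)
}
```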
@@ -917,6 +941,31 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse)
	// Set the warning message
	reply.Warnings = structs.MergeMultierrorWarnings(warnings, canonicalizeWarnings)

	// Check job submission permissions, which we assume are the same for plan
	if aclObj, err := j.srv.resolveToken(args.SecretID); err != nil {
		return err
	} else if aclObj != nil {
		if !aclObj.AllowNsOp(structs.DefaultNamespace, acl.NamespaceCapabilitySubmitJob) {
			return structs.ErrPermissionDenied
		}
		// Check if override is set and we do not have permissions
		if args.PolicyOverride {
			if !aclObj.AllowNsOp(structs.DefaultNamespace, acl.NamespaceCapabilitySentinelOverride) {
				return structs.ErrPermissionDenied
			}
		}
	}

	// Enforce Sentinel policies
	policyWarnings, err := j.enforceSubmitJob(args.PolicyOverride, args.Job)
	if err != nil {
		return err
	}
	if policyWarnings != nil {
		reply.Warnings = structs.MergeMultierrorWarnings(warnings,
			canonicalizeWarnings, policyWarnings)
	}

	// Acquire a snapshot of the state
	snap, err := j.srv.fsm.State().Snapshot()
	if err != nil {
@@ -1166,7 +1215,11 @@ func (j *Job) Dispatch(args *structs.JobDispatchRequest, reply *structs.JobDispa
	}

	// Commit this update via Raft
-	_, jobCreateIndex, err := j.srv.raftApply(structs.JobRegisterRequestType, regReq)
+	fsmErr, jobCreateIndex, err := j.srv.raftApply(structs.JobRegisterRequestType, regReq)
+	if err, ok := fsmErr.(error); ok && err != nil {
+		j.srv.logger.Printf("[ERR] nomad.job: Dispatched job register failed: %v", err)
+		return err
+	}
	if err != nil {
		j.srv.logger.Printf("[ERR] nomad.job: Dispatched job register failed: %v", err)
		return err
@@ -0,0 +1,10 @@
// +build !ent

package nomad

import "github.com/hashicorp/nomad/nomad/structs"

// enforceSubmitJob is used to check any Sentinel policies for the submit-job scope
func (j *Job) enforceSubmitJob(override bool, job *structs.Job) (error, error) {
	return nil, nil
}
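
This no-op stub illustrates the build-tag pattern used to keep enterprise-only behavior out of open source builds: the OSS file carries a `// +build !ent` constraint, while an enterprise file (not part of this diff) would carry the real Sentinel enforcement under the inverse tag. A minimal sketch of the layout, with hypothetical file and package names; the second file is shown in comments so the block remains one valid Go file:

```go
// feature_oss.go: compiled unless the "ent" build tag is set.
// +build !ent

package feature

// enforce is a no-op in OSS builds; both returned errors are always nil.
func enforce() (warnings error, err error) { return nil, nil }

// A sibling feature_ent.go would carry the inverse constraint and the real
// implementation, selected with: go build -tags ent
//
//   // +build ent
//
//   package feature
//
//   func enforce() (warnings error, err error) { /* real policy checks */ }
```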
@@ -140,6 +140,42 @@ func TestJobEndpoint_Register_ACL(t *testing.T) {
	}
}

func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	job.Namespace = "foo"
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Try without a token, expect failure
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "non-existent namespace") {
		t.Fatalf("expected namespace error: %v", err)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("expected no job")
	}
}

func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {

@@ -2728,6 +2764,39 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
	}
}

func TestJobEndpoint_Plan_ACL(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create a plan request
	job := mock.Job()
	planReq := &structs.JobPlanRequest{
		Job:  job,
		Diff: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Try without a token, expect failure
	var planResp structs.JobPlanResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err == nil {
		t.Fatalf("expected error")
	}

	// Try with a token
	planReq.SecretID = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
@@ -52,7 +52,10 @@ func (s *Server) DispatchJob(job *structs.Job) (*structs.Evaluation, error) {
			Namespace: job.Namespace,
		},
	}
-	_, index, err := s.raftApply(structs.JobRegisterRequestType, req)
+	fsmErr, index, err := s.raftApply(structs.JobRegisterRequestType, req)
+	if err, ok := fsmErr.(error); ok && err != nil {
+		return nil, err
+	}
	if err != nil {
		return nil, err
	}

@@ -208,7 +208,6 @@ func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) {
	job := mock.PeriodicJob()
	job2 := mock.PeriodicJob()
	job2.Namespace = "test"

	added, err := p.Add(job)
	assert.Nil(err)
	assert.True(added)

@@ -515,6 +514,10 @@ func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) {
		t.Fatalf("got %d tracked; want 2", l)
	}

	if l := len(p.Tracked()); l != 2 {
		t.Fatalf("got %d tracked; want 2", l)
	}

	time.Sleep(2 * time.Second)

	// Check that the jobs were launched correctly.
@@ -166,6 +166,9 @@ type Server struct {
	// aclCache is used to maintain the parsed ACL objects
	aclCache *lru.TwoQueueCache

	// EnterpriseState is used to fill in state for Pro/Ent builds
	EnterpriseState

	left       bool
	shutdown   bool
	shutdownCh chan struct{}

@@ -309,6 +312,11 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, logger *log.Logg
		return nil, fmt.Errorf("failed to create deployment watcher: %v", err)
	}

	// Setup the enterprise state
	if err := s.setupEnterprise(config); err != nil {
		return nil, err
	}

	// Monitor leadership changes
	go s.monitorLeadership()

@@ -333,6 +341,9 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, logger *log.Logg
	// Emit metrics
	go s.heartbeatStats()

	// Start enterprise background workers
	s.startEnterpriseBackground()

	// Done
	return s, nil
}
@@ -0,0 +1,11 @@
// +build !pro,!ent

package nomad

type EnterpriseState struct{}

func (s *Server) setupEnterprise(config *Config) error {
	return nil
}

func (s *Server) startEnterpriseBackground() {}
@@ -659,6 +659,14 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b
	if job.Namespace == "" {
		panic("empty namespace")
	}

	// Assert the namespace exists
	if exists, err := s.namespaceExists(txn, job.Namespace); err != nil {
		return err
	} else if !exists {
		return fmt.Errorf("job %q is in non-existent namespace %q", job.ID, job.Namespace)
	}

	// Check if the job already exists
	existing, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
@@ -0,0 +1,13 @@
// +build !pro,!ent

package state

import (
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/nomad/structs"
)

// namespaceExists returns whether a namespace exists
func (s *StateStore) namespaceExists(txn *memdb.Txn, namespace string) (bool, error) {
	return namespace == structs.DefaultNamespace, nil
}
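
Together with the guard added to upsertJobImpl above, this stub means OSS builds accept jobs only in the default namespace. A self-contained sketch of that combined behavior (function names here are hypothetical stand-ins, not Nomad APIs):

```go
package main

import "fmt"

const defaultNamespace = "default"

// namespaceExistsSketch mirrors the OSS stub above: without namespace
// support, only the default namespace is considered to exist.
func namespaceExistsSketch(ns string) bool {
	return ns == defaultNamespace
}

// upsertGuardSketch mirrors the assertion added to upsertJobImpl.
func upsertGuardSketch(jobID, ns string) error {
	if !namespaceExistsSketch(ns) {
		return fmt.Errorf("job %q is in non-existent namespace %q", jobID, ns)
	}
	return nil
}

func main() {
	fmt.Println(upsertGuardSketch("example", "default")) // <nil>
	fmt.Println(upsertGuardSketch("example", "foo"))     // non-existent namespace error
}
```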
@@ -57,10 +57,10 @@ func TestStateStore_Blocking_Timeout(t *testing.T) {
}

func TestStateStore_Blocking_MinQuery(t *testing.T) {
-	job := mock.Job()
+	node := mock.Node()
	count := 0
	queryFn := func(ws memdb.WatchSet, s *StateStore) (interface{}, uint64, error) {
-		_, err := s.JobByID(ws, job.Namespace, job.ID)
+		_, err := s.NodeByID(ws, node.ID)
		if err != nil {
			return nil, 0, err
		}

@@ -81,7 +81,7 @@ func TestStateStore_Blocking_MinQuery(t *testing.T) {
	defer cancel()

	time.AfterFunc(5*time.Millisecond, func() {
-		state.UpsertJob(11, job)
+		state.UpsertNode(11, node)
	})

	resp, idx, err := state.BlockingQuery(queryFn, 10, deadlineCtx)
@@ -461,75 +461,6 @@ func TestStateStore_Deployments(t *testing.T) {
	}
}

func TestStateStore_Deployments_Namespace(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	ns1 := "namespaced"
	deploy1 := mock.Deployment()
	deploy2 := mock.Deployment()
	deploy1.Namespace = ns1
	deploy2.Namespace = ns1

	ns2 := "new-namespace"
	deploy3 := mock.Deployment()
	deploy4 := mock.Deployment()
	deploy3.Namespace = ns2
	deploy4.Namespace = ns2

	// Create watchsets so we can test that update fires the watch
	watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()}
	_, err := state.DeploymentsByNamespace(watches[0], ns1)
	assert.Nil(err)
	_, err = state.DeploymentsByNamespace(watches[1], ns2)
	assert.Nil(err)

	assert.Nil(state.UpsertDeployment(1001, deploy1))
	assert.Nil(state.UpsertDeployment(1002, deploy2))
	assert.Nil(state.UpsertDeployment(1003, deploy3))
	assert.Nil(state.UpsertDeployment(1004, deploy4))
	assert.True(watchFired(watches[0]))
	assert.True(watchFired(watches[1]))

	ws := memdb.NewWatchSet()
	iter1, err := state.DeploymentsByNamespace(ws, ns1)
	assert.Nil(err)
	iter2, err := state.DeploymentsByNamespace(ws, ns2)
	assert.Nil(err)

	var out1 []*structs.Deployment
	for {
		raw := iter1.Next()
		if raw == nil {
			break
		}
		out1 = append(out1, raw.(*structs.Deployment))
	}

	var out2 []*structs.Deployment
	for {
		raw := iter2.Next()
		if raw == nil {
			break
		}
		out2 = append(out2, raw.(*structs.Deployment))
	}

	assert.Len(out1, 2)
	assert.Len(out2, 2)

	for _, deploy := range out1 {
		assert.Equal(ns1, deploy.Namespace)
	}
	for _, deploy := range out2 {
		assert.Equal(ns2, deploy.Namespace)
	}

	index, err := state.Index("deployment")
	assert.Nil(err)
	assert.EqualValues(1004, index)
	assert.False(watchFired(ws))
}

func TestStateStore_DeploymentsByIDPrefix(t *testing.T) {
	state := testStateStore(t)
	deploy := mock.Deployment()

@@ -616,54 +547,6 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) {
	}
}

func TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	deploy1 := mock.Deployment()
	deploy1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2"
	deploy2 := mock.Deployment()
	deploy2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2"
	sharedPrefix := "aabb"

	ns1, ns2 := "namespace1", "namespace2"
	deploy1.Namespace = ns1
	deploy2.Namespace = ns2

	assert.Nil(state.UpsertDeployment(1000, deploy1))
	assert.Nil(state.UpsertDeployment(1001, deploy2))

	gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment {
		var deploys []*structs.Deployment
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			deploy := raw.(*structs.Deployment)
			deploys = append(deploys, deploy)
		}
		return deploys
	}

	ws := memdb.NewWatchSet()
	iter1, err := state.DeploymentsByIDPrefix(ws, ns1, sharedPrefix)
	assert.Nil(err)
	iter2, err := state.DeploymentsByIDPrefix(ws, ns2, sharedPrefix)
	assert.Nil(err)

	deploysNs1 := gatherDeploys(iter1)
	deploysNs2 := gatherDeploys(iter2)
	assert.Len(deploysNs1, 1)
	assert.Len(deploysNs2, 1)

	iter1, err = state.DeploymentsByIDPrefix(ws, ns1, deploy1.ID[:8])
	assert.Nil(err)

	deploysNs1 = gatherDeploys(iter1)
	assert.Len(deploysNs1, 1)
	assert.False(watchFired(ws))
}

func TestStateStore_UpsertNode_Node(t *testing.T) {
	state := testStateStore(t)
	node := mock.Node()
@@ -1277,6 +1160,21 @@ func TestStateStore_UpsertJob_NoEphemeralDisk(t *testing.T) {
	}
}

func TestStateStore_UpsertJob_BadNamespace(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	job := mock.Job()
	job.Namespace = "foo"

	err := state.UpsertJob(1000, job)
	assert.Contains(err.Error(), "non-existent namespace")

	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	assert.Nil(err)
	assert.Nil(out)
}

// Upsert a job that is the child of a parent job and ensure its summary gets
// updated.
func TestStateStore_UpsertJob_ChildJob(t *testing.T) {

@@ -1635,75 +1533,6 @@ func TestStateStore_Jobs(t *testing.T) {
	}
}

func TestStateStore_JobsByNamespace(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	ns1 := "new"
	job1 := mock.Job()
	job2 := mock.Job()
	job1.Namespace = ns1
	job2.Namespace = ns1

	ns2 := "new-namespace"
	job3 := mock.Job()
	job4 := mock.Job()
	job3.Namespace = ns2
	job4.Namespace = ns2

	// Create watchsets so we can test that update fires the watch
	watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()}
	_, err := state.JobsByNamespace(watches[0], ns1)
	assert.Nil(err)
	_, err = state.JobsByNamespace(watches[1], ns2)
	assert.Nil(err)

	assert.Nil(state.UpsertJob(1001, job1))
	assert.Nil(state.UpsertJob(1002, job2))
	assert.Nil(state.UpsertJob(1003, job3))
	assert.Nil(state.UpsertJob(1004, job4))
	assert.True(watchFired(watches[0]))
	assert.True(watchFired(watches[1]))

	ws := memdb.NewWatchSet()
	iter1, err := state.JobsByNamespace(ws, ns1)
	assert.Nil(err)
	iter2, err := state.JobsByNamespace(ws, ns2)
	assert.Nil(err)

	var out1 []*structs.Job
	for {
		raw := iter1.Next()
		if raw == nil {
			break
		}
		out1 = append(out1, raw.(*structs.Job))
	}

	var out2 []*structs.Job
	for {
		raw := iter2.Next()
		if raw == nil {
			break
		}
		out2 = append(out2, raw.(*structs.Job))
	}

	assert.Len(out1, 2)
	assert.Len(out2, 2)

	for _, job := range out1 {
		assert.Equal(ns1, job.Namespace)
	}
	for _, job := range out2 {
		assert.Equal(ns2, job.Namespace)
	}

	index, err := state.Index("jobs")
	assert.Nil(err)
	assert.EqualValues(1004, index)
	assert.False(watchFired(ws))
}

func TestStateStore_JobVersions(t *testing.T) {
	state := testStateStore(t)
	var jobs []*structs.Job
@@ -1826,83 +1655,6 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) {
	}
}

func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	job1 := mock.Job()
	job2 := mock.Job()

	jobID := "redis"
	ns1, ns2 := "namespace1", "namespace2"
	job1.ID = jobID
	job2.ID = jobID
	job1.Namespace = ns1
	job2.Namespace = ns2

	assert.Nil(state.UpsertJob(1000, job1))
	assert.Nil(state.UpsertJob(1001, job2))

	gatherJobs := func(iter memdb.ResultIterator) []*structs.Job {
		var jobs []*structs.Job
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			jobs = append(jobs, raw.(*structs.Job))
		}
		return jobs
	}

	// Try full match
	ws := memdb.NewWatchSet()
	iter1, err := state.JobsByIDPrefix(ws, ns1, jobID)
	assert.Nil(err)
	iter2, err := state.JobsByIDPrefix(ws, ns2, jobID)
	assert.Nil(err)

	jobsNs1 := gatherJobs(iter1)
	assert.Len(jobsNs1, 1)

	jobsNs2 := gatherJobs(iter2)
	assert.Len(jobsNs2, 1)

	// Try prefix
	iter1, err = state.JobsByIDPrefix(ws, ns1, "re")
	assert.Nil(err)
	iter2, err = state.JobsByIDPrefix(ws, ns2, "re")
	assert.Nil(err)

	jobsNs1 = gatherJobs(iter1)
	jobsNs2 = gatherJobs(iter2)
	assert.Len(jobsNs1, 1)
	assert.Len(jobsNs2, 1)

	job3 := mock.Job()
	job3.ID = "riak"
	job3.Namespace = ns1
	assert.Nil(state.UpsertJob(1003, job3))
	assert.True(watchFired(ws))

	ws = memdb.NewWatchSet()
	iter1, err = state.JobsByIDPrefix(ws, ns1, "r")
	assert.Nil(err)
	iter2, err = state.JobsByIDPrefix(ws, ns2, "r")
	assert.Nil(err)

	jobsNs1 = gatherJobs(iter1)
	jobsNs2 = gatherJobs(iter2)
	assert.Len(jobsNs1, 2)
	assert.Len(jobsNs2, 1)

	iter1, err = state.JobsByIDPrefix(ws, ns1, "ri")
	assert.Nil(err)

	jobsNs1 = gatherJobs(iter1)
	assert.Len(jobsNs1, 1)
	assert.False(watchFired(ws))
}

func TestStateStore_JobsByPeriodic(t *testing.T) {
	state := testStateStore(t)
	var periodic, nonPeriodic []*structs.Job
@@ -2662,72 +2414,6 @@ func TestStateStore_UpsertEvals_Eval(t *testing.T) {
	}
}

func TestStateStore_UpsertEvals_Namespace(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	ns1 := "namespaced"
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	eval1.Namespace = ns1
	eval2.Namespace = ns1

	ns2 := "new-namespace"
	eval3 := mock.Eval()
	eval4 := mock.Eval()
	eval3.Namespace = ns2
	eval4.Namespace = ns2

	// Create watchsets so we can test that update fires the watch
	watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()}
	_, err := state.EvalsByNamespace(watches[0], ns1)
	assert.Nil(err)
	_, err = state.EvalsByNamespace(watches[1], ns2)
	assert.Nil(err)

	assert.Nil(state.UpsertEvals(1001, []*structs.Evaluation{eval1, eval2, eval3, eval4}))
	assert.True(watchFired(watches[0]))
	assert.True(watchFired(watches[1]))

	ws := memdb.NewWatchSet()
	iter1, err := state.EvalsByNamespace(ws, ns1)
	assert.Nil(err)
	iter2, err := state.EvalsByNamespace(ws, ns2)
	assert.Nil(err)

	var out1 []*structs.Evaluation
	for {
		raw := iter1.Next()
		if raw == nil {
			break
		}
		out1 = append(out1, raw.(*structs.Evaluation))
	}

	var out2 []*structs.Evaluation
	for {
		raw := iter2.Next()
		if raw == nil {
			break
		}
		out2 = append(out2, raw.(*structs.Evaluation))
	}

	assert.Len(out1, 2)
	assert.Len(out2, 2)

	for _, eval := range out1 {
		assert.Equal(ns1, eval.Namespace)
	}
	for _, eval := range out2 {
		assert.Equal(ns2, eval.Namespace)
	}

	index, err := state.Index("evals")
	assert.Nil(err)
	assert.EqualValues(1001, index)
	assert.False(watchFired(ws))
}

func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) {
	state := testStateStore(t)
@@ -3307,52 +2993,6 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) {
	}
}

func TestStateStore_EvalsByIDPrefix_Namespaces(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	eval1 := mock.Eval()
	eval1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2"
	eval2 := mock.Eval()
	eval2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2"
	sharedPrefix := "aabb"

	ns1, ns2 := "namespace1", "namespace2"
	eval1.Namespace = ns1
	eval2.Namespace = ns2

	assert.Nil(state.UpsertEvals(1000, []*structs.Evaluation{eval1, eval2}))

	gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation {
		var evals []*structs.Evaluation
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			evals = append(evals, raw.(*structs.Evaluation))
		}
		return evals
	}

	ws := memdb.NewWatchSet()
	iter1, err := state.EvalsByIDPrefix(ws, ns1, sharedPrefix)
	assert.Nil(err)
	iter2, err := state.EvalsByIDPrefix(ws, ns2, sharedPrefix)
	assert.Nil(err)

	evalsNs1 := gatherEvals(iter1)
	evalsNs2 := gatherEvals(iter2)
	assert.Len(evalsNs1, 1)
	assert.Len(evalsNs2, 1)

	iter1, err = state.EvalsByIDPrefix(ws, ns1, eval1.ID[:8])
	assert.Nil(err)

	evalsNs1 = gatherEvals(iter1)
	assert.Len(evalsNs1, 1)
	assert.False(watchFired(ws))
}

func TestStateStore_RestoreEval(t *testing.T) {
	state := testStateStore(t)
	eval := mock.Eval()
@@ -3742,79 +3382,6 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) {
	}
}

func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	ns1 := "namespaced"
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc1.Namespace = ns1
	alloc1.Job.Namespace = ns1
	alloc2.Namespace = ns1
	alloc2.Job.Namespace = ns1

	ns2 := "new-namespace"
	alloc3 := mock.Alloc()
	alloc4 := mock.Alloc()
	alloc3.Namespace = ns2
	alloc3.Job.Namespace = ns2
	alloc4.Namespace = ns2
	alloc4.Job.Namespace = ns2

	assert.Nil(state.UpsertJob(999, alloc1.Job))
	assert.Nil(state.UpsertJob(1000, alloc3.Job))

	// Create watchsets so we can test that update fires the watch
	watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()}
	_, err := state.AllocsByNamespace(watches[0], ns1)
	assert.Nil(err)
	_, err = state.AllocsByNamespace(watches[1], ns2)
	assert.Nil(err)

	assert.Nil(state.UpsertAllocs(1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}))
	assert.True(watchFired(watches[0]))
	assert.True(watchFired(watches[1]))

	ws := memdb.NewWatchSet()
	iter1, err := state.AllocsByNamespace(ws, ns1)
	assert.Nil(err)
	iter2, err := state.AllocsByNamespace(ws, ns2)
	assert.Nil(err)

	var out1 []*structs.Allocation
	for {
		raw := iter1.Next()
		if raw == nil {
			break
		}
		out1 = append(out1, raw.(*structs.Allocation))
	}

	var out2 []*structs.Allocation
	for {
		raw := iter2.Next()
		if raw == nil {
			break
		}
		out2 = append(out2, raw.(*structs.Allocation))
	}

	assert.Len(out1, 2)
	assert.Len(out2, 2)

	for _, alloc := range out1 {
		assert.Equal(ns1, alloc.Namespace)
	}
	for _, alloc := range out2 {
		assert.Equal(ns2, alloc.Namespace)
	}

	index, err := state.Index("allocs")
	assert.Nil(err)
	assert.EqualValues(1001, index)
	assert.False(watchFired(ws))
}

func TestStateStore_UpsertAlloc_Deployment(t *testing.T) {
	state := testStateStore(t)
	deployment := mock.Deployment()
@@ -4707,53 +4274,6 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) {
	}
}

func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) {
	assert := assert.New(t)
	state := testStateStore(t)
	alloc1 := mock.Alloc()
	alloc1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2"
	alloc2 := mock.Alloc()
	alloc2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2"
	sharedPrefix := "aabb"

	ns1, ns2 := "namespace1", "namespace2"
	alloc1.Namespace = ns1
	alloc2.Namespace = ns2

	assert.Nil(state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))

	gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
		var allocs []*structs.Allocation
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			alloc := raw.(*structs.Allocation)
			allocs = append(allocs, alloc)
		}
		return allocs
	}

	ws := memdb.NewWatchSet()
	iter1, err := state.AllocsByIDPrefix(ws, ns1, sharedPrefix)
	assert.Nil(err)
	iter2, err := state.AllocsByIDPrefix(ws, ns2, sharedPrefix)
	assert.Nil(err)

	allocsNs1 := gatherAllocs(iter1)
	allocsNs2 := gatherAllocs(iter2)
	assert.Len(allocsNs1, 1)
	assert.Len(allocsNs2, 1)

	iter1, err = state.AllocsByIDPrefix(ws, ns1, alloc1.ID[:8])
	assert.Nil(err)

	allocsNs1 = gatherAllocs(iter1)
	assert.Len(allocsNs1, 1)
	assert.False(watchFired(ws))
}

func TestStateStore_Allocs(t *testing.T) {
	state := testStateStore(t)
	var allocs []*structs.Allocation
@@ -0,0 +1,23 @@
package config

// SentinelConfig is configuration specific to Sentinel
type SentinelConfig struct {
	// Imports are the configured imports
	Imports []*SentinelImport `hcl:"import,expand"`
}

// SentinelImport is used per configured import
type SentinelImport struct {
	Name string   `hcl:",key"`
	Path string   `hcl:"path"`
	Args []string `hcl:"args"`
}

// Merge is used to merge two Sentinel configs together. The settings from the input always take precedence.
func (a *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig {
	result := *a
	if len(b.Imports) > 0 {
		result.Imports = append(result.Imports, b.Imports...)
	}
	return &result
}
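
One subtlety of Merge above: for the Imports slice, "take precedence" means the argument's entries are appended after the receiver's rather than replacing them. A self-contained sketch of that behavior, using local stand-in types (lowercased hypothetical names, not the exported config types):

```go
package main

import "fmt"

// Local stand-ins for the config types above so the sketch compiles alone.
type sentinelImport struct{ Name string }

type sentinelConfig struct{ Imports []*sentinelImport }

// merge mirrors SentinelConfig.Merge: the receiver is shallow-copied and the
// argument's imports are appended after the receiver's.
func merge(a, b *sentinelConfig) *sentinelConfig {
	result := *a
	if len(b.Imports) > 0 {
		result.Imports = append(result.Imports, b.Imports...)
	}
	return &result
}

func main() {
	base := &sentinelConfig{Imports: []*sentinelImport{{Name: "time"}}}
	extra := &sentinelConfig{Imports: []*sentinelImport{{Name: "http"}}}
	for _, imp := range merge(base, extra).Imports {
		fmt.Println(imp.Name) // prints "time", then "http"
	}
}
```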
@@ -344,6 +344,9 @@ type JobRegisterRequest struct {
	EnforceIndex   bool
	JobModifyIndex uint64

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	WriteRequest
}

@@ -383,6 +386,8 @@ type JobListRequest struct {
type JobPlanRequest struct {
	Job  *Job
	Diff bool // Toggles an annotated diff
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}
@@ -0,0 +1,18 @@
#!/usr/bin/env bash

# Install NVM for simple Node.js version management
wget -qO- https://raw.githubusercontent.com/creationix/nvm/v0.33.2/install.sh | bash

# This enables NVM without a logout/login
export NVM_DIR="/home/vagrant/.nvm"
# shellcheck source=/dev/null
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm

# Install Node, Ember CLI, and Phantom for UI development
nvm install 6.11.0
nvm alias default 6.11.0
npm install -g ember-cli phantomjs-prebuilt

# Install Yarn for front-end dependency management
curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version 0.24.6
export PATH="$HOME/.yarn/bin:$PATH"
@@ -0,0 +1,20 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# editorconfig.org

root = true


[*]
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 2

[*.hbs]
insert_final_newline = false

[*.{diff,md}]
trim_trailing_whitespace = false
@@ -0,0 +1,10 @@
{
  /**
    Ember CLI sends analytics information by default. The data is completely
    anonymous, but there are times when you might want to disable this behavior.

    Setting `disableAnalytics` to true will prevent any data from being sent.
  */
  "disableAnalytics": false,
  "proxy": "http://127.0.0.1:4646"
}

@@ -0,0 +1 @@
mirage/
@@ -0,0 +1,20 @@
module.exports = {
  globals: {
    server: true,
  },
  env: {
    browser: true,
    es6: true,
  },
  extends: 'eslint:recommended',
  parserOptions: {
    ecmaVersion: 2017,
    sourceType: 'module',
  },
  rules: {
    indent: ['error', 2, { SwitchCase: 1 }],
    'linebreak-style': ['error', 'unix'],
    quotes: ['error', 'single', 'avoid-escape'],
    semi: ['error', 'always'],
  },
};
@ -0,0 +1,21 @@
|
|||
---
|
||||
language: node_js
|
||||
node_js:
|
||||
- "6"
|
||||
|
||||
sudo: false
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/.npm
|
||||
|
||||
before_install:
|
||||
- npm config set spin false
|
||||
- npm install -g phantomjs-prebuilt
|
||||
- phantomjs --version
|
||||
|
||||
install:
|
||||
- npm install
|
||||
|
||||
script:
|
||||
- npm test
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"ignore_dirs": ["tmp", "dist"]
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
# Nomad UI
|
||||
|
||||
The official Nomad UI.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
This is an [ember.js](https://emberjs.com/) project, and you will need the following tools installed on your computer.
|
||||
|
||||
* [Node.js](https://nodejs.org/)
|
||||
* [Yarn](https://yarnpkg.com)
|
||||
* [Ember CLI](https://ember-cli.com/)
|
||||
* [PhantomJS](http://phantomjs.org/) (for running tests)
|
||||
|
||||
## Installation
|
||||
|
||||
The Nomad UI gets cloned along with the rest of Nomad. To install dependencies, do the following from the root of the Nomad project:
|
||||
|
||||
```
|
||||
$ cd ui
|
||||
$ yarn
|
||||
```
|
||||
|
||||
## Running / Development
|
||||
|
||||
First, make sure nomad is running. The UI, in development mode, runs independently from Nomad, so this could be an official release or a dev branch. Likewise, Nomad can be running in server mode or dev mode. As long as the API is accessible, the UI will work as expected.
|
||||
|
||||
* `ember serve`
|
||||
* Visit your app at [http://localhost:4200](http://localhost:4200).
|
||||
|
||||
## Running / Development with Vagrant
|
||||
|
||||
All necessary tools for UI development are installed as part of the Vagrantfile. This is primarily to make it easy to build the UI from source while working on Nomad. Due to the filesystem requirements of [Broccoli](http://broccolijs.com/) (which powers Ember CLI), it is strongly discouraged to use Vagrant for developing changes to the UI.
|
||||
|
||||
That said, development with Vagrant is still possible, but the `ember serve` command requires two modifications:
|
||||
|
||||
* `--watch polling`: This allows the vm to notice file changes made in the host environment.
|
||||
* `--port 4201`: The default port 4200 is not forwarded, since local development is recommended.
|
||||
|
||||
This makes the full command for running the UI in development mode in Vagrant:
|
||||
|
||||
```
|
||||
$ ember serve --watch polling --port 4201
|
||||
```
|
||||
|
||||
### Running Tests
|
||||
|
||||
Nomad UI tests can be run independently of Nomad golang tests.
|
||||
|
||||
* `ember test` (single run, headless browser)
|
||||
* `ember test --server` (watches for changes, runs in a full browser)
|
||||
|
||||
### Building
|
||||
|
||||
Typically `make release` or `make dev-ui` will be the desired build workflow, but in the event that build artifacts need to be inspected, `ember build` will output compiled files in `ui/dist`.
|
||||
|
||||
* `ember build` (development)
|
||||
* `ember build --environment production` (production)
|
||||
|
||||
### Releasing
|
||||
|
||||
Nomad UI releases are in lockstep with Nomad releases and are integrated into the `make release` toolchain.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
#### The UI is running, but none of the API requests are working
|
||||
|
||||
By default (according to the `.embercli` file) a proxy address of `http://localhost:4646` is used. If you are running Nomad at a different address, you will need to override this setting when running ember serve: `ember serve --proxy http://newlocation:1111`.
|
||||
|
||||
#### Nomad is running in Vagrant, but I can't access the API from my host machine
|
||||
|
||||
Nomad binds to `127.0.0.1:4646` by default, which is the loopback address. Try running nomad bound to `0.0.0.0`: `bin/nomad -bind 0.0.0.0`.
|
||||
|
||||
Ports also need to be forwarded in the Vagrantfile. 4646 is already forwarded, but if a port other than the default is being used, that port needs to be added to the Vagrantfile and `vagrant reload` needs to be run.
|
|
@ -0,0 +1,9 @@
|
|||
import ApplicationAdapter from './application';
|
||||
|
||||
export default ApplicationAdapter.extend({
|
||||
pathForType: () => 'agent/members',
|
||||
urlForFindRecord() {
|
||||
const [, ...args] = arguments;
|
||||
return this.urlForFindAll(...args);
|
||||
},
|
||||
});
|
|
@@ -0,0 +1,58 @@
import Ember from 'ember';
import RESTAdapter from 'ember-data/adapters/rest';

const { get, computed, inject } = Ember;

export const namespace = 'v1';

export default RESTAdapter.extend({
  namespace,

  token: inject.service(),

  headers: computed('token.secret', function() {
    const token = this.get('token.secret');
    return (
      token && {
        'X-Nomad-Token': token,
      }
    );
  }),

  // Single record requests deviate from REST practice by using
  // the singular form of the resource name.
  //
  // REST:  /some-resources/:id
  // Nomad: /some-resource/:id
  //
  // This is the original implementation of _buildURL
  // without the pluralization of modelName
  urlForFindRecord(id, modelName) {
    let path;
    let url = [];
    let host = get(this, 'host');
    let prefix = this.urlPrefix();

    if (modelName) {
      path = modelName.camelize();
      if (path) {
        url.push(path);
      }
    }

    if (id) {
      url.push(encodeURIComponent(id));
    }

    if (prefix) {
      url.unshift(prefix);
    }

    url = url.join('/');
    if (!host && url && url.charAt(0) !== '/') {
      url = '/' + url;
    }

    return url;
  },
});
@@ -0,0 +1,32 @@
import Ember from 'ember';
import ApplicationAdapter from './application';

const { RSVP } = Ember;

export default ApplicationAdapter.extend({
  findRecord(store, { modelName }, id, snapshot) {
    // To make a findRecord response reflect the findMany response, the JobSummary
    // from /summary needs to be stitched into the response.
    return RSVP.hash({
      job: this._super(...arguments),
      summary: this.ajax(`${this.buildURL(modelName, id, snapshot, 'findRecord')}/summary`),
    }).then(({ job, summary }) => {
      job.JobSummary = summary;
      return job;
    });
  },

  findAllocations(job) {
    const url = `${this.buildURL('job', job.get('id'), job, 'findRecord')}/allocations`;
    return this.ajax(url, 'GET').then(allocs => {
      return this.store.pushPayload('allocation', {
        allocations: allocs,
      });
    });
  },

  fetchRawDefinition(job) {
    const url = this.buildURL('job', job.get('id'), job, 'findRecord');
    return this.ajax(url, 'GET');
  },
});
@@ -0,0 +1,12 @@
import ApplicationAdapter from './application';

export default ApplicationAdapter.extend({
  findAllocations(node) {
    const url = `${this.buildURL('node', node.get('id'), node, 'findRecord')}/allocations`;
    return this.ajax(url, 'GET').then(allocs => {
      return this.store.pushPayload('allocation', {
        allocations: allocs,
      });
    });
  },
});
@@ -0,0 +1,18 @@
import Ember from 'ember';
import Resolver from './resolver';
import loadInitializers from 'ember-load-initializers';
import config from './config/environment';

let App;

Ember.MODEL_FACTORY_INJECTIONS = true;

App = Ember.Application.extend({
  modulePrefix: config.modulePrefix,
  podModulePrefix: config.podModulePrefix,
  Resolver
});

loadInitializers(App, config.modulePrefix);

export default App;
@@ -0,0 +1,20 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: 'tr',

  classNames: ['allocation-row', 'is-interactive'],

  allocation: null,

  // Used to determine whether the row should mention the node or the job
  context: null,

  onClick() {},

  click(event) {
    this.get('onClick')(event);
  },
});
@@ -0,0 +1,45 @@
import Ember from 'ember';
import DistributionBar from './distribution-bar';

const { computed } = Ember;

export default DistributionBar.extend({
  layoutName: 'components/distribution-bar',

  allocationContainer: null,

  data: computed(
    'allocationContainer.{queuedAllocs,completeAllocs,failedAllocs,runningAllocs,startingAllocs,lostAllocs}',
    function() {
      if (!this.get('allocationContainer')) {
        return [];
      }

      const allocs = this.get('allocationContainer').getProperties(
        'queuedAllocs',
        'completeAllocs',
        'failedAllocs',
        'runningAllocs',
        'startingAllocs',
        'lostAllocs'
      );
      return [
        { label: 'Queued', value: allocs.queuedAllocs, className: 'queued' },
        {
          label: 'Starting',
          value: allocs.startingAllocs,
          className: 'starting',
          layers: 2,
        },
        { label: 'Running', value: allocs.runningAllocs, className: 'running' },
        {
          label: 'Complete',
          value: allocs.completeAllocs,
          className: 'complete',
        },
        { label: 'Failed', value: allocs.failedAllocs, className: 'failed' },
        { label: 'Lost', value: allocs.lostAllocs, className: 'lost' },
      ];
    }
  ),
});
@@ -0,0 +1,7 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: '',
});
@@ -0,0 +1,24 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: 'tr',
  classNames: ['client-node-row', 'is-interactive'],

  node: null,

  onClick() {},

  click(event) {
    this.get('onClick')(event);
  },

  didReceiveAttrs() {
    // Reload the node in order to get detail information
    const node = this.get('node');
    if (node) {
      node.reload();
    }
  },
});
@@ -0,0 +1,141 @@
import Ember from 'ember';
import d3 from 'npm:d3-selection';
import 'npm:d3-transition';
import styleStringProperty from '../utils/properties/style-string';

const { Component, computed, run, assign, guidFor } = Ember;
const sumAggregate = (total, val) => total + val;

export default Component.extend({
  classNames: ['chart', 'distribution-bar'],
  classNameBindings: ['isNarrow:is-narrow'],

  chart: null,
  data: null,
  activeDatum: null,
  isNarrow: false,

  tooltipStyle: styleStringProperty('tooltipPosition'),
  maskId: null,

  _data: computed('data', function() {
    const data = this.get('data');
    const sum = data.mapBy('value').reduce(sumAggregate, 0);

    return data.map(({ label, value, className, layers }, index) => ({
      label,
      value,
      className,
      layers,
      index,
      percent: value / sum,
      offset:
        data
          .slice(0, index)
          .mapBy('value')
          .reduce(sumAggregate, 0) / sum,
    }));
  }),

  didInsertElement() {
    const chart = d3.select(this.$('svg')[0]);
    const maskId = `dist-mask-${guidFor(this)}`;
    this.setProperties({ chart, maskId });

    this.$('svg clipPath').attr('id', maskId);

    chart.on('mouseleave', () => {
      run(() => {
        this.set('isActive', false);
        this.set('activeDatum', null);
        chart
          .selectAll('g')
          .classed('active', false)
          .classed('inactive', false);
      });
    });

    this.renderChart();
  },

  didUpdateAttrs() {
    this.renderChart();
  },

  // prettier-ignore
  /* eslint-disable */
  renderChart() {
    const { chart, _data, isNarrow } = this.getProperties('chart', '_data', 'isNarrow');
    const width = this.$('svg').width();
    const filteredData = _data.filter(d => d.value > 0);

    let slices = chart.select('.bars').selectAll('g').data(filteredData);
    let sliceCount = filteredData.length;

    slices.exit().remove();

    let slicesEnter = slices.enter()
      .append('g')
      .on('mouseenter', d => {
        run(() => {
          const slice = slices.filter(datum => datum === d);
          slices.classed('active', false).classed('inactive', true);
          slice.classed('active', true).classed('inactive', false);
          this.set('activeDatum', d);

          const box = slice.node().getBBox();
          const pos = box.x + box.width / 2;

          // Ensure that the position is set before the tooltip is visible
          run.schedule('afterRender', this, () => this.set('isActive', true));
          this.set('tooltipPosition', {
            left: pos,
          });
        });
      });

    slices = slices.merge(slicesEnter);
    slices.attr('class', d => d.className || `slice-${filteredData.indexOf(d)}`);

    const setWidth = d => `${width * d.percent - (d.index === sliceCount - 1 || d.index === 0 ? 1 : 2)}px`
    const setOffset = d => `${width * d.offset + (d.index === 0 ? 0 : 1)}px`

    let hoverTargets = slices.selectAll('.target').data(d => [d]);
    hoverTargets.enter()
      .append('rect')
      .attr('class', 'target')
      .attr('width', setWidth)
      .attr('height', '100%')
      .attr('x', setOffset)
      .merge(hoverTargets)
      .transition()
      .duration(200)
      .attr('width', setWidth)
      .attr('x', setOffset)


    let layers = slices.selectAll('.bar').data((d, i) => {
      return new Array(d.layers || 1).fill(assign({ index: i }, d));
    });
    layers.enter()
      .append('rect')
      .attr('width', setWidth)
      .attr('x', setOffset)
      .attr('y', () => isNarrow ? '50%' : 0)
      .attr('clip-path', `url(#${this.get('maskId')})`)
      .attr('height', () => isNarrow ? '6px' : '100%')
      .merge(layers)
      .attr('class', (d, i) => `bar layer-${i}`)
      .transition()
      .duration(200)
      .attr('width', setWidth)
      .attr('x', setOffset)

    if (isNarrow) {
      d3.select(this.get('element')).select('.mask')
        .attr('height', '6px')
        .attr('y', '50%');
    }
  },
  /* eslint-enable */
});
@@ -0,0 +1,10 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  classNames: ['job-deployment', 'boxed-section'],

  deployment: null,
  isOpen: false,
});
@@ -0,0 +1,7 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: '',
});
@@ -0,0 +1,43 @@
import Ember from 'ember';
import moment from 'moment';

const { Component, computed } = Ember;

export default Component.extend({
  tagName: 'ol',
  classNames: ['timeline'],

  deployments: computed(() => []),

  sortedDeployments: computed('deployments.@each.version.submitTime', function() {
    return this.get('deployments')
      .sortBy('version.submitTime')
      .reverse();
  }),

  annotatedDeployments: computed('sortedDeployments.@each.version', function() {
    const deployments = this.get('sortedDeployments');
    return deployments.map((deployment, index) => {
      const meta = {};

      if (index === 0) {
        meta.showDate = true;
      } else {
        const previousDeployment = deployments.objectAt(index - 1);
        const previousSubmitTime = previousDeployment.get('version.submitTime');
        const submitTime = deployment.get('submitTime');
        if (
          submitTime &&
          previousSubmitTime &&
          moment(previousSubmitTime)
            .startOf('day')
            .diff(moment(submitTime).startOf('day'), 'days') > 0
        ) {
          meta.showDate = true;
        }
      }

      return { deployment, meta };
    });
  }),
});
@@ -0,0 +1,7 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: '',
});
@@ -0,0 +1,16 @@
import Ember from 'ember';

const { Component, computed } = Ember;

export default Component.extend({
  classNames: ['job-diff'],
  classNameBindings: ['isEdited:is-edited', 'isAdded:is-added', 'isDeleted:is-deleted'],

  diff: null,

  verbose: true,

  isEdited: computed.equal('diff.Type', 'Edited'),
  isAdded: computed.equal('diff.Type', 'Added'),
  isDeleted: computed.equal('diff.Type', 'Deleted'),
});
@@ -0,0 +1,24 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: 'tr',
  classNames: ['job-row', 'is-interactive'],

  job: null,

  onClick() {},

  click(event) {
    this.get('onClick')(event);
  },

  didReceiveAttrs() {
    // Reload the job in order to get detail information
    const job = this.get('job');
    if (job) {
      job.reload();
    }
  },
});
@@ -0,0 +1,54 @@
import Ember from 'ember';

const { Component, computed } = Ember;

const changeTypes = ['Added', 'Deleted', 'Edited'];

export default Component.extend({
  classNames: ['job-version', 'boxed-section'],

  version: null,
  isOpen: false,

  // Passes through to the job-diff component
  verbose: true,

  changeCount: computed('version.diff', function() {
    const diff = this.get('version.diff');

    if (!diff) {
      return 0;
    }
    const taskGroups = diff.TaskGroups || [];

    return (
      fieldChanges(diff) +
      taskGroups.reduce(arrayOfFieldChanges, 0) +
      (taskGroups.mapBy('Tasks') || []).reduce(flatten, []).reduce(arrayOfFieldChanges, 0)
    );
  }),

  actions: {
    toggleDiff() {
      this.toggleProperty('isOpen');
    },
  },
});

const flatten = (accumulator, array) => accumulator.concat(array);
const countChanges = (total, field) => (changeTypes.includes(field.Type) ? total + 1 : total);

function fieldChanges(diff) {
  return (
    (diff.Fields || []).reduce(countChanges, 0) +
    (diff.Objects || []).reduce(arrayOfFieldChanges, 0)
  );
}

function arrayOfFieldChanges(count, diff) {
  if (!diff) {
    return count;
  }

  return count + fieldChanges(diff);
}
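
The changeCount recursion above is the trickiest part of this component: changed Fields are counted at each level, and nested Objects are walked with the same counting pair. A self-contained Go mirror of that walk, purely for illustration (the diffNode type and field shapes are simplified stand-ins for the job diff payload):

```go
package main

import "fmt"

// diffNode is a simplified stand-in for a job diff entry: a change Type plus
// nested Fields and Objects, mirroring the shapes walked above.
type diffNode struct {
	Type    string
	Fields  []diffNode
	Objects []diffNode
}

var changeTypes = map[string]bool{"Added": true, "Deleted": true, "Edited": true}

// fieldChanges counts changed Fields at this level plus changes inside
// nested Objects, mirroring the fieldChanges/arrayOfFieldChanges pair.
func fieldChanges(d diffNode) int {
	n := 0
	for _, f := range d.Fields {
		if changeTypes[f.Type] {
			n++
		}
	}
	for _, o := range d.Objects {
		n += fieldChanges(o)
	}
	return n
}

func main() {
	diff := diffNode{
		Fields:  []diffNode{{Type: "Edited"}, {Type: "None"}},
		Objects: []diffNode{{Fields: []diffNode{{Type: "Added"}}}},
	}
	fmt.Println(fieldChanges(diff)) // 2
}
```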
@@ -0,0 +1,36 @@
import Ember from 'ember';
import moment from 'moment';

const { Component, computed } = Ember;

export default Component.extend({
  tagName: 'ol',
  classNames: ['timeline'],

  versions: computed(() => []),

  // Passes through to the job-diff component
  verbose: true,

  annotatedVersions: computed('versions.[]', function() {
    const versions = this.get('versions');
    return versions.map((version, index) => {
      const meta = {};

      if (index === 0) {
        meta.showDate = true;
      } else {
        const previousVersion = versions.objectAt(index - 1);
        if (
          moment(previousVersion.get('submitTime'))
            .startOf('day')
            .diff(moment(version.get('submitTime')).startOf('day'), 'days') > 0
        ) {
          meta.showDate = true;
        }
      }

      return { version, meta };
    });
  }),
});
@@ -0,0 +1,34 @@
import Ember from 'ember';
import JSONFormatterPkg from 'npm:json-formatter-js';

const { Component, computed, run } = Ember;

// json-formatter-js is packaged in a funny way that ember-cli-browserify
// doesn't unwrap properly.
const { default: JSONFormatter } = JSONFormatterPkg;

export default Component.extend({
  classNames: ['json-viewer'],

  json: null,
  expandDepth: 2,

  formatter: computed('json', 'expandDepth', function() {
    return new JSONFormatter(this.get('json'), this.get('expandDepth'), {
      theme: 'nomad',
    });
  }),

  didReceiveAttrs() {
    const json = this.get('json');
    if (!json) {
      return;
    }

    run.scheduleOnce('afterRender', this, embedViewer);
  },
});

function embedViewer() {
  this.$().empty().append(this.get('formatter').render());
}
@ -0,0 +1,44 @@
import Ember from 'ember';

const { Component, computed } = Ember;

export default Component.extend({
  source: computed(() => []),
  size: 25,
  page: 1,
  spread: 2,

  startsAt: computed('size', 'page', function() {
    return (this.get('page') - 1) * this.get('size') + 1;
  }),

  endsAt: computed('source.[]', 'size', 'page', function() {
    return Math.min(this.get('page') * this.get('size'), this.get('source.length'));
  }),

  lastPage: computed('source.[]', 'size', function() {
    return Math.ceil(this.get('source.length') / this.get('size'));
  }),

  pageLinks: computed('source.[]', 'page', 'spread', function() {
    const { spread, page, lastPage } = this.getProperties('spread', 'page', 'lastPage');

    // When there is only one page, don't bother with page links
    if (lastPage === 1) {
      return [];
    }

    const lowerBound = Math.max(1, page - spread);
    const upperBound = Math.min(lastPage, page + spread) + 1;

    return Array(upperBound - lowerBound).fill(null).map((_, index) => ({
      pageNumber: lowerBound + index,
    }));
  }),

  list: computed('source.[]', 'page', 'size', function() {
    const size = this.get('size');
    const start = (this.get('page') - 1) * size;
    return this.get('source').slice(start, start + size);
  }),
});
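A worked example of the pageLinks window, with assumed values:

// page = 7, spread = 2, lastPage = 8
// lowerBound = Math.max(1, 7 - 2)       // 5
// upperBound = Math.min(8, 7 + 2) + 1   // 9 (exclusive)
// Array(9 - 5).fill(null).map(...)      // links for pages 5, 6, 7, 8
// The window clamps at both ends: page = 1 yields 1, 2, 3 rather than
// reaching below the first page.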
@ -0,0 +1,7 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: '',
});
@ -0,0 +1,17 @@
import Ember from 'ember';

const { Component, computed } = Ember;

export default Component.extend({
  tagName: 'table',
  classNames: ['table'],

  source: computed(() => []),

  // Plan for a future with metadata (e.g., isSelected)
  decoratedSource: computed('source.[]', function() {
    return this.get('source').map(row => ({
      model: row,
    }));
  }),
});
@ -0,0 +1,24 @@
import Ember from 'ember';

const { Component, computed } = Ember;

export default Component.extend({
  tagName: 'th',

  // The prop that the table is currently sorted by
  currentProp: '',

  // The prop this sorter controls
  prop: '',

  classNames: ['is-selectable'],
  classNameBindings: ['isActive:is-active', 'sortDescending:desc:asc'],

  isActive: computed('currentProp', 'prop', function() {
    return this.get('currentProp') === this.get('prop');
  }),

  shouldSortDescending: computed('sortDescending', 'isActive', function() {
    return !this.get('isActive') || !this.get('sortDescending');
  }),
});
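Spelling out the shouldSortDescending expression, assuming the parent feeds it back as the next sortDescending on click:

// !isActive || !sortDescending, case by case:
// inactive column, either direction    -> true  (first click sorts descending)
// active column, currently descending  -> false (next click flips to ascending)
// active column, currently ascending   -> true  (next click flips back)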
@ -0,0 +1,7 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: 'tbody',
});
@ -0,0 +1,7 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: 'thead',
});
@ -0,0 +1,27 @@
import Ember from 'ember';

const { Component, computed, run } = Ember;

export default Component.extend({
  // Passed to the component (mutable)
  searchTerm: null,

  // Used as a debounce buffer
  _searchTerm: computed.reads('searchTerm'),

  // Used to throttle sets to searchTerm
  debounce: 150,

  classNames: ['field', 'has-addons'],

  actions: {
    setSearchTerm(e) {
      this.set('_searchTerm', e.target.value);
      run.debounce(this, updateSearch, this.get('debounce'));
    },
  },
});

function updateSearch() {
  this.set('searchTerm', this.get('_searchTerm'));
}
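updateSearch is deliberately a module-level function rather than an inline closure: Ember's run.debounce coalesces pending calls by the (target, method) pair, and a fresh closure per keystroke would never coalesce. A rough timeline, assuming the default 150ms wait:

// keystroke at t=0    -> _searchTerm set, updateSearch queued for t=150
// keystroke at t=100  -> same (target, method) pair, so the queued call
//                        is pushed back to t=250 instead of firing twice
// quiet until t=250   -> updateSearch fires once; searchTerm receives
//                        the final buffered value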
@ -0,0 +1,33 @@
import Ember from 'ember';

const { Component, inject, computed } = Ember;

export default Component.extend({
  // TODO Switch back to the router service style when it is no longer feature-flagged
  // router: inject.service('router'),
  _router: inject.service('-routing'),
  router: computed.alias('_router.router'),

  tagName: 'tr',
  classNames: ['server-agent-row', 'is-interactive'],
  classNameBindings: ['isActive:is-active'],

  agent: null,
  isActive: computed('agent', 'router.currentURL', function() {
    // TODO Switch back to the router service style when it is no longer feature-flagged
    // const targetURL = this.get('router').urlFor('servers.server', this.get('agent'));
    // const currentURL = `${this.get('router.rootURL').slice(0, -1)}${this.get('router.currentURL')}`;

    const router = this.get('router');
    const targetURL = router.generate('servers.server', this.get('agent'));
    const currentURL = `${router.get('rootURL').slice(0, -1)}${router
      .get('currentURL')
      .split('?')[0]}`;

    return currentURL === targetURL;
  }),

  click() {
    this.get('router').transitionTo('servers.server', this.get('agent'));
  },
});
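Two details of the URL comparison, inferred from the code above (example paths are hypothetical):

// rootURL conventionally ends in '/', so .slice(0, -1) drops the trailing
// slash before concatenating with currentURL, which begins with '/'.
// .split('?')[0] strips the query string, e.g.:
//   rootURL = '/ui/', currentURL = '/servers/foo?sort=name'
//   -> compared as '/ui/servers/foo', matching router.generate()'s
//      output, so the row stays highlighted while sort params change.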
@ -0,0 +1,17 @@
import Ember from 'ember';

const { Component } = Ember;

export default Component.extend({
  tagName: 'tr',

  classNames: ['task-group-row', 'is-interactive'],

  taskGroup: null,

  onClick() {},

  click(event) {
    this.get('onClick')(event);
  },
});
@ -0,0 +1,17 @@
import Ember from 'ember';
import Sortable from 'nomad-ui/mixins/sortable';

const { Controller, computed } = Ember;

export default Controller.extend(Sortable, {
  queryParams: {
    sortProperty: 'sort',
    sortDescending: 'desc',
  },

  sortProperty: 'name',
  sortDescending: false,

  listToSort: computed.alias('model.states'),
  sortedStates: computed.alias('listSorted'),
});
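How the queryParams hash is assumed to surface in the URL (route path and values are illustrative): controller properties on the left, URL keys on the right, with defaults left unserialized.

// /some-route?sort=state&desc=true
//   -> sortProperty = 'state', sortDescending = true
// /some-route
//   -> defaults apply ('name', false) and neither param appears in the URL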
@ -0,0 +1,47 @@
import Ember from 'ember';
import FreestyleController from 'ember-freestyle/controllers/freestyle';

const { inject, computed } = Ember;

export default FreestyleController.extend({
  emberFreestyle: inject.service(),

  timerTicks: 0,

  startTimer: function() {
    this.set(
      'timer',
      setInterval(() => {
        this.incrementProperty('timerTicks');
      }, 500)
    );
  }.on('init'),

  stopTimer: function() {
    clearInterval(this.get('timer'));
  }.on('willDestroy'),

  distributionBarData: computed(() => {
    return [
      { label: 'one', value: 10 },
      { label: 'two', value: 20 },
      { label: 'three', value: 30 },
    ];
  }),

  distributionBarDataWithClasses: computed(() => {
    return [
      { label: 'Queued', value: 10, className: 'queued' },
      { label: 'Complete', value: 20, className: 'complete' },
      { label: 'Failed', value: 30, className: 'failed' },
    ];
  }),

  distributionBarDataRotating: computed('timerTicks', () => {
    return [
      { label: 'one', value: Math.round(Math.random() * 50) },
      { label: 'two', value: Math.round(Math.random() * 50) },
      { label: 'three', value: Math.round(Math.random() * 50) },
    ];
  }),
});
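The rotating dataset works by dependency invalidation: each tick bumps timerTicks, which dirties distributionBarDataRotating and re-randomizes the demo chart. A sketch of the assumed lifecycle:

// t = 0    timerTicks = 0, computed result cached
// t = 500  incrementProperty('timerTicks') invalidates the cache; the
//          next template read recomputes three fresh random values
// willDestroy -> clearInterval(this.get('timer')), so the loop dies with
//          the controller instead of leaking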
@ -0,0 +1,38 @@
import Ember from 'ember';
import Sortable from 'nomad-ui/mixins/sortable';
import Searchable from 'nomad-ui/mixins/searchable';

const { Controller, computed } = Ember;

export default Controller.extend(Sortable, Searchable, {
  pendingJobs: computed.filterBy('model', 'status', 'pending'),
  runningJobs: computed.filterBy('model', 'status', 'running'),
  deadJobs: computed.filterBy('model', 'status', 'dead'),

  queryParams: {
    currentPage: 'page',
    searchTerm: 'search',
    sortProperty: 'sort',
    sortDescending: 'desc',
  },

  currentPage: 1,
  pageSize: 10,

  sortProperty: 'modifyIndex',
  sortDescending: true,

  searchProps: computed(() => ['id', 'name']),

  listToSort: computed.alias('model'),
  listToSearch: computed.alias('listSorted'),
  sortedJobs: computed.alias('listSearched'),

  isShowingDeploymentDetails: false,

  actions: {
    gotoJob(job) {
      this.transitionToRoute('jobs.job', job);
    },
  },
});
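The alias chain is the contract between this controller and the two mixins; the intermediate names (listSorted, listSearched) come from the aliases here, since the mixin internals aren't shown in this diff:

// model -> listToSort -> (Sortable: sortProperty/sortDescending) -> listSorted
// listSorted -> listToSearch -> (Searchable: searchTerm over id, name) -> listSearched
// listSearched -> sortedJobs, which the template paginates with
// currentPage/pageSize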
@ -0,0 +1,12 @@
import Ember from 'ember';

const { Controller, computed } = Ember;

export default Controller.extend({
  breadcrumbs: computed('model.{name,id}', function() {
    return [
      { label: 'Jobs', args: ['jobs'] },
      { label: this.get('model.name'), args: ['jobs.job', this.get('model.id')] },
    ];
  }),
});
@ -0,0 +1,11 @@
import Ember from 'ember';

const { Controller, computed, inject } = Ember;

export default Controller.extend({
  jobController: inject.controller('jobs.job'),

  job: computed.alias('model.job'),

  breadcrumbs: computed.alias('jobController.breadcrumbs'),
});
@ -0,0 +1,12 @@
import Ember from 'ember';

const { Controller, computed, inject } = Ember;

export default Controller.extend({
  jobController: inject.controller('jobs.job'),

  job: computed.alias('model'),
  deployments: computed.alias('model.deployments'),

  breadcrumbs: computed.alias('jobController.breadcrumbs'),
});
@ -0,0 +1,37 @@
import Ember from 'ember';
import Sortable from 'nomad-ui/mixins/sortable';

const { Controller, computed, inject } = Ember;

export default Controller.extend(Sortable, {
  jobController: inject.controller('jobs.job'),

  queryParams: {
    currentPage: 'page',
    searchTerm: 'search',
    sortProperty: 'sort',
    sortDescending: 'desc',
  },

  currentPage: 1,
  pageSize: 10,

  sortProperty: 'name',
  sortDescending: false,

  breadcrumbs: computed.alias('jobController.breadcrumbs'),
  job: computed.alias('model'),

  taskGroups: computed('model.taskGroups.[]', function() {
    return this.get('model.taskGroups') || [];
  }),

  listToSort: computed.alias('taskGroups'),
  sortedTaskGroups: computed.alias('listSorted'),

  actions: {
    gotoTaskGroup(taskGroup) {
      this.transitionToRoute('jobs.job.task-group', taskGroup.get('job'), taskGroup);
    },
  },
});
@ -0,0 +1,44 @@
import Ember from 'ember';
import Sortable from 'nomad-ui/mixins/sortable';
import Searchable from 'nomad-ui/mixins/searchable';

const { Controller, computed, inject } = Ember;

export default Controller.extend(Sortable, Searchable, {
  jobController: inject.controller('jobs.job'),

  queryParams: {
    currentPage: 'page',
    searchTerm: 'search',
    sortProperty: 'sort',
    sortDescending: 'desc',
  },

  currentPage: 1,
  pageSize: 10,

  sortProperty: 'name',
  sortDescending: false,

  searchProps: computed(() => ['id', 'name']),

  allocations: computed('model.allocations.[]', function() {
    return this.get('model.allocations') || [];
  }),

  listToSort: computed.alias('allocations'),
  listToSearch: computed.alias('listSorted'),
  sortedAllocations: computed.alias('listSearched'),

  breadcrumbs: computed('jobController.breadcrumbs.[]', 'model.{name}', function() {
    return this.get('jobController.breadcrumbs').concat([
      { label: this.get('model.name'), args: ['jobs.job.task-group', this.get('model.name')] },
    ]);
  }),

  actions: {
    gotoAllocation(allocation) {
      this.transitionToRoute('allocations.allocation', allocation);
    },
  },
});
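A note on the breadcrumbs dependent key: 'model.{name}' uses Ember's brace-expansion syntax with a single key, which expands to plain 'model.name'; the concat builds on the parent controller's crumbs rather than redeclaring them.

// 'model.{name}' expands to 'model.name' (brace expansion with one key).
// Compare computed('model.{name,id}', ...) earlier in this diff, which
// watches both model.name and model.id.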
@ -0,0 +1,12 @@
import Ember from 'ember';

const { Controller, computed, inject } = Ember;

export default Controller.extend({
  jobController: inject.controller('jobs.job'),

  job: computed.alias('model'),
  versions: computed.alias('model.versions'),

  breadcrumbs: computed.alias('jobController.breadcrumbs'),
});
@ -0,0 +1,35 @@
import Ember from 'ember';
import Sortable from 'nomad-ui/mixins/sortable';
import Searchable from 'nomad-ui/mixins/searchable';

const { Controller, computed } = Ember;

export default Controller.extend(Sortable, Searchable, {
  nodes: computed.alias('model.nodes'),
  agents: computed.alias('model.agents'),

  queryParams: {
    currentPage: 'page',
    searchTerm: 'search',
    sortProperty: 'sort',
    sortDescending: 'desc',
  },

  currentPage: 1,
  pageSize: 8,

  sortProperty: 'modifyIndex',
  sortDescending: true,

  searchProps: computed(() => ['id', 'name', 'datacenter']),

  listToSort: computed.alias('nodes'),
  listToSearch: computed.alias('listSorted'),
  sortedNodes: computed.alias('listSearched'),

  actions: {
    gotoNode(node) {
      this.transitionToRoute('nodes.node', node);
    },
  },
});