Merge branch 'master' into f-policy-json

Buck Doyle 2019-08-29 11:11:21 -05:00
commit 8b06712d21
673 changed files with 181011 additions and 39469 deletions


@ -1,5 +1,224 @@
-version: 2
+version: 2.1
references:
common_envs: &COMMON_ENVS
GOMAXPROCS: 1
NOMAD_SLOW_TEST: 1
GOTESTSUM_JUNITFILE: /tmp/test-reports/results.xml
ignore_for_ui_branches: &IGNORE_FOR_UI_BRANCHES
filters:
branches:
ignore: /^.-ui\b.*/
workflows:
build-test:
jobs:
- lint-go:
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
name: "test-client"
test_packages: "./client/..."
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
name: "test-nomad"
test_packages: "./nomad/..."
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
# API Tests run in a VM rather than container due to the FS tests
# requiring `mount` privileges.
name: "test-api"
test_packages: "./api/..."
<<: *IGNORE_FOR_UI_BRANCHES
- test-container:
name: "test-devices"
test_packages: "./devices/..."
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
name: "test-other"
exclude_packages: "./api|./client|./drivers/docker|./drivers/exec|./drivers/rkt|./drivers/shared/executor|./nomad|./devices"
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
name: "test-docker"
test_packages: "./drivers/docker"
# docker is misbehaving in docker-machine-recent image
# and we get unexpected failures
# e.g. https://circleci.com/gh/hashicorp/nomad/3854
executor: go-machine
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
name: "test-exec"
test_packages: "./drivers/exec"
<<: *IGNORE_FOR_UI_BRANCHES
- test-machine:
name: "test-shared-exec"
test_packages: "./drivers/shared/executor"
<<: *IGNORE_FOR_UI_BRANCHES
- test-rkt:
<<: *IGNORE_FOR_UI_BRANCHES
- test-ui
# - build-deps-image:
# context: dani-test
# filters:
# branches:
# only: dani/circleci
website:
jobs:
- build-website:
context: static-sites
filters:
branches:
only: stable-website
executors:
go:
working_directory: /go/src/github.com/hashicorp/nomad
docker:
- image: circleci/golang:1.12.9
go-machine:
working_directory: ~/go/src/github.com/hashicorp/nomad
machine:
image: circleci/classic:201808-01
docker-builder:
working_directory: ~/go/src/github.com/hashicorp/nomad
machine: true # TODO: Find latest docker image id
# uses a more recent image with unattended upgrades disabled properly
# but seems to break docker builds
go-machine-recent:
working_directory: ~/go/src/github.com/hashicorp/nomad
machine:
image: ubuntu-1604:201903-01
jobs: jobs:
build-deps-image:
executor: docker-builder
steps:
- checkout
- run: docker build -t hashicorpnomad/ci-build-image:$CIRCLE_SHA1 . -f ./Dockerfile.ci
- run: docker push hashicorpnomad/ci-build-image:$CIRCLE_SHA1
lint-go:
executor: go
environment:
<<: *COMMON_ENVS
GOPATH: /go
steps:
- checkout
- install-protoc
- run: make deps lint-deps
- run: make check
test-container:
executor: go
parameters:
test_packages:
type: string
default: ""
exclude_packages:
type: string
default: ""
environment:
<<: *COMMON_ENVS
GOTEST_PKGS: "<< parameters.test_packages >>"
GOTEST_PKGS_EXCLUDE: "<< parameters.exclude_packages >>"
GOPATH: /go
steps:
- checkout
- run: make deps
- install-protoc
- install-consul
- install-vault
- run-tests
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-reports
test-rkt:
executor: go-machine-recent
environment:
<<: *COMMON_ENVS
GOTEST_PKGS: "./drivers/rkt"
GOPATH: /home/circleci/go
RKT_VERSION: 1.29.0
steps:
- checkout
- install-golang
- install-protoc
- run:
name: install rkt
command: |
gpg --recv-key 18AD5014C99EF7E3BA5F6CE950BDD3E0FC8A365E
wget https://github.com/rkt/rkt/releases/download/v$RKT_VERSION/rkt_$RKT_VERSION-1_amd64.deb
wget https://github.com/rkt/rkt/releases/download/v$RKT_VERSION/rkt_$RKT_VERSION-1_amd64.deb.asc
gpg --verify rkt_$RKT_VERSION-1_amd64.deb.asc
sudo dpkg -i rkt_$RKT_VERSION-1_amd64.deb
- run: PATH="$GOPATH/bin:/usr/local/go/bin:$PATH" make bootstrap
- run-tests
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-reports
test-machine:
executor: "<< parameters.executor >>"
parameters:
test_packages:
type: string
default: ""
exclude_packages:
type: string
default: ""
executor:
type: string
default: "go-machine-recent"
environment:
<<: *COMMON_ENVS
GOTEST_PKGS_EXCLUDE: "<< parameters.exclude_packages >>"
GOTEST_PKGS: "<< parameters.test_packages >>"
GOPATH: /home/circleci/go
steps:
- checkout
- install-golang
- install-protoc
- install-consul
- install-vault
- run: PATH="$GOPATH/bin:/usr/local/go/bin:$PATH" make bootstrap
- run-tests
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-reports
test-ui:
docker:
- image: circleci/node:10-browsers
environment:
# See https://git.io/vdao3 for details.
JOBS: 2
steps:
- checkout
- restore_cache:
keys:
- v1-deps-{{ checksum "ui/yarn.lock" }}
- v1-deps-
- run:
name: yarn install
command: cd ui && yarn install
- save_cache:
key: v1-deps-{{ checksum "ui/yarn.lock" }}
paths:
- ./ui/node_modules
- run:
name: lint:js
command: cd ui && yarn run lint:js
- run:
name: lint:hbs
command: cd ui && yarn run lint:hbs
- run:
name: Ember tests
command: cd ui && yarn test
build-website:
# setting the working_directory along with the checkout path allows us to not have
# to cd into the website/ directory for commands
@ -32,12 +251,67 @@ jobs:
name: website deploy
command: ./scripts/deploy.sh
-workflows:
-version: 2
-website:
-jobs:
-- build-website:
-context: static-sites
-filters:
-branches:
-only: stable-website
+commands:
+install-golang:
+parameters:
+version:
+type: string
+default: "1.12.9"
+steps:
+- run:
+name: install golang << parameters.version >>
command: |
sudo rm -rf /usr/local/go
wget -q -O /tmp/golang.tar.gz https://dl.google.com/go/go<< parameters.version >>.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf /tmp/golang.tar.gz
rm -rf /tmp/golang.tar.gz
install-vault:
parameters:
version:
type: string
default: 1.0.0
steps:
- run:
name: Install Vault << parameters.version >>
command: |
wget -q -O /tmp/vault.zip https://releases.hashicorp.com/vault/<< parameters.version >>/vault_<< parameters.version>>_linux_amd64.zip
sudo unzip -d /usr/local/bin /tmp/vault.zip
rm -rf /tmp/vault*
install-consul:
parameters:
version:
type: string
default: 1.6.0-rc1
steps:
- run:
name: Install Consul << parameters.version >>
command: |
wget -q -O /tmp/consul.zip https://releases.hashicorp.com/consul/<< parameters.version >>/consul_<< parameters.version >>_linux_amd64.zip
sudo unzip -d /usr/local/bin /tmp/consul.zip
rm -rf /tmp/consul*
install-protoc:
steps:
- run:
name: install protoc
command: |
sudo rm -rf /usr/bin/protoc
sudo ./scripts/vagrant-linux-priv-protoc.sh
run-tests:
steps:
- run:
name: Running Nomad Tests
command: |
if [ -z $GOTEST_PKGS_EXCLUDE ];
then
unset GOTEST_PKGS_EXCLUDE
else
unset GOTEST_PKGS
fi
mkdir -p /tmp/test-reports
sudo -E PATH="$GOPATH/bin:/usr/local/go/bin:$PATH" make generate-structs
sudo -E PATH="$GOPATH/bin:/usr/local/go/bin:$PATH" make test-nomad

.gitignore vendored

@ -60,6 +60,7 @@ nomad_linux_amd64
nomad_darwin_amd64 nomad_darwin_amd64
TODO.md TODO.md
codecgen-*.generated.go codecgen-*.generated.go
GNUMakefile.local
.terraform .terraform
*.tfstate* *.tfstate*


@ -1,3 +0,0 @@
{
"siteId": "442034dd-3749-45d9-992e-480ab871ee28"
}

.netlify/ui-redirects Normal file

@ -0,0 +1,2 @@
/ /ui
/ui/* /ui/index.html 200


@ -28,6 +28,11 @@ matrix:
sudo: required sudo: required
env: GOTEST_PKGS="./client" env: GOTEST_PKGS="./client"
<<: *skip_for_ui_branches <<: *skip_for_ui_branches
- os: linux
dist: xenial
sudo: required
env: GOTEST_PKGS="./command"
<<: *skip_for_ui_branches
- os: linux - os: linux
dist: xenial dist: xenial
sudo: required sudo: required
@ -46,7 +51,7 @@ matrix:
- os: linux - os: linux
dist: xenial dist: xenial
sudo: required sudo: required
-env: GOTEST_PKGS_EXCLUDE="./api|./client|./drivers/docker|./drivers/exec|./nomad"
+env: GOTEST_PKGS_EXCLUDE="./api|./client|./command|./drivers/docker|./drivers/exec|./nomad"
<<: *skip_for_ui_branches <<: *skip_for_ui_branches
- os: linux - os: linux
dist: xenial dist: xenial


@ -1,4 +1,40 @@
-## 0.9.4 (Unreleased)
+## 0.10.0 (Unreleased)
IMPROVEMENTS:
* agent: allow the job GC interval to be configured [[GH-5978](https://github.com/hashicorp/nomad/issues/5978)]
* agent: add `-dev=connect` parameter to support running in dev mode with Consul Connect [[GH-6126](https://github.com/hashicorp/nomad/issues/6126)]
* api: add follow parameter to file streaming endpoint to support older browsers [[GH-6049](https://github.com/hashicorp/nomad/issues/6049)]
* metrics: Add job status (pending, running, dead) metrics [[GH-6003](https://github.com/hashicorp/nomad/issues/6003)]
* ui: Add creation time to evaluations table [[GH-6050](https://github.com/hashicorp/nomad/pull/6050)]
BUG FIXES:
* command/run: Fixed `nomad run ...` on Windows so it works with unprivileged accounts [[GH-6009](https://github.com/hashicorp/nomad/issues/6009)]
* ui: Fixed navigation via clicking recent allocation row [[GH-6087](https://github.com/hashicorp/nomad/pull/6087)]
* ui: Fixed links containing IPv6 addresses to include required square brackets [[GH-6007](https://github.com/hashicorp/nomad/pull/6007)]
## 0.9.5 (21 August 2019)
SECURITY:
* client/template: Fix security vulnerabilities associated with task template rendering (CVE-2019-14802), introduced in Nomad 0.5.0 [[GH-6055](https://github.com/hashicorp/nomad/issues/6055)] [[GH-6075](https://github.com/hashicorp/nomad/issues/6075)]
* client/artifact: Fix a privilege escalation in the `exec` driver exploitable by artifacts with setuid permissions (CVE-2019-14803) [[GH-6176](https://github.com/hashicorp/nomad/issues/6176)]
__BACKWARDS INCOMPATIBILITIES:__
* client/template: When rendering a task template, only task environment variables are included by default. [[GH-6055](https://github.com/hashicorp/nomad/issues/6055)]
* client/template: When rendering a task template, the `plugin` function is no longer permitted by default and will raise an error. [[GH-6075](https://github.com/hashicorp/nomad/issues/6075)]
* client/template: When rendering a task template, path parameters for the `file` function will be restricted to the task directory by default. Relative paths or symlinks that point outside the task directory will raise an error. [[GH-6075](https://github.com/hashicorp/nomad/issues/6075)]
IMPROVEMENTS:
* core: Added create and modify timestamps to evaluations [[GH-5881](https://github.com/hashicorp/nomad/pull/5881)]
BUG FIXES:
* api: Fixed job region to default to client node region if none provided [[GH-6064](https://github.com/hashicorp/nomad/pull/6064)]
* ui: Fixed links containing IPv6 addresses to include required square brackets [[GH-6007](https://github.com/hashicorp/nomad/pull/6007)]
* vault: Fix deadlock when reloading server Vault configuration [[GH-6082](https://github.com/hashicorp/nomad/issues/6082)]
## 0.9.4 (July 30, 2019)
IMPROVEMENTS: IMPROVEMENTS:
* api: Inferred content type of file in alloc filesystem stat endpoint [[GH-5907](https://github.com/hashicorp/nomad/issues/5907)] * api: Inferred content type of file in alloc filesystem stat endpoint [[GH-5907](https://github.com/hashicorp/nomad/issues/5907)]
@ -6,13 +42,14 @@ IMPROVEMENTS:
* core: Deregister nodes in batches rather than one at a time [[GH-5784](https://github.com/hashicorp/nomad/pull/5784)] * core: Deregister nodes in batches rather than one at a time [[GH-5784](https://github.com/hashicorp/nomad/pull/5784)]
* core: Removed deprecated upgrade path code pertaining to older versions of Nomad [[GH-5894](https://github.com/hashicorp/nomad/issues/5894)] * core: Removed deprecated upgrade path code pertaining to older versions of Nomad [[GH-5894](https://github.com/hashicorp/nomad/issues/5894)]
* core: System jobs that fail because of resource availability are retried when resources are freed [[GH-5900](https://github.com/hashicorp/nomad/pull/5900)] * core: System jobs that fail because of resource availability are retried when resources are freed [[GH-5900](https://github.com/hashicorp/nomad/pull/5900)]
* core: Support reloading log level in agent via SIGHUP [[GH-5996](https://github.com/hashicorp/nomad/issues/5996)]
* client: Improved task event display message to include kill time out [[GH-5943](https://github.com/hashicorp/nomad/issues/5943)] * client: Improved task event display message to include kill time out [[GH-5943](https://github.com/hashicorp/nomad/issues/5943)]
* client: Removed extraneous information to improve formatting for hcl parsing error messages [[GH-5972](https://github.com/hashicorp/nomad/pull/5972)] * client: Removed extraneous information to improve formatting for hcl parsing error messages [[GH-5972](https://github.com/hashicorp/nomad/pull/5972)]
* driver/docker: Added logging defaults to use json-file log driver with log rotation [[GH-5846](https://github.com/hashicorp/nomad/pull/5846)] * driver/docker: Added logging defaults to use json-file log driver with log rotation [[GH-5846](https://github.com/hashicorp/nomad/pull/5846)]
* metrics: Added namespace label as appropriate to metrics [[GH-5847](https://github.com/hashicorp/nomad/issues/5847)] * metrics: Added namespace label as appropriate to metrics [[GH-5847](https://github.com/hashicorp/nomad/issues/5847)]
-* ui: Moved client status, draining, and eligibility fields into single state column [[GH-5789](https://github.com/hashicorp/nomad/pull/5789)]
-* ui: Added buttons to copy client and allocation UUIDs [[GH-5926](https://github.com/hashicorp/nomad/pull/5926)]
* ui: Added page titles [[GH-5924](https://github.com/hashicorp/nomad/pull/5924)]
+* ui: Added buttons to copy client and allocation UUIDs [[GH-5926](https://github.com/hashicorp/nomad/pull/5926)]
+* ui: Moved client status, draining, and eligibility fields into single state column [[GH-5789](https://github.com/hashicorp/nomad/pull/5789)]
BUG FIXES: BUG FIXES:
@ -34,7 +71,7 @@ BUG FIXES:
* driver: Fixed an issue preventing external driver plugins from launching executor process [[GH-5726](https://github.com/hashicorp/nomad/issues/5726)] * driver: Fixed an issue preventing external driver plugins from launching executor process [[GH-5726](https://github.com/hashicorp/nomad/issues/5726)]
* driver/docker: Fixed a bug mounting relative paths on Windows [[GH-5811](https://github.com/hashicorp/nomad/issues/5811)] * driver/docker: Fixed a bug mounting relative paths on Windows [[GH-5811](https://github.com/hashicorp/nomad/issues/5811)]
* driver/exec: Upgraded libcontainer dependency to avoid zombie `runc:[1:CHILD]]` processes [[GH-5851](https://github.com/hashicorp/nomad/issues/5851)] * driver/exec: Upgraded libcontainer dependency to avoid zombie `runc:[1:CHILD]]` processes [[GH-5851](https://github.com/hashicorp/nomad/issues/5851)]
* metrics: Added metrics for raft and state store indexes. [[GH-5841](https://github.com/hashicorp/nomad/issues/5841)] * metrics: Added metrics for raft and state store indexes. [[GH-5841](https://github.com/hashicorp/nomad/issues/5841)]
* metrics: Upgrade prometheus client to avoid label conflicts [[GH-5850](https://github.com/hashicorp/nomad/issues/5850)] * metrics: Upgrade prometheus client to avoid label conflicts [[GH-5850](https://github.com/hashicorp/nomad/issues/5850)]
* ui: Fixed ability to click sort arrow to change sort direction [[GH-5833](https://github.com/hashicorp/nomad/pull/5833)] * ui: Fixed ability to click sort arrow to change sort direction [[GH-5833](https://github.com/hashicorp/nomad/pull/5833)]
@ -1617,4 +1654,3 @@ BUG FIXES:
## 0.1.0 (September 28, 2015) ## 0.1.0 (September 28, 2015)
* Initial release * Initial release


@ -6,7 +6,7 @@ GIT_COMMIT := $(shell git rev-parse HEAD)
GIT_DIRTY := $(if $(shell git status --porcelain),+CHANGES) GIT_DIRTY := $(if $(shell git status --porcelain),+CHANGES)
GO_LDFLAGS := "-X github.com/hashicorp/nomad/version.GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)" GO_LDFLAGS := "-X github.com/hashicorp/nomad/version.GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)"
-GO_TAGS =
+GO_TAGS ?=
GO_TEST_CMD = $(if $(shell which gotestsum),gotestsum --,go test) GO_TEST_CMD = $(if $(shell which gotestsum),gotestsum --,go test)
@ -25,10 +25,10 @@ endif
# On Linux we build for Linux and Windows # On Linux we build for Linux and Windows
ifeq (Linux,$(THIS_OS)) ifeq (Linux,$(THIS_OS))
-ifeq ($(TRAVIS),true)
-$(info Running in Travis, verbose mode is disabled)
+ifeq ($(CI),true)
+$(info Running in a CI environment, verbose mode is disabled)
else else
VERBOSE="true" VERBOSE="true"
endif endif
@ -51,6 +51,9 @@ ifeq (FreeBSD,$(THIS_OS))
ALL_TARGETS += freebsd_amd64 ALL_TARGETS += freebsd_amd64
endif endif
# include per-user customization after all variables are defined
-include GNUMakefile.local
pkg/darwin_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for darwin/amd64 pkg/darwin_amd64/nomad: $(SOURCE_FILES) ## Build Nomad for darwin/amd64
@echo "==> Building $@ with tags $(GO_TAGS)..." @echo "==> Building $@ with tags $(GO_TAGS)..."
@CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 \ @CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 \
@ -199,7 +202,7 @@ checkscripts: ## Lint shell scripts
@find scripts -type f -name '*.sh' | xargs shellcheck @find scripts -type f -name '*.sh' | xargs shellcheck
.PHONY: generate-all .PHONY: generate-all
-generate-all: generate-structs proto
+generate-all: generate-structs proto generate-examples
.PHONY: generate-structs .PHONY: generate-structs
generate-structs: LOCAL_PACKAGES = $(shell go list ./... | grep -v '/vendor/') generate-structs: LOCAL_PACKAGES = $(shell go list ./... | grep -v '/vendor/')
@ -214,6 +217,11 @@ proto:
protoc -I . -I ../../.. --go_out=plugins=grpc:. $$file; \ protoc -I . -I ../../.. --go_out=plugins=grpc:. $$file; \
done done
.PHONY: generate-examples
generate-examples: command/job_init.bindata_assetfs.go
command/job_init.bindata_assetfs.go: command/assets/*
go-bindata-assetfs -pkg command -o command/job_init.bindata_assetfs.go ./command/assets/...
vendorfmt: vendorfmt:
@echo "--> Formatting vendor/vendor.json" @echo "--> Formatting vendor/vendor.json"
@ -236,7 +244,7 @@ dev: vendorfmt changelogfmt ## Build for the current development platform
@rm -f $(GOPATH)/bin/nomad @rm -f $(GOPATH)/bin/nomad
@$(MAKE) --no-print-directory \ @$(MAKE) --no-print-directory \
$(DEV_TARGET) \ $(DEV_TARGET) \
GO_TAGS="$(NOMAD_UI_TAG)" GO_TAGS="$(GO_TAGS) $(NOMAD_UI_TAG)"
@mkdir -p $(PROJECT_ROOT)/bin @mkdir -p $(PROJECT_ROOT)/bin
@mkdir -p $(GOPATH)/bin @mkdir -p $(GOPATH)/bin
@cp $(PROJECT_ROOT)/$(DEV_TARGET) $(PROJECT_ROOT)/bin/ @cp $(PROJECT_ROOT)/$(DEV_TARGET) $(PROJECT_ROOT)/bin/
@ -356,6 +364,7 @@ help: ## Display this usage information
@echo "This host will build the following targets if 'make release' is invoked:" @echo "This host will build the following targets if 'make release' is invoked:"
@echo $(ALL_TARGETS) | sed 's/^/ /' @echo $(ALL_TARGETS) | sed 's/^/ /'
.PHONY: ui-screenshots
ui-screenshots: ui-screenshots:
@echo "==> Collecting UI screenshots..." @echo "==> Collecting UI screenshots..."
# Build the screenshots image if it doesn't exist yet # Build the screenshots image if it doesn't exist yet
@ -367,6 +376,7 @@ ui-screenshots:
--volume "$(shell pwd)/scripts/screenshots/screenshots:/screenshots" \ --volume "$(shell pwd)/scripts/screenshots/screenshots:/screenshots" \
nomad-ui-screenshots nomad-ui-screenshots
.PHONY: ui-screenshots-local
ui-screenshots-local: ui-screenshots-local:
@echo "==> Collecting UI screenshots (local)..." @echo "==> Collecting UI screenshots (local)..."
@cd scripts/screenshots/src && SCREENSHOTS_DIR="../screenshots" node index.js @cd scripts/screenshots/src && SCREENSHOTS_DIR="../screenshots" node index.js


@ -140,7 +140,7 @@ Who Uses Nomad
Contributing to Nomad Contributing to Nomad
-------------------- --------------------
-If you wish to contribute to Nomad, you will need [Go](https://www.golang.org) installed on your machine (version 1.11.11+ is *required*).
+If you wish to contribute to Nomad, you will need [Go](https://www.golang.org) installed on your machine (version 1.12.9+ is *required*).
See the [`contributing`](contributing/) directory for more developer documentation. See the [`contributing`](contributing/) directory for more developer documentation.


@ -51,6 +51,13 @@ type ACL struct {
// We use an iradix for the purposes of ordered iteration. // We use an iradix for the purposes of ordered iteration.
wildcardNamespaces *iradix.Tree wildcardNamespaces *iradix.Tree
// hostVolumes maps a named host volume to a capabilitySet
hostVolumes *iradix.Tree
// wildcardHostVolumes maps a glob pattern of host volume names to a capabilitySet
// We use an iradix for the purposes of ordered iteration.
wildcardHostVolumes *iradix.Tree
agent string agent string
node string node string
operator string operator string
@ -83,6 +90,8 @@ func NewACL(management bool, policies []*Policy) (*ACL, error) {
acl := &ACL{} acl := &ACL{}
nsTxn := iradix.New().Txn() nsTxn := iradix.New().Txn()
wnsTxn := iradix.New().Txn() wnsTxn := iradix.New().Txn()
hvTxn := iradix.New().Txn()
whvTxn := iradix.New().Txn()
for _, policy := range policies { for _, policy := range policies {
NAMESPACES: NAMESPACES:
@ -128,6 +137,49 @@ func NewACL(management bool, policies []*Policy) (*ACL, error) {
} }
} }
HOSTVOLUMES:
for _, hv := range policy.HostVolumes {
// Should the volume be matched using a glob?
globDefinition := strings.Contains(hv.Name, "*")
// Check for existing capabilities
var capabilities capabilitySet
if globDefinition {
raw, ok := whvTxn.Get([]byte(hv.Name))
if ok {
capabilities = raw.(capabilitySet)
} else {
capabilities = make(capabilitySet)
whvTxn.Insert([]byte(hv.Name), capabilities)
}
} else {
raw, ok := hvTxn.Get([]byte(hv.Name))
if ok {
capabilities = raw.(capabilitySet)
} else {
capabilities = make(capabilitySet)
hvTxn.Insert([]byte(hv.Name), capabilities)
}
}
// Deny always takes precedence
if capabilities.Check(HostVolumeCapabilityDeny) {
continue
}
// Add in all the capabilities
for _, cap := range hv.Capabilities {
if cap == HostVolumeCapabilityDeny {
// Overwrite any existing capabilities
capabilities.Clear()
capabilities.Set(HostVolumeCapabilityDeny)
continue HOSTVOLUMES
}
capabilities.Set(cap)
}
}
// Take the maximum privilege for agent, node, and operator // Take the maximum privilege for agent, node, and operator
if policy.Agent != nil { if policy.Agent != nil {
acl.agent = maxPrivilege(acl.agent, policy.Agent.Policy) acl.agent = maxPrivilege(acl.agent, policy.Agent.Policy)
@ -146,6 +198,9 @@ func NewACL(management bool, policies []*Policy) (*ACL, error) {
// Finalize the namespaces // Finalize the namespaces
acl.namespaces = nsTxn.Commit() acl.namespaces = nsTxn.Commit()
acl.wildcardNamespaces = wnsTxn.Commit() acl.wildcardNamespaces = wnsTxn.Commit()
acl.hostVolumes = hvTxn.Commit()
acl.wildcardHostVolumes = whvTxn.Commit()
return acl, nil return acl, nil
} }
@ -162,7 +217,7 @@ func (a *ACL) AllowNamespaceOperation(ns string, op string) bool {
} }
// Check for a matching capability set // Check for a matching capability set
-capabilities, ok := a.matchingCapabilitySet(ns)
+capabilities, ok := a.matchingNamespaceCapabilitySet(ns)
if !ok { if !ok {
return false return false
} }
@ -179,7 +234,7 @@ func (a *ACL) AllowNamespace(ns string) bool {
} }
// Check for a matching capability set // Check for a matching capability set
-capabilities, ok := a.matchingCapabilitySet(ns)
+capabilities, ok := a.matchingNamespaceCapabilitySet(ns)
if !ok { if !ok {
return false return false
} }
@ -192,12 +247,50 @@ func (a *ACL) AllowNamespace(ns string) bool {
return !capabilities.Check(PolicyDeny) return !capabilities.Check(PolicyDeny)
} }
-// matchingCapabilitySet looks for a capabilitySet that matches the namespace,
+// AllowHostVolumeOperation checks if a given operation is allowed for a host volume
func (a *ACL) AllowHostVolumeOperation(hv string, op string) bool {
// Hot path management tokens
if a.management {
return true
}
// Check for a matching capability set
capabilities, ok := a.matchingHostVolumeCapabilitySet(hv)
if !ok {
return false
}
// Check if the capability has been granted
return capabilities.Check(op)
}
// AllowHostVolume checks if any operations are allowed for a HostVolume
func (a *ACL) AllowHostVolume(ns string) bool {
// Hot path management tokens
if a.management {
return true
}
// Check for a matching capability set
capabilities, ok := a.matchingHostVolumeCapabilitySet(ns)
if !ok {
return false
}
// Check if the capability has been granted
if len(capabilities) == 0 {
return false
}
return !capabilities.Check(PolicyDeny)
}
// matchingNamespaceCapabilitySet looks for a capabilitySet that matches the namespace,
// if no concrete definitions are found, then we return the closest matching
// glob.
// The closest matching glob is the one that has the smallest character
// difference between the namespace and the glob.
-func (a *ACL) matchingCapabilitySet(ns string) (capabilitySet, bool) {
+func (a *ACL) matchingNamespaceCapabilitySet(ns string) (capabilitySet, bool) {
// Check for a concrete matching capability set // Check for a concrete matching capability set
raw, ok := a.namespaces.Get([]byte(ns)) raw, ok := a.namespaces.Get([]byte(ns))
if ok { if ok {
@ -205,18 +298,34 @@ func (a *ACL) matchingCapabilitySet(ns string) (capabilitySet, bool) {
} }
// We didn't find a concrete match, so lets try and evaluate globs. // We didn't find a concrete match, so lets try and evaluate globs.
-return a.findClosestMatchingGlob(ns)
+return a.findClosestMatchingGlob(a.wildcardNamespaces, ns)
}
// matchingHostVolumeCapabilitySet looks for a capabilitySet that matches the host volume name,
// if no concrete definitions are found, then we return the closest matching
// glob.
// The closest matching glob is the one that has the smallest character
// difference between the volume name and the glob.
func (a *ACL) matchingHostVolumeCapabilitySet(name string) (capabilitySet, bool) {
// Check for a concrete matching capability set
raw, ok := a.hostVolumes.Get([]byte(name))
if ok {
return raw.(capabilitySet), true
}
// We didn't find a concrete match, so lets try and evaluate globs.
return a.findClosestMatchingGlob(a.wildcardHostVolumes, name)
} }
type matchingGlob struct {
-ns string
+name string
difference int
capabilitySet capabilitySet
}
-func (a *ACL) findClosestMatchingGlob(ns string) (capabilitySet, bool) {
+func (a *ACL) findClosestMatchingGlob(radix *iradix.Tree, ns string) (capabilitySet, bool) {
// First, find all globs that match.
-matchingGlobs := a.findAllMatchingWildcards(ns)
+matchingGlobs := findAllMatchingWildcards(radix, ns)
// If none match, let's return.
if len(matchingGlobs) == 0 {
@ -238,19 +347,19 @@ func (a *ACL) findClosestMatchingGlob(ns string) (capabilitySet, bool) {
return matchingGlobs[0].capabilitySet, true
}
-func (a *ACL) findAllMatchingWildcards(ns string) []matchingGlob {
+func findAllMatchingWildcards(radix *iradix.Tree, name string) []matchingGlob {
var matches []matchingGlob
-nsLen := len(ns)
+nsLen := len(name)
-a.wildcardNamespaces.Root().Walk(func(bk []byte, iv interface{}) bool {
+radix.Root().Walk(func(bk []byte, iv interface{}) bool {
k := string(bk)
v := iv.(capabilitySet)
-isMatch := glob.Glob(k, ns)
+isMatch := glob.Glob(k, name)
if isMatch {
pair := matchingGlob{
-ns: k,
+name: k,
difference: nsLen - len(k) + strings.Count(k, glob.GLOB),
capabilitySet: v,
}
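Taken together, the host volume additions above mirror the existing namespace flow: parse an HCL policy, compile it with NewACL, then check a fine-grained capability against a concrete or globbed volume name. A minimal sketch of that flow in Go (the import path github.com/hashicorp/nomad/acl and the sample volume name are assumptions for illustration, not part of this diff):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/acl"
)

func main() {
	// A glob policy granting read-only mounts on matching host volumes.
	policy, err := acl.Parse(`host_volume "prod-api-*" { policy = "read" }`)
	if err != nil {
		panic(err)
	}

	// Compile into an ACL; management = false so the policy is consulted.
	a, err := acl.NewACL(false, []*acl.Policy{policy})
	if err != nil {
		panic(err)
	}

	// "read" expands to mount-readonly only, so readwrite is refused.
	fmt.Println(a.AllowHostVolumeOperation("prod-api-services", acl.HostVolumeCapabilityMountReadOnly))  // true
	fmt.Println(a.AllowHostVolumeOperation("prod-api-services", acl.HostVolumeCapabilityMountReadWrite)) // false
}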


@ -314,6 +314,56 @@ func TestWildcardNamespaceMatching(t *testing.T) {
} }
} }
func TestWildcardHostVolumeMatching(t *testing.T) {
tests := []struct {
Policy string
Allow bool
}{
{ // Wildcard matches
Policy: `host_volume "prod-api-*" { policy = "write" }`,
Allow: true,
},
{ // Non globbed volumes are not wildcards
Policy: `host_volume "prod-api" { policy = "write" }`,
Allow: false,
},
{ // Concrete matches take precedence
Policy: `host_volume "prod-api-services" { policy = "deny" }
host_volume "prod-api-*" { policy = "write" }`,
Allow: false,
},
{
Policy: `host_volume "prod-api-*" { policy = "deny" }
host_volume "prod-api-services" { policy = "write" }`,
Allow: true,
},
{ // The closest character match wins
Policy: `host_volume "*-api-services" { policy = "deny" }
host_volume "prod-api-*" { policy = "write" }`, // 4 vs 8 chars
Allow: false,
},
{
Policy: `host_volume "prod-api-*" { policy = "write" }
host_volume "*-api-services" { policy = "deny" }`, // 4 vs 8 chars
Allow: false,
},
}
for _, tc := range tests {
t.Run(tc.Policy, func(t *testing.T) {
assert := assert.New(t)
policy, err := Parse(tc.Policy)
assert.NoError(err)
assert.NotNil(policy.HostVolumes)
acl, err := NewACL(false, []*Policy{policy})
assert.Nil(err)
assert.Equal(tc.Allow, acl.AllowHostVolume("prod-api-services"))
})
}
}
func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) { func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) {
tests := []struct { tests := []struct {
Policy string Policy string
@ -351,8 +401,8 @@ func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) {
assert.Nil(err) assert.Nil(err)
var namespaces []string var namespaces []string
-for _, cs := range acl.findAllMatchingWildcards(tc.NS) {
-namespaces = append(namespaces, cs.ns)
+for _, cs := range findAllMatchingWildcards(acl.wildcardNamespaces, tc.NS) {
+namespaces = append(namespaces, cs.name)
} }
assert.Equal(tc.MatchingGlobs, namespaces) assert.Equal(tc.MatchingGlobs, namespaces)
@ -404,7 +454,7 @@ func TestACL_matchingCapabilitySet_difference(t *testing.T) {
acl, err := NewACL(false, []*Policy{policy}) acl, err := NewACL(false, []*Policy{policy})
assert.Nil(err) assert.Nil(err)
-matches := acl.findAllMatchingWildcards(tc.NS)
+matches := findAllMatchingWildcards(acl.wildcardNamespaces, tc.NS)
assert.Equal(tc.Difference, matches[0].difference) assert.Equal(tc.Difference, matches[0].difference)
}) })
} }
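The last two cases in TestWildcardHostVolumeMatching hinge on the character-difference metric computed in findAllMatchingWildcards: difference = len(name) - len(pattern) + number of glob characters, and the smallest difference wins. A standalone sketch of that arithmetic (the helper function is illustrative only, not part of the package):

package main

import (
	"fmt"
	"strings"
)

// difference mirrors the metric in findAllMatchingWildcards: smaller is a closer match.
func difference(name, pattern string) int {
	return len(name) - len(pattern) + strings.Count(pattern, "*")
}

func main() {
	name := "prod-api-services"
	fmt.Println(difference(name, "*-api-services")) // 17 - 14 + 1 = 4, closer, so its "deny" wins
	fmt.Println(difference(name, "prod-api-*"))     // 17 - 10 + 1 = 8
}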


@ -21,6 +21,7 @@ const (
// The Policy stanza is a short hand for granting several of these. When capabilities are // The Policy stanza is a short hand for granting several of these. When capabilities are
// combined we take the union of all capabilities. If the deny capability is present, it // combined we take the union of all capabilities. If the deny capability is present, it
// takes precedence and overwrites all other capabilities. // takes precedence and overwrites all other capabilities.
NamespaceCapabilityDeny = "deny" NamespaceCapabilityDeny = "deny"
NamespaceCapabilityListJobs = "list-jobs" NamespaceCapabilityListJobs = "list-jobs"
NamespaceCapabilityReadJob = "read-job" NamespaceCapabilityReadJob = "read-job"
@ -38,20 +39,37 @@ var (
validNamespace = regexp.MustCompile("^[a-zA-Z0-9-*]{1,128}$") validNamespace = regexp.MustCompile("^[a-zA-Z0-9-*]{1,128}$")
) )
const (
// The following are the fine-grained capabilities that can be granted for a volume set.
// The Policy stanza is a short hand for granting several of these. When capabilities are
// combined we take the union of all capabilities. If the deny capability is present, it
// takes precedence and overwrites all other capabilities.
HostVolumeCapabilityDeny = "deny"
HostVolumeCapabilityMountReadOnly = "mount-readonly"
HostVolumeCapabilityMountReadWrite = "mount-readwrite"
)
var (
validVolume = regexp.MustCompile("^[a-zA-Z0-9-*]{1,128}$")
)
// Policy represents a parsed HCL or JSON policy.
type Policy struct {
Namespaces []*NamespacePolicy `hcl:"namespace,expand"`
+HostVolumes []*HostVolumePolicy `hcl:"host_volume,expand"`
Agent *AgentPolicy `hcl:"agent"`
Node *NodePolicy `hcl:"node"`
Operator *OperatorPolicy `hcl:"operator"`
Quota *QuotaPolicy `hcl:"quota"`
Raw string `hcl:"-"`
}
// IsEmpty checks to make sure that at least one policy has been set and is not // IsEmpty checks to make sure that at least one policy has been set and is not
// comprised of only a raw policy. // comprised of only a raw policy.
func (p *Policy) IsEmpty() bool { func (p *Policy) IsEmpty() bool {
return len(p.Namespaces) == 0 && return len(p.Namespaces) == 0 &&
len(p.HostVolumes) == 0 &&
p.Agent == nil && p.Agent == nil &&
p.Node == nil && p.Node == nil &&
p.Operator == nil && p.Operator == nil &&
@ -65,6 +83,13 @@ type NamespacePolicy struct {
Capabilities []string Capabilities []string
} }
// HostVolumePolicy is the policy for a specific named host volume
type HostVolumePolicy struct {
Name string `hcl:",key"`
Policy string
Capabilities []string
}
type AgentPolicy struct { type AgentPolicy struct {
Policy string Policy string
} }
@ -134,6 +159,28 @@ func expandNamespacePolicy(policy string) []string {
} }
} }
func isHostVolumeCapabilityValid(cap string) bool {
switch cap {
case HostVolumeCapabilityDeny, HostVolumeCapabilityMountReadOnly, HostVolumeCapabilityMountReadWrite:
return true
default:
return false
}
}
func expandHostVolumePolicy(policy string) []string {
switch policy {
case PolicyDeny:
return []string{HostVolumeCapabilityDeny}
case PolicyRead:
return []string{HostVolumeCapabilityMountReadOnly}
case PolicyWrite:
return []string{HostVolumeCapabilityMountReadOnly, HostVolumeCapabilityMountReadWrite}
default:
return nil
}
}
// Parse is used to parse the specified ACL rules into an // Parse is used to parse the specified ACL rules into an
// intermediary set of policies, before being compiled into // intermediary set of policies, before being compiled into
// the ACL // the ACL
@ -178,6 +225,27 @@ func Parse(rules string) (*Policy, error) {
} }
} }
for _, hv := range p.HostVolumes {
if !validVolume.MatchString(hv.Name) {
return nil, fmt.Errorf("Invalid host volume name: %#v", hv)
}
if hv.Policy != "" && !isPolicyValid(hv.Policy) {
return nil, fmt.Errorf("Invalid host volume policy: %#v", hv)
}
for _, cap := range hv.Capabilities {
if !isHostVolumeCapabilityValid(cap) {
return nil, fmt.Errorf("Invalid host volume capability '%s': %#v", cap, hv)
}
}
// Expand the short hand policy to the capabilities and
// add to any existing capabilities
if hv.Policy != "" {
extraCap := expandHostVolumePolicy(hv.Policy)
hv.Capabilities = append(hv.Capabilities, extraCap...)
}
}
if p.Agent != nil && !isPolicyValid(p.Agent.Policy) { if p.Agent != nil && !isPolicyValid(p.Agent.Policy) {
return nil, fmt.Errorf("Invalid agent policy: %#v", p.Agent) return nil, fmt.Errorf("Invalid agent policy: %#v", p.Agent)
} }
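The policy shorthand on a host_volume block expands into the fine-grained capabilities defined above: read becomes mount-readonly, write becomes mount-readonly plus mount-readwrite, and deny overrides everything; explicitly listed capabilities are kept alongside the expansion. A small sketch of that expansion (the import path and the volume name are assumed for illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/acl"
)

func main() {
	// "write" is shorthand; Parse appends the expanded capabilities.
	policy, err := acl.Parse(`host_volume "shared-data" { policy = "write" }`)
	if err != nil {
		panic(err)
	}

	hv := policy.HostVolumes[0]
	fmt.Println(hv.Name)         // shared-data
	fmt.Println(hv.Capabilities) // [mount-readonly mount-readwrite]
}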


@ -199,6 +199,53 @@ func TestParse(t *testing.T) {
}, },
}, },
}, },
{
`
host_volume "production-tls-*" {
capabilities = ["mount-readonly"]
}
`,
"",
&Policy{
HostVolumes: []*HostVolumePolicy{
{
Name: "production-tls-*",
Policy: "",
Capabilities: []string{
HostVolumeCapabilityMountReadOnly,
},
},
},
},
},
{
`
host_volume "production-tls-*" {
capabilities = ["mount-readwrite"]
}
`,
"",
&Policy{
HostVolumes: []*HostVolumePolicy{
{
Name: "production-tls-*",
Policy: "",
Capabilities: []string{
HostVolumeCapabilityMountReadWrite,
},
},
},
},
},
{
`
host_volume "volume has a space" {
capabilities = ["mount-readwrite"]
}
`,
"Invalid host volume name",
nil,
},
} }
for idx, tc := range tcases { for idx, tc := range tcases {


@ -458,7 +458,8 @@ type AllocatedTaskResources struct {
} }
type AllocatedSharedResources struct { type AllocatedSharedResources struct {
DiskMB int64 DiskMB int64
Networks []*NetworkResource
} }
type AllocatedCpuResources struct { type AllocatedCpuResources struct {


@ -453,8 +453,8 @@ func (c *Client) getNodeClientImpl(nodeID string, timeout time.Duration, q *Quer
// If the client is configured for a particular region use that // If the client is configured for a particular region use that
region = c.config.Region region = c.config.Region
default: default:
-// No region information is given so use the default.
-region = "global"
+// No region information is given so use GlobalRegion as the default.
+region = GlobalRegion
} }
// Get an API client for the node // Get an API client for the node


@ -20,7 +20,7 @@ func TestCompose(t *testing.T) {
{ {
CIDR: "0.0.0.0/0", CIDR: "0.0.0.0/0",
MBits: intToPtr(100), MBits: intToPtr(100),
ReservedPorts: []Port{{"", 80}, {"", 443}}, ReservedPorts: []Port{{"", 80, 0}, {"", 443, 0}},
}, },
}, },
}) })
@ -111,8 +111,8 @@ func TestCompose(t *testing.T) {
CIDR: "0.0.0.0/0", CIDR: "0.0.0.0/0",
MBits: intToPtr(100), MBits: intToPtr(100),
ReservedPorts: []Port{ ReservedPorts: []Port{
{"", 80}, {"", 80, 0},
{"", 443}, {"", 443, 0},
}, },
}, },
}, },


@ -80,6 +80,8 @@ type Evaluation struct {
SnapshotIndex uint64 SnapshotIndex uint64
CreateIndex uint64 CreateIndex uint64
ModifyIndex uint64 ModifyIndex uint64
CreateTime int64
ModifyTime int64
} }
// EvalIndexSort is a wrapper to sort evaluations by CreateIndex. // EvalIndexSort is a wrapper to sort evaluations by CreateIndex.


@ -25,6 +25,13 @@ const (
// DefaultNamespace is the default namespace. // DefaultNamespace is the default namespace.
DefaultNamespace = "default" DefaultNamespace = "default"
// For Job configuration, GlobalRegion is a sentinel region value
// that users may specify to indicate the job should be run on
// the region of the node that the job was submitted to.
// For Client configuration, if no region information is given,
// the client node will default to be part of the GlobalRegion.
GlobalRegion = "global"
) )
const ( const (
@ -704,7 +711,7 @@ func (j *Job) Canonicalize() {
j.Stop = boolToPtr(false) j.Stop = boolToPtr(false)
} }
if j.Region == nil { if j.Region == nil {
-j.Region = stringToPtr("global")
+j.Region = stringToPtr(GlobalRegion)
} }
if j.Namespace == nil { if j.Namespace == nil {
j.Namespace = stringToPtr("default") j.Namespace = stringToPtr("default")
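The GlobalRegion comment and the Canonicalize change above describe the same default from both sides: client nodes with no region configured join "global", and jobs submitted without a region are canonicalized to it. A minimal sketch using the api package (assuming Job.Canonicalize fills defaults on an otherwise empty Job, as the hunk above does for Region):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// No Region set: Canonicalize falls back to api.GlobalRegion ("global").
	job := &api.Job{}
	job.Canonicalize()
	fmt.Println(*job.Region) // global
}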


@ -436,6 +436,12 @@ type DriverInfo struct {
UpdateTime time.Time UpdateTime time.Time
} }
// HostVolumeInfo is used to return metadata about a given HostVolume.
type HostVolumeInfo struct {
Path string
ReadOnly bool
}
// Node is used to deserialize a node entry. // Node is used to deserialize a node entry.
type Node struct { type Node struct {
ID string ID string
@ -459,6 +465,7 @@ type Node struct {
StatusUpdatedAt int64 StatusUpdatedAt int64
Events []*NodeEvent Events []*NodeEvent
Drivers map[string]*DriverInfo Drivers map[string]*DriverInfo
HostVolumes map[string]*HostVolumeInfo
CreateIndex uint64 CreateIndex uint64
ModifyIndex uint64 ModifyIndex uint64
} }


@ -86,11 +86,13 @@ func (r *Resources) Merge(other *Resources) {
type Port struct { type Port struct {
Label string Label string
Value int `mapstructure:"static"` Value int `mapstructure:"static"`
To int `mapstructure:"to"`
} }
// NetworkResource is used to describe required network // NetworkResource is used to describe required network
// resources of a given task. // resources of a given task.
type NetworkResource struct { type NetworkResource struct {
Mode string
Device string Device string
CIDR string CIDR string
IP string IP string
@ -105,6 +107,14 @@ func (n *NetworkResource) Canonicalize() {
} }
} }
func (n *NetworkResource) HasPorts() bool {
if n == nil {
return false
}
return len(n.ReservedPorts)+len(n.DynamicPorts) > 0
}
// NodeDeviceResource captures a set of devices sharing a common // NodeDeviceResource captures a set of devices sharing a common
// vendor/type/device_name tuple. // vendor/type/device_name tuple.
type NodeDeviceResource struct { type NodeDeviceResource struct {
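The new Mode field on NetworkResource, the To field on Port, and the HasPorts helper fit together as shown in this brief sketch (the "bridge" mode string and the port-mapping reading of To are assumptions based on these new fields, not spelled out in this hunk):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// A network stanza with one dynamic port mapped to container port 8080
	// and one statically reserved port.
	network := &api.NetworkResource{
		Mode:          "bridge",
		DynamicPorts:  []api.Port{{Label: "http", To: 8080}},
		ReservedPorts: []api.Port{{Label: "https", Value: 443}},
	}
	fmt.Println(network.HasPorts()) // true
}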

api/services.go Normal file

@ -0,0 +1,176 @@
package api
import (
"fmt"
"time"
)
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
Limit int `mapstructure:"limit"`
Grace *time.Duration `mapstructure:"grace"`
IgnoreWarnings bool `mapstructure:"ignore_warnings"`
}
// Canonicalize CheckRestart fields if not nil.
func (c *CheckRestart) Canonicalize() {
if c == nil {
return
}
if c.Grace == nil {
c.Grace = timeToPtr(1 * time.Second)
}
}
// Copy returns a copy of CheckRestart or nil if unset.
func (c *CheckRestart) Copy() *CheckRestart {
if c == nil {
return nil
}
nc := new(CheckRestart)
nc.Limit = c.Limit
if c.Grace != nil {
g := *c.Grace
nc.Grace = &g
}
nc.IgnoreWarnings = c.IgnoreWarnings
return nc
}
// Merge values from other CheckRestart over default values on this
// CheckRestart and return merged copy.
func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
if c == nil {
// Just return other
return o
}
nc := c.Copy()
if o == nil {
// Nothing to merge
return nc
}
if o.Limit > 0 {
nc.Limit = o.Limit
}
if o.Grace != nil {
nc.Grace = o.Grace
}
if o.IgnoreWarnings {
nc.IgnoreWarnings = o.IgnoreWarnings
}
return nc
}
// ServiceCheck represents the consul health check that Nomad registers.
type ServiceCheck struct {
//FIXME Id is unused. Remove?
Id string
Name string
Type string
Command string
Args []string
Path string
Protocol string
PortLabel string `mapstructure:"port"`
AddressMode string `mapstructure:"address_mode"`
Interval time.Duration
Timeout time.Duration
InitialStatus string `mapstructure:"initial_status"`
TLSSkipVerify bool `mapstructure:"tls_skip_verify"`
Header map[string][]string
Method string
CheckRestart *CheckRestart `mapstructure:"check_restart"`
GRPCService string `mapstructure:"grpc_service"`
GRPCUseTLS bool `mapstructure:"grpc_use_tls"`
TaskName string `mapstructure:"task"`
}
// Service represents a Consul service definition.
type Service struct {
//FIXME Id is unused. Remove?
Id string
Name string
Tags []string
CanaryTags []string `mapstructure:"canary_tags"`
PortLabel string `mapstructure:"port"`
AddressMode string `mapstructure:"address_mode"`
Checks []ServiceCheck
CheckRestart *CheckRestart `mapstructure:"check_restart"`
Connect *ConsulConnect
Meta map[string]string
}
// Canonicalize the Service by ensuring its name and address mode are set. Task
// will be nil for group services.
func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
if s.Name == "" {
if t != nil {
s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name)
} else {
s.Name = fmt.Sprintf("%s-%s", *job.Name, *tg.Name)
}
}
// Default to AddressModeAuto
if s.AddressMode == "" {
s.AddressMode = "auto"
}
// Canonicalize CheckRestart on Checks and merge Service.CheckRestart
// into each check.
for i, check := range s.Checks {
s.Checks[i].CheckRestart = s.CheckRestart.Merge(check.CheckRestart)
s.Checks[i].CheckRestart.Canonicalize()
}
}
// ConsulConnect represents a Consul Connect jobspec stanza.
type ConsulConnect struct {
Native bool
SidecarService *ConsulSidecarService `mapstructure:"sidecar_service"`
SidecarTask *SidecarTask `mapstructure:"sidecar_task"`
}
// ConsulSidecarService represents a Consul Connect SidecarService jobspec
// stanza.
type ConsulSidecarService struct {
Port string
Proxy *ConsulProxy
}
// SidecarTask represents a subset of Task fields that can be set to override
// the fields of the Task generated for the sidecar
type SidecarTask struct {
Name string
Driver string
User string
Config map[string]interface{}
Env map[string]string
Resources *Resources
Meta map[string]string
KillTimeout *time.Duration `mapstructure:"kill_timeout"`
LogConfig *LogConfig `mapstructure:"logs"`
ShutdownDelay *time.Duration `mapstructure:"shutdown_delay"`
KillSignal string `mapstructure:"kill_signal"`
}
// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza.
type ConsulProxy struct {
Upstreams []*ConsulUpstream
Config map[string]interface{}
}
// ConsulUpstream represents a Consul Connect upstream jobspec stanza.
type ConsulUpstream struct {
DestinationName string `mapstructure:"destination_name"`
LocalBindPort int `mapstructure:"local_bind_port"`
}

api/services_test.go Normal file

@ -0,0 +1,56 @@
package api
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
job := &Job{Name: stringToPtr("job")}
tg := &TaskGroup{Name: stringToPtr("group")}
task := &Task{Name: "task"}
service := &Service{
CheckRestart: &CheckRestart{
Limit: 11,
Grace: timeToPtr(11 * time.Second),
IgnoreWarnings: true,
},
Checks: []ServiceCheck{
{
Name: "all-set",
CheckRestart: &CheckRestart{
Limit: 22,
Grace: timeToPtr(22 * time.Second),
IgnoreWarnings: true,
},
},
{
Name: "some-set",
CheckRestart: &CheckRestart{
Limit: 33,
Grace: timeToPtr(33 * time.Second),
},
},
{
Name: "unset",
},
},
}
service.Canonicalize(task, tg, job)
assert.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
assert.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)
assert.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
assert.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)
assert.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
assert.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}


@ -274,124 +274,6 @@ func (s *Spread) Canonicalize() {
} }
} }
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
Limit int `mapstructure:"limit"`
Grace *time.Duration `mapstructure:"grace"`
IgnoreWarnings bool `mapstructure:"ignore_warnings"`
}
// Canonicalize CheckRestart fields if not nil.
func (c *CheckRestart) Canonicalize() {
if c == nil {
return
}
if c.Grace == nil {
c.Grace = timeToPtr(1 * time.Second)
}
}
// Copy returns a copy of CheckRestart or nil if unset.
func (c *CheckRestart) Copy() *CheckRestart {
if c == nil {
return nil
}
nc := new(CheckRestart)
nc.Limit = c.Limit
if c.Grace != nil {
g := *c.Grace
nc.Grace = &g
}
nc.IgnoreWarnings = c.IgnoreWarnings
return nc
}
// Merge values from other CheckRestart over default values on this
// CheckRestart and return merged copy.
func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
if c == nil {
// Just return other
return o
}
nc := c.Copy()
if o == nil {
// Nothing to merge
return nc
}
if o.Limit > 0 {
nc.Limit = o.Limit
}
if o.Grace != nil {
nc.Grace = o.Grace
}
if o.IgnoreWarnings {
nc.IgnoreWarnings = o.IgnoreWarnings
}
return nc
}
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
Id string
Name string
Type string
Command string
Args []string
Path string
Protocol string
PortLabel string `mapstructure:"port"`
AddressMode string `mapstructure:"address_mode"`
Interval time.Duration
Timeout time.Duration
InitialStatus string `mapstructure:"initial_status"`
TLSSkipVerify bool `mapstructure:"tls_skip_verify"`
Header map[string][]string
Method string
CheckRestart *CheckRestart `mapstructure:"check_restart"`
GRPCService string `mapstructure:"grpc_service"`
GRPCUseTLS bool `mapstructure:"grpc_use_tls"`
}
// The Service model represents a Consul service definition
type Service struct {
Id string
Name string
Tags []string
CanaryTags []string `mapstructure:"canary_tags"`
PortLabel string `mapstructure:"port"`
AddressMode string `mapstructure:"address_mode"`
Checks []ServiceCheck
CheckRestart *CheckRestart `mapstructure:"check_restart"`
}
func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
if s.Name == "" {
s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name)
}
// Default to AddressModeAuto
if s.AddressMode == "" {
s.AddressMode = "auto"
}
// Canonicalize CheckRestart on Checks and merge Service.CheckRestart
// into each check.
for i, check := range s.Checks {
s.Checks[i].CheckRestart = s.CheckRestart.Merge(check.CheckRestart)
s.Checks[i].CheckRestart.Canonicalize()
}
}
// EphemeralDisk is an ephemeral disk object // EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct { type EphemeralDisk struct {
Sticky *bool Sticky *bool
@ -480,6 +362,23 @@ func (m *MigrateStrategy) Copy() *MigrateStrategy {
return nm return nm
} }
// VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use.
type VolumeRequest struct {
Name string
Type string
ReadOnly bool `mapstructure:"read_only"`
Config map[string]interface{}
}
// VolumeMount represents the relationship between a destination path in a task
// and the task group volume that should be mounted there.
type VolumeMount struct {
Volume string
Destination string
ReadOnly bool `mapstructure:"read_only"`
}
// TaskGroup is the unit of scheduling. // TaskGroup is the unit of scheduling.
type TaskGroup struct { type TaskGroup struct {
Name *string Name *string
@ -488,12 +387,15 @@ type TaskGroup struct {
Affinities []*Affinity Affinities []*Affinity
Tasks []*Task Tasks []*Task
Spreads []*Spread Spreads []*Spread
Volumes map[string]*VolumeRequest
RestartPolicy *RestartPolicy RestartPolicy *RestartPolicy
ReschedulePolicy *ReschedulePolicy ReschedulePolicy *ReschedulePolicy
EphemeralDisk *EphemeralDisk EphemeralDisk *EphemeralDisk
Update *UpdateStrategy Update *UpdateStrategy
Migrate *MigrateStrategy Migrate *MigrateStrategy
Networks []*NetworkResource
Meta map[string]string Meta map[string]string
Services []*Service
} }
// NewTaskGroup creates a new TaskGroup. // NewTaskGroup creates a new TaskGroup.
@ -604,6 +506,12 @@ func (g *TaskGroup) Canonicalize(job *Job) {
for _, a := range g.Affinities { for _, a := range g.Affinities {
a.Canonicalize() a.Canonicalize()
} }
for _, n := range g.Networks {
n.Canonicalize()
}
for _, s := range g.Services {
s.Canonicalize(nil, g, job)
}
} }
// Constrain is used to add a constraint to a task group. // Constrain is used to add a constraint to a task group.
@ -690,9 +598,11 @@ type Task struct {
Vault *Vault Vault *Vault
Templates []*Template Templates []*Template
DispatchPayload *DispatchPayloadConfig DispatchPayload *DispatchPayloadConfig
VolumeMounts []*VolumeMount
Leader bool Leader bool
ShutdownDelay time.Duration `mapstructure:"shutdown_delay"` ShutdownDelay time.Duration `mapstructure:"shutdown_delay"`
KillSignal string `mapstructure:"kill_signal"` KillSignal string `mapstructure:"kill_signal"`
Kind string
} }
func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {

View File

@ -269,7 +269,7 @@ func TestTask_Require(t *testing.T) {
{ {
CIDR: "0.0.0.0/0", CIDR: "0.0.0.0/0",
MBits: intToPtr(100), MBits: intToPtr(100),
ReservedPorts: []Port{{"", 80}, {"", 443}}, ReservedPorts: []Port{{"", 80, 0}, {"", 443, 0}},
}, },
}, },
} }
@ -577,54 +577,6 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
} }
} }
// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
job := &Job{Name: stringToPtr("job")}
tg := &TaskGroup{Name: stringToPtr("group")}
task := &Task{Name: "task"}
service := &Service{
CheckRestart: &CheckRestart{
Limit: 11,
Grace: timeToPtr(11 * time.Second),
IgnoreWarnings: true,
},
Checks: []ServiceCheck{
{
Name: "all-set",
CheckRestart: &CheckRestart{
Limit: 22,
Grace: timeToPtr(22 * time.Second),
IgnoreWarnings: true,
},
},
{
Name: "some-set",
CheckRestart: &CheckRestart{
Limit: 33,
Grace: timeToPtr(33 * time.Second),
},
},
{
Name: "unset",
},
},
}
service.Canonicalize(task, tg, job)
assert.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
assert.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)
assert.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
assert.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)
assert.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
assert.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}
// TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly // TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) { func TestSpread_Canonicalize(t *testing.T) {
job := &Job{ job := &Job{


@ -17,6 +17,19 @@ install:
- cmd: docker info - cmd: docker info
- cmd: docker run --rm dantoml/busybox-windows:08012019 echo hi there - cmd: docker run --rm dantoml/busybox-windows:08012019 echo hi there
- cmd: |
cd C:\go
del /F/Q/S *.* > NUL
cd %APPVEYOR_BUILD_FOLDER%
rmdir /Q/S C:\go
# install go 1.12.9 to match version used for cutting a release
- cmd: |
mkdir c:\go
appveyor DownloadFile "https://dl.google.com/go/go1.12.9.windows-amd64.zip" -FileName "%TEMP%\\go.zip"
- ps: Expand-Archive $Env:TEMP\go.zip -DestinationPath C:\
- cmd: set PATH=%GOBIN%;c:\go\bin;%PATH% - cmd: set PATH=%GOBIN%;c:\go\bin;%PATH%
- cmd: echo %Path% - cmd: echo %Path%
- cmd: go version - cmd: go version


@ -60,6 +60,10 @@ var (
// TaskDirs is the set of directories created in each tasks directory. // TaskDirs is the set of directories created in each tasks directory.
TaskDirs = map[string]os.FileMode{TmpDirName: os.ModeSticky | 0777} TaskDirs = map[string]os.FileMode{TmpDirName: os.ModeSticky | 0777}
// AllocGRPCSocket is the path relative to the task dir root for the
// unix socket connected to Consul's gRPC endpoint.
AllocGRPCSocket = filepath.Join(TmpDirName, "consul_grpc.sock")
) )
// AllocDir allows creating, destroying, and accessing an allocation's // AllocDir allows creating, destroying, and accessing an allocation's


@ -238,7 +238,12 @@ func (t *Tracker) watchTaskEvents() {
// Store the task states // Store the task states
t.l.Lock() t.l.Lock()
for task, state := range alloc.TaskStates { for task, state := range alloc.TaskStates {
-t.taskHealth[task].state = state
+//TODO(schmichael) for now skip unknown tasks as
//they're task group services which don't currently
//support checks anyway
if v, ok := t.taskHealth[task]; ok {
v.state = state
}
} }
t.l.Unlock() t.l.Unlock()
@ -355,7 +360,12 @@ OUTER:
// Store the task registrations // Store the task registrations
t.l.Lock() t.l.Lock()
for task, reg := range allocReg.Tasks { for task, reg := range allocReg.Tasks {
-t.taskHealth[task].taskRegistrations = reg
+//TODO(schmichael) for now skip unknown tasks as
//they're task group services which don't currently
//support checks anyway
if v, ok := t.taskHealth[task]; ok {
v.taskRegistrations = reg
}
} }
t.l.Unlock() t.l.Unlock()


@ -185,7 +185,9 @@ func NewAllocRunner(config *Config) (*allocRunner, error) {
ar.allocDir = allocdir.NewAllocDir(ar.logger, filepath.Join(config.ClientConfig.AllocDir, alloc.ID)) ar.allocDir = allocdir.NewAllocDir(ar.logger, filepath.Join(config.ClientConfig.AllocDir, alloc.ID))
// Initialize the runners hooks. // Initialize the runners hooks.
-ar.initRunnerHooks()
+if err := ar.initRunnerHooks(config.ClientConfig); err != nil {
return nil, err
}
// Create the TaskRunners // Create the TaskRunners
if err := ar.initTaskRunners(tg.Tasks); err != nil { if err := ar.initTaskRunners(tg.Tasks); err != nil {
@ -763,14 +765,15 @@ func (ar *allocRunner) destroyImpl() {
// state if Run() ran at all. // state if Run() ran at all.
<-ar.taskStateUpdateHandlerCh <-ar.taskStateUpdateHandlerCh
-// Cleanup state db
+// Mark alloc as destroyed
+ar.destroyedLock.Lock()
+// Cleanup state db; while holding the lock to avoid
+// a race periodic PersistState that may resurrect the alloc
if err := ar.stateDB.DeleteAllocationBucket(ar.id); err != nil {
ar.logger.Warn("failed to delete allocation state", "error", err)
}
-// Mark alloc as destroyed
-ar.destroyedLock.Lock()
if !ar.shutdown { if !ar.shutdown {
ar.shutdown = true ar.shutdown = true
close(ar.shutdownCh) close(ar.shutdownCh)
@ -782,6 +785,24 @@ func (ar *allocRunner) destroyImpl() {
ar.destroyedLock.Unlock()
}
func (ar *allocRunner) PersistState() error {
ar.destroyedLock.Lock()
defer ar.destroyedLock.Unlock()
if ar.destroyed {
err := ar.stateDB.DeleteAllocationBucket(ar.id)
if err != nil {
ar.logger.Warn("failed to delete allocation bucket", "error", err)
}
return nil
}
// TODO: consider persisting deployment state along with task status.
// While we study why only the alloc is persisted, I opted to maintain current
// behavior and not risk adding yet more IO calls unnecessarily.
return ar.stateDB.PutAllocation(ar.Alloc())
}
// Destroy the alloc runner by stopping it if it is still running and cleaning // Destroy the alloc runner by stopping it if it is still running and cleaning
// up all of its resources. // up all of its resources.
// //
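For context, the destroyed check in PersistState above guards against a periodic persist loop elsewhere in the client racing with Destroy. A minimal sketch of such a caller follows; the interval and wiring are illustrative only and not taken from this changeset:
// persistLoop periodically snapshots alloc state. Because PersistState
// takes destroyedLock and checks ar.destroyed, a destroyed alloc can no
// longer be resurrected by a late persist.
func persistLoop(ar *allocRunner, stopCh <-chan struct{}) {
	t := time.NewTicker(30 * time.Second) // hypothetical interval
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if err := ar.PersistState(); err != nil {
				ar.logger.Error("failed to persist alloc state", "error", err)
			}
		case <-stopCh:
			return
		}
	}
}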

View File

@ -6,9 +6,28 @@ import (
multierror "github.com/hashicorp/go-multierror" multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/allocrunner/interfaces"
clientconfig "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
) )
type networkIsolationSetter interface {
SetNetworkIsolation(*drivers.NetworkIsolationSpec)
}
// allocNetworkIsolationSetter is a shim to allow the alloc network hook to
// set the alloc network isolation configuration without full access
// to the alloc runner
type allocNetworkIsolationSetter struct {
ar *allocRunner
}
func (a *allocNetworkIsolationSetter) SetNetworkIsolation(n *drivers.NetworkIsolationSpec) {
for _, tr := range a.ar.tasks {
tr.SetNetworkIsolation(n)
}
}
// allocHealthSetter is a shim to allow the alloc health watcher hook to set
// and clear the alloc health without full access to the alloc runner state
type allocHealthSetter struct {
@ -76,12 +95,24 @@ func (a *allocHealthSetter) SetHealth(healthy, isDeploy bool, trackerTaskEvents
}
// initRunnerHooks initializes the runner's hooks.
func (ar *allocRunner) initRunnerHooks() { func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error {
hookLogger := ar.logger.Named("runner_hook")
// create health setting shim
hs := &allocHealthSetter{ar}
// create network isolation setting shim
ns := &allocNetworkIsolationSetter{ar: ar}
// build the network manager
nm, err := newNetworkManager(ar.Alloc(), ar.driverManager)
if err != nil {
return fmt.Errorf("failed to configure network manager: %v", err)
}
// create network configurator
nc := newNetworkConfigurator(hookLogger, ar.Alloc(), config)
// Create the alloc directory hook. This is run first to ensure the
// directory path exists for other hooks.
ar.runnerHooks = []interfaces.RunnerHook{
@ -89,7 +120,11 @@ func (ar *allocRunner) initRunnerHooks() {
newUpstreamAllocsHook(hookLogger, ar.prevAllocWatcher),
newDiskMigrationHook(hookLogger, ar.prevAllocMigrator, ar.allocDir),
newAllocHealthWatcherHook(hookLogger, ar.Alloc(), hs, ar.Listener(), ar.consulClient),
newNetworkHook(hookLogger, ns, ar.Alloc(), nm, nc),
newGroupServiceHook(hookLogger, ar.Alloc(), ar.consulClient),
}
return nil
}
// prerun is used to run the runner's prerun hooks.

View File

@ -1001,3 +1001,61 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
require.Fail(t, "err: %v", err) require.Fail(t, "err: %v", err)
}) })
} }
// TestAllocRunner_PersistState_Destroyed asserts that destroyed allocs don't persist anymore
func TestAllocRunner_PersistState_Destroyed(t *testing.T) {
t.Parallel()
alloc := mock.BatchAlloc()
taskName := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0].Name
conf, cleanup := testAllocRunnerConfig(t, alloc)
conf.StateDB = state.NewMemDB(conf.Logger)
defer cleanup()
ar, err := NewAllocRunner(conf)
require.NoError(t, err)
defer destroy(ar)
go ar.Run()
select {
case <-ar.WaitCh():
case <-time.After(10 * time.Second):
require.Fail(t, "timed out waiting for alloc to complete")
}
// test final persisted state upon completion
require.NoError(t, ar.PersistState())
allocs, _, err := conf.StateDB.GetAllAllocations()
require.NoError(t, err)
require.Len(t, allocs, 1)
require.Equal(t, alloc.ID, allocs[0].ID)
_, ts, err := conf.StateDB.GetTaskRunnerState(alloc.ID, taskName)
require.NoError(t, err)
require.Equal(t, structs.TaskStateDead, ts.State)
// check that DB alloc is empty after destroying AR
ar.Destroy()
select {
case <-ar.DestroyCh():
case <-time.After(10 * time.Second):
require.Fail(t, "timedout waiting for destruction")
}
allocs, _, err = conf.StateDB.GetAllAllocations()
require.NoError(t, err)
require.Empty(t, allocs)
_, ts, err = conf.StateDB.GetTaskRunnerState(alloc.ID, taskName)
require.NoError(t, err)
require.Nil(t, ts)
// check that DB alloc is empty after persisting state of destroyed AR
ar.PersistState()
allocs, _, err = conf.StateDB.GetAllAllocations()
require.NoError(t, err)
require.Empty(t, allocs)
_, ts, err = conf.StateDB.GetTaskRunnerState(alloc.ID, taskName)
require.NoError(t, err)
require.Nil(t, ts)
}

View File

@ -117,11 +117,13 @@ func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
// 2 removals (canary+noncanary) during prekill
// 2 removals (canary+noncanary) during exited
// 2 removals (canary+noncanary) during stop
// 1 remove group during stop
consulOps := conf2.Consul.(*consul.MockConsulServiceClient).GetOps()
require.Len(t, consulOps, 6) require.Len(t, consulOps, 7)
for _, op := range consulOps { for _, op := range consulOps[:6] {
require.Equal(t, "remove", op.Op) require.Equal(t, "remove", op.Op)
} }
require.Equal(t, "remove_group", consulOps[6].Op)
// Assert terminated task event was emitted
events := ar2.AllocState().TaskStates[task.Name].Events

View File

@ -0,0 +1,66 @@
package allocrunner
import (
"sync"
hclog "github.com/hashicorp/go-hclog"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/nomad/structs"
)
// groupServiceHook manages task group Consul service registration and
// deregistration.
type groupServiceHook struct {
alloc *structs.Allocation
consulClient consul.ConsulServiceAPI
prerun bool
mu sync.Mutex
logger log.Logger
}
func newGroupServiceHook(logger hclog.Logger, alloc *structs.Allocation, consulClient consul.ConsulServiceAPI) *groupServiceHook {
h := &groupServiceHook{
alloc: alloc,
consulClient: consulClient,
}
h.logger = logger.Named(h.Name())
return h
}
func (*groupServiceHook) Name() string {
return "group_services"
}
func (h *groupServiceHook) Prerun() error {
h.mu.Lock()
defer func() {
// Mark prerun as true to unblock Updates
h.prerun = true
h.mu.Unlock()
}()
return h.consulClient.RegisterGroup(h.alloc)
}
func (h *groupServiceHook) Update(req *interfaces.RunnerUpdateRequest) error {
h.mu.Lock()
defer h.mu.Unlock()
oldAlloc := h.alloc
h.alloc = req.Alloc
if !h.prerun {
// Update called before Prerun. Update alloc and exit to allow
// Prerun to do initial registration.
return nil
}
return h.consulClient.UpdateGroup(oldAlloc, h.alloc)
}
func (h *groupServiceHook) Postrun() error {
h.mu.Lock()
defer h.mu.Unlock()
return h.consulClient.RemoveGroup(h.alloc)
}

View File

@ -0,0 +1,119 @@
package allocrunner
import (
"testing"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/client/consul"
agentconsul "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)
var _ interfaces.RunnerPrerunHook = (*groupServiceHook)(nil)
var _ interfaces.RunnerUpdateHook = (*groupServiceHook)(nil)
var _ interfaces.RunnerPostrunHook = (*groupServiceHook)(nil)
// TestGroupServiceHook_NoGroupServices asserts calling group service hooks
// without group services does not error.
func TestGroupServiceHook_NoGroupServices(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
logger := testlog.HCLogger(t)
consulClient := consul.NewMockConsulServiceClient(t, logger)
h := newGroupServiceHook(logger, alloc, consulClient)
require.NoError(t, h.Prerun())
req := &interfaces.RunnerUpdateRequest{Alloc: alloc}
require.NoError(t, h.Update(req))
require.NoError(t, h.Postrun())
ops := consulClient.GetOps()
require.Len(t, ops, 3)
require.Equal(t, "add_group", ops[0].Op)
require.Equal(t, "update_group", ops[1].Op)
require.Equal(t, "remove_group", ops[2].Op)
}
// TestGroupServiceHook_GroupServices asserts group service hooks with group
// services do not error.
func TestGroupServiceHook_GroupServices(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
{
Mode: "bridge",
IP: "10.0.0.1",
DynamicPorts: []structs.Port{
{
Label: "connect-proxy-testconnect",
Value: 9999,
To: 9998,
},
},
},
}
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
tg.Services = []*structs.Service{
{
Name: "testconnect",
PortLabel: "9999",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
},
},
}
logger := testlog.HCLogger(t)
consulClient := consul.NewMockConsulServiceClient(t, logger)
h := newGroupServiceHook(logger, alloc, consulClient)
require.NoError(t, h.Prerun())
req := &interfaces.RunnerUpdateRequest{Alloc: alloc}
require.NoError(t, h.Update(req))
require.NoError(t, h.Postrun())
ops := consulClient.GetOps()
require.Len(t, ops, 3)
require.Equal(t, "add_group", ops[0].Op)
require.Equal(t, "update_group", ops[1].Op)
require.Equal(t, "remove_group", ops[2].Op)
}
// TestGroupServiceHook_Error asserts group service hooks with group
// services but no group network return an error.
func TestGroupServiceHook_Error(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
tg.Services = []*structs.Service{
{
Name: "testconnect",
PortLabel: "9999",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
},
},
}
logger := testlog.HCLogger(t)
// No need to set a Consul client or call Run. This should fail before
// attempting to register.
consulClient := agentconsul.NewServiceClient(nil, logger, false)
h := newGroupServiceHook(logger, alloc, consulClient)
require.Error(t, h.Prerun())
req := &interfaces.RunnerUpdateRequest{Alloc: alloc}
require.Error(t, h.Update(req))
require.Error(t, h.Postrun())
}

View File

@ -0,0 +1,88 @@
package allocrunner
import (
"fmt"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
)
// networkHook is an alloc lifecycle hook that manages the network namespace
// for an alloc
type networkHook struct {
// setter is a callback to set the network isolation spec after the
// network is created
setter networkIsolationSetter
// manager is used when creating the network namespace. This defaults to
// bind mounting a network namespace descriptor under /var/run/netns, but
// the namespace can instead be created by a driver if necessary
manager drivers.DriverNetworkManager
// alloc should only be read from
alloc *structs.Allocation
// spec describes the network namespace and is synchronized by specLock
spec *drivers.NetworkIsolationSpec
// networkConfigurator configures the network interfaces, routes, etc once
// the alloc network has been created
networkConfigurator NetworkConfigurator
logger hclog.Logger
}
func newNetworkHook(logger hclog.Logger, ns networkIsolationSetter,
alloc *structs.Allocation, netManager drivers.DriverNetworkManager,
netConfigurator NetworkConfigurator) *networkHook {
return &networkHook{
setter: ns,
alloc: alloc,
manager: netManager,
networkConfigurator: netConfigurator,
logger: logger,
}
}
func (h *networkHook) Name() string {
return "network"
}
func (h *networkHook) Prerun() error {
tg := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup)
if len(tg.Networks) == 0 || tg.Networks[0].Mode == "host" || tg.Networks[0].Mode == "" {
return nil
}
if h.manager == nil || h.networkConfigurator == nil {
h.logger.Trace("shared network namespaces are not supported on this platform, skipping network hook")
return nil
}
spec, err := h.manager.CreateNetwork(h.alloc.ID)
if err != nil {
return fmt.Errorf("failed to create network for alloc: %v", err)
}
if spec != nil {
h.spec = spec
h.setter.SetNetworkIsolation(spec)
}
if err := h.networkConfigurator.Setup(h.alloc, spec); err != nil {
return fmt.Errorf("failed to configure networking for alloc: %v", err)
}
return nil
}
func (h *networkHook) Postrun() error {
if h.spec == nil {
return nil
}
if err := h.networkConfigurator.Teardown(h.alloc, h.spec); err != nil {
h.logger.Error("failed to cleanup network for allocation, resources may have leaked", "alloc", h.alloc.ID, "error", err)
}
return h.manager.DestroyNetwork(h.alloc.ID, h.spec)
}

View File

@ -0,0 +1,86 @@
package allocrunner
import (
"testing"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
"github.com/hashicorp/nomad/plugins/drivers/testutils"
"github.com/stretchr/testify/require"
)
// statically assert network hook implements the expected interfaces
var _ interfaces.RunnerPrerunHook = (*networkHook)(nil)
var _ interfaces.RunnerPostrunHook = (*networkHook)(nil)
type mockNetworkIsolationSetter struct {
t *testing.T
expectedSpec *drivers.NetworkIsolationSpec
called bool
}
func (m *mockNetworkIsolationSetter) SetNetworkIsolation(spec *drivers.NetworkIsolationSpec) {
m.called = true
require.Exactly(m.t, m.expectedSpec, spec)
}
// Test that the prerun and postrun hooks call the setter with the expected spec when
// the network mode is not host
func TestNetworkHook_Prerun_Postrun(t *testing.T) {
alloc := mock.Alloc()
alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{
{
Mode: "bridge",
},
}
spec := &drivers.NetworkIsolationSpec{
Mode: drivers.NetIsolationModeGroup,
Path: "test",
Labels: map[string]string{"abc": "123"},
}
destroyCalled := false
nm := &testutils.MockDriver{
MockNetworkManager: testutils.MockNetworkManager{
CreateNetworkF: func(allocID string) (*drivers.NetworkIsolationSpec, error) {
require.Equal(t, alloc.ID, allocID)
return spec, nil
},
DestroyNetworkF: func(allocID string, netSpec *drivers.NetworkIsolationSpec) error {
destroyCalled = true
require.Equal(t, alloc.ID, allocID)
require.Exactly(t, spec, netSpec)
return nil
},
},
}
setter := &mockNetworkIsolationSetter{
t: t,
expectedSpec: spec,
}
require := require.New(t)
logger := testlog.HCLogger(t)
hook := newNetworkHook(logger, setter, alloc, nm, &hostNetworkConfigurator{})
require.NoError(hook.Prerun())
require.True(setter.called)
require.False(destroyCalled)
require.NoError(hook.Postrun())
require.True(destroyCalled)
// reset and use host network mode
setter.called = false
destroyCalled = false
alloc.Job.TaskGroups[0].Networks[0].Mode = "host"
hook = newNetworkHook(logger, setter, alloc, nm, &hostNetworkConfigurator{})
require.NoError(hook.Prerun())
require.False(setter.called)
require.False(destroyCalled)
require.NoError(hook.Postrun())
require.False(destroyCalled)
}

View File

@ -0,0 +1,139 @@
package allocrunner
import (
"context"
"fmt"
"strings"
hclog "github.com/hashicorp/go-hclog"
clientconfig "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/lib/nsutil"
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
)
func newNetworkManager(alloc *structs.Allocation, driverManager drivermanager.Manager) (nm drivers.DriverNetworkManager, err error) {
// The defaultNetworkManager is used if a driver doesn't need to create the network
nm = &defaultNetworkManager{}
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
// default netmode to host, this can be overridden by the task or task group
tgNetMode := "host"
if len(tg.Networks) > 0 && tg.Networks[0].Mode != "" {
tgNetMode = tg.Networks[0].Mode
}
// networkInitiator tracks the task driver which needs to create the network,
// used to check for multiple drivers needing to create the network
var networkInitiator string
// driverCaps tracks which drivers we've checked capabilities for so as not
// to do extra work
driverCaps := make(map[string]struct{})
for _, task := range tg.Tasks {
// the task's netmode defaults to that of the task group but can be overridden
taskNetMode := tgNetMode
if len(task.Resources.Networks) > 0 && task.Resources.Networks[0].Mode != "" {
taskNetMode = task.Resources.Networks[0].Mode
}
// netmode host should always work to support backwards compat
if taskNetMode == "host" {
continue
}
// check to see if capabilities of this task's driver have already been checked
if _, ok := driverCaps[task.Driver]; ok {
continue
}
driver, err := driverManager.Dispense(task.Driver)
if err != nil {
return nil, fmt.Errorf("failed to dispense driver %s: %v", task.Driver, err)
}
caps, err := driver.Capabilities()
if err != nil {
return nil, fmt.Errorf("failed to retrive capabilities for driver %s: %v",
task.Driver, err)
}
// check that the driver supports the requested network isolation mode
netIsolationMode := netModeToIsolationMode(taskNetMode)
if !caps.HasNetIsolationMode(netIsolationMode) {
return nil, fmt.Errorf("task %s does not support %q networking mode", task.Name, taskNetMode)
}
// check if the driver needs to create the network and if a different
// driver has already claimed it needs to initiate the network
if caps.MustInitiateNetwork {
if networkInitiator != "" {
return nil, fmt.Errorf("tasks %s and %s want to initiate networking but only one driver can do so", networkInitiator, task.Name)
}
netManager, ok := driver.(drivers.DriverNetworkManager)
if !ok {
return nil, fmt.Errorf("driver %s does not implement network management RPCs", task.Driver)
}
nm = netManager
networkInitiator = task.Name
}
// mark this driver's capabilities as checked
driverCaps[task.Driver] = struct{}{}
}
return nm, nil
}
// defaultNetworkManager creates a network namespace for the alloc
type defaultNetworkManager struct{}
func (*defaultNetworkManager) CreateNetwork(allocID string) (*drivers.NetworkIsolationSpec, error) {
netns, err := nsutil.NewNS(allocID)
if err != nil {
return nil, err
}
spec := &drivers.NetworkIsolationSpec{
Mode: drivers.NetIsolationModeGroup,
Path: netns.Path(),
Labels: make(map[string]string),
}
return spec, nil
}
func (*defaultNetworkManager) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
return nsutil.UnmountNS(spec.Path)
}
func netModeToIsolationMode(netMode string) drivers.NetIsolationMode {
switch strings.ToLower(netMode) {
case "host":
return drivers.NetIsolationModeHost
case "bridge", "none":
return drivers.NetIsolationModeGroup
case "driver":
return drivers.NetIsolationModeTask
default:
return drivers.NetIsolationModeHost
}
}
func newNetworkConfigurator(log hclog.Logger, alloc *structs.Allocation, config *clientconfig.Config) NetworkConfigurator {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
// Check if network stanza is given
if len(tg.Networks) == 0 {
return &hostNetworkConfigurator{}
}
switch strings.ToLower(tg.Networks[0].Mode) {
case "bridge":
return newBridgeNetworkConfigurator(log, context.Background(), config.BridgeNetworkName, config.BridgeNetworkAllocSubnet, config.CNIPath)
default:
return &hostNetworkConfigurator{}
}
}
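For a quick illustration of the mode mapping above, here is a minimal sketch (not from this changeset; it assumes an fmt import) that exercises netModeToIsolationMode for the handled modes:
// exampleNetModeMapping prints the isolation mode chosen for each netmode:
// "bridge" and "none" share a group namespace, "driver" delegates isolation
// to the task driver, and anything else falls back to host networking.
func exampleNetModeMapping() {
	for _, mode := range []string{"host", "bridge", "none", "driver", "unknown"} {
		fmt.Printf("%s -> %v\n", mode, netModeToIsolationMode(mode))
	}
}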

View File

@ -0,0 +1,190 @@
package allocrunner
import (
"testing"
"github.com/hashicorp/nomad/client/pluginmanager"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
"github.com/hashicorp/nomad/plugins/drivers/testutils"
"github.com/stretchr/testify/require"
)
var mockDrivers = map[string]drivers.DriverPlugin{
"hostonly": &testutils.MockDriver{
CapabilitiesF: func() (*drivers.Capabilities, error) {
return &drivers.Capabilities{
NetIsolationModes: []drivers.NetIsolationMode{drivers.NetIsolationModeHost},
}, nil
},
},
"group1": &testutils.MockDriver{
CapabilitiesF: func() (*drivers.Capabilities, error) {
return &drivers.Capabilities{
NetIsolationModes: []drivers.NetIsolationMode{
drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
}, nil
},
},
"group2": &testutils.MockDriver{
CapabilitiesF: func() (*drivers.Capabilities, error) {
return &drivers.Capabilities{
NetIsolationModes: []drivers.NetIsolationMode{
drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
}, nil
},
},
"mustinit1": &testutils.MockDriver{
CapabilitiesF: func() (*drivers.Capabilities, error) {
return &drivers.Capabilities{
NetIsolationModes: []drivers.NetIsolationMode{
drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
MustInitiateNetwork: true,
}, nil
},
},
"mustinit2": &testutils.MockDriver{
CapabilitiesF: func() (*drivers.Capabilities, error) {
return &drivers.Capabilities{
NetIsolationModes: []drivers.NetIsolationMode{
drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup},
MustInitiateNetwork: true,
}, nil
},
},
}
type mockDriverManager struct {
pluginmanager.MockPluginManager
}
func (m *mockDriverManager) Dispense(driver string) (drivers.DriverPlugin, error) {
return mockDrivers[driver], nil
}
func TestNewNetworkManager(t *testing.T) {
for _, tc := range []struct {
name string
alloc *structs.Allocation
err bool
mustInit bool
errContains string
}{
{
name: "defaults/backwards compat",
alloc: &structs.Allocation{
TaskGroup: "group",
Job: &structs.Job{
TaskGroups: []*structs.TaskGroup{
{
Name: "group",
Networks: []*structs.NetworkResource{},
Tasks: []*structs.Task{
{
Name: "task1",
Driver: "group1",
Resources: &structs.Resources{},
},
{
Name: "task2",
Driver: "group2",
Resources: &structs.Resources{},
},
{
Name: "task3",
Driver: "mustinit1",
Resources: &structs.Resources{},
},
},
},
},
},
},
},
{
name: "driver /w must init network",
alloc: &structs.Allocation{
TaskGroup: "group",
Job: &structs.Job{
TaskGroups: []*structs.TaskGroup{
{
Name: "group",
Networks: []*structs.NetworkResource{
{
Mode: "bridge",
},
},
Tasks: []*structs.Task{
{
Name: "task1",
Driver: "group1",
Resources: &structs.Resources{},
},
{
Name: "task2",
Driver: "mustinit2",
Resources: &structs.Resources{},
},
},
},
},
},
},
mustInit: true,
},
{
name: "multiple mustinit",
alloc: &structs.Allocation{
TaskGroup: "group",
Job: &structs.Job{
TaskGroups: []*structs.TaskGroup{
{
Name: "group",
Networks: []*structs.NetworkResource{
{
Mode: "bridge",
},
},
Tasks: []*structs.Task{
{
Name: "task1",
Driver: "mustinit1",
Resources: &structs.Resources{},
},
{
Name: "task2",
Driver: "mustinit2",
Resources: &structs.Resources{},
},
},
},
},
},
},
err: true,
errContains: "want to initiate networking but only one",
},
} {
t.Run(tc.name, func(t *testing.T) {
require := require.New(t)
nm, err := newNetworkManager(tc.alloc, &mockDriverManager{})
if tc.err {
require.Error(err)
require.Contains(err.Error(), tc.errContains)
} else {
require.NoError(err)
}
if tc.mustInit {
_, ok := nm.(*testutils.MockDriver)
require.True(ok)
} else if tc.err {
require.Nil(nm)
} else {
_, ok := nm.(*defaultNetworkManager)
require.True(ok)
}
})
}
}

View File

@ -0,0 +1,20 @@
//+build !linux
package allocrunner
import (
hclog "github.com/hashicorp/go-hclog"
clientconfig "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
)
// TODO: Support windows shared networking
func newNetworkManager(alloc *structs.Allocation, driverManager drivermanager.Manager) (nm drivers.DriverNetworkManager, err error) {
return nil, nil
}
func newNetworkConfigurator(log hclog.Logger, alloc *structs.Allocation, config *clientconfig.Config) NetworkConfigurator {
return &hostNetworkConfigurator{}
}

View File

@ -0,0 +1,25 @@
package allocrunner
import (
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
)
// NetworkConfigurator sets up and tears down the interfaces, routes, firewall
// rules, etc for the configured networking mode of the allocation.
type NetworkConfigurator interface {
Setup(*structs.Allocation, *drivers.NetworkIsolationSpec) error
Teardown(*structs.Allocation, *drivers.NetworkIsolationSpec) error
}
// hostNetworkConfigurator is a noop implementation of a NetworkConfigurator for
// when the alloc joins a client host's network namespace and thus does not
// require further configuration
type hostNetworkConfigurator struct{}
func (h *hostNetworkConfigurator) Setup(*structs.Allocation, *drivers.NetworkIsolationSpec) error {
return nil
}
func (h *hostNetworkConfigurator) Teardown(*structs.Allocation, *drivers.NetworkIsolationSpec) error {
return nil
}
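The NetworkConfigurator interface above is small enough that alternative implementations are easy to sketch; for instance, a hypothetical logging wrapper (illustrative only, assumes an hclog import, not from this changeset):
// loggingNetworkConfigurator wraps another NetworkConfigurator and logs
// Setup and Teardown calls before delegating.
type loggingNetworkConfigurator struct {
	next   NetworkConfigurator
	logger hclog.Logger
}

func (l *loggingNetworkConfigurator) Setup(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) error {
	l.logger.Debug("network setup", "alloc_id", alloc.ID)
	return l.next.Setup(alloc, spec)
}

func (l *loggingNetworkConfigurator) Teardown(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) error {
	l.logger.Debug("network teardown", "alloc_id", alloc.ID)
	return l.next.Teardown(alloc, spec)
}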

View File

@ -0,0 +1,272 @@
package allocrunner
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"time"
"github.com/containernetworking/cni/libcni"
"github.com/coreos/go-iptables/iptables"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
)
const (
// envCNIPath is the environment variable name to use to derive the CNI path
// when it is not explicitly set by the client
envCNIPath = "CNI_PATH"
// defaultCNIPath is the CNI path to use when it is not set by the client
// and is not set by environment variable
defaultCNIPath = "/opt/cni/bin"
// defaultNomadBridgeName is the name of the bridge to use when not set by
// the client
defaultNomadBridgeName = "nomad"
// bridgeNetworkContainerIfName is the name that is set for the interface created
// inside of the alloc network which is connected to the bridge
bridgeNetworkContainerIfName = "eth0"
// defaultNomadAllocSubnet is the subnet to use for host local ip address
// allocation when not specified by the client
defaultNomadAllocSubnet = "172.26.64.0/20" // end 172.26.79.255
// cniAdminChainName is the name of the admin iptables chain used to allow
// forwarding traffic to allocations
cniAdminChainName = "NOMAD-ADMIN"
)
// bridgeNetworkConfigurator is a NetworkConfigurator which adds the alloc to a
// shared bridge, configures masquerading for egress traffic and port mapping
// for ingress
type bridgeNetworkConfigurator struct {
ctx context.Context
cniConfig *libcni.CNIConfig
allocSubnet string
bridgeName string
rand *rand.Rand
logger hclog.Logger
}
func newBridgeNetworkConfigurator(log hclog.Logger, ctx context.Context, bridgeName, ipRange, cniPath string) *bridgeNetworkConfigurator {
b := &bridgeNetworkConfigurator{
ctx: ctx,
bridgeName: bridgeName,
allocSubnet: ipRange,
rand: rand.New(rand.NewSource(time.Now().Unix())),
logger: log,
}
if cniPath == "" {
if cniPath = os.Getenv(envCNIPath); cniPath == "" {
cniPath = defaultCNIPath
}
}
b.cniConfig = libcni.NewCNIConfig(filepath.SplitList(cniPath), nil)
if b.bridgeName == "" {
b.bridgeName = defaultNomadBridgeName
}
if b.allocSubnet == "" {
b.allocSubnet = defaultNomadAllocSubnet
}
return b
}
// ensureForwardingRules ensures that a forwarding rule is added to iptables
// to allow traffic inbound to the bridge network
func (b *bridgeNetworkConfigurator) ensureForwardingRules() error {
ipt, err := iptables.New()
if err != nil {
return err
}
if err = ensureChain(ipt, "filter", cniAdminChainName); err != nil {
return err
}
if err := ensureFirstChainRule(ipt, cniAdminChainName, b.generateAdminChainRule()); err != nil {
return err
}
return nil
}
// ensureChain ensures that the given chain exists, creating it if missing
func ensureChain(ipt *iptables.IPTables, table, chain string) error {
chains, err := ipt.ListChains(table)
if err != nil {
return fmt.Errorf("failed to list iptables chains: %v", err)
}
for _, ch := range chains {
if ch == chain {
return nil
}
}
err = ipt.NewChain(table, chain)
// if err is for chain already existing return as it is possible another
// goroutine created it first
if e, ok := err.(*iptables.Error); ok && e.ExitStatus() == 1 {
return nil
}
return err
}
// ensureFirstChainRule ensures the given rule exists as the first rule in the chain
func ensureFirstChainRule(ipt *iptables.IPTables, chain string, rule []string) error {
exists, err := ipt.Exists("filter", chain, rule...)
if !exists && err == nil {
// iptables rules are 1-indexed
err = ipt.Insert("filter", chain, 1, rule...)
}
return err
}
// generateAdminChainRule builds the iptables rule that is inserted into the
// CNI admin chain to ensure traffic forwarding to the bridge network
func (b *bridgeNetworkConfigurator) generateAdminChainRule() []string {
return []string{"-o", b.bridgeName, "-d", b.allocSubnet, "-j", "ACCEPT"}
}
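// With the default bridge name and subnet defined above, this rule evaluates
// to "-o nomad -d 172.26.64.0/20 -j ACCEPT"; ensureForwardingRules inserts it
// as rule 1 of the NOMAD-ADMIN chain in the filter table.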
// Setup calls the CNI plugins with the add action
func (b *bridgeNetworkConfigurator) Setup(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) error {
if err := b.ensureForwardingRules(); err != nil {
return fmt.Errorf("failed to initialize table forwarding rules: %v", err)
}
netconf, err := b.buildNomadNetConfig()
if err != nil {
return err
}
// Depending on the version of the bridge CNI plugin used, a known race can occur
// where two allocs attempt to create the nomad bridge at the same time, causing
// one of them to fail. This retry attempts to overcome such transient failures.
const retry = 3
for attempt := 1; ; attempt++ {
result, err := b.cniConfig.AddNetworkList(b.ctx, netconf, b.runtimeConf(alloc, spec))
if err == nil {
break
}
b.logger.Warn("failed to configure bridge network", "err", err, "result", result.String(), "attempt", attempt)
if attempt == retry {
return err
}
// Sleep for 1 second + jitter
time.Sleep(time.Second + (time.Duration(b.rand.Int63n(1000)) * time.Millisecond))
}
return nil
}
// Teardown calls the CNI plugins with the delete action
func (b *bridgeNetworkConfigurator) Teardown(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) error {
netconf, err := b.buildNomadNetConfig()
if err != nil {
return err
}
err = b.cniConfig.DelNetworkList(b.ctx, netconf, b.runtimeConf(alloc, spec))
return err
}
// getPortMapping builds a list of portMapping structs that are used as the
// portmapping capability arguments for the portmap CNI plugin
func getPortMapping(alloc *structs.Allocation) []*portMapping {
ports := []*portMapping{}
for _, network := range alloc.AllocatedResources.Shared.Networks {
for _, port := range append(network.DynamicPorts, network.ReservedPorts...) {
if port.To < 1 {
continue
}
for _, proto := range []string{"tcp", "udp"} {
ports = append(ports, &portMapping{
Host: port.Value,
Container: port.To,
Proto: proto,
})
}
}
}
return ports
}
// portMapping is the json representation of the portmapping capability arguments
// for the portmap CNI plugin
type portMapping struct {
Host int `json:"hostPort"`
Container int `json:"containerPort"`
Proto string `json:"protocol"`
}
// runtimeConf builds the configuration needed by CNI to locate the target netns
func (b *bridgeNetworkConfigurator) runtimeConf(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) *libcni.RuntimeConf {
return &libcni.RuntimeConf{
ContainerID: fmt.Sprintf("nomad-%s", alloc.ID[:8]),
NetNS: spec.Path,
IfName: bridgeNetworkContainerIfName,
CapabilityArgs: map[string]interface{}{
"portMappings": getPortMapping(alloc),
},
}
}
// buildNomadNetConfig generates the CNI network configuration for the bridge
// networking mode
func (b *bridgeNetworkConfigurator) buildNomadNetConfig() (*libcni.NetworkConfigList, error) {
rendered := fmt.Sprintf(nomadCNIConfigTemplate, b.bridgeName, b.allocSubnet, cniAdminChainName)
return libcni.ConfListFromBytes([]byte(rendered))
}
const nomadCNIConfigTemplate = `{
"cniVersion": "0.4.0",
"name": "nomad",
"plugins": [
{
"type": "bridge",
"bridge": "%s",
"ipMasq": true,
"isGateway": true,
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "%s"
}
]
],
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
},
{
"type": "firewall",
"backend": "iptables",
"iptablesAdminChainName": "%s"
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
`
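To make the portmap capability arguments concrete, here is a minimal sketch of what getPortMapping produces for one dynamic port (values are illustrative, not from this changeset; assumes the nomad/mock and fmt imports):
// examplePortMapping shows how a dynamic port with a To target expands
// into one mapping per protocol for the portmap plugin.
func examplePortMapping() {
	alloc := mock.Alloc()
	alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{
		DynamicPorts: []structs.Port{{Label: "http", Value: 9999, To: 9998}},
	}}
	for _, pm := range getPortMapping(alloc) {
		fmt.Printf("%d -> %d (%s)\n", pm.Host, pm.Container, pm.Proto)
	}
	// Prints 9999 -> 9998 (tcp) followed by 9999 -> 9998 (udp).
}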

View File

@ -0,0 +1,151 @@
package taskrunner
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
agentconsul "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/nomad/structs"
)
var _ interfaces.TaskPrestartHook = &envoyBootstrapHook{}
// envoyBootstrapHook writes the bootstrap config for the Connect Envoy proxy
// sidecar.
type envoyBootstrapHook struct {
alloc *structs.Allocation
// Bootstrapping Envoy requires talking directly to Consul to generate
// the bootstrap.json config. Runtime Envoy configuration is done via
// Consul's gRPC endpoint.
consulHTTPAddr string
logger log.Logger
}
func newEnvoyBootstrapHook(alloc *structs.Allocation, consulHTTPAddr string, logger log.Logger) *envoyBootstrapHook {
h := &envoyBootstrapHook{
alloc: alloc,
consulHTTPAddr: consulHTTPAddr,
}
h.logger = logger.Named(h.Name())
return h
}
func (envoyBootstrapHook) Name() string {
return "envoy_bootstrap"
}
func (h *envoyBootstrapHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
if !req.Task.Kind.IsConnectProxy() {
// Not a Connect proxy sidecar
resp.Done = true
return nil
}
serviceName := req.Task.Kind.Value()
if serviceName == "" {
return fmt.Errorf("Connect proxy sidecar does not specify service name")
}
tg := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup)
var service *structs.Service
for _, s := range tg.Services {
if s.Name == serviceName {
service = s
break
}
}
if service == nil {
return fmt.Errorf("Connect proxy sidecar task exists but no services configured with a sidecar")
}
h.logger.Debug("bootstrapping Connect proxy sidecar", "task", req.Task.Name, "service", serviceName)
//TODO(schmichael) relies on GRPCSocket being created
//TODO(schmichael) unnecessary if the sidecar is running on the host netns
grpcAddr := "unix://" + filepath.Join(allocdir.SharedAllocName, allocdir.AllocGRPCSocket)
// Envoy bootstrap configuration may contain a Consul token, so write
// it to the secrets directory like Vault tokens.
fn := filepath.Join(req.TaskDir.SecretsDir, "envoy_bootstrap.json")
canary := h.alloc.DeploymentStatus.IsCanary()
id := agentconsul.MakeTaskServiceID(h.alloc.ID, "group-"+tg.Name, service, canary)
h.logger.Debug("bootstrapping envoy", "sidecar_for", service.Name, "boostrap_file", fn, "sidecar_for_id", id, "grpc_addr", grpcAddr)
// Since Consul services are registered asynchronously with this task
// hook running, retry a small number of times with backoff.
for tries := 3; ; tries-- {
cmd := exec.CommandContext(ctx, "consul", "connect", "envoy",
"-grpc-addr", grpcAddr,
"-http-addr", h.consulHTTPAddr,
"-bootstrap",
"-sidecar-for", id,
)
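// The assembled invocation is roughly (paths shown symbolically):
//   consul connect envoy -grpc-addr unix://<SharedAllocName>/<AllocGRPCSocket>
//     -http-addr <Consul HTTP addr> -bootstrap -sidecar-for <group service ID>
// with stdout redirected below into secrets/envoy_bootstrap.json.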
// Redirect output to secrets/envoy_bootstrap.json
fd, err := os.Create(fn)
if err != nil {
return fmt.Errorf("error creating secrets/envoy_bootstrap.json for envoy: %v", err)
}
cmd.Stdout = fd
buf := bytes.NewBuffer(nil)
cmd.Stderr = buf
// Generate bootstrap
err = cmd.Run()
// Close bootstrap.json
fd.Close()
if err == nil {
// Happy path! Bootstrap was created, exit.
break
}
// Check for error from command
if tries == 0 {
h.logger.Error("error creating bootstrap configuration for Connect proxy sidecar", "error", err, "stderr", buf.String())
// Cleanup the bootstrap file. An error here is not
// important as (a) we test to ensure the deletion
// occurs, and (b) the file will either be rewritten on
// retry or eventually garbage collected if the task
// fails.
os.Remove(fn)
// ExitErrors are recoverable since they indicate the
// command was runnable but exited with an unsuccessful
// error code.
_, recoverable := err.(*exec.ExitError)
return structs.NewRecoverableError(
fmt.Errorf("error creating bootstrap configuration for Connect proxy sidecar: %v", err),
recoverable,
)
}
// Sleep before retrying to give Consul services time to register
select {
case <-time.After(2 * time.Second):
case <-ctx.Done():
// Killed before bootstrap, exit without setting Done
return nil
}
}
// Bootstrap written. Mark as done and move on.
resp.Done = true
return nil
}

View File

@ -0,0 +1,247 @@
package taskrunner
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"testing"
consulapi "github.com/hashicorp/consul/api"
consultest "github.com/hashicorp/consul/testutil"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/client/testutil"
agentconsul "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)
var _ interfaces.TaskPrestartHook = (*envoyBootstrapHook)(nil)
// TestTaskRunner_EnvoyBootstrapHook_Prestart asserts the EnvoyBootstrapHook
// creates Envoy's bootstrap.json configuration based on Connect proxy sidecars
// registered for the task.
func TestTaskRunner_EnvoyBootstrapHook_Ok(t *testing.T) {
t.Parallel()
testutil.RequireConsul(t)
testconsul, err := consultest.NewTestServerConfig(func(c *consultest.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
{
Mode: "bridge",
IP: "10.0.0.1",
DynamicPorts: []structs.Port{
{
Label: "connect-proxy-foo",
Value: 9999,
To: 9999,
},
},
},
}
tg := alloc.Job.TaskGroups[0]
tg.Services = []*structs.Service{
{
Name: "foo",
PortLabel: "9999", // Just need a valid port, nothing will bind to it
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
},
},
}
sidecarTask := &structs.Task{
Name: "sidecar",
Kind: "connect-proxy:foo",
}
tg.Tasks = append(tg.Tasks, sidecarTask)
logger := testlog.HCLogger(t)
tmpAllocDir, err := ioutil.TempDir("", "EnvoyBootstrapHookTest")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmpAllocDir)
allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), tmpAllocDir)
defer allocDir.Destroy()
// Register Group Services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testconsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterGroup(alloc))
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(alloc, testconsul.HTTPAddr, logger)
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
TaskDir: allocDir.NewTaskDir(sidecarTask.Name),
}
require.NoError(t, req.TaskDir.Build(false, nil))
resp := &interfaces.TaskPrestartResponse{}
// Run the hook
require.NoError(t, h.Prestart(context.Background(), req, resp))
// Assert it is Done
require.True(t, resp.Done)
f, err := os.Open(filepath.Join(req.TaskDir.SecretsDir, "envoy_bootstrap.json"))
require.NoError(t, err)
defer f.Close()
// Assert bootstrap configuration is valid json
var out map[string]interface{}
require.NoError(t, json.NewDecoder(f).Decode(&out))
}
// TestTaskRunner_EnvoyBootstrapHook_Noop asserts that the Envoy bootstrap hook
// is a noop for non-Connect proxy sidecar tasks.
func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) {
t.Parallel()
logger := testlog.HCLogger(t)
tmpAllocDir, err := ioutil.TempDir("", "EnvoyBootstrapHookTest")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmpAllocDir)
allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), tmpAllocDir)
defer allocDir.Destroy()
alloc := mock.Alloc()
task := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0]
// Run Envoy bootstrap Hook. Use invalid Consul address as it should
// not get hit.
h := newEnvoyBootstrapHook(alloc, "http://127.0.0.2:1", logger)
req := &interfaces.TaskPrestartRequest{
Task: task,
TaskDir: allocDir.NewTaskDir(task.Name),
}
require.NoError(t, req.TaskDir.Build(false, nil))
resp := &interfaces.TaskPrestartResponse{}
// Run the hook
require.NoError(t, h.Prestart(context.Background(), req, resp))
// Assert it is Done
require.True(t, resp.Done)
// Assert no file was written
_, err = os.Open(filepath.Join(req.TaskDir.SecretsDir, "envoy_bootstrap.json"))
require.Error(t, err)
require.True(t, os.IsNotExist(err))
}
// TestTaskRunner_EnvoyBootstrapHook_RecoverableError asserts the Envoy
// bootstrap hook returns a Recoverable error if the bootstrap command runs but
// fails.
func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
t.Parallel()
testutil.RequireConsul(t)
testconsul, err := consultest.NewTestServerConfig(func(c *consultest.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
{
Mode: "bridge",
IP: "10.0.0.1",
DynamicPorts: []structs.Port{
{
Label: "connect-proxy-foo",
Value: 9999,
To: 9999,
},
},
},
}
tg := alloc.Job.TaskGroups[0]
tg.Services = []*structs.Service{
{
Name: "foo",
PortLabel: "9999", // Just need a valid port, nothing will bind to it
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
},
},
}
sidecarTask := &structs.Task{
Name: "sidecar",
Kind: "connect-proxy:foo",
}
tg.Tasks = append(tg.Tasks, sidecarTask)
logger := testlog.HCLogger(t)
tmpAllocDir, err := ioutil.TempDir("", "EnvoyBootstrapHookTest")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmpAllocDir)
allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), tmpAllocDir)
defer allocDir.Destroy()
// Unlike the successful test above, do NOT register the group services
// yet. This should cause a recoverable error, similar to Consul not
// running at all.
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(alloc, testconsul.HTTPAddr, logger)
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
TaskDir: allocDir.NewTaskDir(sidecarTask.Name),
}
require.NoError(t, req.TaskDir.Build(false, nil))
resp := &interfaces.TaskPrestartResponse{}
// Run the hook
err = h.Prestart(context.Background(), req, resp)
require.Error(t, err)
require.True(t, structs.IsRecoverable(err))
// Assert it is not Done
require.False(t, resp.Done)
// Assert no file was written
_, err = os.Open(filepath.Join(req.TaskDir.SecretsDir, "envoy_bootstrap.json"))
require.Error(t, err)
require.True(t, os.IsNotExist(err))
}

View File

@ -52,6 +52,7 @@ func getClient(src string, mode gg.ClientMode, dst string) *gg.Client {
Dst: dst,
Mode: mode,
Getters: getters,
Umask: 060000000,
}
}
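As an aside on the octal constant above: in Go's os.FileMode bit layout, 060000000 is exactly the setuid and setgid bits, so the getter strips those bits from fetched files; the setuid artifact test later in this change relies on that. A minimal check (illustrative, not from this changeset):
// exampleUmaskBits confirms the octal constant corresponds to the
// setuid|setgid mode bits.
func exampleUmaskBits() {
	fmt.Println(os.FileMode(060000000) == os.ModeSetuid|os.ModeSetgid) // true
}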

View File

@ -8,12 +8,14 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"runtime"
"strings" "strings"
"testing" "testing"
"github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
) )
// fakeReplacer is a noop version of taskenv.TaskEnv.ReplaceEnv // fakeReplacer is a noop version of taskenv.TaskEnv.ReplaceEnv
@ -214,6 +216,55 @@ func TestGetArtifact_Archive(t *testing.T) {
checkContents(taskDir, expected, t) checkContents(taskDir, expected, t)
} }
func TestGetArtifact_Setuid(t *testing.T) {
// Create the test server hosting the file to download
ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("./test-fixtures/"))))
defer ts.Close()
// Create a temp directory to download into and create some of the same
// files that exist in the artifact to ensure they are overridden
taskDir, err := ioutil.TempDir("", "nomad-test")
require.NoError(t, err)
defer os.RemoveAll(taskDir)
file := "setuid.tgz"
artifact := &structs.TaskArtifact{
GetterSource: fmt.Sprintf("%s/%s", ts.URL, file),
GetterOptions: map[string]string{
"checksum": "sha1:e892194748ecbad5d0f60c6c6b2db2bdaa384a90",
},
}
require.NoError(t, GetArtifact(taskEnv, artifact, taskDir))
var expected map[string]int
if runtime.GOOS == "windows" {
// Windows doesn't support changing file permissions via Chmod.
expected = map[string]int{
"public": 0666,
"private": 0666,
"setuid": 0666,
}
} else {
// Verify that unarchiving masked the file permissions properly.
expected = map[string]int{
"public": 0666,
"private": 0600,
"setuid": 0755,
}
}
for file, perm := range expected {
path := filepath.Join(taskDir, "setuid", file)
s, err := os.Stat(path)
require.NoError(t, err)
p := os.FileMode(perm)
o := s.Mode()
require.Equalf(t, p, o, "%s expected %o found %o", file, p, o)
}
}
func TestGetGetterUrl_Queries(t *testing.T) { func TestGetGetterUrl_Queries(t *testing.T) {
cases := []struct { cases := []struct {
name string name string

Binary file not shown.

View File

@ -252,6 +252,15 @@ func interpolateServices(taskEnv *taskenv.TaskEnv, services []*structs.Service)
service.PortLabel = taskEnv.ReplaceEnv(service.PortLabel) service.PortLabel = taskEnv.ReplaceEnv(service.PortLabel)
service.Tags = taskEnv.ParseAndReplace(service.Tags) service.Tags = taskEnv.ParseAndReplace(service.Tags)
service.CanaryTags = taskEnv.ParseAndReplace(service.CanaryTags) service.CanaryTags = taskEnv.ParseAndReplace(service.CanaryTags)
if len(service.Meta) > 0 {
meta := make(map[string]string, len(service.Meta))
for k, v := range service.Meta {
meta[k] = taskEnv.ReplaceEnv(v)
}
service.Meta = meta
}
interpolated[i] = service interpolated[i] = service
} }

View File

@ -202,6 +202,9 @@ type TaskRunner struct {
// fails and the Run method should wait until serversContactedCh is // fails and the Run method should wait until serversContactedCh is
// closed. // closed.
waitOnServers bool waitOnServers bool
networkIsolationLock sync.Mutex
networkIsolationSpec *drivers.NetworkIsolationSpec
} }
type Config struct { type Config struct {
@ -895,6 +898,8 @@ func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig {
invocationid := uuid.Generate()[:8] invocationid := uuid.Generate()[:8]
taskResources := tr.taskResources taskResources := tr.taskResources
env := tr.envBuilder.Build() env := tr.envBuilder.Build()
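// Hold the network isolation lock while building the config so the
// NetworkIsolation field below reflects any spec set concurrently via
// SetNetworkIsolation.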
tr.networkIsolationLock.Lock()
defer tr.networkIsolationLock.Unlock()
return &drivers.TaskConfig{ return &drivers.TaskConfig{
ID: fmt.Sprintf("%s/%s/%s", alloc.ID, task.Name, invocationid), ID: fmt.Sprintf("%s/%s/%s", alloc.ID, task.Name, invocationid),
@ -909,15 +914,16 @@ func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig {
PercentTicks: float64(taskResources.Cpu.CpuShares) / float64(tr.clientConfig.Node.NodeResources.Cpu.CpuShares), PercentTicks: float64(taskResources.Cpu.CpuShares) / float64(tr.clientConfig.Node.NodeResources.Cpu.CpuShares),
}, },
}, },
Devices: tr.hookResources.getDevices(), Devices: tr.hookResources.getDevices(),
Mounts: tr.hookResources.getMounts(), Mounts: tr.hookResources.getMounts(),
Env: env.Map(), Env: env.Map(),
DeviceEnv: env.DeviceEnv(), DeviceEnv: env.DeviceEnv(),
User: task.User, User: task.User,
AllocDir: tr.taskDir.AllocDir, AllocDir: tr.taskDir.AllocDir,
StdoutPath: tr.logmonHookConfig.stdoutFifo, StdoutPath: tr.logmonHookConfig.stdoutFifo,
StderrPath: tr.logmonHookConfig.stderrFifo, StderrPath: tr.logmonHookConfig.stderrFifo,
AllocID: tr.allocID, AllocID: tr.allocID,
NetworkIsolation: tr.networkIsolationSpec,
} }
} }
@ -1181,6 +1187,14 @@ func (tr *TaskRunner) Update(update *structs.Allocation) {
} }
} }
// SetNetworkIsolation is called by the PreRun allocation hook after configuring
// the network isolation for the allocation
func (tr *TaskRunner) SetNetworkIsolation(n *drivers.NetworkIsolationSpec) {
tr.networkIsolationLock.Lock()
tr.networkIsolationSpec = n
tr.networkIsolationLock.Unlock()
}
// triggerUpdate if there isn't already an update pending. Should be called // triggerUpdate if there isn't already an update pending. Should be called
// instead of calling updateHooks directly to serialize runs of update hooks. // instead of calling updateHooks directly to serialize runs of update hooks.
// TaskRunner state should be updated prior to triggering update hooks. // TaskRunner state should be updated prior to triggering update hooks.
@ -1347,7 +1361,12 @@ func appendTaskEvent(state *structs.TaskState, event *structs.TaskEvent, capacit
} }
func (tr *TaskRunner) TaskExecHandler() drivermanager.TaskExecHandler { func (tr *TaskRunner) TaskExecHandler() drivermanager.TaskExecHandler {
return tr.getDriverHandle().ExecStreaming // Check it is running
handle := tr.getDriverHandle()
if handle == nil {
return nil
}
return handle.ExecStreaming
} }
func (tr *TaskRunner) DriverCapabilities() (*drivers.Capabilities, error) { func (tr *TaskRunner) DriverCapabilities() (*drivers.Capabilities, error) {

View File

@ -56,14 +56,17 @@ func (tr *TaskRunner) initHooks() {
// Create the task directory hook. This is run first to ensure the // Create the task directory hook. This is run first to ensure the
// directory path exists for other hooks. // directory path exists for other hooks.
alloc := tr.Alloc()
tr.runnerHooks = []interfaces.TaskHook{ tr.runnerHooks = []interfaces.TaskHook{
newValidateHook(tr.clientConfig, hookLogger), newValidateHook(tr.clientConfig, hookLogger),
newTaskDirHook(tr, hookLogger), newTaskDirHook(tr, hookLogger),
newLogMonHook(tr.logmonHookConfig, hookLogger), newLogMonHook(tr.logmonHookConfig, hookLogger),
newDispatchHook(tr.Alloc(), hookLogger), newDispatchHook(alloc, hookLogger),
newVolumeHook(tr, hookLogger),
newArtifactHook(tr, hookLogger), newArtifactHook(tr, hookLogger),
newStatsHook(tr, tr.clientConfig.StatsCollectionInterval, hookLogger), newStatsHook(tr, tr.clientConfig.StatsCollectionInterval, hookLogger),
newDeviceHook(tr.devicemanager, hookLogger), newDeviceHook(tr.devicemanager, hookLogger),
newEnvoyBootstrapHook(alloc, tr.clientConfig.ConsulConfig.Addr, hookLogger),
} }
// If Vault is enabled, add the hook // If Vault is enabled, add the hook

View File

@ -508,8 +508,12 @@ func templateRunner(config *TaskTemplateManagerConfig) (
return nil, nil, err return nil, nil, err
} }
// Set Nomad's environment variables // Set Nomad's environment variables.
runner.Env = config.EnvBuilder.Build().All() // consul-template falls back to the host process environment if a
// variable isn't explicitly set in the configuration, so we need
// to mask the environment out to ensure only the task env vars are
// available.
runner.Env = maskProcessEnv(config.EnvBuilder.Build().All())
// Build the lookup // Build the lookup
idMap := runner.TemplateConfigMapping() idMap := runner.TemplateConfigMapping()
@ -525,13 +529,27 @@ func templateRunner(config *TaskTemplateManagerConfig) (
return runner, lookup, nil return runner, lookup, nil
} }
// maskProcessEnv masks away any environment variable not found in task env.
// It manipulates the parameter directly and returns it without copying.
func maskProcessEnv(env map[string]string) map[string]string {
procEnvs := os.Environ()
for _, e := range procEnvs {
ekv := strings.SplitN(e, "=", 2)
if _, ok := env[ekv[0]]; !ok {
env[ekv[0]] = ""
}
}
return env
}
// parseTemplateConfigs converts the tasks templates in the config into // parseTemplateConfigs converts the tasks templates in the config into
// consul-templates // consul-templates
func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[ctconf.TemplateConfig]*structs.Template, error) { func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[*ctconf.TemplateConfig]*structs.Template, error) {
allowAbs := config.ClientConfig.ReadBoolDefault(hostSrcOption, true) allowAbs := config.ClientConfig.ReadBoolDefault(hostSrcOption, true)
taskEnv := config.EnvBuilder.Build() taskEnv := config.EnvBuilder.Build()
ctmpls := make(map[ctconf.TemplateConfig]*structs.Template, len(config.Templates)) ctmpls := make(map[*ctconf.TemplateConfig]*structs.Template, len(config.Templates))
for _, tmpl := range config.Templates { for _, tmpl := range config.Templates {
var src, dest string var src, dest string
if tmpl.SourcePath != "" { if tmpl.SourcePath != "" {
@ -555,6 +573,10 @@ func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[ctconf.Templat
ct.Contents = &tmpl.EmbeddedTmpl ct.Contents = &tmpl.EmbeddedTmpl
ct.LeftDelim = &tmpl.LeftDelim ct.LeftDelim = &tmpl.LeftDelim
ct.RightDelim = &tmpl.RightDelim ct.RightDelim = &tmpl.RightDelim
ct.FunctionBlacklist = config.ClientConfig.TemplateConfig.FunctionBlacklist
if !config.ClientConfig.TemplateConfig.DisableSandbox {
ct.SandboxPath = &config.TaskDir
}
// Set the permissions // Set the permissions
if tmpl.Perms != "" { if tmpl.Perms != "" {
@ -567,7 +589,7 @@ func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[ctconf.Templat
} }
ct.Finalize() ct.Finalize()
ctmpls[*ct] = tmpl ctmpls[ct] = tmpl
} }
return ctmpls, nil return ctmpls, nil
@ -576,7 +598,7 @@ func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[ctconf.Templat
// newRunnerConfig returns a consul-template runner configuration, setting the // newRunnerConfig returns a consul-template runner configuration, setting the
// Vault and Consul configurations based on the clients configs. // Vault and Consul configurations based on the clients configs.
func newRunnerConfig(config *TaskTemplateManagerConfig, func newRunnerConfig(config *TaskTemplateManagerConfig,
templateMapping map[ctconf.TemplateConfig]*structs.Template) (*ctconf.Config, error) { templateMapping map[*ctconf.TemplateConfig]*structs.Template) (*ctconf.Config, error) {
cc := config.ClientConfig cc := config.ClientConfig
conf := ctconf.DefaultConfig() conf := ctconf.DefaultConfig()
@ -585,7 +607,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig,
flat := ctconf.TemplateConfigs(make([]*ctconf.TemplateConfig, 0, len(templateMapping))) flat := ctconf.TemplateConfigs(make([]*ctconf.TemplateConfig, 0, len(templateMapping)))
for ctmpl := range templateMapping { for ctmpl := range templateMapping {
local := ctmpl local := ctmpl
flat = append(flat, &local) flat = append(flat, local)
} }
conf.Templates = &flat conf.Templates = &flat
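To illustrate the masking behavior, here is a minimal sketch using the maskProcessEnv helper above (names and values are illustrative, not from this changeset; assumes the os and fmt imports):
// exampleMaskProcessEnv shows that host-process variables absent from the
// task env are blanked out, while task-provided values are preserved.
func exampleMaskProcessEnv() {
	os.Setenv("HOST_ONLY_SECRET", "s3cr3t") // hypothetical host-only variable
	taskEnv := map[string]string{"NOMAD_TASK_NAME": "web"}
	masked := maskProcessEnv(taskEnv)
	fmt.Println(masked["NOMAD_TASK_NAME"])  // "web"
	fmt.Println(masked["HOST_ONLY_SECRET"]) // "" (masked)
}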

View File

@ -16,6 +16,7 @@ import (
"github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs"
sconfig "github.com/hashicorp/nomad/nomad/structs/config" sconfig "github.com/hashicorp/nomad/nomad/structs/config"
@ -124,8 +125,13 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
mockHooks: NewMockTaskHooks(), mockHooks: NewMockTaskHooks(),
templates: templates, templates: templates,
node: mock.Node(), node: mock.Node(),
config: &config.Config{Region: region}, config: &config.Config{
emitRate: DefaultMaxTemplateEventRate, Region: region,
TemplateConfig: &config.ClientTemplateConfig{
FunctionBlacklist: []string{"plugin"},
DisableSandbox: false,
}},
emitRate: DefaultMaxTemplateEventRate,
} }
// Build the task environment // Build the task environment
@ -1022,6 +1028,52 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
require.Contains(harness.mockHooks.KillEvent.DisplayMessage, "failed to send signals") require.Contains(harness.mockHooks.KillEvent.DisplayMessage, "failed to send signals")
} }
// TestTaskTemplateManager_FiltersEnvVars asserts that templates only render
// environment variables found in the task environment and do not read the
// Nomad host process environment. Host process environment variables are
// treated the same as unset variables.
func TestTaskTemplateManager_FiltersEnvVars(t *testing.T) {
t.Parallel()
defer os.Setenv("NOMAD_TASK_NAME", os.Getenv("NOMAD_TASK_NAME"))
os.Setenv("NOMAD_TASK_NAME", "should be overridden by task")
testenv := "TESTENV_" + strings.ReplaceAll(uuid.Generate(), "-", "")
os.Setenv(testenv, "MY_TEST_VALUE")
defer os.Unsetenv(testenv)
// Make a template that will render immediately
content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}
TEST_ENV: {{ env "` + testenv + `" }}
TEST_ENV_NOT_FOUND: {{env "` + testenv + `_NOTFOUND" }}`
expected := fmt.Sprintf("Hello Nomad Task: %s\nTEST_ENV: \nTEST_ENV_NOT_FOUND: ", TestTaskName)
file := "my.tmpl"
template := &structs.Template{
EmbeddedTmpl: content,
DestPath: file,
ChangeMode: structs.TemplateChangeModeNoop,
}
harness := newTestHarness(t, []*structs.Template{template}, false, false)
harness.start(t)
defer harness.stop()
// Wait for the unblock
select {
case <-harness.mockHooks.UnblockCh:
case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second):
require.Fail(t, "Task unblock should have been called")
}
// Check the file is there
path := filepath.Join(harness.taskDir, file)
raw, err := ioutil.ReadFile(path)
require.NoError(t, err)
require.Equal(t, expected, string(raw))
}
// TestTaskTemplateManager_Env asserts templates with the env flag set are read // TestTaskTemplateManager_Env asserts templates with the env flag set are read
// into the task's environment. // into the task's environment.
func TestTaskTemplateManager_Env(t *testing.T) { func TestTaskTemplateManager_Env(t *testing.T) {

View File

@ -0,0 +1,127 @@
package taskrunner
import (
"context"
"fmt"
log "github.com/hashicorp/go-hclog"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
)
type volumeHook struct {
alloc *structs.Allocation
runner *TaskRunner
logger log.Logger
}
func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook {
h := &volumeHook{
alloc: runner.Alloc(),
runner: runner,
}
h.logger = logger.Named(h.Name())
return h
}
func (*volumeHook) Name() string {
return "volumes"
}
func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) error {
var result error
for n, req := range requestedByAlias {
if req.Type != structs.VolumeTypeHost {
continue
}
cfg, err := structs.ParseHostVolumeConfig(req.Config)
if err != nil {
result = multierror.Append(result, fmt.Errorf("failed to parse config for %s: %v", n, err))
continue
}
_, ok := clientVolumesByName[cfg.Source]
if !ok {
result = multierror.Append(result, fmt.Errorf("missing %s", cfg.Source))
}
}
return result
}
// hostVolumeMountConfigurations takes the user's requested volume mounts,
// volumes, and the client host volume configuration and converts them into a
// format that can be used by drivers.
func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeMount, taskVolumesByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) ([]*drivers.MountConfig, error) {
var mounts []*drivers.MountConfig
for _, m := range taskMounts {
req, ok := taskVolumesByAlias[m.Volume]
if !ok {
// Should never happen unless we misvalidated on job submission
return nil, fmt.Errorf("No group volume declaration found named: %s", m.Volume)
}
cfg, err := structs.ParseHostVolumeConfig(req.Config)
if err != nil {
return nil, fmt.Errorf("failed to parse config for %s: %v", m.Volume, err)
}
hostVolume, ok := clientVolumesByName[cfg.Source]
if !ok {
// Should never happen unless the client volumes were mutated during
// the execution of this hook.
return nil, fmt.Errorf("No host volume named: %s", cfg.Source)
}
mcfg := &drivers.MountConfig{
HostPath: hostVolume.Path,
TaskPath: m.Destination,
Readonly: hostVolume.ReadOnly || req.ReadOnly || m.ReadOnly,
}
mounts = append(mounts, mcfg)
}
return mounts, nil
}
func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
volumes := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes
mounts := h.runner.hookResources.getMounts()
hostVolumes := h.runner.clientConfig.Node.HostVolumes
// Always validate volumes to ensure that we do not allow volumes to be used
// if a host is restarted and loses the host volume configuration.
if err := validateHostVolumes(volumes, hostVolumes); err != nil {
h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes)
return fmt.Errorf("host volume validation error: %v", err)
}
requestedMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes)
if err != nil {
h.logger.Error("Failed to generate volume mounts", "error", err)
return err
}
// Because this hook is also run on restores, we only add mounts that do not
// already exist. Although this loop is somewhat expensive, most individual
// tasks have only a small number of mounts. We may want to revisit this with
// a `hookdata` param so mounts are only added once.
REQUESTED:
for _, m := range requestedMounts {
for _, em := range mounts {
if em.IsEqual(m) {
continue REQUESTED
}
}
mounts = append(mounts, m)
}
h.runner.hookResources.setMounts(mounts)
return nil
}
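
The REQUESTED-labeled loop above is the whole deduplication strategy: a requested mount is skipped as soon as an equal existing mount is found. Below is a standalone sketch of that control flow, using a local mount type rather than the real drivers.MountConfig.

package main

import "fmt"

// mount is a simplified stand-in for drivers.MountConfig.
type mount struct {
    HostPath string
    TaskPath string
    Readonly bool
}

func (m mount) isEqual(o mount) bool { return m == o }

// mergeMounts appends only the requested mounts that are not already present,
// mirroring the REQUESTED-labeled loop in the volume hook above.
func mergeMounts(existing, requested []mount) []mount {
REQUESTED:
    for _, r := range requested {
        for _, e := range existing {
            if e.isEqual(r) {
                continue REQUESTED
            }
        }
        existing = append(existing, r)
    }
    return existing
}

func main() {
    existing := []mount{{HostPath: "/srv/data", TaskPath: "/data"}}
    requested := []mount{
        {HostPath: "/srv/data", TaskPath: "/data"},                 // duplicate, skipped
        {HostPath: "/srv/logs", TaskPath: "/logs", Readonly: true}, // new, added
    }
    fmt.Println(mergeMounts(existing, requested))
}

The labeled continue avoids a helper set while keeping the quadratic scan obvious, which is acceptable given the small number of mounts per task.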

View File

@ -14,11 +14,11 @@ import (
"sync" "sync"
"time" "time"
"github.com/armon/go-metrics" metrics "github.com/armon/go-metrics"
consulapi "github.com/hashicorp/consul/api" consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-hclog" hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror" multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner" "github.com/hashicorp/nomad/client/allocrunner"
"github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/allocrunner/interfaces"
@ -92,6 +92,14 @@ const (
// allocSyncRetryIntv is the interval on which we retry updating // allocSyncRetryIntv is the interval on which we retry updating
// the status of the allocation // the status of the allocation
allocSyncRetryIntv = 5 * time.Second allocSyncRetryIntv = 5 * time.Second
// defaultConnectSidecarImage is the image set in the node meta by default
// to be used by Consul Connect sidecar tasks
defaultConnectSidecarImage = "envoyproxy/envoy:v1.11.1"
// defaultConnectLogLevel is the log level set in the node meta by default
// to be used by Consul Connect sidecar tasks
defaultConnectLogLevel = "info"
) )
var ( var (
@ -131,6 +139,7 @@ type AllocRunner interface {
ShutdownCh() <-chan struct{} ShutdownCh() <-chan struct{}
Signal(taskName, signal string) error Signal(taskName, signal string) error
GetTaskEventHandler(taskName string) drivermanager.EventHandler GetTaskEventHandler(taskName string) drivermanager.EventHandler
PersistState() error
RestartTask(taskName string, taskEvent *structs.TaskEvent) error RestartTask(taskName string, taskEvent *structs.TaskEvent) error
RestartAll(taskEvent *structs.TaskEvent) error RestartAll(taskEvent *structs.TaskEvent) error
@ -997,6 +1006,15 @@ func (c *Client) restoreState() error {
// Load each alloc back // Load each alloc back
for _, alloc := range allocs { for _, alloc := range allocs {
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
// See hasLocalState for details. Skip suspicious allocs for
// now; if they should be running, they will be started when the
// client gets allocs from the servers.
if !c.hasLocalState(alloc) {
c.logger.Warn("found a alloc without any local state, skipping restore", "alloc_id", alloc.ID)
continue
}
//XXX On Restore we give up on watching previous allocs because //XXX On Restore we give up on watching previous allocs because
// we need the local AllocRunners initialized first. We could // we need the local AllocRunners initialized first. We could
// add a second loop to initialize just the alloc watcher. // add a second loop to initialize just the alloc watcher.
@ -1053,6 +1071,42 @@ func (c *Client) restoreState() error {
return nil return nil
} }
// hasLocalState returns true if the client has any state associated with the
// alloc beyond the allocation itself
//
// Useful for detecting whether a potentially completed alloc got resurrected
// after its AllocRunner was destroyed. In such cases, re-running the alloc
// leads to unexpected reruns and may exhaust processes and tasks on the node.
//
// The heuristic used here is that an alloc is suspect if we find no task
// runner state or task status information for it.
//
// Also, an alloc without any client state will not be restored correctly;
// there will be no task processes to reattach to, etc. In such cases, the
// client should wait until it gets allocs from the servers to launch them.
//
// See:
// * https://github.com/hashicorp/nomad/pull/6207
// * https://github.com/hashicorp/nomad/issues/5984
//
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
func (c *Client) hasLocalState(alloc *structs.Allocation) bool {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
if tg == nil {
// corrupt alloc?!
return false
}
for _, task := range tg.Tasks {
ls, tr, _ := c.stateDB.GetTaskRunnerState(alloc.ID, task.Name)
if ls != nil || tr != nil {
return true
}
}
return false
}
func (c *Client) handleInvalidAllocs(alloc *structs.Allocation, err error) { func (c *Client) handleInvalidAllocs(alloc *structs.Allocation, err error) {
c.invalidAllocsLock.Lock() c.invalidAllocsLock.Lock()
c.invalidAllocs[alloc.ID] = struct{}{} c.invalidAllocs[alloc.ID] = struct{}{}
@ -1076,7 +1130,7 @@ func (c *Client) saveState() error {
for id, ar := range runners { for id, ar := range runners {
go func(id string, ar AllocRunner) { go func(id string, ar AllocRunner) {
err := c.stateDB.PutAllocation(ar.Alloc()) err := ar.PersistState()
if err != nil { if err != nil {
c.logger.Error("error saving alloc state", "error", err, "alloc_id", id) c.logger.Error("error saving alloc state", "error", err, "alloc_id", id)
l.Lock() l.Lock()
@ -1225,10 +1279,29 @@ func (c *Client) setupNode() error {
if node.Name == "" { if node.Name == "" {
node.Name, _ = os.Hostname() node.Name, _ = os.Hostname()
} }
// TODO(dani): Fingerprint these to handle volumes that don't exist/have bad perms.
if node.HostVolumes == nil {
if l := len(c.config.HostVolumes); l != 0 {
node.HostVolumes = make(map[string]*structs.ClientHostVolumeConfig, l)
for k, v := range c.config.HostVolumes {
node.HostVolumes[k] = v.Copy()
}
}
}
if node.Name == "" { if node.Name == "" {
node.Name = node.ID node.Name = node.ID
} }
node.Status = structs.NodeStatusInit node.Status = structs.NodeStatusInit
// Setup default meta
if _, ok := node.Meta["connect.sidecar_image"]; !ok {
node.Meta["connect.sidecar_image"] = defaultConnectSidecarImage
}
if _, ok := node.Meta["connect.log_level"]; !ok {
node.Meta["connect.log_level"] = defaultConnectLogLevel
}
return nil return nil
} }

View File

@ -10,10 +10,12 @@ import (
"testing" "testing"
"time" "time"
"github.com/hashicorp/go-memdb" memdb "github.com/hashicorp/go-memdb"
trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state"
"github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/config"
consulApi "github.com/hashicorp/nomad/client/consul" consulApi "github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/client/fingerprint" "github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper/pluginutils/catalog" "github.com/hashicorp/nomad/helper/pluginutils/catalog"
"github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/testlog"
@ -27,7 +29,7 @@ import (
"github.com/hashicorp/nomad/testutil" "github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/hashicorp/go-hclog" hclog "github.com/hashicorp/go-hclog"
cstate "github.com/hashicorp/nomad/client/state" cstate "github.com/hashicorp/nomad/client/state"
ctestutil "github.com/hashicorp/nomad/client/testutil" ctestutil "github.com/hashicorp/nomad/client/testutil"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -1644,3 +1646,44 @@ func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) {
assert.EqualValues(t, n, un) assert.EqualValues(t, n, un)
} }
} }
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
func TestClient_hasLocalState(t *testing.T) {
t.Parallel()
c, cleanup := TestClient(t, nil)
defer cleanup()
c.stateDB = state.NewMemDB(c.logger)
t.Run("plain alloc", func(t *testing.T) {
alloc := mock.BatchAlloc()
c.stateDB.PutAllocation(alloc)
require.False(t, c.hasLocalState(alloc))
})
t.Run("alloc with a task with local state", func(t *testing.T) {
alloc := mock.BatchAlloc()
taskName := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0].Name
ls := &trstate.LocalState{}
c.stateDB.PutAllocation(alloc)
c.stateDB.PutTaskRunnerLocalState(alloc.ID, taskName, ls)
require.True(t, c.hasLocalState(alloc))
})
t.Run("alloc with a task with task state", func(t *testing.T) {
alloc := mock.BatchAlloc()
taskName := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0].Name
ts := &structs.TaskState{
State: structs.TaskStateRunning,
}
c.stateDB.PutAllocation(alloc)
c.stateDB.PutTaskState(alloc.ID, taskName, ts)
require.True(t, c.hasLocalState(alloc))
})
}

View File

@ -201,6 +201,9 @@ type Config struct {
// DisableRemoteExec disables remote exec targeting tasks on this client // DisableRemoteExec disables remote exec targeting tasks on this client
DisableRemoteExec bool DisableRemoteExec bool
// TemplateConfig includes configuration for template rendering
TemplateConfig *ClientTemplateConfig
// BackwardsCompatibleMetrics determines whether to show methods of // BackwardsCompatibleMetrics determines whether to show methods of
// displaying metrics for older versions, or to only show the new format // displaying metrics for older versions, or to only show the new format
BackwardsCompatibleMetrics bool BackwardsCompatibleMetrics bool
@ -221,6 +224,38 @@ type Config struct {
// StateDBFactory is used to override stateDB implementations, // StateDBFactory is used to override stateDB implementations,
StateDBFactory state.NewStateDBFunc StateDBFactory state.NewStateDBFunc
// CNIPath is the path used to search for CNI plugins. Multiple paths can
// be specified, delimited by colons.
CNIPath string
// BridgeNetworkName is the name to use for the bridge created in bridge
// networking mode. This defaults to 'nomad' if not set
BridgeNetworkName string
// BridgeNetworkAllocSubnet is the IP subnet to use for address allocation
// for allocations in bridge networking mode. Subnet must be in CIDR
// notation
BridgeNetworkAllocSubnet string
// HostVolumes is a map of the configured host volumes by name.
HostVolumes map[string]*structs.ClientHostVolumeConfig
}
type ClientTemplateConfig struct {
// FunctionBlacklist disables functions in consul-template that are unsafe
// because they expose information from the client host.
FunctionBlacklist []string
// DisableSandbox allows templates to access arbitrary files on the client
// host. By default templates can access files only within the task directory.
DisableSandbox bool
}
func (c *ClientTemplateConfig) Copy() *ClientTemplateConfig {
if c == nil {
return nil
}
nc := new(ClientTemplateConfig)
*nc = *c
nc.FunctionBlacklist = helper.CopySliceString(nc.FunctionBlacklist)
return nc
} }
func (c *Config) Copy() *Config { func (c *Config) Copy() *Config {
@ -229,30 +264,36 @@ func (c *Config) Copy() *Config {
nc.Node = nc.Node.Copy() nc.Node = nc.Node.Copy()
nc.Servers = helper.CopySliceString(nc.Servers) nc.Servers = helper.CopySliceString(nc.Servers)
nc.Options = helper.CopyMapStringString(nc.Options) nc.Options = helper.CopyMapStringString(nc.Options)
nc.HostVolumes = structs.CopyMapStringClientHostVolumeConfig(nc.HostVolumes)
nc.ConsulConfig = c.ConsulConfig.Copy() nc.ConsulConfig = c.ConsulConfig.Copy()
nc.VaultConfig = c.VaultConfig.Copy() nc.VaultConfig = c.VaultConfig.Copy()
nc.TemplateConfig = c.TemplateConfig.Copy()
return nc return nc
} }
// DefaultConfig returns the default configuration // DefaultConfig returns the default configuration
func DefaultConfig() *Config { func DefaultConfig() *Config {
return &Config{ return &Config{
Version: version.GetVersion(), Version: version.GetVersion(),
VaultConfig: config.DefaultVaultConfig(), VaultConfig: config.DefaultVaultConfig(),
ConsulConfig: config.DefaultConsulConfig(), ConsulConfig: config.DefaultConsulConfig(),
LogOutput: os.Stderr, LogOutput: os.Stderr,
Region: "global", Region: "global",
StatsCollectionInterval: 1 * time.Second, StatsCollectionInterval: 1 * time.Second,
TLSConfig: &config.TLSConfig{}, TLSConfig: &config.TLSConfig{},
LogLevel: "DEBUG", LogLevel: "DEBUG",
GCInterval: 1 * time.Minute, GCInterval: 1 * time.Minute,
GCParallelDestroys: 2, GCParallelDestroys: 2,
GCDiskUsageThreshold: 80, GCDiskUsageThreshold: 80,
GCInodeUsageThreshold: 70, GCInodeUsageThreshold: 70,
GCMaxAllocs: 50, GCMaxAllocs: 50,
NoHostUUID: true, NoHostUUID: true,
DisableTaggedMetrics: false, DisableTaggedMetrics: false,
DisableRemoteExec: false, DisableRemoteExec: false,
TemplateConfig: &ClientTemplateConfig{
FunctionBlacklist: []string{"plugin"},
DisableSandbox: false,
},
BackwardsCompatibleMetrics: false, BackwardsCompatibleMetrics: false,
RPCHoldTimeout: 5 * time.Second, RPCHoldTimeout: 5 * time.Second,
} }
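
The Copy method above follows the codebase's usual deep-copy pattern: a nil receiver copies to nil, value fields are copied by assignment, and reference fields such as the FunctionBlacklist slice are duplicated so the clone cannot alias the original. A standalone sketch of the same pattern (the helper.CopySliceString call is replaced here with a plain append):

package main

import "fmt"

type templateConfig struct {
    FunctionBlacklist []string
    DisableSandbox    bool
}

// Copy returns a deep copy: nil stays nil, value fields are copied by
// assignment, and the slice is duplicated so mutations don't alias.
func (c *templateConfig) Copy() *templateConfig {
    if c == nil {
        return nil
    }
    nc := new(templateConfig)
    *nc = *c
    nc.FunctionBlacklist = append([]string(nil), c.FunctionBlacklist...)
    return nc
}

func main() {
    orig := &templateConfig{FunctionBlacklist: []string{"plugin"}}
    clone := orig.Copy()
    clone.FunctionBlacklist[0] = "env"
    fmt.Println(orig.FunctionBlacklist, clone.FunctionBlacklist) // [plugin] [env]
}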

View File

@ -2,11 +2,15 @@ package consul
import ( import (
"github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/nomad/structs"
) )
// ConsulServiceAPI is the interface the Nomad Client uses to register and // ConsulServiceAPI is the interface the Nomad Client uses to register and
// remove services and checks from Consul. // remove services and checks from Consul.
type ConsulServiceAPI interface { type ConsulServiceAPI interface {
RegisterGroup(*structs.Allocation) error
RemoveGroup(*structs.Allocation) error
UpdateGroup(oldAlloc, newAlloc *structs.Allocation) error
RegisterTask(*consul.TaskServices) error RegisterTask(*consul.TaskServices) error
RemoveTask(*consul.TaskServices) RemoveTask(*consul.TaskServices)
UpdateTask(old, newTask *consul.TaskServices) error UpdateTask(old, newTask *consul.TaskServices) error

View File

@ -7,6 +7,7 @@ import (
log "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/nomad/structs"
testing "github.com/mitchellh/go-testing-interface" testing "github.com/mitchellh/go-testing-interface"
) )
@ -14,17 +15,20 @@ import (
type MockConsulOp struct { type MockConsulOp struct {
Op string // add, remove, or update Op string // add, remove, or update
AllocID string AllocID string
Task string Name string // task or group name
} }
func NewMockConsulOp(op, allocID, task string) MockConsulOp { func NewMockConsulOp(op, allocID, name string) MockConsulOp {
if op != "add" && op != "remove" && op != "update" && op != "alloc_registrations" { switch op {
case "add", "remove", "update", "alloc_registrations",
"add_group", "remove_group", "update_group":
default:
panic(fmt.Errorf("invalid consul op: %s", op)) panic(fmt.Errorf("invalid consul op: %s", op))
} }
return MockConsulOp{ return MockConsulOp{
Op: op, Op: op,
AllocID: allocID, AllocID: allocID,
Task: task, Name: name,
} }
} }
@ -50,6 +54,33 @@ func NewMockConsulServiceClient(t testing.T, logger log.Logger) *MockConsulServi
return &m return &m
} }
func (m *MockConsulServiceClient) RegisterGroup(alloc *structs.Allocation) error {
m.mu.Lock()
defer m.mu.Unlock()
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
m.logger.Trace("RegisterGroup", "alloc_id", alloc.ID, "num_services", len(tg.Services))
m.ops = append(m.ops, NewMockConsulOp("add_group", alloc.ID, alloc.TaskGroup))
return nil
}
func (m *MockConsulServiceClient) UpdateGroup(_, alloc *structs.Allocation) error {
m.mu.Lock()
defer m.mu.Unlock()
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
m.logger.Trace("UpdateGroup", "alloc_id", alloc.ID, "num_services", len(tg.Services))
m.ops = append(m.ops, NewMockConsulOp("update_group", alloc.ID, alloc.TaskGroup))
return nil
}
func (m *MockConsulServiceClient) RemoveGroup(alloc *structs.Allocation) error {
m.mu.Lock()
defer m.mu.Unlock()
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
m.logger.Trace("RemoveGroup", "alloc_id", alloc.ID, "num_services", len(tg.Services))
m.ops = append(m.ops, NewMockConsulOp("remove_group", alloc.ID, alloc.TaskGroup))
return nil
}
func (m *MockConsulServiceClient) UpdateTask(old, newSvcs *consul.TaskServices) error { func (m *MockConsulServiceClient) UpdateTask(old, newSvcs *consul.TaskServices) error {
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() defer m.mu.Unlock()

View File

@ -118,6 +118,7 @@ func New(c *Config) *manager {
loader: c.Loader, loader: c.Loader,
pluginConfig: c.PluginConfig, pluginConfig: c.PluginConfig,
updater: c.Updater, updater: c.Updater,
statsInterval: c.StatsInterval,
instances: make(map[loader.PluginID]*instanceManager), instances: make(map[loader.PluginID]*instanceManager),
reattachConfigs: make(map[loader.PluginID]*pstructs.ReattachConfig), reattachConfigs: make(map[loader.PluginID]*pstructs.ReattachConfig),
fingerprintResCh: make(chan struct{}, 1), fingerprintResCh: make(chan struct{}, 1),

View File

@ -0,0 +1,154 @@
// Copyright 2018 CNI authors
// Copyright 2019 HashiCorp
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The functions in this file are derived from:
// https://github.com/containernetworking/plugins/blob/0950a3607bf5e8a57c6a655c7e573e6aab0dc650/pkg/testutils/netns_linux.go
package nsutil
import (
"fmt"
"os"
"path"
"runtime"
"strings"
"sync"
"github.com/containernetworking/plugins/pkg/ns"
"golang.org/x/sys/unix"
)
// NetNSRunDir is the directory in which new network namespaces are bind mounted
const NetNSRunDir = "/var/run/netns"
// NewNS creates a new persistent (bind-mounted) network namespace and returns
// an object representing that namespace, without switching to it.
func NewNS(nsName string) (ns.NetNS, error) {
// Create the directory for mounting network namespaces.
// This needs to be a shared mountpoint in case it is mounted into
// other namespaces (containers).
err := os.MkdirAll(NetNSRunDir, 0755)
if err != nil {
return nil, err
}
// Remount the namespace directory shared. This will fail if it is not
// already a mountpoint, so bind-mount it on to itself to "upgrade" it
// to a mountpoint.
err = unix.Mount("", NetNSRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
if err != nil {
if err != unix.EINVAL {
return nil, fmt.Errorf("mount --make-rshared %s failed: %q", NetNSRunDir, err)
}
// Recursively remount /var/run/netns on itself. The recursive flag is
// so that any existing netns bindmounts are carried over.
err = unix.Mount(NetNSRunDir, NetNSRunDir, "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return nil, fmt.Errorf("mount --rbind %s %s failed: %q", NetNSRunDir, NetNSRunDir, err)
}
// Now we can make it shared
err = unix.Mount("", NetNSRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
if err != nil {
return nil, fmt.Errorf("mount --make-rshared %s failed: %q", NetNSRunDir, err)
}
}
// create an empty file at the mount point
nsPath := path.Join(NetNSRunDir, nsName)
mountPointFd, err := os.Create(nsPath)
if err != nil {
return nil, err
}
mountPointFd.Close()
// Ensure the mount point is cleaned up on errors; if the namespace
// was successfully mounted this will have no effect because the file
// is in-use
defer os.RemoveAll(nsPath)
var wg sync.WaitGroup
wg.Add(1)
// do namespace work in a dedicated goroutine, so that we can safely
// Lock/Unlock OSThread without upsetting the lock/unlock state of
// the caller of this function
go (func() {
defer wg.Done()
runtime.LockOSThread()
// Don't unlock. By not unlocking, golang will kill the OS thread when the
// goroutine is done (for go1.10+)
var origNS ns.NetNS
origNS, err = ns.GetNS(getCurrentThreadNetNSPath())
if err != nil {
err = fmt.Errorf("failed to get the current netns: %v", err)
return
}
defer origNS.Close()
// create a new netns on the current thread
err = unix.Unshare(unix.CLONE_NEWNET)
if err != nil {
err = fmt.Errorf("error from unshare: %v", err)
return
}
// Put this thread back to the orig ns, since it might get reused (pre go1.10)
defer origNS.Set()
// bind mount the netns from the current thread (from /proc) onto the
// mount point. This causes the namespace to persist, even when there
// are no threads in the ns.
err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
if err != nil {
err = fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err)
}
})()
wg.Wait()
if err != nil {
return nil, fmt.Errorf("failed to create namespace: %v", err)
}
return ns.GetNS(nsPath)
}
// UnmountNS unmounts the NS held by the netns object
func UnmountNS(nsPath string) error {
// Only unmount if it's been bind-mounted (don't touch namespaces in /proc...)
if strings.HasPrefix(nsPath, NetNSRunDir) {
if err := unix.Unmount(nsPath, 0); err != nil {
return fmt.Errorf("failed to unmount NS: at %s: %v", nsPath, err)
}
if err := os.Remove(nsPath); err != nil {
return fmt.Errorf("failed to remove ns path %s: %v", nsPath, err)
}
}
return nil
}
// getCurrentThreadNetNSPath copied from pkg/ns
func getCurrentThreadNetNSPath() string {
// /proc/self/ns/net returns the namespace of the main thread, not
// of whatever thread this goroutine is running on. Make sure we
// use the thread's net namespace since the thread is switching around
return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
}
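
A sketch of how the package above might be used, assuming it is imported as github.com/hashicorp/nomad/client/lib/nsutil (this diff only shows the package name, so the import path is an assumption). NewNS returns a containernetworking ns.NetNS handle; UnmountNS tears the persistent bind mount down. Requires Linux and root.

package main

import (
    "fmt"
    "log"

    // The import path is an assumption; the diff only shows "package nsutil".
    "github.com/hashicorp/nomad/client/lib/nsutil"
)

func main() {
    // NewNS bind-mounts a fresh network namespace under /var/run/netns,
    // so this needs Linux and root privileges.
    netns, err := nsutil.NewNS("example-ns")
    if err != nil {
        log.Fatalf("failed to create netns: %v", err)
    }
    path := netns.Path()
    fmt.Println("created network namespace at", path)

    // Tear down: close our handle, then remove the persistent bind mount.
    if err := netns.Close(); err != nil {
        log.Fatalf("failed to close netns handle: %v", err)
    }
    if err := nsutil.UnmountNS(path); err != nil {
        log.Fatalf("failed to unmount netns: %v", err)
    }
}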

View File

@ -204,10 +204,8 @@ func newLogRotatorWrapper(path string, logger hclog.Logger, rotator io.WriteClos
var openFn func() (io.ReadCloser, error) var openFn func() (io.ReadCloser, error)
var err error var err error
//FIXME Revert #5990 and check os.IsNotExist once Go >= 1.12 is the
// release compiler.
_, serr := os.Stat(path) _, serr := os.Stat(path)
if serr != nil { if os.IsNotExist(serr) {
openFn, err = fifo.CreateAndRead(path) openFn, err = fifo.CreateAndRead(path)
} else { } else {
openFn = func() (io.ReadCloser, error) { openFn = func() (io.ReadCloser, error) {
@ -216,7 +214,7 @@ func newLogRotatorWrapper(path string, logger hclog.Logger, rotator io.WriteClos
} }
if err != nil { if err != nil {
logger.Error("Failed to create FIFO", "stat_error", serr, "create_err", err) logger.Error("failed to create FIFO", "stat_error", serr, "create_err", err)
return nil, fmt.Errorf("failed to create fifo for extracting logs: %v", err) return nil, fmt.Errorf("failed to create fifo for extracting logs: %v", err)
} }

View File

@ -57,6 +57,9 @@ const (
// Datacenter is the environment variable for passing the datacenter in which the alloc is running. // Datacenter is the environment variable for passing the datacenter in which the alloc is running.
Datacenter = "NOMAD_DC" Datacenter = "NOMAD_DC"
// Namespace is the environment variable for passing the namespace in which the alloc is running.
Namespace = "NOMAD_NAMESPACE"
// Region is the environment variable for passing the region in which the alloc is running. // Region is the environment variable for passing the region in which the alloc is running.
Region = "NOMAD_REGION" Region = "NOMAD_REGION"
@ -83,6 +86,9 @@ const (
// MetaPrefix is the prefix for passing task meta data. // MetaPrefix is the prefix for passing task meta data.
MetaPrefix = "NOMAD_META_" MetaPrefix = "NOMAD_META_"
// UpstreamPrefix is the prefix for passing upstream IP and ports to the alloc
UpstreamPrefix = "NOMAD_UPSTREAM_"
// VaultToken is the environment variable for passing the Vault token // VaultToken is the environment variable for passing the Vault token
VaultToken = "VAULT_TOKEN" VaultToken = "VAULT_TOKEN"
@ -303,6 +309,7 @@ type Builder struct {
taskName string taskName string
allocIndex int allocIndex int
datacenter string datacenter string
namespace string
region string region string
allocId string allocId string
allocName string allocName string
@ -338,6 +345,9 @@ type Builder struct {
// environment variables without having to hardcode the name of the hook. // environment variables without having to hardcode the name of the hook.
deviceHookName string deviceHookName string
// upstreams from the group connect enabled services
upstreams []structs.ConsulUpstream
mu *sync.RWMutex mu *sync.RWMutex
} }
@ -407,6 +417,9 @@ func (b *Builder) Build() *TaskEnv {
if b.datacenter != "" { if b.datacenter != "" {
envMap[Datacenter] = b.datacenter envMap[Datacenter] = b.datacenter
} }
if b.namespace != "" {
envMap[Namespace] = b.namespace
}
if b.region != "" { if b.region != "" {
envMap[Region] = b.region envMap[Region] = b.region
@ -422,6 +435,9 @@ func (b *Builder) Build() *TaskEnv {
envMap[k] = v envMap[k] = v
} }
// Build the Consul Connect upstream env vars
buildUpstreamsEnv(envMap, b.upstreams)
// Build the Vault Token // Build the Vault Token
if b.injectVaultToken && b.vaultToken != "" { if b.injectVaultToken && b.vaultToken != "" {
envMap[VaultToken] = b.vaultToken envMap[VaultToken] = b.vaultToken
@ -559,6 +575,7 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
b.groupName = alloc.TaskGroup b.groupName = alloc.TaskGroup
b.allocIndex = int(alloc.Index()) b.allocIndex = int(alloc.Index())
b.jobName = alloc.Job.Name b.jobName = alloc.Job.Name
b.namespace = alloc.Namespace
// Set meta // Set meta
combined := alloc.Job.CombinedTaskMeta(alloc.TaskGroup, b.taskName) combined := alloc.Job.CombinedTaskMeta(alloc.TaskGroup, b.taskName)
@ -584,8 +601,10 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = v b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = v
} }
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
// COMPAT(0.11): Remove in 0.11 // COMPAT(0.11): Remove in 0.11
b.otherPorts = make(map[string]string, len(alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks)*2) b.otherPorts = make(map[string]string, len(tg.Tasks)*2)
if alloc.AllocatedResources != nil { if alloc.AllocatedResources != nil {
// Populate task resources // Populate task resources
if tr, ok := alloc.AllocatedResources.Tasks[b.taskName]; ok { if tr, ok := alloc.AllocatedResources.Tasks[b.taskName]; ok {
@ -640,6 +659,17 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
} }
} }
} }
upstreams := []structs.ConsulUpstream{}
for _, svc := range tg.Services {
if svc.Connect.HasSidecar() && svc.Connect.SidecarService.HasUpstreams() {
upstreams = append(upstreams, svc.Connect.SidecarService.Proxy.Upstreams...)
}
}
if len(upstreams) > 0 {
b.SetUpstreams(upstreams)
}
return b return b
} }
@ -730,6 +760,32 @@ func buildPortEnv(envMap map[string]string, p structs.Port, ip string, driverNet
} }
} }
// SetUpstreams sets the upstreams defined by Connect-enabled group services
func (b *Builder) SetUpstreams(upstreams []structs.ConsulUpstream) *Builder {
b.mu.Lock()
b.upstreams = upstreams
b.mu.Unlock()
return b
}
// buildUpstreamsEnv builds NOMAD_UPSTREAM_{IP,PORT,ADDR}_{destination} vars
func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstream) {
// Proxy sidecars always bind to localhost
const ip = "127.0.0.1"
for _, u := range upstreams {
port := strconv.Itoa(u.LocalBindPort)
envMap[UpstreamPrefix+"IP_"+u.DestinationName] = ip
envMap[UpstreamPrefix+"PORT_"+u.DestinationName] = port
envMap[UpstreamPrefix+"ADDR_"+u.DestinationName] = net.JoinHostPort(ip, port)
// Also add cleaned version
cleanName := helper.CleanEnvVar(u.DestinationName, '_')
envMap[UpstreamPrefix+"ADDR_"+cleanName] = net.JoinHostPort(ip, port)
envMap[UpstreamPrefix+"IP_"+cleanName] = ip
envMap[UpstreamPrefix+"PORT_"+cleanName] = port
}
}
// SetHostEnvvars adds the host environment variables to the tasks. The filter // SetHostEnvvars adds the host environment variables to the tasks. The filter
// parameter can be use to filter host environment from entering the tasks. // parameter can be use to filter host environment from entering the tasks.
func (b *Builder) SetHostEnvvars(filter []string) *Builder { func (b *Builder) SetHostEnvvars(filter []string) *Builder {
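
For reference, a standalone reproduction of the NOMAD_UPSTREAM_* naming scheme built by buildUpstreamsEnv above. cleanEnvVar is a simplified stand-in for helper.CleanEnvVar, and the upstream type stands in for structs.ConsulUpstream.

package main

import (
    "fmt"
    "net"
    "strconv"
    "strings"
)

// upstream is a simplified stand-in for structs.ConsulUpstream.
type upstream struct {
    DestinationName string
    LocalBindPort   int
}

// cleanEnvVar is a simplified stand-in for helper.CleanEnvVar: anything that
// is not alphanumeric or '_' is replaced with the given byte.
func cleanEnvVar(s string, r byte) string {
    return strings.Map(func(c rune) rune {
        if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '_' {
            return c
        }
        return rune(r)
    }, s)
}

// buildUpstreamsEnv mirrors the function above: proxy sidecars bind to
// localhost, and both the raw and the cleaned destination names get IP,
// PORT, and ADDR variables.
func buildUpstreamsEnv(envMap map[string]string, upstreams []upstream) {
    const ip = "127.0.0.1"
    for _, u := range upstreams {
        port := strconv.Itoa(u.LocalBindPort)
        for _, name := range []string{u.DestinationName, cleanEnvVar(u.DestinationName, '_')} {
            envMap["NOMAD_UPSTREAM_IP_"+name] = ip
            envMap["NOMAD_UPSTREAM_PORT_"+name] = port
            envMap["NOMAD_UPSTREAM_ADDR_"+name] = net.JoinHostPort(ip, port)
        }
    }
}

func main() {
    env := map[string]string{}
    buildUpstreamsEnv(env, []upstream{{DestinationName: "foo-bar", LocalBindPort: 1234}})
    fmt.Println(env["NOMAD_UPSTREAM_ADDR_foo_bar"], env["NOMAD_UPSTREAM_PORT_foo-bar"])
}

Running it prints 127.0.0.1:1234 for NOMAD_UPSTREAM_ADDR_foo_bar and 1234 for NOMAD_UPSTREAM_PORT_foo-bar, matching the assertions in the TestEnvironment_Upstreams test added below.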

View File

@ -161,6 +161,7 @@ func TestEnvironment_AsList(t *testing.T) {
}, },
}, },
} }
a.Namespace = "not-default"
task := a.Job.TaskGroups[0].Tasks[0] task := a.Job.TaskGroups[0].Tasks[0]
task.Env = map[string]string{ task.Env = map[string]string{
"taskEnvKey": "taskEnvVal", "taskEnvKey": "taskEnvVal",
@ -190,6 +191,7 @@ func TestEnvironment_AsList(t *testing.T) {
"NOMAD_PORT_ssh_ssh=22", "NOMAD_PORT_ssh_ssh=22",
"NOMAD_CPU_LIMIT=500", "NOMAD_CPU_LIMIT=500",
"NOMAD_DC=dc1", "NOMAD_DC=dc1",
"NOMAD_NAMESPACE=not-default",
"NOMAD_REGION=global", "NOMAD_REGION=global",
"NOMAD_MEMORY_LIMIT=256", "NOMAD_MEMORY_LIMIT=256",
"NOMAD_META_ELB_CHECK_INTERVAL=30s", "NOMAD_META_ELB_CHECK_INTERVAL=30s",
@ -301,6 +303,7 @@ func TestEnvironment_AsList_Old(t *testing.T) {
"NOMAD_PORT_ssh_ssh=22", "NOMAD_PORT_ssh_ssh=22",
"NOMAD_CPU_LIMIT=500", "NOMAD_CPU_LIMIT=500",
"NOMAD_DC=dc1", "NOMAD_DC=dc1",
"NOMAD_NAMESPACE=default",
"NOMAD_REGION=global", "NOMAD_REGION=global",
"NOMAD_MEMORY_LIMIT=256", "NOMAD_MEMORY_LIMIT=256",
"NOMAD_META_ELB_CHECK_INTERVAL=30s", "NOMAD_META_ELB_CHECK_INTERVAL=30s",
@ -418,6 +421,7 @@ func TestEnvironment_AllValues(t *testing.T) {
"NOMAD_PORT_ssh_ssh": "22", "NOMAD_PORT_ssh_ssh": "22",
"NOMAD_CPU_LIMIT": "500", "NOMAD_CPU_LIMIT": "500",
"NOMAD_DC": "dc1", "NOMAD_DC": "dc1",
"NOMAD_NAMESPACE": "default",
"NOMAD_REGION": "global", "NOMAD_REGION": "global",
"NOMAD_MEMORY_LIMIT": "256", "NOMAD_MEMORY_LIMIT": "256",
"NOMAD_META_ELB_CHECK_INTERVAL": "30s", "NOMAD_META_ELB_CHECK_INTERVAL": "30s",
@ -728,3 +732,55 @@ func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) {
require.Equal("metaopt1val", env.ReplaceEnv("${NOMAD_META_metaopt1}")) require.Equal("metaopt1val", env.ReplaceEnv("${NOMAD_META_metaopt1}"))
require.Empty(env.ReplaceEnv("${NOMAD_META_metaopt2}")) require.Empty(env.ReplaceEnv("${NOMAD_META_metaopt2}"))
} }
// TestEnvironment_Upstreams asserts that group.service.upstreams entries are
// added to the environment.
func TestEnvironment_Upstreams(t *testing.T) {
t.Parallel()
// Add some upstreams to the mock alloc
a := mock.Alloc()
tg := a.Job.LookupTaskGroup(a.TaskGroup)
tg.Services = []*structs.Service{
// Services without Connect should be ignored
{
Name: "ignoreme",
},
// All upstreams from a service should be added
{
Name: "remote_service",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{
Proxy: &structs.ConsulProxy{
Upstreams: []structs.ConsulUpstream{
{
DestinationName: "foo-bar",
LocalBindPort: 1234,
},
{
DestinationName: "bar",
LocalBindPort: 5678,
},
},
},
},
},
},
}
// Ensure the upstreams can be interpolated
tg.Tasks[0].Env = map[string]string{
"foo": "${NOMAD_UPSTREAM_ADDR_foo_bar}",
"bar": "${NOMAD_UPSTREAM_PORT_foo-bar}",
}
env := NewBuilder(mock.Node(), a, tg.Tasks[0], "global").Build().Map()
require.Equal(t, "127.0.0.1:1234", env["NOMAD_UPSTREAM_ADDR_foo_bar"])
require.Equal(t, "127.0.0.1", env["NOMAD_UPSTREAM_IP_foo_bar"])
require.Equal(t, "1234", env["NOMAD_UPSTREAM_PORT_foo_bar"])
require.Equal(t, "127.0.0.1:5678", env["NOMAD_UPSTREAM_ADDR_bar"])
require.Equal(t, "127.0.0.1", env["NOMAD_UPSTREAM_IP_bar"])
require.Equal(t, "5678", env["NOMAD_UPSTREAM_PORT_bar"])
require.Equal(t, "127.0.0.1:1234", env["foo"])
require.Equal(t, "1234", env["bar"])
}

View File

@ -17,6 +17,14 @@ func RequireRoot(t *testing.T) {
} }
} }
// RequireConsul skips tests unless a Consul binary is available on $PATH.
func RequireConsul(t *testing.T) {
_, err := exec.Command("consul", "version").CombinedOutput()
if err != nil {
t.Skipf("Test requires Consul: %v", err)
}
}
func ExecCompatible(t *testing.T) { func ExecCompatible(t *testing.T) {
if runtime.GOOS != "linux" || syscall.Geteuid() != 0 { if runtime.GOOS != "linux" || syscall.Geteuid() != 0 {
t.Skip("Test only available running as root on linux") t.Skip("Test only available running as root on linux")

View File

@ -275,6 +275,13 @@ func convertServerConfig(agentConfig *Config) (*nomad.Config, error) {
} }
conf.NodeGCThreshold = dur conf.NodeGCThreshold = dur
} }
if gcInterval := agentConfig.Server.JobGCInterval; gcInterval != "" {
dur, err := time.ParseDuration(gcInterval)
if err != nil {
return nil, err
}
conf.JobGCInterval = dur
}
if gcThreshold := agentConfig.Server.JobGCThreshold; gcThreshold != "" { if gcThreshold := agentConfig.Server.JobGCThreshold; gcThreshold != "" {
dur, err := time.ParseDuration(gcThreshold) dur, err := time.ParseDuration(gcThreshold)
if err != nil { if err != nil {
@ -461,6 +468,14 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
conf.ClientMaxPort = uint(agentConfig.Client.ClientMaxPort) conf.ClientMaxPort = uint(agentConfig.Client.ClientMaxPort)
conf.ClientMinPort = uint(agentConfig.Client.ClientMinPort) conf.ClientMinPort = uint(agentConfig.Client.ClientMinPort)
conf.DisableRemoteExec = agentConfig.Client.DisableRemoteExec conf.DisableRemoteExec = agentConfig.Client.DisableRemoteExec
conf.TemplateConfig.FunctionBlacklist = agentConfig.Client.TemplateConfig.FunctionBlacklist
conf.TemplateConfig.DisableSandbox = agentConfig.Client.TemplateConfig.DisableSandbox
hvMap := make(map[string]*structs.ClientHostVolumeConfig, len(agentConfig.Client.HostVolumes))
for _, v := range agentConfig.Client.HostVolumes {
hvMap[v.Name] = v
}
conf.HostVolumes = hvMap
// Setup the node // Setup the node
conf.Node = new(structs.Node) conf.Node = new(structs.Node)
@ -531,6 +546,11 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
conf.ACLTokenTTL = agentConfig.ACL.TokenTTL conf.ACLTokenTTL = agentConfig.ACL.TokenTTL
conf.ACLPolicyTTL = agentConfig.ACL.PolicyTTL conf.ACLPolicyTTL = agentConfig.ACL.PolicyTTL
// Setup networking configuration
conf.CNIPath = agentConfig.Client.CNIPath
conf.BridgeNetworkName = agentConfig.Client.BridgeNetworkName
conf.BridgeNetworkAllocSubnet = agentConfig.Client.BridgeNetworkSubnet
return conf, nil return conf, nil
} }

View File

@ -413,7 +413,7 @@ func TestAgent_HTTPCheckPath(t *testing.T) {
t.Parallel() t.Parallel()
// Agent.agentHTTPCheck only needs a config and logger // Agent.agentHTTPCheck only needs a config and logger
a := &Agent{ a := &Agent{
config: DevConfig(), config: DevConfig(nil),
logger: testlog.HCLogger(t), logger: testlog.HCLogger(t),
} }
if err := a.config.normalizeAddrs(); err != nil { if err := a.config.normalizeAddrs(); err != nil {

File diff suppressed because one or more lines are too long

View File

@ -56,7 +56,7 @@ type Command struct {
} }
func (c *Command) readConfig() *Config { func (c *Command) readConfig() *Config {
var dev bool var dev *devModeConfig
var configPath []string var configPath []string
var servers string var servers string
var meta []string var meta []string
@ -77,7 +77,10 @@ func (c *Command) readConfig() *Config {
flags.Usage = func() { c.Ui.Error(c.Help()) } flags.Usage = func() { c.Ui.Error(c.Help()) }
// Role options // Role options
flags.BoolVar(&dev, "dev", false, "") flags.Var((flaghelper.FuncOptionalStringVar)(func(s string) (err error) {
dev, err = newDevModeConfig(s)
return err
}), "dev", "")
flags.BoolVar(&cmdConfig.Server.Enabled, "server", false, "") flags.BoolVar(&cmdConfig.Server.Enabled, "server", false, "")
flags.BoolVar(&cmdConfig.Client.Enabled, "client", false, "") flags.BoolVar(&cmdConfig.Client.Enabled, "client", false, "")
@ -204,8 +207,8 @@ func (c *Command) readConfig() *Config {
// Load the configuration // Load the configuration
var config *Config var config *Config
if dev { if dev != nil {
config = DevConfig() config = DevConfig(dev)
} else { } else {
config = DefaultConfig() config = DefaultConfig()
} }
@ -1164,7 +1167,13 @@ General Options (clients and servers):
Start the agent in development mode. This enables a pre-configured Start the agent in development mode. This enables a pre-configured
dual-role agent (client + server) which is useful for developing dual-role agent (client + server) which is useful for developing
or testing Nomad. No other configuration is required to start the or testing Nomad. No other configuration is required to start the
agent in this mode. agent in this mode, but you may pass an optional comma-separated
list of mode configurations:
-dev=connect
Start the agent in development mode, but bind to a public network
interface rather than localhost for using Consul Connect. This
mode is supported only on Linux as root.
Server Options: Server Options:
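
The -dev flag above accepts both a bare -dev and an optional value such as -dev=connect, which is why it is wired through flaghelper.FuncOptionalStringVar rather than flags.BoolVar. A minimal way to get the same behavior with only the standard library is a custom flag.Value whose IsBoolFlag method returns true, as sketched below; this illustrates the mechanism and is not the real flaghelper implementation.

package main

import (
    "flag"
    "fmt"
)

// optionalString is a flag.Value that accepts both "-dev" (Set is called with
// "true") and "-dev=connect" (Set is called with the given value).
type optionalString struct {
    set   bool
    value string
}

func (o *optionalString) String() string { return o.value }

func (o *optionalString) Set(s string) error {
    o.set = true
    o.value = s
    return nil
}

// IsBoolFlag tells the flag package that a bare "-dev" is allowed and should
// be treated as "-dev=true".
func (o *optionalString) IsBoolFlag() bool { return true }

func main() {
    var dev optionalString
    fs := flag.NewFlagSet("agent", flag.ExitOnError)
    fs.Var(&dev, "dev", "start the agent in development mode")

    // Simulates `nomad agent -dev=connect`; a bare `-dev` would yield "true".
    fs.Parse([]string{"-dev=connect"})
    fmt.Printf("set=%v value=%q\n", dev.set, dev.value)
}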

View File

@ -7,6 +7,7 @@ import (
"io" "io"
"net" "net"
"os" "os"
"os/user"
"path/filepath" "path/filepath"
"runtime" "runtime"
"sort" "sort"
@ -14,6 +15,7 @@ import (
"strings" "strings"
"time" "time"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/go-sockaddr/template" "github.com/hashicorp/go-sockaddr/template"
client "github.com/hashicorp/nomad/client/config" client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper"
@ -242,11 +244,45 @@ type ClientConfig struct {
// DisableRemoteExec disables remote exec targeting tasks on this client // DisableRemoteExec disables remote exec targeting tasks on this client
DisableRemoteExec bool `hcl:"disable_remote_exec"` DisableRemoteExec bool `hcl:"disable_remote_exec"`
// TemplateConfig includes configuration for template rendering
TemplateConfig *ClientTemplateConfig `hcl:"template"`
// ServerJoin contains information that is used to attempt to join servers // ServerJoin contains information that is used to attempt to join servers
ServerJoin *ServerJoin `hcl:"server_join"` ServerJoin *ServerJoin `hcl:"server_join"`
// HostVolumes contains information about the volumes an operator has made
// available to jobs running on this node.
HostVolumes []*structs.ClientHostVolumeConfig `hcl:"host_volume"`
// ExtraKeysHCL is used by hcl to surface unexpected keys // ExtraKeysHCL is used by hcl to surface unexpected keys
ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
// CNIPath is the path to search for CNI plugins. Multiple paths can be
// specified, delimited by colons.
CNIPath string `hcl:"cni_path"`
// BridgeNetworkName is the name of the bridge to create when using the
// bridge network mode
BridgeNetworkName string `hcl:"bridge_network_name"`
// BridgeNetworkSubnet is the subnet to allocate IP addresses from when
// creating allocations with bridge networking mode. This range is local to
// the host
BridgeNetworkSubnet string `hcl:"bridge_network_subnet"`
}
// ClientTemplateConfig is configuration on the client specific to template
// rendering
type ClientTemplateConfig struct {
// FunctionBlacklist disables functions in consul-template that
// are unsafe because they expose information from the client host.
FunctionBlacklist []string `hcl:"function_blacklist"`
// DisableSandbox allows templates to access arbitrary files on the
// client host. By default templates can access files only within
// the task directory.
DisableSandbox bool `hcl:"disable_file_sandbox"`
} }
// ACLConfig is configuration specific to the ACL system // ACLConfig is configuration specific to the ACL system
@ -315,6 +351,10 @@ type ServerConfig struct {
// can be used to filter by age. // can be used to filter by age.
NodeGCThreshold string `hcl:"node_gc_threshold"` NodeGCThreshold string `hcl:"node_gc_threshold"`
// JobGCInterval controls how often we dispatch a core job that garbage
// collects jobs eligible for collection.
JobGCInterval string `hcl:"job_gc_interval"`
// JobGCThreshold controls how "old" a job must be to be collected by GC. // JobGCThreshold controls how "old" a job must be to be collected by GC.
// Age is not the only requirement for a Job to be GCed but the threshold // Age is not the only requirement for a Job to be GCed but the threshold
// can be used to filter by age. // can be used to filter by age.
@ -630,22 +670,101 @@ func (r *Resources) CanParseReserved() error {
return err return err
} }
// devModeConfig holds the config for the -dev flag
type devModeConfig struct {
// mode flags are set at the command line via -dev=<mode>
defaultMode bool
connectMode bool
bindAddr string
iface string
}
// newDevModeConfig parses the optional string value of the -dev flag
func newDevModeConfig(s string) (*devModeConfig, error) {
if s == "" {
return nil, nil // no -dev flag
}
mode := &devModeConfig{}
modeFlags := strings.Split(s, ",")
for _, modeFlag := range modeFlags {
switch modeFlag {
case "true": // -dev flag with no params
mode.defaultMode = true
case "connect":
if runtime.GOOS != "linux" {
// strictly speaking -dev=connect only binds to the
// non-localhost interface, but given its purpose
// is to support a feature with network namespaces
// we'll return an error here rather than let the agent
// come up and fail unexpectedly to run jobs
return nil, fmt.Errorf("-dev=connect is only supported on linux.")
}
u, err := user.Current()
if err != nil {
return nil, fmt.Errorf(
"-dev=connect uses network namespaces and is only supported for root: %v", err)
}
if u.Uid != "0" {
return nil, fmt.Errorf(
"-dev=connect uses network namespaces and is only supported for root.")
}
mode.connectMode = true
default:
return nil, fmt.Errorf("invalid -dev flag: %q", s)
}
}
err := mode.networkConfig()
if err != nil {
return nil, err
}
return mode, nil
}
func (mode *devModeConfig) networkConfig() error {
if runtime.GOOS == "darwin" {
mode.bindAddr = "127.0.0.1"
mode.iface = "lo0"
return nil
}
if mode != nil && mode.connectMode {
// if we hit either of the errors here we're in a weird situation
// where syscalls to get the list of network interfaces are failing.
// rather than throwing errors, we'll fall back to the default.
ifAddrs, err := sockaddr.GetDefaultInterfaces()
errMsg := "-dev=connect uses network namespaces: %v"
if err != nil {
return fmt.Errorf(errMsg, err)
}
if len(ifAddrs) < 1 {
return fmt.Errorf(errMsg, "could not find public network interface")
}
iface := ifAddrs[0].Name
mode.iface = iface
mode.bindAddr = "0.0.0.0" // allows CLI to "just work"
return nil
}
mode.bindAddr = "127.0.0.1"
mode.iface = "lo"
return nil
}
// DevConfig is a Config that is used for dev mode of Nomad. // DevConfig is a Config that is used for dev mode of Nomad.
func DevConfig() *Config { func DevConfig(mode *devModeConfig) *Config {
if mode == nil {
mode = &devModeConfig{defaultMode: true}
mode.networkConfig()
}
conf := DefaultConfig() conf := DefaultConfig()
conf.BindAddr = "127.0.0.1" conf.BindAddr = mode.bindAddr
conf.LogLevel = "DEBUG" conf.LogLevel = "DEBUG"
conf.Client.Enabled = true conf.Client.Enabled = true
conf.Server.Enabled = true conf.Server.Enabled = true
conf.DevMode = true conf.DevMode = mode != nil
conf.EnableDebug = true conf.EnableDebug = true
conf.DisableAnonymousSignature = true conf.DisableAnonymousSignature = true
conf.Consul.AutoAdvertise = helper.BoolToPtr(true) conf.Consul.AutoAdvertise = helper.BoolToPtr(true)
if runtime.GOOS == "darwin" { conf.Client.NetworkInterface = mode.iface
conf.Client.NetworkInterface = "lo0"
} else if runtime.GOOS == "linux" {
conf.Client.NetworkInterface = "lo"
}
conf.Client.Options = map[string]string{ conf.Client.Options = map[string]string{
"driver.raw_exec.enable": "true", "driver.raw_exec.enable": "true",
"driver.docker.volumes": "true", "driver.docker.volumes": "true",
@ -654,6 +773,10 @@ func DevConfig() *Config {
conf.Client.GCDiskUsageThreshold = 99 conf.Client.GCDiskUsageThreshold = 99
conf.Client.GCInodeUsageThreshold = 99 conf.Client.GCInodeUsageThreshold = 99
conf.Client.GCMaxAllocs = 50 conf.Client.GCMaxAllocs = 50
conf.Client.TemplateConfig = &ClientTemplateConfig{
FunctionBlacklist: []string{"plugin"},
DisableSandbox: false,
}
conf.Telemetry.PrometheusMetrics = true conf.Telemetry.PrometheusMetrics = true
conf.Telemetry.PublishAllocationMetrics = true conf.Telemetry.PublishAllocationMetrics = true
conf.Telemetry.PublishNodeMetrics = true conf.Telemetry.PublishNodeMetrics = true
@ -695,6 +818,10 @@ func DefaultConfig() *Config {
RetryInterval: 30 * time.Second, RetryInterval: 30 * time.Second,
RetryMaxAttempts: 0, RetryMaxAttempts: 0,
}, },
TemplateConfig: &ClientTemplateConfig{
FunctionBlacklist: []string{"plugin"},
DisableSandbox: false,
},
}, },
Server: &ServerConfig{ Server: &ServerConfig{
Enabled: false, Enabled: false,
@ -1133,6 +1260,9 @@ func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
if b.NodeGCThreshold != "" { if b.NodeGCThreshold != "" {
result.NodeGCThreshold = b.NodeGCThreshold result.NodeGCThreshold = b.NodeGCThreshold
} }
if b.JobGCInterval != "" {
result.JobGCInterval = b.JobGCInterval
}
if b.JobGCThreshold != "" { if b.JobGCThreshold != "" {
result.JobGCThreshold = b.JobGCThreshold result.JobGCThreshold = b.JobGCThreshold
} }
@ -1271,6 +1401,10 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result.DisableRemoteExec = b.DisableRemoteExec result.DisableRemoteExec = b.DisableRemoteExec
} }
if b.TemplateConfig != nil {
result.TemplateConfig = b.TemplateConfig
}
// Add the servers // Add the servers
result.Servers = append(result.Servers, b.Servers...) result.Servers = append(result.Servers, b.Servers...)
@ -1302,6 +1436,12 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result.ServerJoin = result.ServerJoin.Merge(b.ServerJoin) result.ServerJoin = result.ServerJoin.Merge(b.ServerJoin)
} }
if len(a.HostVolumes) == 0 && len(b.HostVolumes) != 0 {
result.HostVolumes = structs.CopySliceClientHostVolumeConfig(b.HostVolumes)
} else if len(b.HostVolumes) != 0 {
result.HostVolumes = structs.HostVolumeSliceMerge(a.HostVolumes, b.HostVolumes)
}
return &result return &result
} }

View File

@ -138,6 +138,12 @@ func extraKeys(c *Config) error {
// stats is an unused key, continue to silently ignore it // stats is an unused key, continue to silently ignore it
removeEqualFold(&c.Client.ExtraKeysHCL, "stats") removeEqualFold(&c.Client.ExtraKeysHCL, "stats")
// Remove HostVolume extra keys
for _, hv := range c.Client.HostVolumes {
removeEqualFold(&c.Client.ExtraKeysHCL, hv.Name)
removeEqualFold(&c.Client.ExtraKeysHCL, "host_volume")
}
for _, k := range []string{"enabled_schedulers", "start_join", "retry_join", "server_join"} { for _, k := range []string{"enabled_schedulers", "start_join", "retry_join", "server_join"} {
removeEqualFold(&c.ExtraKeysHCL, k) removeEqualFold(&c.ExtraKeysHCL, k)
removeEqualFold(&c.ExtraKeysHCL, "server") removeEqualFold(&c.ExtraKeysHCL, "server")

View File

@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -81,6 +82,9 @@ var basicConfig = &Config{
GCMaxAllocs: 50, GCMaxAllocs: 50,
NoHostUUID: helper.BoolToPtr(false), NoHostUUID: helper.BoolToPtr(false),
DisableRemoteExec: true, DisableRemoteExec: true,
HostVolumes: []*structs.ClientHostVolumeConfig{
{Name: "tmp", Path: "/tmp"},
},
}, },
Server: &ServerConfig{ Server: &ServerConfig{
Enabled: true, Enabled: true,
@ -93,6 +97,7 @@ var basicConfig = &Config{
EnabledSchedulers: []string{"test"}, EnabledSchedulers: []string{"test"},
NodeGCThreshold: "12h", NodeGCThreshold: "12h",
EvalGCThreshold: "12h", EvalGCThreshold: "12h",
JobGCInterval: "3m",
JobGCThreshold: "12h", JobGCThreshold: "12h",
DeploymentGCThreshold: "12h", DeploymentGCThreshold: "12h",
HeartbeatGrace: 30 * time.Second, HeartbeatGrace: 30 * time.Second,

View File

@ -7,10 +7,13 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"runtime"
"strings"
"testing" "testing"
"time" "time"
"github.com/hashicorp/consul/lib/freeport" "github.com/hashicorp/consul/lib/freeport"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/nomad/structs/config"
@ -94,6 +97,10 @@ func TestConfig_Merge(t *testing.T) {
MaxKillTimeout: "20s", MaxKillTimeout: "20s",
ClientMaxPort: 19996, ClientMaxPort: 19996,
DisableRemoteExec: false, DisableRemoteExec: false,
TemplateConfig: &ClientTemplateConfig{
FunctionBlacklist: []string{"plugin"},
DisableSandbox: false,
},
Reserved: &Resources{ Reserved: &Resources{
CPU: 10, CPU: 10,
MemoryMB: 10, MemoryMB: 10,
@ -253,6 +260,10 @@ func TestConfig_Merge(t *testing.T) {
MemoryMB: 105, MemoryMB: 105,
MaxKillTimeout: "50s", MaxKillTimeout: "50s",
DisableRemoteExec: false, DisableRemoteExec: false,
TemplateConfig: &ClientTemplateConfig{
FunctionBlacklist: []string{"plugin"},
DisableSandbox: false,
},
Reserved: &Resources{ Reserved: &Resources{
CPU: 15, CPU: 15,
MemoryMB: 15, MemoryMB: 15,
@ -612,6 +623,61 @@ func TestConfig_Listener(t *testing.T) {
} }
} }
func TestConfig_DevModeFlag(t *testing.T) {
cases := []struct {
flag string
expected *devModeConfig
expectedErr string
}{}
if runtime.GOOS != "linux" {
cases = []struct {
flag string
expected *devModeConfig
expectedErr string
}{
{"", nil, ""},
{"true", &devModeConfig{defaultMode: true, connectMode: false}, ""},
{"true,connect", nil, "-dev=connect is only supported on linux"},
{"connect", nil, "-dev=connect is only supported on linux"},
{"xxx", nil, "invalid -dev flag"},
}
}
if runtime.GOOS == "linux" {
testutil.RequireRoot(t)
cases = []struct {
flag string
expected *devModeConfig
expectedErr string
}{
{"", nil, ""},
{"true", &devModeConfig{defaultMode: true, connectMode: false}, ""},
{"true,connect", &devModeConfig{defaultMode: true, connectMode: true}, ""},
{"connect", &devModeConfig{defaultMode: false, connectMode: true}, ""},
{"xxx", nil, "invalid -dev flag"},
}
}
for _, c := range cases {
t.Run(c.flag, func(t *testing.T) {
mode, err := newDevModeConfig(c.flag)
if err != nil && c.expectedErr == "" {
t.Fatalf("unexpected error: %v", err)
}
if err != nil && !strings.Contains(err.Error(), c.expectedErr) {
t.Fatalf("expected %s; got %v", c.expectedErr, err)
}
if mode == nil && c.expected != nil {
t.Fatalf("expected %+v but got nil", c.expected)
}
if mode != nil {
if c.expected.defaultMode != mode.defaultMode ||
c.expected.connectMode != mode.connectMode {
t.Fatalf("expected %+v, got %+v", c.expected, mode)
}
}
})
}
}
// TestConfig_normalizeAddrs_DevMode asserts that normalizeAddrs allows // TestConfig_normalizeAddrs_DevMode asserts that normalizeAddrs allows
// advertising localhost in dev mode. // advertising localhost in dev mode.
func TestConfig_normalizeAddrs_DevMode(t *testing.T) { func TestConfig_normalizeAddrs_DevMode(t *testing.T) {

View File

@ -694,7 +694,7 @@ func (c *ServiceClient) serviceRegs(ops *operations, service *structs.Service, t
*ServiceRegistration, error) { *ServiceRegistration, error) {
// Get the services ID // Get the services ID
id := makeTaskServiceID(task.AllocID, task.Name, service, task.Canary) id := MakeTaskServiceID(task.AllocID, task.Name, service, task.Canary)
sreg := &ServiceRegistration{ sreg := &ServiceRegistration{
serviceID: id, serviceID: id,
checkIDs: make(map[string]struct{}, len(service.Checks)), checkIDs: make(map[string]struct{}, len(service.Checks)),
@ -722,6 +722,20 @@ func (c *ServiceClient) serviceRegs(ops *operations, service *structs.Service, t
copy(tags, service.Tags) copy(tags, service.Tags)
} }
// newConnect returns (nil, nil) if there's no Connect-enabled service.
connect, err := newConnect(service.Name, service.Connect, task.Networks)
if err != nil {
return nil, fmt.Errorf("invalid Consul Connect configuration for service %q: %v", service.Name, err)
}
meta := make(map[string]string, len(service.Meta))
for k, v := range service.Meta {
meta[k] = v
}
// This enables the Consul UI to show that Nomad registered this service
meta["external-source"] = "nomad"
// Build the Consul Service registration request // Build the Consul Service registration request
serviceReg := &api.AgentServiceRegistration{ serviceReg := &api.AgentServiceRegistration{
ID: id, ID: id,
@ -729,10 +743,8 @@ func (c *ServiceClient) serviceRegs(ops *operations, service *structs.Service, t
Tags: tags, Tags: tags,
Address: ip, Address: ip,
Port: port, Port: port,
// This enables the consul UI to show that Nomad registered this service Meta: meta,
Meta: map[string]string{ Connect: connect, // will be nil if no Connect stanza
"external-source": "nomad",
},
} }
ops.regServices = append(ops.regServices, serviceReg) ops.regServices = append(ops.regServices, serviceReg)
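// Illustrative shape of the registration built above, using assumed example
// values for a Connect-enabled service "web" on task "web" listening on
// 10.0.0.1:8080:
//
//	&api.AgentServiceRegistration{
//		ID:      "_nomad-task-<alloc-id>-web-web-http",
//		Name:    "web",
//		Address: "10.0.0.1",
//		Port:    8080,
//		Meta:    map[string]string{"external-source": "nomad"},
//		Connect: connect, // nil unless the service carries a connect stanza
//	}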
@ -807,6 +819,117 @@ func (c *ServiceClient) checkRegs(ops *operations, serviceID string, service *st
return checkIDs, nil return checkIDs, nil
} }
//TODO(schmichael) remove
type noopRestarter struct{}
func (noopRestarter) Restart(context.Context, *structs.TaskEvent, bool) error { return nil }
// makeAllocTaskServices creates a TaskServices struct for a group service.
//
//TODO(schmichael) rename TaskServices and refactor this into a New method
func makeAllocTaskServices(alloc *structs.Allocation, tg *structs.TaskGroup) (*TaskServices, error) {
if n := len(alloc.AllocatedResources.Shared.Networks); n == 0 {
return nil, fmt.Errorf("unable to register a group service without a group network")
}
//TODO(schmichael) only support one network for now
net := alloc.AllocatedResources.Shared.Networks[0]
ts := &TaskServices{
AllocID: alloc.ID,
Name: "group-" + alloc.TaskGroup,
Services: tg.Services,
Networks: alloc.AllocatedResources.Shared.Networks,
//TODO(schmichael) there's probably a better way than hacking driver network
DriverNetwork: &drivers.DriverNetwork{
AutoAdvertise: true,
IP: net.IP,
// Copy PortLabels from group network
PortMap: net.PortLabels(),
},
// unsupported for group services
Restarter: noopRestarter{},
DriverExec: nil,
}
if alloc.DeploymentStatus != nil {
ts.Canary = alloc.DeploymentStatus.Canary
}
return ts, nil
}
// RegisterGroup services with Consul. Adds all task group-level service
// entries and checks to Consul.
func (c *ServiceClient) RegisterGroup(alloc *structs.Allocation) error {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
if tg == nil {
return fmt.Errorf("task group %q not in allocation", alloc.TaskGroup)
}
if len(tg.Services) == 0 {
// noop
return nil
}
ts, err := makeAllocTaskServices(alloc, tg)
if err != nil {
return err
}
return c.RegisterTask(ts)
}
// UpdateGroup services with Consul. Updates all task group-level service
// entries and checks to Consul.
func (c *ServiceClient) UpdateGroup(oldAlloc, newAlloc *structs.Allocation) error {
oldTG := oldAlloc.Job.LookupTaskGroup(oldAlloc.TaskGroup)
if oldTG == nil {
return fmt.Errorf("task group %q not in old allocation", oldAlloc.TaskGroup)
}
oldServices, err := makeAllocTaskServices(oldAlloc, oldTG)
if err != nil {
return err
}
newTG := newAlloc.Job.LookupTaskGroup(newAlloc.TaskGroup)
if newTG == nil {
return fmt.Errorf("task group %q not in new allocation", newAlloc.TaskGroup)
}
newServices, err := makeAllocTaskServices(newAlloc, newTG)
if err != nil {
return err
}
return c.UpdateTask(oldServices, newServices)
}
// RemoveGroup services with Consul. Removes all task group-level service
// entries and checks from Consul.
func (c *ServiceClient) RemoveGroup(alloc *structs.Allocation) error {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
if tg == nil {
return fmt.Errorf("task group %q not in allocation", alloc.TaskGroup)
}
if len(tg.Services) == 0 {
// noop
return nil
}
ts, err := makeAllocTaskServices(alloc, tg)
if err != nil {
return err
}
c.RemoveTask(ts)
return nil
}
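// Taken together, RegisterGroup, UpdateGroup and RemoveGroup reuse the
// existing task-level RegisterTask/UpdateTask/RemoveTask paths by
// synthesizing a TaskServices named "group-<taskgroup>", so group services
// flow through the same Consul operations as task services.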
// RegisterTask with Consul. Adds all service entries and checks to Consul. If // RegisterTask with Consul. Adds all service entries and checks to Consul. If
// exec is nil and a script check exists an error is returned. // exec is nil and a script check exists an error is returned.
// //
@ -841,7 +964,7 @@ func (c *ServiceClient) RegisterTask(task *TaskServices) error {
// Start watching checks. Done after service registrations are built // Start watching checks. Done after service registrations are built
// since an error building them could leak watches. // since an error building them could leak watches.
for _, service := range task.Services { for _, service := range task.Services {
serviceID := makeTaskServiceID(task.AllocID, task.Name, service, task.Canary) serviceID := MakeTaskServiceID(task.AllocID, task.Name, service, task.Canary)
for _, check := range service.Checks { for _, check := range service.Checks {
if check.TriggersRestarts() { if check.TriggersRestarts() {
checkID := makeCheckID(serviceID, check) checkID := makeCheckID(serviceID, check)
@ -864,11 +987,11 @@ func (c *ServiceClient) UpdateTask(old, newTask *TaskServices) error {
existingIDs := make(map[string]*structs.Service, len(old.Services)) existingIDs := make(map[string]*structs.Service, len(old.Services))
for _, s := range old.Services { for _, s := range old.Services {
existingIDs[makeTaskServiceID(old.AllocID, old.Name, s, old.Canary)] = s existingIDs[MakeTaskServiceID(old.AllocID, old.Name, s, old.Canary)] = s
} }
newIDs := make(map[string]*structs.Service, len(newTask.Services)) newIDs := make(map[string]*structs.Service, len(newTask.Services))
for _, s := range newTask.Services { for _, s := range newTask.Services {
newIDs[makeTaskServiceID(newTask.AllocID, newTask.Name, s, newTask.Canary)] = s newIDs[MakeTaskServiceID(newTask.AllocID, newTask.Name, s, newTask.Canary)] = s
} }
// Loop over existing Service IDs to see if they have been removed // Loop over existing Service IDs to see if they have been removed
@ -965,7 +1088,7 @@ func (c *ServiceClient) UpdateTask(old, newTask *TaskServices) error {
// Start watching checks. Done after service registrations are built // Start watching checks. Done after service registrations are built
// since an error building them could leak watches. // since an error building them could leak watches.
for _, service := range newIDs { for _, service := range newIDs {
serviceID := makeTaskServiceID(newTask.AllocID, newTask.Name, service, newTask.Canary) serviceID := MakeTaskServiceID(newTask.AllocID, newTask.Name, service, newTask.Canary)
for _, check := range service.Checks { for _, check := range service.Checks {
if check.TriggersRestarts() { if check.TriggersRestarts() {
checkID := makeCheckID(serviceID, check) checkID := makeCheckID(serviceID, check)
@ -983,7 +1106,7 @@ func (c *ServiceClient) RemoveTask(task *TaskServices) {
ops := operations{} ops := operations{}
for _, service := range task.Services { for _, service := range task.Services {
id := makeTaskServiceID(task.AllocID, task.Name, service, task.Canary) id := MakeTaskServiceID(task.AllocID, task.Name, service, task.Canary)
ops.deregServices = append(ops.deregServices, id) ops.deregServices = append(ops.deregServices, id)
for _, check := range service.Checks { for _, check := range service.Checks {
@ -1144,11 +1267,11 @@ func makeAgentServiceID(role string, service *structs.Service) string {
return fmt.Sprintf("%s-%s-%s", nomadServicePrefix, role, service.Hash(role, "", false)) return fmt.Sprintf("%s-%s-%s", nomadServicePrefix, role, service.Hash(role, "", false))
} }
// makeTaskServiceID creates a unique ID for identifying a task service in // MakeTaskServiceID creates a unique ID for identifying a task service in
// Consul. // Consul.
// //
// Example Service ID: _nomad-task-b4e61df9-b095-d64e-f241-23860da1375f-redis-http-http // Example Service ID: _nomad-task-b4e61df9-b095-d64e-f241-23860da1375f-redis-http-http
func makeTaskServiceID(allocID, taskName string, service *structs.Service, canary bool) string { func MakeTaskServiceID(allocID, taskName string, service *structs.Service, canary bool) string {
return fmt.Sprintf("%s%s-%s-%s-%s", nomadTaskPrefix, allocID, taskName, service.Name, service.PortLabel) return fmt.Sprintf("%s%s-%s-%s-%s", nomadTaskPrefix, allocID, taskName, service.Name, service.PortLabel)
} }
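// Illustrative call with assumed identifiers (alloc ID, task "web", service
// "redis" on port label "db"):
//
//	id := MakeTaskServiceID("b4e61df9-b095-d64e-f241-23860da1375f", "web",
//		&structs.Service{Name: "redis", PortLabel: "db"}, false)
//	// id == "_nomad-task-b4e61df9-b095-d64e-f241-23860da1375f-web-redis-db"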
@ -1314,3 +1437,81 @@ func getAddress(addrMode, portLabel string, networks structs.Networks, driverNet
return "", 0, fmt.Errorf("invalid address mode %q", addrMode) return "", 0, fmt.Errorf("invalid address mode %q", addrMode)
} }
} }
// newConnect creates a new Consul AgentServiceConnect struct based on a Nomad
// Connect struct. If the nomad Connect struct is nil, nil will be returned to
// disable Connect for this service.
func newConnect(serviceName string, nc *structs.ConsulConnect, networks structs.Networks) (*api.AgentServiceConnect, error) {
if nc == nil {
// No Connect stanza, returning nil is fine
return nil, nil
}
cc := &api.AgentServiceConnect{
Native: nc.Native,
}
if nc.SidecarService == nil {
return cc, nil
}
net, port, err := getConnectPort(serviceName, networks)
if err != nil {
return nil, err
}
// Bind to netns IP(s):port
proxyConfig := map[string]interface{}{}
if nc.SidecarService.Proxy != nil && nc.SidecarService.Proxy.Config != nil {
proxyConfig = nc.SidecarService.Proxy.Config
}
proxyConfig["bind_address"] = "0.0.0.0"
proxyConfig["bind_port"] = port.To
// Advertise host IP:port
cc.SidecarService = &api.AgentServiceRegistration{
Address: net.IP,
Port: port.Value,
// Automatically configure the proxy to bind to all addresses
// within the netns.
Proxy: &api.AgentServiceConnectProxyConfig{
Config: proxyConfig,
},
}
// If no further proxy settings were explicitly configured, exit early
if nc.SidecarService.Proxy == nil {
return cc, nil
}
numUpstreams := len(nc.SidecarService.Proxy.Upstreams)
if numUpstreams == 0 {
return cc, nil
}
upstreams := make([]api.Upstream, numUpstreams)
for i, nu := range nc.SidecarService.Proxy.Upstreams {
upstreams[i].DestinationName = nu.DestinationName
upstreams[i].LocalBindPort = nu.LocalBindPort
}
cc.SidecarService.Proxy.Upstreams = upstreams
return cc, nil
}
// getConnectPort returns the network and port for the Connect proxy sidecar
// defined for this service. An error is returned if the network and port
// cannot be determined.
func getConnectPort(serviceName string, networks structs.Networks) (*structs.NetworkResource, structs.Port, error) {
if n := len(networks); n != 1 {
return nil, structs.Port{}, fmt.Errorf("Connect only supported with exactly 1 network (found %d)", n)
}
port, ok := networks[0].PortForService(serviceName)
if !ok {
return nil, structs.Port{}, fmt.Errorf("No Connect port defined for service %q", serviceName)
}
return networks[0], port, nil
}
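// Illustrative sketch with assumed values; the port label follows the
// connect-proxy-<service> convention used by the group network plumbing in
// this change:
//
//	networks := structs.Networks{{
//		Mode:         "bridge",
//		IP:           "10.0.0.1",
//		DynamicPorts: []structs.Port{{Label: "connect-proxy-web", Value: 9999, To: 9998}},
//	}}
//	cc, err := newConnect("web", &structs.ConsulConnect{
//		SidecarService: &structs.ConsulSidecarService{},
//	}, networks)
//	// On success, cc.SidecarService advertises 10.0.0.1:9999 while its proxy
//	// config binds to 0.0.0.0:9998 inside the network namespace.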

View File

@ -0,0 +1,99 @@
package consul
import (
"io/ioutil"
"testing"
"time"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)
func TestConsul_Connect(t *testing.T) {
// Create an embedded Consul server
testconsul, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testconsul.HTTPAddr
consulClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
serviceClient := NewServiceClient(consulClient.Agent(), testlog.HCLogger(t), true)
go serviceClient.Run()
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
{
Mode: "bridge",
IP: "10.0.0.1",
DynamicPorts: []structs.Port{
{
Label: "connect-proxy-testconnect",
Value: 9999,
To: 9998,
},
},
},
}
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
tg.Services = []*structs.Service{
{
Name: "testconnect",
PortLabel: "9999",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
},
},
}
require.NoError(t, serviceClient.RegisterGroup(alloc))
require.Eventually(t, func() bool {
services, err := consulClient.Agent().Services()
require.NoError(t, err)
return len(services) == 2
}, 3*time.Second, 100*time.Millisecond)
services, err := consulClient.Agent().Services()
require.NoError(t, err)
require.Len(t, services, 2)
serviceID := MakeTaskServiceID(alloc.ID, "group-"+alloc.TaskGroup, tg.Services[0], false)
connectID := serviceID + "-sidecar-proxy"
require.Contains(t, services, serviceID)
agentService := services[serviceID]
require.Equal(t, agentService.Service, "testconnect")
require.Equal(t, agentService.Address, "10.0.0.1")
require.Equal(t, agentService.Port, 9999)
require.Nil(t, agentService.Connect)
require.Nil(t, agentService.Proxy)
require.Contains(t, services, connectID)
connectService := services[connectID]
require.Equal(t, connectService.Service, "testconnect-sidecar-proxy")
require.Equal(t, connectService.Address, "10.0.0.1")
require.Equal(t, connectService.Port, 9999)
require.Nil(t, connectService.Connect)
require.Equal(t, connectService.Proxy.DestinationServiceName, "testconnect")
require.Equal(t, connectService.Proxy.DestinationServiceID, serviceID)
require.Equal(t, connectService.Proxy.LocalServiceAddress, "127.0.0.1")
require.Equal(t, connectService.Proxy.LocalServicePort, 9999)
require.Equal(t, connectService.Proxy.Config, map[string]interface{}{
"bind_address": "0.0.0.0",
"bind_port": float64(9998),
})
}

View File

@ -1710,7 +1710,7 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) {
}, },
}, },
} }
remainingTaskServiceID := makeTaskServiceID(remainingTask.AllocID, remainingTaskServiceID := MakeTaskServiceID(remainingTask.AllocID,
remainingTask.Name, remainingTask.Services[0], false) remainingTask.Name, remainingTask.Services[0], false)
require.NoError(ctx.ServiceClient.RegisterTask(remainingTask)) require.NoError(ctx.ServiceClient.RegisterTask(remainingTask))
@ -1733,7 +1733,7 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) {
}, },
}, },
} }
explicitlyRemovedTaskServiceID := makeTaskServiceID(explicitlyRemovedTask.AllocID, explicitlyRemovedTaskServiceID := MakeTaskServiceID(explicitlyRemovedTask.AllocID,
explicitlyRemovedTask.Name, explicitlyRemovedTask.Services[0], false) explicitlyRemovedTask.Name, explicitlyRemovedTask.Services[0], false)
require.NoError(ctx.ServiceClient.RegisterTask(explicitlyRemovedTask)) require.NoError(ctx.ServiceClient.RegisterTask(explicitlyRemovedTask))
@ -1758,7 +1758,7 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) {
}, },
}, },
} }
outofbandTaskServiceID := makeTaskServiceID(outofbandTask.AllocID, outofbandTaskServiceID := MakeTaskServiceID(outofbandTask.AllocID,
outofbandTask.Name, outofbandTask.Services[0], false) outofbandTask.Name, outofbandTask.Services[0], false)
require.NoError(ctx.ServiceClient.RegisterTask(outofbandTask)) require.NoError(ctx.ServiceClient.RegisterTask(outofbandTask))
@ -1819,7 +1819,7 @@ func TestConsul_ServiceDeregistration_InProbation(t *testing.T) {
}, },
}, },
} }
remainingTaskServiceID := makeTaskServiceID(remainingTask.AllocID, remainingTaskServiceID := MakeTaskServiceID(remainingTask.AllocID,
remainingTask.Name, remainingTask.Services[0], false) remainingTask.Name, remainingTask.Services[0], false)
require.NoError(ctx.ServiceClient.RegisterTask(remainingTask)) require.NoError(ctx.ServiceClient.RegisterTask(remainingTask))
@ -1842,7 +1842,7 @@ func TestConsul_ServiceDeregistration_InProbation(t *testing.T) {
}, },
}, },
} }
explicitlyRemovedTaskServiceID := makeTaskServiceID(explicitlyRemovedTask.AllocID, explicitlyRemovedTaskServiceID := MakeTaskServiceID(explicitlyRemovedTask.AllocID,
explicitlyRemovedTask.Name, explicitlyRemovedTask.Services[0], false) explicitlyRemovedTask.Name, explicitlyRemovedTask.Services[0], false)
require.NoError(ctx.ServiceClient.RegisterTask(explicitlyRemovedTask)) require.NoError(ctx.ServiceClient.RegisterTask(explicitlyRemovedTask))
@ -1867,7 +1867,7 @@ func TestConsul_ServiceDeregistration_InProbation(t *testing.T) {
}, },
}, },
} }
outofbandTaskServiceID := makeTaskServiceID(outofbandTask.AllocID, outofbandTaskServiceID := MakeTaskServiceID(outofbandTask.AllocID,
outofbandTask.Name, outofbandTask.Services[0], false) outofbandTask.Name, outofbandTask.Services[0], false)
require.NoError(ctx.ServiceClient.RegisterTask(outofbandTask)) require.NoError(ctx.ServiceClient.RegisterTask(outofbandTask))

View File

@ -194,11 +194,13 @@ func (s *HTTPServer) FileCatRequest(resp http.ResponseWriter, req *http.Request)
// Stream streams the content of a file blocking on EOF. // Stream streams the content of a file blocking on EOF.
// The parameters are: // The parameters are:
// * path: path to file to stream. // * path: path to file to stream.
// * follow: Whether to follow the file as it grows, defaults to true.
// * offset: The offset to start streaming data at, defaults to zero. // * offset: The offset to start streaming data at, defaults to zero.
// * origin: Either "start" or "end" and defines from where the offset is // * origin: Either "start" or "end" and defines from where the offset is
// applied. Defaults to "start". // applied. Defaults to "start".
func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interface{}, error) { func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
var allocID, path string var allocID, path string
var err error
q := req.URL.Query() q := req.URL.Query()
@ -210,10 +212,16 @@ func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interf
return nil, fileNameNotPresentErr return nil, fileNameNotPresentErr
} }
follow := true
if followStr := q.Get("follow"); followStr != "" {
if follow, err = strconv.ParseBool(followStr); err != nil {
return nil, fmt.Errorf("failed to parse follow field to boolean: %v", err)
}
}
var offset int64 var offset int64
offsetString := q.Get("offset") offsetString := q.Get("offset")
if offsetString != "" { if offsetString != "" {
var err error
if offset, err = strconv.ParseInt(offsetString, 10, 64); err != nil { if offset, err = strconv.ParseInt(offsetString, 10, 64); err != nil {
return nil, fmt.Errorf("error parsing offset: %v", err) return nil, fmt.Errorf("error parsing offset: %v", err)
} }
@ -234,7 +242,7 @@ func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interf
Path: path, Path: path,
Origin: origin, Origin: origin,
Offset: offset, Offset: offset,
Follow: true, Follow: follow,
} }
s.parse(resp, req, &fsReq.QueryOptions.Region, &fsReq.QueryOptions) s.parse(resp, req, &fsReq.QueryOptions.Region, &fsReq.QueryOptions)
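// Illustrative request against the handler above, with an assumed alloc ID
// and log path: stream the last 4 bytes of a task's stdout without following
// new writes.
//
//	GET /v1/client/fs/stream/<alloc-id>?path=alloc/logs/web.stdout.0&origin=end&offset=4&follow=false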
@ -265,13 +273,13 @@ func (s *HTTPServer) Logs(resp http.ResponseWriter, req *http.Request) (interfac
if followStr := q.Get("follow"); followStr != "" { if followStr := q.Get("follow"); followStr != "" {
if follow, err = strconv.ParseBool(followStr); err != nil { if follow, err = strconv.ParseBool(followStr); err != nil {
return nil, fmt.Errorf("Failed to parse follow field to boolean: %v", err) return nil, fmt.Errorf("failed to parse follow field to boolean: %v", err)
} }
} }
if plainStr := q.Get("plain"); plainStr != "" { if plainStr := q.Get("plain"); plainStr != "" {
if plain, err = strconv.ParseBool(plainStr); err != nil { if plain, err = strconv.ParseBool(plainStr); err != nil {
return nil, fmt.Errorf("Failed to parse plain field to boolean: %v", err) return nil, fmt.Errorf("failed to parse plain field to boolean: %v", err)
} }
} }

View File

@ -341,7 +341,54 @@ func TestHTTP_FS_Cat(t *testing.T) {
}) })
} }
func TestHTTP_FS_Stream(t *testing.T) { func TestHTTP_FS_Stream_NoFollow(t *testing.T) {
t.Parallel()
require := require.New(t)
httpTest(t, nil, func(s *TestAgent) {
a := mockFSAlloc(s.client.NodeID(), nil)
addAllocToClient(s, a, terminalClientAlloc)
offset := 4
expectation := base64.StdEncoding.EncodeToString(
[]byte(defaultLoggerMockDriverStdout[len(defaultLoggerMockDriverStdout)-offset:]))
path := fmt.Sprintf("/v1/client/fs/stream/%s?path=alloc/logs/web.stdout.0&offset=%d&origin=end&follow=false",
a.ID, offset)
p, _ := io.Pipe()
req, err := http.NewRequest("GET", path, p)
require.Nil(err)
respW := testutil.NewResponseRecorder()
doneCh := make(chan struct{})
go func() {
_, err = s.Server.Stream(respW, req)
require.Nil(err)
close(doneCh)
}()
out := ""
testutil.WaitForResult(func() (bool, error) {
output, err := ioutil.ReadAll(respW)
if err != nil {
return false, err
}
out += string(output)
return strings.Contains(out, expectation), fmt.Errorf("%q doesn't contain %q", out, expectation)
}, func(err error) {
t.Fatal(err)
})
select {
case <-doneCh:
case <-time.After(1 * time.Second):
t.Fatal("should close but did not")
}
p.Close()
})
}
func TestHTTP_FS_Stream_Follow(t *testing.T) {
t.Parallel() t.Parallel()
require := require.New(t) require := require.New(t)
httpTest(t, nil, func(s *TestAgent) { httpTest(t, nil, func(s *TestAgent) {

View File

@ -141,12 +141,16 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
return nil, CodedError(400, "Job ID does not match") return nil, CodedError(400, "Job ID does not match")
} }
// Http region takes precedence over hcl region // Region in http request query param takes precedence over region in job hcl config
if args.WriteRequest.Region != "" { if args.WriteRequest.Region != "" {
args.Job.Region = helper.StringToPtr(args.WriteRequest.Region) args.Job.Region = helper.StringToPtr(args.WriteRequest.Region)
} }
// If 'global' region is specified or if no region is given,
// default to region of the node you're submitting to
if args.Job.Region == nil || *args.Job.Region == "" || *args.Job.Region == api.GlobalRegion {
args.Job.Region = &s.agent.config.Region
}
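// Net effect: a region supplied in the request query string overrides the
// region in the job file, and an empty or "global" result then falls back to
// this agent's configured region.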
// If no region given, region is canonicalized to 'global'
sJob := ApiJobToStructJob(args.Job) sJob := ApiJobToStructJob(args.Job)
planReq := structs.JobPlanRequest{ planReq := structs.JobPlanRequest{
@ -157,6 +161,8 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
Region: sJob.Region, Region: sJob.Region,
}, },
} }
// parseWriteRequest overrides Namespace, Region and AuthToken
// based on values from the original http request
s.parseWriteRequest(req, &planReq.WriteRequest) s.parseWriteRequest(req, &planReq.WriteRequest)
planReq.Namespace = sJob.Namespace planReq.Namespace = sJob.Namespace
@ -183,6 +189,7 @@ func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Requ
} }
job := ApiJobToStructJob(validateRequest.Job) job := ApiJobToStructJob(validateRequest.Job)
args := structs.JobValidateRequest{ args := structs.JobValidateRequest{
Job: job, Job: job,
WriteRequest: structs.WriteRequest{ WriteRequest: structs.WriteRequest{
@ -384,12 +391,16 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
return nil, CodedError(400, "Job ID does not match name") return nil, CodedError(400, "Job ID does not match name")
} }
// Http region takes precedence over hcl region // Region in http request query param takes precedence over region in job hcl config
if args.WriteRequest.Region != "" { if args.WriteRequest.Region != "" {
args.Job.Region = helper.StringToPtr(args.WriteRequest.Region) args.Job.Region = helper.StringToPtr(args.WriteRequest.Region)
} }
// If 'global' region is specified or if no region is given,
// default to region of the node you're submitting to
if args.Job.Region == nil || *args.Job.Region == "" || *args.Job.Region == api.GlobalRegion {
args.Job.Region = &s.agent.config.Region
}
// If no region given, region is canonicalized to 'global'
sJob := ApiJobToStructJob(args.Job) sJob := ApiJobToStructJob(args.Job)
regReq := structs.JobRegisterRequest{ regReq := structs.JobRegisterRequest{
@ -402,6 +413,8 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
AuthToken: args.WriteRequest.SecretID, AuthToken: args.WriteRequest.SecretID,
}, },
} }
// parseWriteRequest overrides Namespace, Region and AuthToken
// based on values from the original http request
s.parseWriteRequest(req, &regReq.WriteRequest) s.parseWriteRequest(req, &regReq.WriteRequest)
regReq.Namespace = sJob.Namespace regReq.Namespace = sJob.Namespace
@ -685,6 +698,8 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
tg.Meta = taskGroup.Meta tg.Meta = taskGroup.Meta
tg.Constraints = ApiConstraintsToStructs(taskGroup.Constraints) tg.Constraints = ApiConstraintsToStructs(taskGroup.Constraints)
tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities) tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities)
tg.Networks = ApiNetworkResourceToStructs(taskGroup.Networks)
tg.Services = ApiServicesToStructs(taskGroup.Services)
tg.RestartPolicy = &structs.RestartPolicy{ tg.RestartPolicy = &structs.RestartPolicy{
Attempts: *taskGroup.RestartPolicy.Attempts, Attempts: *taskGroup.RestartPolicy.Attempts,
@ -726,6 +741,25 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
} }
} }
if l := len(taskGroup.Volumes); l != 0 {
tg.Volumes = make(map[string]*structs.VolumeRequest, l)
for k, v := range taskGroup.Volumes {
if v.Type != structs.VolumeTypeHost {
// Ignore non-host volumes in this iteration currently.
continue
}
vol := &structs.VolumeRequest{
Name: v.Name,
Type: v.Type,
ReadOnly: v.ReadOnly,
Config: v.Config,
}
tg.Volumes[k] = vol
}
}
if taskGroup.Update != nil { if taskGroup.Update != nil {
tg.Update = &structs.UpdateStrategy{ tg.Update = &structs.UpdateStrategy{
Stagger: *taskGroup.Update.Stagger, Stagger: *taskGroup.Update.Stagger,
@ -770,9 +804,21 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
structsTask.KillTimeout = *apiTask.KillTimeout structsTask.KillTimeout = *apiTask.KillTimeout
structsTask.ShutdownDelay = apiTask.ShutdownDelay structsTask.ShutdownDelay = apiTask.ShutdownDelay
structsTask.KillSignal = apiTask.KillSignal structsTask.KillSignal = apiTask.KillSignal
structsTask.Kind = structs.TaskKind(apiTask.Kind)
structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints) structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints)
structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities) structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities)
if l := len(apiTask.VolumeMounts); l != 0 {
structsTask.VolumeMounts = make([]*structs.VolumeMount, l)
for i, mount := range apiTask.VolumeMounts {
structsTask.VolumeMounts[i] = &structs.VolumeMount{
Volume: mount.Volume,
Destination: mount.Destination,
ReadOnly: mount.ReadOnly,
}
}
}
if l := len(apiTask.Services); l != 0 { if l := len(apiTask.Services); l != 0 {
structsTask.Services = make([]*structs.Service, l) structsTask.Services = make([]*structs.Service, l)
for i, service := range apiTask.Services { for i, service := range apiTask.Services {
@ -782,6 +828,7 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
Tags: service.Tags, Tags: service.Tags,
CanaryTags: service.CanaryTags, CanaryTags: service.CanaryTags,
AddressMode: service.AddressMode, AddressMode: service.AddressMode,
Meta: helper.CopyMapStringString(service.Meta),
} }
if l := len(service.Checks); l != 0 { if l := len(service.Checks); l != 0 {
@ -886,35 +933,8 @@ func ApiResourcesToStructs(in *api.Resources) *structs.Resources {
out.IOPS = *in.IOPS out.IOPS = *in.IOPS
} }
if l := len(in.Networks); l != 0 { if len(in.Networks) != 0 {
out.Networks = make([]*structs.NetworkResource, l) out.Networks = ApiNetworkResourceToStructs(in.Networks)
for i, nw := range in.Networks {
out.Networks[i] = &structs.NetworkResource{
CIDR: nw.CIDR,
IP: nw.IP,
MBits: *nw.MBits,
}
if l := len(nw.DynamicPorts); l != 0 {
out.Networks[i].DynamicPorts = make([]structs.Port, l)
for j, dp := range nw.DynamicPorts {
out.Networks[i].DynamicPorts[j] = structs.Port{
Label: dp.Label,
Value: dp.Value,
}
}
}
if l := len(nw.ReservedPorts); l != 0 {
out.Networks[i].ReservedPorts = make([]structs.Port, l)
for j, rp := range nw.ReservedPorts {
out.Networks[i].ReservedPorts[j] = structs.Port{
Label: rp.Label,
Value: rp.Value,
}
}
}
}
} }
if l := len(in.Devices); l != 0 { if l := len(in.Devices); l != 0 {
@ -932,6 +952,168 @@ func ApiResourcesToStructs(in *api.Resources) *structs.Resources {
return out return out
} }
func ApiNetworkResourceToStructs(in []*api.NetworkResource) []*structs.NetworkResource {
var out []*structs.NetworkResource
if len(in) == 0 {
return out
}
out = make([]*structs.NetworkResource, len(in))
for i, nw := range in {
out[i] = &structs.NetworkResource{
Mode: nw.Mode,
CIDR: nw.CIDR,
IP: nw.IP,
MBits: *nw.MBits,
}
if l := len(nw.DynamicPorts); l != 0 {
out[i].DynamicPorts = make([]structs.Port, l)
for j, dp := range nw.DynamicPorts {
out[i].DynamicPorts[j] = structs.Port{
Label: dp.Label,
Value: dp.Value,
To: dp.To,
}
}
}
if l := len(nw.ReservedPorts); l != 0 {
out[i].ReservedPorts = make([]structs.Port, l)
for j, rp := range nw.ReservedPorts {
out[i].ReservedPorts[j] = structs.Port{
Label: rp.Label,
Value: rp.Value,
To: rp.To,
}
}
}
}
return out
}
//TODO(schmichael) refactor and reuse in service parsing above
func ApiServicesToStructs(in []*api.Service) []*structs.Service {
if len(in) == 0 {
return nil
}
out := make([]*structs.Service, len(in))
for i, s := range in {
out[i] = &structs.Service{
Name: s.Name,
PortLabel: s.PortLabel,
Tags: s.Tags,
CanaryTags: s.CanaryTags,
AddressMode: s.AddressMode,
Meta: helper.CopyMapStringString(s.Meta),
}
if l := len(s.Checks); l != 0 {
out[i].Checks = make([]*structs.ServiceCheck, l)
for j, check := range s.Checks {
out[i].Checks[j] = &structs.ServiceCheck{
Name: check.Name,
Type: check.Type,
Command: check.Command,
Args: check.Args,
Path: check.Path,
Protocol: check.Protocol,
PortLabel: check.PortLabel,
AddressMode: check.AddressMode,
Interval: check.Interval,
Timeout: check.Timeout,
InitialStatus: check.InitialStatus,
TLSSkipVerify: check.TLSSkipVerify,
Header: check.Header,
Method: check.Method,
GRPCService: check.GRPCService,
GRPCUseTLS: check.GRPCUseTLS,
TaskName: check.TaskName,
}
if check.CheckRestart != nil {
out[i].Checks[j].CheckRestart = &structs.CheckRestart{
Limit: check.CheckRestart.Limit,
Grace: *check.CheckRestart.Grace,
IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
}
}
}
}
if s.Connect != nil {
out[i].Connect = ApiConsulConnectToStructs(s.Connect)
}
}
return out
}
func ApiConsulConnectToStructs(in *api.ConsulConnect) *structs.ConsulConnect {
if in == nil {
return nil
}
out := &structs.ConsulConnect{
Native: in.Native,
}
if in.SidecarService != nil {
out.SidecarService = &structs.ConsulSidecarService{
Port: in.SidecarService.Port,
}
if in.SidecarService.Proxy != nil {
out.SidecarService.Proxy = &structs.ConsulProxy{
Config: in.SidecarService.Proxy.Config,
}
upstreams := make([]structs.ConsulUpstream, len(in.SidecarService.Proxy.Upstreams))
for i, p := range in.SidecarService.Proxy.Upstreams {
upstreams[i] = structs.ConsulUpstream{
DestinationName: p.DestinationName,
LocalBindPort: p.LocalBindPort,
}
}
out.SidecarService.Proxy.Upstreams = upstreams
}
}
if in.SidecarTask != nil {
out.SidecarTask = &structs.SidecarTask{
Name: in.SidecarTask.Name,
Driver: in.SidecarTask.Driver,
Config: in.SidecarTask.Config,
User: in.SidecarTask.User,
Env: in.SidecarTask.Env,
Resources: ApiResourcesToStructs(in.SidecarTask.Resources),
Meta: in.SidecarTask.Meta,
LogConfig: &structs.LogConfig{},
ShutdownDelay: in.SidecarTask.ShutdownDelay,
KillSignal: in.SidecarTask.KillSignal,
}
if in.SidecarTask.KillTimeout != nil {
out.SidecarTask.KillTimeout = in.SidecarTask.KillTimeout
}
if in.SidecarTask.LogConfig != nil {
out.SidecarTask.LogConfig = &structs.LogConfig{}
if in.SidecarTask.LogConfig.MaxFiles != nil {
out.SidecarTask.LogConfig.MaxFiles = *in.SidecarTask.LogConfig.MaxFiles
}
if in.SidecarTask.LogConfig.MaxFileSizeMB != nil {
out.SidecarTask.LogConfig.MaxFileSizeMB = *in.SidecarTask.LogConfig.MaxFileSizeMB
}
}
}
return out
}
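// Illustrative mapping, using the upstream from the countdash example job
// added in this change: an api.ConsulConnect whose sidecar proxy declares
// {DestinationName: "count-api", LocalBindPort: 8080} converts to a
// structs.ConsulConnect carrying that same single upstream, with any
// sidecar_task overrides copied alongside it.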
func ApiConstraintsToStructs(in []*api.Constraint) []*structs.Constraint { func ApiConstraintsToStructs(in []*api.Constraint) []*structs.Constraint {
if in == nil { if in == nil {
return nil return nil

View File

@ -493,11 +493,17 @@ func TestHTTP_JobUpdateRegion(t *testing.T) {
ExpectedRegion: "north-america", ExpectedRegion: "north-america",
}, },
{ {
Name: "falls back to default if no region is provided", Name: "defaults to node region global if no region is provided",
ConfigRegion: "", ConfigRegion: "",
APIRegion: "", APIRegion: "",
ExpectedRegion: "global", ExpectedRegion: "global",
}, },
{
Name: "defaults to node region not-global if no region is provided",
ConfigRegion:   "not-global",
APIRegion: "",
ExpectedRegion: "not-global",
},
} }
for _, tc := range cases { for _, tc := range cases {
@ -1492,10 +1498,47 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
ProgressDeadline: helper.TimeToPtr(5 * time.Minute), ProgressDeadline: helper.TimeToPtr(5 * time.Minute),
AutoRevert: helper.BoolToPtr(true), AutoRevert: helper.BoolToPtr(true),
}, },
Meta: map[string]string{ Meta: map[string]string{
"key": "value", "key": "value",
}, },
Services: []*api.Service{
{
Name: "groupserviceA",
Tags: []string{"a", "b"},
CanaryTags: []string{"d", "e"},
PortLabel: "1234",
Meta: map[string]string{
"servicemeta": "foobar",
},
CheckRestart: &api.CheckRestart{
Limit: 4,
Grace: helper.TimeToPtr(11 * time.Second),
},
Checks: []api.ServiceCheck{
{
Id: "hello",
Name: "bar",
Type: "http",
Command: "foo",
Args: []string{"a", "b"},
Path: "/check",
Protocol: "http",
PortLabel: "foo",
AddressMode: "driver",
GRPCService: "foo.Bar",
GRPCUseTLS: true,
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: "ok",
CheckRestart: &api.CheckRestart{
Limit: 3,
IgnoreWarnings: true,
},
TaskName: "task1",
},
},
},
},
Tasks: []*api.Task{ Tasks: []*api.Task{
{ {
Name: "task1", Name: "task1",
@ -1531,6 +1574,9 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Tags: []string{"1", "2"}, Tags: []string{"1", "2"},
CanaryTags: []string{"3", "4"}, CanaryTags: []string{"3", "4"},
PortLabel: "foo", PortLabel: "foo",
Meta: map[string]string{
"servicemeta": "foobar",
},
CheckRestart: &api.CheckRestart{ CheckRestart: &api.CheckRestart{
Limit: 4, Limit: 4,
Grace: helper.TimeToPtr(11 * time.Second), Grace: helper.TimeToPtr(11 * time.Second),
@ -1798,6 +1844,41 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Meta: map[string]string{ Meta: map[string]string{
"key": "value", "key": "value",
}, },
Services: []*structs.Service{
{
Name: "groupserviceA",
Tags: []string{"a", "b"},
CanaryTags: []string{"d", "e"},
PortLabel: "1234",
AddressMode: "auto",
Meta: map[string]string{
"servicemeta": "foobar",
},
Checks: []*structs.ServiceCheck{
{
Name: "bar",
Type: "http",
Command: "foo",
Args: []string{"a", "b"},
Path: "/check",
Protocol: "http",
PortLabel: "foo",
AddressMode: "driver",
GRPCService: "foo.Bar",
GRPCUseTLS: true,
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: "ok",
CheckRestart: &structs.CheckRestart{
Grace: 11 * time.Second,
Limit: 3,
IgnoreWarnings: true,
},
TaskName: "task1",
},
},
},
},
Tasks: []*structs.Task{ Tasks: []*structs.Task{
{ {
Name: "task1", Name: "task1",
@ -1832,6 +1913,9 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
CanaryTags: []string{"3", "4"}, CanaryTags: []string{"3", "4"},
PortLabel: "foo", PortLabel: "foo",
AddressMode: "auto", AddressMode: "auto",
Meta: map[string]string{
"servicemeta": "foobar",
},
Checks: []*structs.ServiceCheck{ Checks: []*structs.ServiceCheck{
{ {
Name: "bar", Name: "bar",

View File

@ -311,7 +311,7 @@ func (a *TestAgent) pickRandomPorts(c *Config) {
// TestConfig returns a unique default configuration for testing an // TestConfig returns a unique default configuration for testing an
// agent. // agent.
func (a *TestAgent) config() *Config { func (a *TestAgent) config() *Config {
conf := DevConfig() conf := DevConfig(nil)
// Customize the server configuration // Customize the server configuration
config := nomad.DefaultConfig() config := nomad.DefaultConfig()

View File

@ -89,6 +89,10 @@ client {
gc_max_allocs = 50 gc_max_allocs = 50
no_host_uuid = false no_host_uuid = false
disable_remote_exec = true disable_remote_exec = true
host_volume "tmp" {
path = "/tmp"
}
} }
server { server {
@ -101,6 +105,7 @@ server {
num_schedulers = 2 num_schedulers = 2
enabled_schedulers = ["test"] enabled_schedulers = ["test"]
node_gc_threshold = "12h" node_gc_threshold = "12h"
job_gc_interval = "3m"
job_gc_threshold = "12h" job_gc_threshold = "12h"
eval_gc_threshold = "12h" eval_gc_threshold = "12h"
deployment_gc_threshold = "12h" deployment_gc_threshold = "12h"

View File

@ -44,12 +44,22 @@
"client_max_port": 2000, "client_max_port": 2000,
"client_min_port": 1000, "client_min_port": 1000,
"cpu_total_compute": 4444, "cpu_total_compute": 4444,
"disable_remote_exec": true,
"enabled": true, "enabled": true,
"gc_disk_usage_threshold": 82, "gc_disk_usage_threshold": 82,
"gc_inode_usage_threshold": 91, "gc_inode_usage_threshold": 91,
"gc_interval": "6s", "gc_interval": "6s",
"gc_max_allocs": 50, "gc_max_allocs": 50,
"gc_parallel_destroys": 6, "gc_parallel_destroys": 6,
"host_volume": [
{
"tmp": [
{
"path": "/tmp"
}
]
}
],
"max_kill_timeout": "10s", "max_kill_timeout": "10s",
"meta": [ "meta": [
{ {
@ -60,7 +70,6 @@
"network_interface": "eth0", "network_interface": "eth0",
"network_speed": 100, "network_speed": 100,
"no_host_uuid": false, "no_host_uuid": false,
"disable_remote_exec": true,
"node_class": "linux-medium-64bit", "node_class": "linux-medium-64bit",
"options": [ "options": [
{ {
@ -137,25 +146,39 @@
"log_json": true, "log_json": true,
"log_level": "ERR", "log_level": "ERR",
"name": "my-web", "name": "my-web",
"plugin": { "plugin": [
"docker": { {
"args": [ "docker": [
"foo", {
"bar" "args": [
], "foo",
"config": { "bar"
"foo": "bar", ],
"nested": { "config": [
"bam": 2 {
"foo": "bar",
"nested": [
{
"bam": 2
}
]
}
]
} }
} ]
}, },
"exec": { {
"config": { "exec": [
"foo": true {
"config": [
{
"foo": true
}
]
} }
]
} }
}, ],
"plugin_dir": "/tmp/nomad-plugins", "plugin_dir": "/tmp/nomad-plugins",
"ports": [ "ports": [
{ {
@ -208,6 +231,7 @@
"encrypt": "abc", "encrypt": "abc",
"eval_gc_threshold": "12h", "eval_gc_threshold": "12h",
"heartbeat_grace": "30s", "heartbeat_grace": "30s",
"job_gc_interval": "3m",
"job_gc_threshold": "12h", "job_gc_threshold": "12h",
"max_heartbeats_per_second": 11, "max_heartbeats_per_second": 11,
"min_heartbeat_ttl": "33s", "min_heartbeat_ttl": "33s",

View File

@ -191,6 +191,11 @@ func (c *AllocStatusCommand) Run(args []string) int {
} }
c.Ui.Output(output) c.Ui.Output(output)
if len(alloc.AllocatedResources.Shared.Networks) > 0 && alloc.AllocatedResources.Shared.Networks[0].HasPorts() {
c.Ui.Output("")
c.Ui.Output(formatAllocNetworkInfo(alloc))
}
if short { if short {
c.shortTaskStatus(alloc) c.shortTaskStatus(alloc)
} else { } else {
@ -299,6 +304,32 @@ func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength
return formatKV(basic), nil return formatKV(basic), nil
} }
func formatAllocNetworkInfo(alloc *api.Allocation) string {
nw := alloc.AllocatedResources.Shared.Networks[0]
addrs := make([]string, len(nw.DynamicPorts)+len(nw.ReservedPorts)+1)
addrs[0] = "Label|Dynamic|Address"
portFmt := func(port *api.Port, dyn string) string {
s := fmt.Sprintf("%s|%s|%s:%d", port.Label, dyn, nw.IP, port.Value)
if port.To > 0 {
s += fmt.Sprintf(" -> %d", port.To)
}
return s
}
for idx, port := range nw.DynamicPorts {
addrs[idx+1] = portFmt(&port, "yes")
}
for idx, port := range nw.ReservedPorts {
addrs[idx+1+len(nw.DynamicPorts)] = portFmt(&port, "no")
}
var mode string
if nw.Mode != "" {
mode = fmt.Sprintf(" (mode = %q)", nw.Mode)
}
return fmt.Sprintf("Allocation Addresses%s\n%s", mode, formatList(addrs))
}
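// Illustrative output with assumed values, for a bridge-mode allocation with
// one dynamic port mapped into the network namespace:
//
//	Allocation Addresses (mode = "bridge")
//	Label  Dynamic  Address
//	http   yes      10.0.0.1:9999 -> 9998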
// futureEvalTimePretty returns when the eval is eligible to reschedule // futureEvalTimePretty returns when the eval is eligible to reschedule
// relative to current time, based on the WaitUntil field // relative to current time, based on the WaitUntil field
func futureEvalTimePretty(evalID string, client *api.Client) string { func futureEvalTimePretty(evalID string, client *api.Client) string {

View File

@ -0,0 +1,65 @@
job "countdash" {
datacenters = ["dc1"]
group "api" {
network {
mode = "bridge"
}
service {
name = "count-api"
port = "9001"
connect {
sidecar_service {}
}
}
task "web" {
driver = "docker"
config {
image = "hashicorpnomad/counter-api:v1"
}
}
}
group "dashboard" {
network {
mode = "bridge"
port "http" {
static = 9002
to = 9002
}
}
service {
name = "count-dashboard"
port = "9002"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "count-api"
local_bind_port = 8080
}
}
}
}
}
task "dashboard" {
driver = "docker"
env {
COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
}
config {
image = "hashicorpnomad/counter-dashboard:v1"
}
}
}
}

View File

@ -0,0 +1,460 @@
# There can only be a single job definition per file. This job is named
# "countdash" so it will create a job with the ID and Name "countdash".
# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job.html
#
job "countdash" {
# The "region" parameter specifies the region in which to execute the job. If
# omitted, this inherits the default region name of "global".
# region = "global"
#
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This must be provided.
datacenters = ["dc1"]
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
#
# For more information, please see the online documentation at:
#
# https://www.nomadproject.io/docs/jobspec/schedulers.html
#
type = "service"
# The "constraint" stanza defines additional constraints for placing this job,
# in addition to any resource or driver constraints. This stanza may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
# For more information and examples on the "constraint" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint.html
#
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# The "update" stanza specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
# "update" stanza may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
# group level, the stanzas are merged with the group's taking precedence.
#
# For more information and examples on the "update" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update.html
#
update {
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# at a time.
max_parallel = 1
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
auto_revert = false
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
#
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
canary = 0
}
# The migrate stanza specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
# For more information on the "migrate" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate.html
#
migrate {
# Specifies the number of task groups that can be migrated at the same
# time. This number must be less than the total count for the group as
# (count - max_parallel) will be left running during migrations.
max_parallel = 1
# Specifies the mechanism in which allocations health is determined. The
# potential values are "checks" or "task_states".
health_check = "checks"
# Specifies the minimum time the allocation must be in the healthy state
# before it is marked as healthy and unblocks further allocations from being
# migrated. This is specified using a label suffix like "30s" or "15m".
min_healthy_time = "10s"
# Specifies the deadline in which the allocation must be marked as healthy
# after which the allocation is automatically transitioned to unhealthy. This
# is specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
# For more information and examples on the "group" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group.html
#
group "api" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = 1
# The "restart" stanza configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
# For more information and examples on the "restart" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart.html
#
restart {
# The number of attempts to run the job within the specified interval.
attempts = 2
interval = "30m"
# The "delay" parameter specifies the duration to wait before restarting
# a task after it has failed.
delay = "15s"
# The "mode" parameter controls what happens when a task has restarted
# "attempts" times within the interval. "delay" mode delays the next
# restart until the next interval. "fail" mode does not restart the task
# if "attempts" has been hit within the interval.
mode = "fail"
}
# The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
# instead of a hard disk requirement. Clients using this stanza should
# not specify disk requirements in the resources stanza of the task. All
# tasks in this group will share the same ephemeral disk.
#
# For more information and examples on the "ephemeral_disk" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
#
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
#
# Setting migrate to true results in the allocation directory of a
# sticky allocation directory to be migrated.
# migrate = true
#
# The "size" parameter specifies the size in MB of shared ephemeral disk
# between tasks in the group.
size = 300
}
# The "affinity" stanza enables operators to express placement preferences
# based on node attributes or metadata.
#
# For more information and examples on the "affinity" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity.html
#
# affinity {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# value specifies the desired attribute value. In this example Nomad
# will prefer placement in the "us-west1" datacenter.
# value = "us-west1"
# weight can be used to indicate relative preference
# when the job has more than one affinity. It defaults to 50 if not set.
# weight = 100
# }
# The "spread" stanza allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
# For more information and examples on the "spread" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread.html
#
# spread {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# targets can be used to define desired percentages of allocations
# for each targeted attribute value.
#
# target "us-east1" {
# percent = 60
# }
# target "us-west1" {
# percent = 40
# }
# }
# The "network" stanza for a group creates a network namespace shared
# by all tasks within the group.
network {
# "mode" is the CNI plugin used to configure the network namespace.
# see the documentation for CNI plugins at:
#
# https://github.com/containernetworking/plugins
#
mode = "bridge"
# The service we define for this group is accessible only via
# Consul Connect, so we do not define ports in its network.
# port "http" {
# to = "8080"
# }
}
# The "service" stanza enables Consul Connect.
service {
name = "count-api"
# The port in the service stanza is the port the service listens on.
# The Envoy proxy will automatically route traffic to that port
# inside the network namespace. If the application binds to localhost
# on this port, the task needs no additional network configuration.
port = "9001"
# The "check" stanza specifies a health check associated with the service.
# This can be specified multiple times to define multiple checks for the
# service. Note that checks run inside the task indicated by the "task"
# field.
#
# check {
# name = "alive"
# type = "tcp"
# task = "api"
# interval = "10s"
# timeout = "2s"
# }
connect {
# The "sidecar_service" stanza configures the Envoy sidecar admission
# controller. For each task group with a sidecar_service, Nomad will
# inject an Envoy task into the task group. A group network will be
# required and a dynamic port will be registered for remote services
# to connect to Envoy with the name `connect-proxy-<service>`.
#
# By default, Envoy will be run via its official upstream Docker image.
sidecar_service {}
}
}
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
# For more information and examples on the "task" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task.html
#
task "web" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "docker"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
image = "hashicorpnomad/counter-api:v1"
}
# The "artifact" stanza instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
# task. It is possible to specify the "artifact" stanza multiple times to
# download multiple artifacts.
#
# For more information and examples on the "artifact" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact.html
#
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# The "logs" stanza instructs the Nomad client on how many log files and
# the maximum size of those logs files to retain. Logging is enabled by
# default, but the "logs" stanza allows for finer-grained control over
# the log rotation and storage configuration.
#
# For more information and examples on the "logs" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs.html
#
# logs {
# max_files = 10
# max_file_size = 15
# }
# The "resources" stanza describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
# For more information and examples on the "resources" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources.html
#
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
}
}
# The Envoy sidecar admission controller will inject an Envoy task into
# any task group for each service with a sidecar_service stanza it contains.
# A group network will be required and a dynamic port will be registered for
# remote services to connect to Envoy with the name `connect-proxy-<service>`.
# By default, Envoy will be run via its official upstream Docker image.
#
# There are two ways to modify the default behavior:
# * Tasks can define a `sidecar_task` stanza in the `connect` stanza
# that merges into the default sidecar configuration.
# * Add the `kind = "connect-proxy:<service>"` field to another task.
# That task will replace the default Envoy proxy task entirely.
#
# task "connect-<service>" {
# kind = "connect-proxy:<service>"
# driver = "docker"
# config {
# image = "${meta.connect.sidecar_image}"
# args = [
# "-c", "${NOMAD_TASK_DIR}/bootstrap.json",
# "-l", "${meta.connect.log_level}"
# ]
# }
# resources {
# cpu = 100
# memory = 300
# }
# logs {
# max_files = 2
# max_file_size = 2
# }
# }
}
# This job has a second "group" stanza to define tasks that might be placed
# on a separate Nomad client from the group above.
#
group "dashboard" {
network {
mode = "bridge"
# The `static = 9002` parameter requests the Nomad scheduler reserve
# port 9002 on a host network interface. The `to = 9002` parameter
# forwards that host port to port 9002 inside the network namespace.
port "http" {
static = 9002
to = 9002
}
}
service {
name = "count-dashboard"
port = "9002"
connect {
sidecar_service {
proxy {
# The upstreams stanza defines the remote service to access
# (count-api) and what port to expose that service on inside
# the network namespace. This allows this task to reach the
# upstream at localhost:8080.
upstreams {
destination_name = "count-api"
local_bind_port = 8080
}
}
}
# The `sidecar_task` stanza modifies the default configuration
# of the Envoy proxy task.
# sidecar_task {
# resources {
# cpu = 1000
# memory = 512
# }
# }
}
}
task "dashboard" {
driver = "docker"
# The application can take advantage of automatically created
# environment variables to find the address of its upstream
# service.
env {
COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
}
config {
image = "hashicorpnomad/counter-dashboard:v1"
}
}
}
}

View File

@ -0,0 +1,27 @@
job "example" {
datacenters = ["dc1"]
group "cache" {
task "redis" {
driver = "docker"
config {
image = "redis:3.2"
port_map {
db = 6379
}
}
resources {
cpu = 500
memory = 256
network {
mbits = 10
port "db" {}
}
}
}
}
}

View File

@ -0,0 +1,394 @@
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".
# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
# The "region" parameter specifies the region in which to execute the job.
# If omitted, this inherits the default region name of "global".
# region = "global"
#
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This must be provided.
datacenters = ["dc1"]
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
#
# For more information, please see the online documentation at:
#
# https://www.nomadproject.io/docs/jobspec/schedulers.html
#
type = "service"
# The "constraint" stanza defines additional constraints for placing this job,
# in addition to any resource or driver constraints. This stanza may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
# For more information and examples on the "constraint" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint.html
#
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# The "update" stanza specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
# "update" stanza may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
# group level, the stanzas are merged with the group's taking precedence.
#
# For more information and examples on the "update" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update.html
#
update {
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# at a time.
max_parallel = 1
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
auto_revert = false
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
#
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
canary = 0
}
# The migrate stanza specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
# For more information on the "migrate" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate.html
#
migrate {
# Specifies the number of task groups that can be migrated at the same
# time. This number must be less than the total count for the group as
# (count - max_parallel) will be left running during migrations.
max_parallel = 1
# Specifies the mechanism by which allocation health is determined. The
# potential values are "checks" or "task_states".
health_check = "checks"
# Specifies the minimum time the allocation must be in the healthy state
# before it is marked as healthy and unblocks further allocations from being
# migrated. This is specified using a label suffix like "30s" or "15m".
min_healthy_time = "10s"
# Specifies the deadline in which the allocation must be marked as healthy
# after which the allocation is automatically transitioned to unhealthy. This
# is specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
# For more information and examples on the "group" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group.html
#
group "cache" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = 1
# The "restart" stanza configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
# For more information and examples on the "restart" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart.html
#
restart {
# The number of attempts to run the job within the specified interval.
attempts = 2
interval = "30m"
# The "delay" parameter specifies the duration to wait before restarting
# a task after it has failed.
delay = "15s"
# The "mode" parameter controls what happens when a task has restarted
# "attempts" times within the interval. "delay" mode delays the next
# restart until the next interval. "fail" mode does not restart the task
# if "attempts" has been hit within the interval.
mode = "fail"
}
# The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
# instead of a hard disk requirement. Clients using this stanza should
# not specify disk requirements in the resources stanza of the task. All
# tasks in this group will share the same ephemeral disk.
#
# For more information and examples on the "ephemeral_disk" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
#
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
#
# Setting migrate to true causes the allocation directory of a
# sticky allocation to be migrated.
# migrate = true
#
# The "size" parameter specifies the size in MB of shared ephemeral disk
# between tasks in the group.
size = 300
}
# The "affinity" stanza enables operators to express placement preferences
# based on node attributes or metadata.
#
# For more information and examples on the "affinity" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity.html
#
# affinity {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# value specifies the desired attribute value. In this example Nomad
# will prefer placement in the "us-west1" datacenter.
# value = "us-west1"
# weight can be used to indicate relative preference
# when the job has more than one affinity. It defaults to 50 if not set.
# weight = 100
# }
# The "spread" stanza allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
# For more information and examples on the "spread" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread.html
#
# spread {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# targets can be used to define desired percentages of allocations
# for each targeted attribute value.
#
# target "us-east1" {
# percent = 60
# }
# target "us-west1" {
# percent = 40
# }
# }
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
# For more information and examples on the "task" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task.html
#
task "redis" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "docker"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
image = "redis:3.2"
port_map {
db = 6379
}
}
# The "artifact" stanza instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
# task. It is possible to specify the "artifact" stanza multiple times to
# download multiple artifacts.
#
# For more information and examples on the "artifact" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact.html
#
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# The "logs" stanza instructs the Nomad client on how many log files and
# the maximum size of those log files to retain. Logging is enabled by
# default, but the "logs" stanza allows for finer-grained control over
# the log rotation and storage configuration.
#
# For more information and examples on the "logs" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs.html
#
# logs {
# max_files = 10
# max_file_size = 15
# }
# The "resources" stanza describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
# For more information and examples on the "resources" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources.html
#
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 10
port "db" {}
}
}
# The "service" stanza instructs Nomad to register this task as a service
# in the service discovery engine, which is currently Consul. This will
# make the service addressable after Nomad has placed it on a host and
# port.
#
# For more information and examples on the "service" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/service.html
#
service {
name = "redis-cache"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# The "template" stanza instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
#
# For more information and examples on the "template" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/template.html
#
# template {
# data = "---\nkey: {{ key \"service/my-key\" }}"
# destination = "local/file.yml"
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# The "template" stanza can also be used to create environment variables
# for tasks that prefer those to config files. The task will be restarted
# when data pulled from Consul or Vault changes.
#
# template {
# data = "KEY={{ key \"service/my-key\" }}"
# destination = "local/file.env"
# env = true
# }
# The "vault" stanza instructs the Nomad client to acquire a token from
# a HashiCorp Vault server. The Nomad servers must be configured and
# authorized to communicate with Vault. By default, Nomad will inject
# the token into the job via an environment variable and make the token
# available to the "template" stanza. The Nomad client handles the renewal
# and revocation of the Vault token.
#
# For more information and examples on the "vault" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/vault.html
#
# vault {
# policies = ["cdn", "frontend"]
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
# kill_timeout = "20s"
}
}
}

View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"sort" "sort"
"strings" "strings"
"time"
"github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts" "github.com/hashicorp/nomad/api/contexts"
@ -203,9 +204,21 @@ func (c *EvalStatusCommand) Run(args []string) int {
statusDesc = eval.Status statusDesc = eval.Status
} }
// Format eval timestamps
var formattedCreateTime, formattedModifyTime string
if verbose {
formattedCreateTime = formatUnixNanoTime(eval.CreateTime)
formattedModifyTime = formatUnixNanoTime(eval.ModifyTime)
} else {
formattedCreateTime = prettyTimeDiff(time.Unix(0, eval.CreateTime), time.Now())
formattedModifyTime = prettyTimeDiff(time.Unix(0, eval.ModifyTime), time.Now())
}
// Format the evaluation data // Format the evaluation data
basic := []string{ basic := []string{
fmt.Sprintf("ID|%s", limit(eval.ID, length)), fmt.Sprintf("ID|%s", limit(eval.ID, length)),
fmt.Sprintf("Create Time|%s", formattedCreateTime),
fmt.Sprintf("Modify Time|%s", formattedModifyTime),
fmt.Sprintf("Status|%s", eval.Status), fmt.Sprintf("Status|%s", eval.Status),
fmt.Sprintf("Status Description|%s", statusDesc), fmt.Sprintf("Status Description|%s", statusDesc),
fmt.Sprintf("Type|%s", eval.Type), fmt.Sprintf("Type|%s", eval.Type),

File diff suppressed because one or more lines are too long

View File

@ -33,6 +33,9 @@ Init Options:
-short
If the short flag is set, a minimal jobspec without comments is emitted.
-connect
If the connect flag is set, the jobspec includes Consul Connect integration.
`
return strings.TrimSpace(helpText)
}
@ -56,10 +59,12 @@ func (c *JobInitCommand) Name() string { return "job init" }
func (c *JobInitCommand) Run(args []string) int {
var short bool
var connect bool
flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&short, "short", false, "")
flags.BoolVar(&connect, "connect", false, "")
if err := flags.Parse(args); err != nil {
return 1
@ -84,11 +89,21 @@ func (c *JobInitCommand) Run(args []string) int {
}
var jobSpec []byte
- if short {
- jobSpec = []byte(shortJob)
- } else {
- jobSpec = []byte(defaultJob)
+ switch {
+ case connect && !short:
+ jobSpec, err = Asset("command/assets/connect.nomad")
+ case connect && short:
+ jobSpec, err = Asset("command/assets/connect-short.nomad")
+ case !connect && short:
+ jobSpec, err = Asset("command/assets/example-short.nomad")
+ default:
+ jobSpec, err = Asset("command/assets/example.nomad")
+ }
+ if err != nil {
+ // should never see this because we've precompiled the assets
+ // as part of `make generate-examples`
+ c.Ui.Error(fmt.Sprintf("Accessed non-existent asset: %s", err))
+ return 1
}
// Write out the example
@ -102,436 +117,3 @@ func (c *JobInitCommand) Run(args []string) int {
c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
return 0
}
var shortJob = strings.TrimSpace(`
job "example" {
datacenters = ["dc1"]
group "cache" {
task "redis" {
driver = "docker"
config {
image = "redis:3.2"
port_map {
db = 6379
}
}
resources {
cpu = 500
memory = 256
network {
mbits = 10
port "db" {}
}
}
service {
name = "redis-cache"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
}
}
`)
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".
# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
# The "region" parameter specifies the region in which to execute the job. If
# omitted, this inherits the default region name of "global".
# region = "global"
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This must be provided.
datacenters = ["dc1"]
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
#
# For more information, please see the online documentation at:
#
# https://www.nomadproject.io/docs/jobspec/schedulers.html
#
type = "service"
# The "constraint" stanza defines additional constraints for placing this job,
# in addition to any resource or driver constraints. This stanza may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
# For more information and examples on the "constraint" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint.html
#
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# The "update" stanza specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
# "update" stanza may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
# group level, the stanzas are merged with the group's taking precedence.
#
# For more information and examples on the "update" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update.html
#
update {
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# at a time.
max_parallel = 1
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
auto_revert = false
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
#
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
canary = 0
}
# The migrate stanza specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
# For more information on the "migrate" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate.html
#
migrate {
# Specifies the number of task groups that can be migrated at the same
# time. This number must be less than the total count for the group as
# (count - max_parallel) will be left running during migrations.
max_parallel = 1
# Specifies the mechanism in which allocations health is determined. The
# potential values are "checks" or "task_states".
health_check = "checks"
# Specifies the minimum time the allocation must be in the healthy state
# before it is marked as healthy and unblocks further allocations from being
# migrated. This is specified using a label suffix like "30s" or "15m".
min_healthy_time = "10s"
# Specifies the deadline in which the allocation must be marked as healthy
# after which the allocation is automatically transitioned to unhealthy. This
# is specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
# For more information and examples on the "group" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group.html
#
group "cache" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = 1
# The "restart" stanza configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
# For more information and examples on the "restart" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart.html
#
restart {
# The number of attempts to run the job within the specified interval.
attempts = 2
interval = "30m"
# The "delay" parameter specifies the duration to wait before restarting
# a task after it has failed.
delay = "15s"
# The "mode" parameter controls what happens when a task has restarted
# "attempts" times within the interval. "delay" mode delays the next
# restart until the next interval. "fail" mode does not restart the task
# if "attempts" has been hit within the interval.
mode = "fail"
}
# The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
# instead of a hard disk requirement. Clients using this stanza should
# not specify disk requirements in the resources stanza of the task. All
# tasks in this group will share the same ephemeral disk.
#
# For more information and examples on the "ephemeral_disk" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
#
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
#
# Setting migrate to true causes the allocation directory of a
# sticky allocation to be migrated.
# migrate = true
# The "size" parameter specifies the size in MB of shared ephemeral disk
# between tasks in the group.
size = 300
}
# The "affinity" stanza enables operators to express placement preferences
# based on node attributes or metadata.
#
# For more information and examples on the "affinity" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity.html
#
# affinity {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# value specifies the desired attribute value. In this example Nomad
# will prefer placement in the "us-west1" datacenter.
# value = "us-west1"
# weight can be used to indicate relative preference
# when the job has more than one affinity. It defaults to 50 if not set.
# weight = 100
# }
# The "spread" stanza allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
# For more information and examples on the "spread" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread.html
#
# spread {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# targets can be used to define desired percentages of allocations
# for each targeted attribute value.
#
# target "us-east1" {
# percent = 60
# }
# target "us-west1" {
# percent = 40
# }
# }
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
# For more information and examples on the "task" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task.html
#
task "redis" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "docker"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
image = "redis:3.2"
port_map {
db = 6379
}
}
# The "artifact" stanza instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
# task. It is possible to specify the "artifact" stanza multiple times to
# download multiple artifacts.
#
# For more information and examples on the "artifact" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact.html
#
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# The "logs" stanza instructs the Nomad client on how many log files and
# the maximum size of those log files to retain. Logging is enabled by
# default, but the "logs" stanza allows for finer-grained control over
# the log rotation and storage configuration.
#
# For more information and examples on the "logs" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs.html
#
# logs {
# max_files = 10
# max_file_size = 15
# }
# The "resources" stanza describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
# For more information and examples on the "resources" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources.html
#
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 10
port "db" {}
}
}
# The "service" stanza instructs Nomad to register this task as a service
# in the service discovery engine, which is currently Consul. This will
# make the service addressable after Nomad has placed it on a host and
# port.
#
# For more information and examples on the "service" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/service.html
#
service {
name = "redis-cache"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# The "template" stanza instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
#
# For more information and examples on the "template" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/template.html
#
# template {
# data = "---\nkey: {{ key \"service/my-key\" }}"
# destination = "local/file.yml"
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# The "template" stanza can also be used to create environment variables
# for tasks that prefer those to config files. The task will be restarted
# when data pulled from Consul or Vault changes.
#
# template {
# data = "KEY={{ key \"service/my-key\" }}"
# destination = "local/file.env"
# env = true
# }
# The "vault" stanza instructs the Nomad client to acquire a token from
# a HashiCorp Vault server. The Nomad servers must be configured and
# authorized to communicate with Vault. By default, Nomad will inject
# the token into the job via an environment variable and make the token
# available to the "template" stanza. The Nomad client handles the renewal
# and revocation of the Vault token.
#
# For more information and examples on the "vault" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/vault.html
#
# vault {
# policies = ["cdn", "frontend"]
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
# kill_timeout = "20s"
}
}
}
`)
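
The init command now loads its example job specs from precompiled assets instead of the Go string literals removed above. For illustration, a self-contained sketch of that selection pattern follows; the map-backed asset store and placeholder file contents stand in for the go-bindata generated Asset function and are invented, not taken from this diff.

package main

import (
	"fmt"
	"os"
)

// assets stands in for the generated Asset() lookup: file paths map to the
// embedded file contents compiled into the binary.
var assets = map[string][]byte{
	"command/assets/example.nomad":       []byte("job \"example\" { /* full example */ }\n"),
	"command/assets/example-short.nomad": []byte("job \"example\" { /* short example */ }\n"),
}

// asset mirrors the generated Asset(name) ([]byte, error) signature.
func asset(name string) ([]byte, error) {
	if b, ok := assets[name]; ok {
		return b, nil
	}
	return nil, fmt.Errorf("asset %q not found", name)
}

func main() {
	short, connect := false, false // would normally come from CLI flags

	var name string
	switch {
	case connect && !short:
		name = "command/assets/connect.nomad"
	case connect && short:
		name = "command/assets/connect-short.nomad"
	case !connect && short:
		name = "command/assets/example-short.nomad"
	default:
		name = "command/assets/example.nomad"
	}

	jobSpec, err := asset(name)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("writing %d bytes from %s\n", len(jobSpec), name)
}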

View File

@ -54,7 +54,8 @@ func TestInitCommand_Run(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
- if string(content) != defaultJob {
+ defaultJob, _ := Asset("command/assets/example.nomad")
+ if string(content) != string(defaultJob) {
t.Fatalf("unexpected file content\n\n%s", string(content))
}
@ -65,7 +66,8 @@
}
content, err = ioutil.ReadFile(DefaultInitName)
require.NoError(t, err)
- require.Equal(t, string(content), shortJob)
+ shortJob, _ := Asset("command/assets/example-short.nomad")
+ require.Equal(t, string(content), string(shortJob))
// Fails if the file exists
if code := cmd.Run([]string{}); code != 1 {
@ -81,7 +83,8 @@ func TestInitCommand_defaultJob(t *testing.T) {
// Ensure the job file is always written with spaces instead of tabs. Since
// the default job file is embedded in the go file, it's easy for tabs to
// slip in.
- if strings.Contains(defaultJob, "\t") {
+ defaultJob, _ := Asset("command/assets/example.nomad")
+ if strings.Contains(string(defaultJob), "\t") {
t.Error("default job contains tab character - please convert to spaces")
}
}

View File

@ -299,6 +299,16 @@ func nodeDrivers(n *api.Node) []string {
return drivers
}
func nodeVolumeNames(n *api.Node) []string {
var volumes []string
for name := range n.HostVolumes {
volumes = append(volumes, name)
}
sort.Strings(volumes)
return volumes
}
func formatDrain(n *api.Node) string {
if n.DrainStrategy != nil {
b := new(strings.Builder)
@ -333,84 +343,107 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int {
} }
if c.short { if c.short {
basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ","))) basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ",")))
c.Ui.Output(c.Colorize().Color(formatKV(basic))) c.Ui.Output(c.Colorize().Color(formatKV(basic)))
} else {
// Get the host stats
hostStats, nodeStatsErr := client.Nodes().Stats(node.ID, nil)
if nodeStatsErr != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", nodeStatsErr))
}
if hostStats != nil {
uptime := time.Duration(hostStats.Uptime * uint64(time.Second))
basic = append(basic, fmt.Sprintf("Uptime|%s", uptime.String()))
}
// Emit the driver info // Output alloc info
if !c.verbose { if err := c.outputAllocInfo(client, node); err != nil {
driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node)) c.Ui.Error(fmt.Sprintf("%s", err))
basic = append(basic, driverStatus)
}
c.Ui.Output(c.Colorize().Color(formatKV(basic)))
if c.verbose {
c.outputNodeDriverInfo(node)
}
// Emit node events
c.outputNodeStatusEvents(node)
// Get list of running allocations on the node
runningAllocs, err := getRunningAllocs(client, node.ID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node for running allocations: %s", err))
return 1 return 1
} }
allocatedResources := getAllocatedResources(client, runningAllocs, node) return 0
c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]")) }
c.Ui.Output(formatList(allocatedResources))
actualResources, err := getActualResources(client, runningAllocs, node) // Get the host stats
if err == nil { hostStats, nodeStatsErr := client.Nodes().Stats(node.ID, nil)
c.Ui.Output(c.Colorize().Color("\n[bold]Allocation Resource Utilization[reset]")) if nodeStatsErr != nil {
c.Ui.Output(formatList(actualResources)) c.Ui.Output("")
} c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", nodeStatsErr))
}
if hostStats != nil {
uptime := time.Duration(hostStats.Uptime * uint64(time.Second))
basic = append(basic, fmt.Sprintf("Uptime|%s", uptime.String()))
}
hostResources, err := getHostResources(hostStats, node) // When we're not running in verbose mode, then also include host volumes and
if err != nil { // driver info in the basic output
c.Ui.Output("") if !c.verbose {
c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", err)) basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
}
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Host Resource Utilization[reset]"))
c.Ui.Output(formatList(hostResources))
}
if err == nil && node.NodeResources != nil && len(node.NodeResources.Devices) > 0 { driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node))
c.Ui.Output(c.Colorize().Color("\n[bold]Device Resource Utilization[reset]")) basic = append(basic, driverStatus)
c.Ui.Output(formatList(getDeviceResourcesForNode(hostStats.DeviceStats, node))) }
}
if hostStats != nil && c.stats { // Output the basic info
c.Ui.Output(c.Colorize().Color("\n[bold]CPU Stats[reset]")) c.Ui.Output(c.Colorize().Color(formatKV(basic)))
c.printCpuStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Memory Stats[reset]")) // If we're running in verbose mode, include full host volume and driver info
c.printMemoryStats(hostStats) if c.verbose {
c.Ui.Output(c.Colorize().Color("\n[bold]Disk Stats[reset]")) c.outputNodeVolumeInfo(node)
c.printDiskStats(hostStats) c.outputNodeDriverInfo(node)
if len(hostStats.DeviceStats) > 0 { }
c.Ui.Output(c.Colorize().Color("\n[bold]Device Stats[reset]"))
printDeviceStats(c.Ui, hostStats.DeviceStats) // Emit node events
} c.outputNodeStatusEvents(node)
// Get list of running allocations on the node
runningAllocs, err := getRunningAllocs(client, node.ID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node for running allocations: %s", err))
return 1
}
allocatedResources := getAllocatedResources(client, runningAllocs, node)
c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]"))
c.Ui.Output(formatList(allocatedResources))
actualResources, err := getActualResources(client, runningAllocs, node)
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Allocation Resource Utilization[reset]"))
c.Ui.Output(formatList(actualResources))
}
hostResources, err := getHostResources(hostStats, node)
if err != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", err))
}
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Host Resource Utilization[reset]"))
c.Ui.Output(formatList(hostResources))
}
if err == nil && node.NodeResources != nil && len(node.NodeResources.Devices) > 0 {
c.Ui.Output(c.Colorize().Color("\n[bold]Device Resource Utilization[reset]"))
c.Ui.Output(formatList(getDeviceResourcesForNode(hostStats.DeviceStats, node)))
}
if hostStats != nil && c.stats {
c.Ui.Output(c.Colorize().Color("\n[bold]CPU Stats[reset]"))
c.printCpuStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Memory Stats[reset]"))
c.printMemoryStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Disk Stats[reset]"))
c.printDiskStats(hostStats)
if len(hostStats.DeviceStats) > 0 {
c.Ui.Output(c.Colorize().Color("\n[bold]Device Stats[reset]"))
printDeviceStats(c.Ui, hostStats.DeviceStats)
} }
} }
if err := c.outputAllocInfo(client, node); err != nil {
c.Ui.Error(fmt.Sprintf("%s", err))
return 1
}
return 0
}
func (c *NodeStatusCommand) outputAllocInfo(client *api.Client, node *api.Node) error {
nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil) nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil)
if err != nil { if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err)) return fmt.Errorf("Error querying node allocations: %s", err)
return 1
} }
c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]")) c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]"))
@ -421,8 +454,8 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int {
c.formatDeviceAttributes(node) c.formatDeviceAttributes(node)
c.formatMeta(node) c.formatMeta(node)
} }
return 0
return nil
} }
func (c *NodeStatusCommand) outputTruncatedNodeDriverInfo(node *api.Node) string { func (c *NodeStatusCommand) outputTruncatedNodeDriverInfo(node *api.Node) string {
@ -443,6 +476,25 @@ func (c *NodeStatusCommand) outputTruncatedNodeDriverInfo(node *api.Node) string
return strings.Trim(strings.Join(drivers, ","), ", ")
}
func (c *NodeStatusCommand) outputNodeVolumeInfo(node *api.Node) {
c.Ui.Output(c.Colorize().Color("\n[bold]Host Volumes"))
names := make([]string, 0, len(node.HostVolumes))
for name := range node.HostVolumes {
names = append(names, name)
}
sort.Strings(names)
output := make([]string, 0, len(names)+1)
output = append(output, "Name|ReadOnly|Source")
for _, volName := range names {
info := node.HostVolumes[volName]
output = append(output, fmt.Sprintf("%s|%v|%s", volName, info.ReadOnly, info.Path))
}
c.Ui.Output(formatList(output))
}
func (c *NodeStatusCommand) outputNodeDriverInfo(node *api.Node) {
c.Ui.Output(c.Colorize().Color("\n[bold]Drivers"))
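
The new outputNodeVolumeInfo builds rows in the pipe-delimited Name|ReadOnly|Source form that the CLI's formatList helper renders as an aligned table. For illustration, a standalone approximation follows; the volume names and paths are invented, and text/tabwriter stands in for formatList, which is internal to the command package. The HostVolumeInfo type is a stand-in for the entries of node.HostVolumes, which carry at least Path and ReadOnly as used above.

package main

import (
	"fmt"
	"os"
	"sort"
	"strings"
	"text/tabwriter"
)

// HostVolumeInfo holds just the fields the node status output prints.
type HostVolumeInfo struct {
	Path     string
	ReadOnly bool
}

func main() {
	volumes := map[string]HostVolumeInfo{
		"ca-certificates": {Path: "/etc/ssl/certs", ReadOnly: true},
		"scratch":         {Path: "/var/nomad/scratch", ReadOnly: false},
	}

	// Sort names so the output is stable, as nodeVolumeNames does.
	names := make([]string, 0, len(volumes))
	for name := range volumes {
		names = append(names, name)
	}
	sort.Strings(names)

	// Rows use the "a|b|c" convention that the CLI renders as aligned columns;
	// tabwriter is used here only to approximate that rendering.
	rows := []string{"Name|ReadOnly|Source"}
	for _, name := range names {
		v := volumes[name]
		rows = append(rows, fmt.Sprintf("%s|%v|%s", name, v.ReadOnly, v.Path))
	}

	w := tabwriter.NewWriter(os.Stdout, 0, 2, 2, ' ', 0)
	for _, row := range rows {
		fmt.Fprintln(w, strings.ReplaceAll(row, "|", "\t"))
	}
	w.Flush()
}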

View File

@ -216,6 +216,12 @@ var (
hclspec.NewAttr("nvidia_runtime", "string", false), hclspec.NewAttr("nvidia_runtime", "string", false),
hclspec.NewLiteral(`"nvidia"`), hclspec.NewLiteral(`"nvidia"`),
), ),
// image to use when creating a network namespace parent container
"infra_image": hclspec.NewDefault(
hclspec.NewAttr("infra_image", "string", false),
hclspec.NewLiteral(`"gcr.io/google_containers/pause-amd64:3.0"`),
),
}) })
// taskConfigSpec is the hcl specification for the driver config section of // taskConfigSpec is the hcl specification for the driver config section of
@ -310,6 +316,12 @@ var (
SendSignals: true, SendSignals: true,
Exec: true, Exec: true,
FSIsolation: drivers.FSIsolationImage, FSIsolation: drivers.FSIsolationImage,
NetIsolationModes: []drivers.NetIsolationMode{
drivers.NetIsolationModeHost,
drivers.NetIsolationModeGroup,
drivers.NetIsolationModeTask,
},
MustInitiateNetwork: true,
} }
) )
@ -485,6 +497,7 @@ type DriverConfig struct {
AllowPrivileged bool `codec:"allow_privileged"` AllowPrivileged bool `codec:"allow_privileged"`
AllowCaps []string `codec:"allow_caps"` AllowCaps []string `codec:"allow_caps"`
GPURuntimeName string `codec:"nvidia_runtime"` GPURuntimeName string `codec:"nvidia_runtime"`
InfraImage string `codec:"infra_image"`
} }
type AuthConfig struct { type AuthConfig struct {

View File

@ -65,6 +65,9 @@ type DockerImageClient interface {
// LogEventFn is a callback which allows Drivers to emit task events.
type LogEventFn func(message string, annotations map[string]string)
// noopLogEventFn satisfies the LogEventFn type but noops when called
func noopLogEventFn(string, map[string]string) {}
// dockerCoordinatorConfig is used to configure the Docker coordinator.
type dockerCoordinatorConfig struct {
// logger is the logger the coordinator should use

View File

@ -266,7 +266,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
startAttempts := 0
CREATE:
- container, err := d.createContainer(client, containerCfg, &driverConfig)
+ container, err := d.createContainer(client, containerCfg, driverConfig.Image)
if err != nil {
d.logger.Error("failed to create container", "error", err)
return nil, nil, nstructs.WrapRecoverable(fmt.Sprintf("failed to create container: %v", err), err)
@ -368,7 +368,7 @@ type createContainerClient interface {
// createContainer creates the container given the passed configuration. It
// attempts to handle any transient Docker errors.
func (d *Driver) createContainer(client createContainerClient, config docker.CreateContainerOptions,
- driverConfig *TaskConfig) (*docker.Container, error) {
+ image string) (*docker.Container, error) {
// Create a container
attempted := 0
CREATE:
@ -378,7 +378,7 @@ CREATE:
}
d.logger.Debug("failed to create container", "container_name",
- config.Name, "image_name", driverConfig.Image, "image_id", config.Config.Image,
+ config.Name, "image_name", image, "image_id", config.Config.Image,
"attempt", attempted+1, "error", createErr)
// Volume management tools like Portworx may not have detached a volume
@ -869,11 +869,22 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
hostConfig.ReadonlyRootfs = driverConfig.ReadonlyRootfs
+ // set the docker network mode
hostConfig.NetworkMode = driverConfig.NetworkMode
+ // if the driver config does not specify a network mode then try to use the
+ // shared alloc network
if hostConfig.NetworkMode == "" {
- // docker default
- logger.Debug("networking mode not specified; using default", "network_mode", defaultNetworkMode)
- hostConfig.NetworkMode = defaultNetworkMode
+ if task.NetworkIsolation != nil && task.NetworkIsolation.Path != "" {
+ // find the previously created parent container to join networks with
+ netMode := fmt.Sprintf("container:%s", task.NetworkIsolation.Labels[dockerNetSpecLabelKey])
+ logger.Debug("configuring network mode for task group", "network_mode", netMode)
+ hostConfig.NetworkMode = netMode
+ } else {
+ // docker default
+ logger.Debug("networking mode not specified; using default")
+ hostConfig.NetworkMode = "default"
+ }
}
// Setup port mapping and exposed ports
@ -1312,7 +1323,7 @@ func (d *Driver) ExecTaskStreaming(ctx context.Context, taskID string, opts *dri
const execTerminatingTimeout = 3 * time.Second
start := time.Now()
var res *docker.ExecInspect
- for res == nil || res.Running || time.Since(start) > execTerminatingTimeout {
+ for (res == nil || res.Running) && time.Since(start) <= execTerminatingTimeout {
res, err = client.InspectExec(exec.ID)
if err != nil {
return nil, fmt.Errorf("failed to inspect exec result: %v", err)

View File

@ -7,11 +7,6 @@ import (
"github.com/moby/moby/daemon/caps" "github.com/moby/moby/daemon/caps"
) )
const (
// Setting default network mode for non-windows OS as bridge
defaultNetworkMode = "bridge"
)
func getPortBinding(ip string, port string) []docker.PortBinding {
return []docker.PortBinding{{HostIP: ip, HostPort: port}}
}

View File

@ -2230,7 +2230,7 @@ func TestDockerDriver_VolumeError(t *testing.T) {
driver := dockerDriverHarness(t, nil)
// assert volume error is recoverable
- _, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg)
+ _, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
require.True(t, structs.IsRecoverable(err))
}

View File

@ -2,11 +2,6 @@ package docker
import docker "github.com/fsouza/go-dockerclient" import docker "github.com/fsouza/go-dockerclient"
const (
// Default network mode for windows containers is nat
defaultNetworkMode = "nat"
)
//Currently Windows containers don't support host ip in port binding.
func getPortBinding(ip string, port string) []docker.PortBinding {
return []docker.PortBinding{{HostIP: "", HostPort: port}}

drivers/docker/network.go Normal file
View File

@ -0,0 +1,90 @@
package docker
import (
"fmt"
docker "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/nomad/plugins/drivers"
)
// dockerNetSpecLabelKey is used when creating a parent container for
// shared networking. It is a label whose value identifies the container ID of
// the parent container so tasks can configure their network mode accordingly
const dockerNetSpecLabelKey = "docker_sandbox_container_id"
func (d *Driver) CreateNetwork(allocID string) (*drivers.NetworkIsolationSpec, error) {
// Initialize docker API clients
client, _, err := d.dockerClients()
if err != nil {
return nil, fmt.Errorf("failed to connect to docker daemon: %s", err)
}
repo, _ := parseDockerImage(d.config.InfraImage)
authOptions, err := firstValidAuth(repo, []authBackend{
authFromDockerConfig(d.config.Auth.Config),
authFromHelper(d.config.Auth.Helper),
})
if err != nil {
d.logger.Debug("auth failed for infra container image pull", "image", d.config.InfraImage, "error", err)
}
_, err = d.coordinator.PullImage(d.config.InfraImage, authOptions, allocID, noopLogEventFn)
if err != nil {
return nil, err
}
config, err := d.createSandboxContainerConfig(allocID)
if err != nil {
return nil, err
}
container, err := d.createContainer(client, *config, d.config.InfraImage)
if err != nil {
return nil, err
}
if err := d.startContainer(container); err != nil {
return nil, err
}
c, err := client.InspectContainer(container.ID)
if err != nil {
return nil, err
}
return &drivers.NetworkIsolationSpec{
Mode: drivers.NetIsolationModeGroup,
Path: c.NetworkSettings.SandboxKey,
Labels: map[string]string{
dockerNetSpecLabelKey: c.ID,
},
}, nil
}
func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
client, _, err := d.dockerClients()
if err != nil {
return fmt.Errorf("failed to connect to docker daemon: %s", err)
}
return client.RemoveContainer(docker.RemoveContainerOptions{
Force: true,
ID: spec.Labels[dockerNetSpecLabelKey],
})
}
// createSandboxContainerConfig creates a docker container configuration which
// starts a container with an empty network namespace
func (d *Driver) createSandboxContainerConfig(allocID string) (*docker.CreateContainerOptions, error) {
return &docker.CreateContainerOptions{
Name: fmt.Sprintf("nomad_init_%s", allocID),
Config: &docker.Config{
Image: d.config.InfraImage,
},
HostConfig: &docker.HostConfig{
// set the network mode to none which creates a network namespace with
// only a loopback interface
NetworkMode: "none",
},
}, nil
}
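
For context on how the sandbox created above gets used (not part of this changeset): a task container joins the parent's network namespace by setting its Docker network mode to container:<parent-id>, where the ID is the value stored under the docker_sandbox_container_id label. The sketch below uses go-dockerclient directly; the container name, image, and placeholder parent ID are invented.

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatalf("failed to connect to docker daemon: %v", err)
	}

	// parentID would normally come from the NetworkIsolationSpec labels,
	// i.e. spec.Labels["docker_sandbox_container_id"] set by CreateNetwork.
	parentID := "0123456789ab" // placeholder container ID

	// Joining "container:<id>" shares the parent's network namespace, so the
	// task sees the same interfaces and ports as the pause/infra container.
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: "example-task",
		Config: &docker.Config{
			Image: "redis:3.2",
		},
		HostConfig: &docker.HostConfig{
			NetworkMode: fmt.Sprintf("container:%s", parentID),
		},
	})
	if err != nil {
		log.Fatalf("failed to create container: %v", err)
	}

	if err := client.StartContainer(container.ID, nil); err != nil {
		log.Fatalf("failed to start container: %v", err)
	}

	fmt.Println("started", container.ID, "in the shared network namespace")
}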

View File

@ -33,7 +33,7 @@ const (
// rxHostDir is the first option of a source
rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*`
// rxName is the second option of a source
- rxName = `[^\\/:*?"<>|\r\n]+`
+ rxName = `[^\\/:*?"<>|\r\n]+\/?.*`
// RXReservedNames are reserved names not possible on Windows
rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])`
@ -58,7 +58,7 @@
// - And can be optional
// rxDestination is the regex expression for the mount destination
- rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))`
+ rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `)|([/].*))`
// Destination (aka container path):
// - Variation on hostdir but can be a drive followed by colon as well

View File

@ -342,17 +342,18 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
}
execCmd := &executor.ExecCommand{
Cmd: driverConfig.Command,
Args: driverConfig.Args,
Env: cfg.EnvList(),
User: user,
ResourceLimits: true,
Resources: cfg.Resources,
TaskDir: cfg.TaskDir().Dir,
StdoutPath: cfg.StdoutPath,
StderrPath: cfg.StderrPath,
Mounts: cfg.Mounts,
Devices: cfg.Devices,
NetworkIsolation: cfg.NetworkIsolation,
}
ps, err := exec.Launch(execCmd)

View File

@ -678,3 +678,11 @@ func (d *Driver) GetHandle(taskID string) *taskHandle {
func (d *Driver) Shutdown() {
d.signalShutdown()
}
func (d *Driver) CreateNetwork(allocID string) (*drivers.NetworkIsolationSpec, error) {
return nil, nil
}
func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
return nil
}

View File

@ -95,6 +95,10 @@ var (
SendSignals: true,
Exec: true,
FSIsolation: drivers.FSIsolationNone,
NetIsolationModes: []drivers.NetIsolationMode{
drivers.NetIsolationModeHost,
drivers.NetIsolationModeGroup,
},
}
)
@ -342,6 +346,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
TaskDir: cfg.TaskDir().Dir,
StdoutPath: cfg.StdoutPath,
StderrPath: cfg.StderrPath,
NetworkIsolation: cfg.NetworkIsolation,
}
ps, err := exec.Launch(execCmd)

Some files were not shown because too many files have changed in this diff.