Merge branch 'master' into fix-pending-state
commit c76b3b54b9

.gitignore (vendored): 3 changed lines

@@ -57,3 +57,6 @@ TODO.md
.terraform
*.tfstate*
rkt-*

./idea
*.iml
CHANGELOG.md: 28 changed lines

@@ -1,4 +1,22 @@
## 0.6.0 (Unreleased)
## 0.6.1 (Unreleased)

IMPROVEMENTS:
* core: `distinct_property` constraint can set the number of allocations that
  are allowed to share a property value [GH-2942]
* driver/rkt: support read-only volume mounts [GH-2883]

BUG FIXES:
* core: Fix incorrect destructive update with `distinct_property` constraint
  [GH-2939]
* cli: Fix autocompleting global flags [GH-2928]
* cli: Fix panic when using 0.6.0 cli with an older cluster [GH-2929]
* driver/docker: Fix leaking plugin file used by syslog server [GH-2937]

## 0.6.0 (July 26, 2017)

__BACKWARDS INCOMPATIBILITIES:__
* cli: When given a prefix that does not resolve to a particular object,
  commands now return exit code 1 rather than 0.

IMPROVEMENTS:
* core: Rolling updates based on allocation health [GH-2621, GH-2634, GH-2799]

@@ -14,7 +32,9 @@ IMPROVEMENTS:
* api: Add `verify_https_client` to require certificates from HTTP clients
  [GH-2587]
* api/job: Ability to revert job to older versions [GH-2575]
* cli: Autocomplete for CLI commands [GH-2848]
* client: Use a random host UUID by default [GH-2735]
* client: Add `NOMAD_GROUP_NAME` environment variable [GH-2877]
* client: Environment variables for client DC and Region [GH-2507]
* client: Hash host ID so it's stable and well distributed [GH-2541]
* client: GC dead allocs if total allocs > `gc_max_allocs` tunable [GH-2636]

@@ -22,11 +42,13 @@ IMPROVEMENTS:
  [GH-2610]
* client: Fingerprint all routable addresses on an interface including IPv6
  addresses [GH-2536]
* client/artifact: Support .xz archives [GH-2836]
* client/artifact: Allow specifying a go-getter mode [GH-2781]
* client/artifact: Support non-Amazon S3-compatible sources [GH-2781]
* client/template: Support reading env vars from templates [GH-2654]
* config: Support Unix socket addresses for Consul [GH-2622]
* discovery: Advertise driver-specified IP address and port [GH-2709]
* discovery: Support `tls_skip_verify` for Consul HTTPS checks [GH-2467]
* driver/docker: Allow specifying extra hosts [GH-2547]
* driver/docker: Allow setting seccomp profiles [GH-2658]
* driver/docker: Support Docker credential helpers [GH-2651]

@@ -36,6 +58,9 @@ IMPROVEMENTS:
  [GH-2535]
* driver/rkt: Support `no_overlay` [GH-2702]
* driver/rkt: Support `insecure_options` list [GH-2695]
* server: Allow tuning of node heartbeat TTLs [GH-2859]
* server/networking: Shrink dynamic port range to not overlap with majority of
  operating system's ephemeral port ranges to avoid port conflicts [GH-2856]

BUG FIXES:
* core: Protect against nil job in new allocation, avoiding panic [GH-2592]

@@ -50,6 +75,7 @@ BUG FIXES:
* client: Client syncs allocation state with server before waiting for
  allocation destroy, fixing a corner case in which an allocation may be blocked
  till destroy [GH-2563]
* client: Improved state file handling and reduced write volume [GH-2878]
* client/artifact: Honor netrc [GH-2524]
* client/artifact: Handle tars where file in directory is listed before
  directory [GH-2524]
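The `distinct_property` items above are the headline changes in this hunk. A minimal sketch of what GH-2942 enables, using the Go `api` package that appears later in this diff; the `${meta.rack}` property, the limit of 2, and the job parameters are illustrative assumptions, not values from this commit:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// With GH-2942 the right-hand side of a distinct_property constraint may
	// carry a count: at most 2 allocations of the job can share one value of
	// ${meta.rack}. An empty value keeps the old "at most 1" behavior.
	c := api.NewConstraint("${meta.rack}", "distinct_property", "2")

	job := api.NewServiceJob("cache", "cache", "global", 50).Constrain(c)
	fmt.Println(job.Constraints[0].LTarget, job.Constraints[0].Operand, job.Constraints[0].RTarget)
}
```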
(file name not captured in this view)

@@ -1,7 +1,6 @@
PACKAGES = $(shell go list ./... | grep -v '/vendor/')
EXTERNAL_TOOLS=\
	github.com/kardianos/govendor \
	github.com/mitchellh/gox \
	golang.org/x/tools/cmd/cover \
	github.com/axw/gocov/gocov \
	gopkg.in/matm/v1/gocov-html \
README.md: 11 changed lines

@@ -1,7 +1,8 @@
Nomad [![Build Status](https://travis-ci.org/hashicorp/nomad.svg)](https://travis-ci.org/hashicorp/nomad) [![Build status](https://ci.appveyor.com/api/projects/status/i748vuqet037ojo3?svg=true)](https://ci.appveyor.com/project/hashicorp/nomad) [![Join the chat at https://gitter.im/hashicorp-nomad/Lobby](https://badges.gitter.im/hashicorp-nomad/Lobby.svg)](https://gitter.im/hashicorp-nomad/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Nomad [![Build Status](https://travis-ci.org/hashicorp/nomad.svg)](https://travis-ci.org/hashicorp/nomad) [![Join the chat at https://gitter.im/hashicorp-nomad/Lobby](https://badges.gitter.im/hashicorp-nomad/Lobby.svg)](https://gitter.im/hashicorp-nomad/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
=========

- Website: https://www.nomadproject.io
- Mailing list: [Google Groups](https://groups.google.com/group/nomad-tool)

* Website: [www.nomadproject.io](https://www.nomadproject.io)
* Mailing list: [Google Groups](https://groups.google.com/group/nomad-tool)

<p align="center" style="text-align:center;">
  <img src="https://cdn.rawgit.com/hashicorp/nomad/master/website/source/assets/images/logo-text.svg" width="500" />

@@ -61,7 +62,7 @@ machine (version 1.8+ is *required*).

**Developing with Vagrant**
There is an included Vagrantfile that can help bootstrap the process. The
created virtual machine is based off of Ubuntu 14, and installs several of the
created virtual machine is based off of Ubuntu 16, and installs several of the
base libraries that can be used by Nomad.

To use this virtual machine, checkout Nomad and run `vagrant up` from the root

@@ -78,7 +79,7 @@ needed dependencies.

**Developing locally**
For local dev first make sure Go is properly installed, including setting up a
[GOPATH](https://golang.org/doc/code.html#GOPATH). After setting up Go, clone this
[GOPATH](https://golang.org/doc/code.html#GOPATH). After setting up Go, clone this
repository into `$GOPATH/src/github.com/hashicorp/nomad`. Then you can
download the required build tools such as vet, cover, godep etc by bootstrapping
your environment.
@ -9,6 +9,7 @@ import (
)

func TestAgent_Self(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, nil)
	defer s.Stop()

@ -33,6 +34,7 @@ func TestAgent_Self(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_NodeName(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
a := c.Agent()
|
||||
|
@ -48,6 +50,7 @@ func TestAgent_NodeName(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_Datacenter(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
a := c.Agent()
|
||||
|
@ -63,6 +66,7 @@ func TestAgent_Datacenter(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_Join(t *testing.T) {
|
||||
t.Parallel()
|
||||
c1, s1 := makeClient(t, nil, nil)
|
||||
defer s1.Stop()
|
||||
a1 := c1.Agent()
|
||||
|
@ -92,6 +96,7 @@ func TestAgent_Join(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_Members(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
a := c.Agent()
|
||||
|
@ -112,6 +117,7 @@ func TestAgent_Members(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_ForceLeave(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
a := c.Agent()
|
||||
|
@ -129,6 +135,7 @@ func (a *AgentMember) String() string {
|
|||
}
|
||||
|
||||
func TestAgents_Sort(t *testing.T) {
|
||||
t.Parallel()
|
||||
var sortTests = []struct {
|
||||
in []*AgentMember
|
||||
out []*AgentMember
|
||||
|
|
|
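Every hunk in these API client test files makes the same mechanical change: `t.Parallel()` becomes the first statement of each test so the package's tests may run concurrently, shortening the suite's wall-clock time. A minimal sketch of the pattern (the test name and body are illustrative, not from the commit):

```go
package api

import "testing"

func TestExample_RunsInParallel(t *testing.T) {
	// Signal the test runner that this test may run alongside the package's
	// other parallel tests; each test still manages its own test server.
	t.Parallel()

	// ...start a test agent and exercise the client, as the surrounding
	// tests do with makeClient(t, nil, nil).
}
```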
@ -7,6 +7,7 @@ import (
|
|||
)
|
||||
|
||||
func TestAllocations_List(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
a := c.Allocations()
|
||||
|
@ -53,6 +54,7 @@ func TestAllocations_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocations_PrefixList(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
a := c.Allocations()
|
||||
|
@ -100,6 +102,7 @@ func TestAllocations_PrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocations_CreateIndexSort(t *testing.T) {
|
||||
t.Parallel()
|
||||
allocs := []*AllocationListStub{
|
||||
&AllocationListStub{CreateIndex: 2},
|
||||
&AllocationListStub{CreateIndex: 1},
|
||||
|
|
|
@ -43,6 +43,7 @@ func makeClient(t *testing.T, cb1 configCallback,
|
|||
}
|
||||
|
||||
func TestRequestTime(t *testing.T) {
|
||||
t.Parallel()
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
d, err := json.Marshal(struct{ Done bool }{true})
|
||||
|
@ -90,6 +91,7 @@ func TestRequestTime(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDefaultConfig_env(t *testing.T) {
|
||||
t.Parallel()
|
||||
url := "http://1.2.3.4:5678"
|
||||
auth := []string{"nomaduser", "12345"}
|
||||
|
||||
|
@ -115,6 +117,7 @@ func TestDefaultConfig_env(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetQueryOptions(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
|
||||
|
@ -142,6 +145,7 @@ func TestSetQueryOptions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetWriteOptions(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
|
||||
|
@ -157,6 +161,7 @@ func TestSetWriteOptions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRequestToHTTP(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
|
||||
|
@ -179,6 +184,7 @@ func TestRequestToHTTP(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseQueryMeta(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := &http.Response{
|
||||
Header: make(map[string][]string),
|
||||
}
|
||||
|
@ -203,6 +209,7 @@ func TestParseQueryMeta(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseWriteMeta(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := &http.Response{
|
||||
Header: make(map[string][]string),
|
||||
}
|
||||
|
@ -219,6 +226,7 @@ func TestParseWriteMeta(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestQueryString(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestCompose(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Compose a task
|
||||
task := NewTask("task1", "exec").
|
||||
SetConfig("foo", "bar").
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
)
|
||||
|
||||
func TestCompose_Constraints(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := NewConstraint("kernel.name", "=", "darwin")
|
||||
expect := &Constraint{
|
||||
LTarget: "kernel.name",
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestEvaluations_List(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
e := c.Evaluations()
|
||||
|
@ -49,6 +50,7 @@ func TestEvaluations_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEvaluations_PrefixList(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
e := c.Evaluations()
|
||||
|
@ -88,6 +90,7 @@ func TestEvaluations_PrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEvaluations_Info(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
e := c.Evaluations()
|
||||
|
@ -121,6 +124,7 @@ func TestEvaluations_Info(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEvaluations_Allocations(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
e := c.Evaluations()
|
||||
|
@ -139,6 +143,7 @@ func TestEvaluations_Allocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEvaluations_Sort(t *testing.T) {
|
||||
t.Parallel()
|
||||
evals := []*Evaluation{
|
||||
&Evaluation{CreateIndex: 2},
|
||||
&Evaluation{CreateIndex: 1},
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestFS_FrameReader(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Create a channel of the frames and a cancel channel
|
||||
framesCh := make(chan *StreamFrame, 3)
|
||||
cancelCh := make(chan struct{})
|
||||
|
@ -76,6 +77,7 @@ func TestFS_FrameReader(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_FrameReader_Unblock(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Create a channel of the frames and a cancel channel
|
||||
framesCh := make(chan *StreamFrame, 3)
|
||||
cancelCh := make(chan struct{})
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
)
|
||||
|
||||
func TestJobs_Register(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -54,6 +55,7 @@ func TestJobs_Register(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Validate(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -81,6 +83,7 @@ func TestJobs_Validate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Canonicalize(t *testing.T) {
|
||||
t.Parallel()
|
||||
testCases := []struct {
|
||||
name string
|
||||
expected *Job
|
||||
|
@ -589,6 +592,7 @@ func TestJobs_Canonicalize(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_EnforceRegister(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -657,6 +661,7 @@ func TestJobs_EnforceRegister(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Revert(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -706,6 +711,7 @@ func TestJobs_Revert(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Info(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -739,6 +745,7 @@ func TestJobs_Info(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Versions(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -771,6 +778,7 @@ func TestJobs_Versions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_PrefixList(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -809,6 +817,7 @@ func TestJobs_PrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_List(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -847,6 +856,7 @@ func TestJobs_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Allocations(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -868,6 +878,7 @@ func TestJobs_Allocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Evaluations(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -909,6 +920,7 @@ func TestJobs_Evaluations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Deregister(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -968,6 +980,7 @@ func TestJobs_Deregister(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_ForceEvaluate(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -1007,6 +1020,7 @@ func TestJobs_ForceEvaluate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_PeriodicForce(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -1059,6 +1073,7 @@ func TestJobs_PeriodicForce(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Plan(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -1129,6 +1144,7 @@ func TestJobs_Plan(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_JobSummary(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
jobs := c.Jobs()
|
||||
|
@ -1166,6 +1182,7 @@ func TestJobs_JobSummary(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_NewBatchJob(t *testing.T) {
|
||||
t.Parallel()
|
||||
job := NewBatchJob("job1", "myjob", "region1", 5)
|
||||
expect := &Job{
|
||||
Region: helper.StringToPtr("region1"),
|
||||
|
@ -1180,6 +1197,7 @@ func TestJobs_NewBatchJob(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_NewServiceJob(t *testing.T) {
|
||||
t.Parallel()
|
||||
job := NewServiceJob("job1", "myjob", "region1", 5)
|
||||
expect := &Job{
|
||||
Region: helper.StringToPtr("region1"),
|
||||
|
@ -1194,6 +1212,7 @@ func TestJobs_NewServiceJob(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_SetMeta(t *testing.T) {
|
||||
t.Parallel()
|
||||
job := &Job{Meta: nil}
|
||||
|
||||
// Initializes a nil map
|
||||
|
@ -1216,6 +1235,7 @@ func TestJobs_SetMeta(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Constrain(t *testing.T) {
|
||||
t.Parallel()
|
||||
job := &Job{Constraints: nil}
|
||||
|
||||
// Create and add a constraint
|
||||
|
@ -1249,6 +1269,7 @@ func TestJobs_Constrain(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJobs_Sort(t *testing.T) {
|
||||
t.Parallel()
|
||||
jobs := []*JobListStub{
|
||||
&JobListStub{ID: "job2"},
|
||||
&JobListStub{ID: "job0"},
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
)
|
||||
|
||||
func TestNodes_List(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
|
||||
c.DevMode = true
|
||||
})
|
||||
|
@ -40,6 +41,7 @@ func TestNodes_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNodes_PrefixList(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
|
||||
c.DevMode = true
|
||||
})
|
||||
|
@ -80,6 +82,7 @@ func TestNodes_PrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNodes_Info(t *testing.T) {
|
||||
t.Parallel()
|
||||
startTime := time.Now().Unix()
|
||||
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
|
||||
c.DevMode = true
|
||||
|
@ -131,6 +134,7 @@ func TestNodes_Info(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNodes_ToggleDrain(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
|
||||
c.DevMode = true
|
||||
})
|
||||
|
@ -196,6 +200,7 @@ func TestNodes_ToggleDrain(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNodes_Allocations(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
nodes := c.Nodes()
|
||||
|
@ -214,6 +219,7 @@ func TestNodes_Allocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNodes_ForceEvaluate(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
|
||||
c.DevMode = true
|
||||
})
|
||||
|
@ -252,6 +258,7 @@ func TestNodes_ForceEvaluate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNodes_Sort(t *testing.T) {
|
||||
t.Parallel()
|
||||
nodes := []*NodeListStub{
|
||||
&NodeListStub{CreateIndex: 2},
|
||||
&NodeListStub{CreateIndex: 1},
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
)
|
||||
|
||||
func TestOperator_RaftGetConfiguration(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
|
||||
|
@ -22,6 +23,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestRegionsList(t *testing.T) {
|
||||
t.Parallel()
|
||||
c1, s1 := makeClient(t, nil, func(c *testutil.TestServerConfig) {
|
||||
c.Region = "regionA"
|
||||
})
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
)
|
||||
|
||||
func TestStatus_Leader(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
status := c.Status()
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
)
|
||||
|
||||
func TestSystem_GarbageCollect(t *testing.T) {
|
||||
t.Parallel()
|
||||
c, s := makeClient(t, nil, nil)
|
||||
defer s.Stop()
|
||||
e := c.System()
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestTaskGroup_NewTaskGroup(t *testing.T) {
|
||||
t.Parallel()
|
||||
grp := NewTaskGroup("grp1", 2)
|
||||
expect := &TaskGroup{
|
||||
Name: helper.StringToPtr("grp1"),
|
||||
|
@ -19,6 +20,7 @@ func TestTaskGroup_NewTaskGroup(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskGroup_Constrain(t *testing.T) {
|
||||
t.Parallel()
|
||||
grp := NewTaskGroup("grp1", 1)
|
||||
|
||||
// Add a constraint to the group
|
||||
|
@ -52,6 +54,7 @@ func TestTaskGroup_Constrain(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskGroup_SetMeta(t *testing.T) {
|
||||
t.Parallel()
|
||||
grp := NewTaskGroup("grp1", 1)
|
||||
|
||||
// Initializes an empty map
|
||||
|
@ -74,6 +77,7 @@ func TestTaskGroup_SetMeta(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskGroup_AddTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
grp := NewTaskGroup("grp1", 1)
|
||||
|
||||
// Add the task to the task group
|
||||
|
@ -105,6 +109,7 @@ func TestTaskGroup_AddTask(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTask_NewTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := NewTask("task1", "exec")
|
||||
expect := &Task{
|
||||
Name: "task1",
|
||||
|
@ -116,6 +121,7 @@ func TestTask_NewTask(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTask_SetConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := NewTask("task1", "exec")
|
||||
|
||||
// Initializes an empty map
|
||||
|
@ -138,6 +144,7 @@ func TestTask_SetConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTask_SetMeta(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := NewTask("task1", "exec")
|
||||
|
||||
// Initializes an empty map
|
||||
|
@ -160,6 +167,7 @@ func TestTask_SetMeta(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTask_Require(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := NewTask("task1", "exec")
|
||||
|
||||
// Create some require resources
|
||||
|
@ -188,6 +196,7 @@ func TestTask_Require(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTask_Constrain(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := NewTask("task1", "exec")
|
||||
|
||||
// Add a constraint to the task
|
||||
|
@ -221,6 +230,7 @@ func TestTask_Constrain(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTask_Artifact(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := TaskArtifact{
|
||||
GetterSource: helper.StringToPtr("http://localhost/foo.txt"),
|
||||
GetterMode: helper.StringToPtr("file"),
|
||||
|
|
|
@ -51,6 +51,11 @@ type AllocRunner struct {
	updater AllocStateUpdater
	logger  *log.Logger

	// allocID is the ID of this runner's allocation. Since it does not
	// change for the lifetime of the AllocRunner it is safe to read
	// without acquiring a lock (unlike alloc).
	allocID string

	alloc                  *structs.Allocation
	allocClientStatus      string // Explicit status of allocation. Set when there are failures
	allocClientDescription string
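This hunk, together with the many `r.alloc.ID` → `r.allocID` substitutions further down the file, caches the allocation ID once at construction so that log lines and state-store keys no longer need to acquire `allocLock`. A generic sketch of the pattern, with hypothetical names rather than Nomad's:

```go
package main

import (
	"fmt"
	"sync"
)

type allocation struct{ ID, Status string }

type runner struct {
	mu    sync.Mutex
	alloc *allocation // mutable; guarded by mu

	// allocID never changes after newRunner returns, so hot paths such as
	// logging can read it without taking mu.
	allocID string
}

func newRunner(a *allocation) *runner {
	return &runner{alloc: a, allocID: a.ID}
}

func (r *runner) logf(format string, args ...interface{}) {
	fmt.Printf("[alloc "+r.allocID+"] "+format+"\n", args...)
}

func main() {
	r := newRunner(&allocation{ID: "1234", Status: "pending"})
	r.logf("status is %s", "pending") // no lock needed for the ID
}
```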
@ -83,7 +88,8 @@ type AllocRunner struct {
|
|||
|
||||
// State related fields
|
||||
// stateDB is used to store the alloc runners state
|
||||
stateDB *bolt.DB
|
||||
stateDB *bolt.DB
|
||||
allocStateLock sync.Mutex
|
||||
|
||||
// persistedEval is the last persisted evaluation ID. Since evaluation
|
||||
// IDs change on every allocation update we only need to persist the
|
||||
|
@ -138,6 +144,7 @@ type allocRunnerMutableState struct {
|
|||
AllocClientStatus string
|
||||
AllocClientDescription string
|
||||
TaskStates map[string]*structs.TaskState
|
||||
DeploymentStatus *structs.AllocDeploymentStatus
|
||||
}
|
||||
|
||||
// NewAllocRunner is used to create a new allocation context
|
||||
|
@ -151,8 +158,10 @@ func NewAllocRunner(logger *log.Logger, config *config.Config, stateDB *bolt.DB,
		updater:        updater,
		logger:         logger,
		alloc:          alloc,
		allocBroadcast: cstructs.NewAllocBroadcaster(0),
		allocID:        alloc.ID,
		allocBroadcast: cstructs.NewAllocBroadcaster(8),
		dirtyCh:        make(chan struct{}, 1),
		allocDir:       allocdir.NewAllocDir(logger, filepath.Join(config.AllocDir, alloc.ID)),
		tasks:          make(map[string]*TaskRunner),
		taskStates:     copyTaskStates(alloc.TaskStates),
		restored:       make(map[string]struct{}),
@ -173,7 +182,7 @@ func NewAllocRunner(logger *log.Logger, config *config.Config, stateDB *bolt.DB,
|
|||
func (r *AllocRunner) pre060StateFilePath() string {
|
||||
r.allocLock.Lock()
|
||||
defer r.allocLock.Unlock()
|
||||
path := filepath.Join(r.config.StateDir, "alloc", r.alloc.ID, "state.json")
|
||||
path := filepath.Join(r.config.StateDir, "alloc", r.allocID, "state.json")
|
||||
return path
|
||||
}
|
||||
|
||||
|
@ -187,7 +196,7 @@ func (r *AllocRunner) RestoreState() error {
|
|||
var upgrading bool
|
||||
if err := pre060RestoreState(oldPath, &snap); err == nil {
|
||||
// Restore fields
|
||||
r.logger.Printf("[INFO] client: restoring pre v0.6.0 alloc runner state for alloc %q", r.alloc.ID)
|
||||
r.logger.Printf("[INFO] client: restoring pre v0.6.0 alloc runner state for alloc %q", r.allocID)
|
||||
r.alloc = snap.Alloc
|
||||
r.allocDir = snap.AllocDir
|
||||
r.allocClientStatus = snap.AllocClientStatus
|
||||
|
@ -201,7 +210,7 @@ func (r *AllocRunner) RestoreState() error {
|
|||
// #2132 Upgrade path: if snap.AllocDir is nil, try to convert old
|
||||
// Context struct to new AllocDir struct
|
||||
if snap.AllocDir == nil && snap.Context != nil {
|
||||
r.logger.Printf("[DEBUG] client: migrating state snapshot for alloc %q", r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: migrating state snapshot for alloc %q", r.allocID)
|
||||
r.allocDir = allocdir.NewAllocDir(r.logger, snap.Context.AllocDir.AllocDir)
|
||||
for taskName := range snap.Context.AllocDir.TaskDirs {
|
||||
r.allocDir.NewTaskDir(taskName)
|
||||
|
@ -217,7 +226,7 @@ func (r *AllocRunner) RestoreState() error {
|
|||
} else {
|
||||
// We are doing a normal restore
|
||||
err := r.stateDB.View(func(tx *bolt.Tx) error {
|
||||
bkt, err := getAllocationBucket(tx, r.alloc.ID)
|
||||
bkt, err := getAllocationBucket(tx, r.allocID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get allocation bucket: %v", err)
|
||||
}
|
||||
|
@ -248,6 +257,7 @@ func (r *AllocRunner) RestoreState() error {
|
|||
r.allocClientDescription = mutable.AllocClientDescription
|
||||
r.taskStates = mutable.TaskStates
|
||||
r.alloc.ClientStatus = getClientStatus(r.taskStates)
|
||||
r.alloc.DeploymentStatus = mutable.DeploymentStatus
|
||||
return nil
|
||||
})
|
||||
|
||||
|
@ -273,32 +283,40 @@ func (r *AllocRunner) RestoreState() error {
|
|||
}
|
||||
|
||||
// Restore the task runners
|
||||
taskDestroyEvent := structs.NewTaskEvent(structs.TaskKilled)
|
||||
var mErr multierror.Error
|
||||
for _, task := range tg.Tasks {
|
||||
name := task.Name
|
||||
state := r.taskStates[name]
|
||||
|
||||
// Nomad exited before task could start, nothing to restore.
|
||||
// AllocRunner.Run will start a new TaskRunner for this task
|
||||
if state == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Mark the task as restored.
|
||||
r.restored[name] = struct{}{}
|
||||
|
||||
td, ok := r.allocDir.TaskDirs[name]
|
||||
if !ok {
|
||||
err := fmt.Errorf("failed to find task dir metadata for alloc %q task %q",
|
||||
r.alloc.ID, name)
|
||||
r.logger.Printf("[ERR] client: %v", err)
|
||||
return err
|
||||
// Create the task dir metadata if it doesn't exist.
|
||||
// Since task dirs are created during r.Run() the
|
||||
// client may save state and exit before all task dirs
|
||||
// are created
|
||||
td = r.allocDir.NewTaskDir(name)
|
||||
}
|
||||
|
||||
tr := NewTaskRunner(r.logger, r.config, r.stateDB, r.setTaskState, td, r.Alloc(), task, r.vaultClient, r.consulClient)
|
||||
r.tasks[name] = tr
|
||||
|
||||
// Skip tasks in terminal states.
|
||||
if state.State == structs.TaskStateDead {
|
||||
continue
|
||||
}
|
||||
|
||||
tr := NewTaskRunner(r.logger, r.config, r.stateDB, r.setTaskState, td, r.Alloc(), task, r.vaultClient, r.consulClient)
|
||||
r.tasks[name] = tr
|
||||
|
||||
if restartReason, err := tr.RestoreState(); err != nil {
|
||||
r.logger.Printf("[ERR] client: failed to restore state for alloc %s task %q: %v", r.alloc.ID, name, err)
|
||||
r.logger.Printf("[ERR] client: failed to restore state for alloc %s task %q: %v", r.allocID, name, err)
|
||||
mErr.Errors = append(mErr.Errors, err)
|
||||
} else if !r.alloc.TerminalStatus() {
|
||||
// Only start if the alloc isn't in a terminal status.
|
||||
|
@ -306,15 +324,17 @@ func (r *AllocRunner) RestoreState() error {
|
|||
|
||||
if upgrading {
|
||||
if err := tr.SaveState(); err != nil {
|
||||
r.logger.Printf("[WARN] client: initial save state for alloc %s task %s failed: %v", r.alloc.ID, name, err)
|
||||
r.logger.Printf("[WARN] client: initial save state for alloc %s task %s failed: %v", r.allocID, name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Restart task runner if RestoreState gave a reason
|
||||
if restartReason != "" {
|
||||
r.logger.Printf("[INFO] client: restarting alloc %s task %s: %v", r.alloc.ID, name, restartReason)
|
||||
r.logger.Printf("[INFO] client: restarting alloc %s task %s: %v", r.allocID, name, restartReason)
|
||||
tr.Restart("upgrade", restartReason)
|
||||
}
|
||||
} else {
|
||||
tr.Destroy(taskDestroyEvent)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -334,14 +354,22 @@ func (r *AllocRunner) SaveState() error {
|
|||
runners := r.getTaskRunners()
|
||||
var mErr multierror.Error
|
||||
for _, tr := range runners {
|
||||
if err := r.saveTaskRunnerState(tr); err != nil {
|
||||
mErr.Errors = append(mErr.Errors, err)
|
||||
if err := tr.SaveState(); err != nil {
|
||||
mErr.Errors = append(mErr.Errors, fmt.Errorf("failed to save state for alloc %s task %q: %v",
|
||||
r.allocID, tr.task.Name, err))
|
||||
}
|
||||
}
|
||||
return mErr.ErrorOrNil()
|
||||
}
|
||||
|
||||
func (r *AllocRunner) saveAllocRunnerState() error {
|
||||
r.allocStateLock.Lock()
|
||||
defer r.allocStateLock.Unlock()
|
||||
|
||||
if r.ctx.Err() == context.Canceled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Grab all the relevant data
|
||||
alloc := r.Alloc()
|
||||
|
||||
|
@ -351,14 +379,14 @@ func (r *AllocRunner) saveAllocRunnerState() error {
|
|||
r.allocLock.Unlock()
|
||||
|
||||
r.allocDirLock.Lock()
|
||||
allocDir := r.allocDir
|
||||
allocDir := r.allocDir.Copy()
|
||||
r.allocDirLock.Unlock()
|
||||
|
||||
// Start the transaction.
|
||||
return r.stateDB.Batch(func(tx *bolt.Tx) error {
|
||||
|
||||
// Grab the allocation bucket
|
||||
allocBkt, err := getAllocationBucket(tx, r.alloc.ID)
|
||||
allocBkt, err := getAllocationBucket(tx, r.allocID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve allocation bucket: %v", err)
|
||||
}
|
||||
|
@ -399,7 +427,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
|
|||
}
|
||||
|
||||
// Write the alloc dir data if it hasn't been written before and it exists.
|
||||
if !r.allocDirPersisted && r.allocDir != nil {
|
||||
if !r.allocDirPersisted && allocDir != nil {
|
||||
if err := putObject(allocBkt, allocRunnerStateAllocDirKey, allocDir); err != nil {
|
||||
return fmt.Errorf("failed to write alloc_runner allocDir state: %v", err)
|
||||
}
|
||||
|
@ -414,6 +442,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
|
|||
AllocClientStatus: allocClientStatus,
|
||||
AllocClientDescription: allocClientDescription,
|
||||
TaskStates: alloc.TaskStates,
|
||||
DeploymentStatus: alloc.DeploymentStatus,
|
||||
}
|
||||
|
||||
if err := putObject(allocBkt, allocRunnerStateMutableKey, &mutable); err != nil {
|
||||
|
@ -424,18 +453,13 @@ func (r *AllocRunner) saveAllocRunnerState() error {
|
|||
})
|
||||
}
|
||||
|
||||
func (r *AllocRunner) saveTaskRunnerState(tr *TaskRunner) error {
|
||||
if err := tr.SaveState(); err != nil {
|
||||
return fmt.Errorf("failed to save state for alloc %s task '%s': %v",
|
||||
r.alloc.ID, tr.task.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DestroyState is used to cleanup after ourselves
|
||||
func (r *AllocRunner) DestroyState() error {
|
||||
r.allocStateLock.Lock()
|
||||
defer r.allocStateLock.Unlock()
|
||||
|
||||
return r.stateDB.Update(func(tx *bolt.Tx) error {
|
||||
if err := deleteAllocationBucket(tx, r.alloc.ID); err != nil {
|
||||
if err := deleteAllocationBucket(tx, r.allocID); err != nil {
|
||||
return fmt.Errorf("failed to delete allocation bucket: %v", err)
|
||||
}
|
||||
return nil
|
||||
|
@ -549,7 +573,11 @@ func (r *AllocRunner) dirtySyncState() {
|
|||
for {
|
||||
select {
|
||||
case <-r.dirtyCh:
|
||||
r.syncStatus()
|
||||
if err := r.syncStatus(); err != nil {
|
||||
// Only WARN instead of ERR because we continue on
|
||||
r.logger.Printf("[WARN] client: error persisting alloc %q state: %v",
|
||||
r.allocID, err)
|
||||
}
|
||||
case <-r.ctx.Done():
|
||||
return
|
||||
}
|
||||
|
@ -561,10 +589,25 @@ func (r *AllocRunner) syncStatus() error {
	// Get a copy of our alloc, update status server side and sync to disk
	alloc := r.Alloc()
	r.updater(alloc)
	r.allocBroadcast.Send(alloc)
	r.sendBroadcast(alloc)
	return r.saveAllocRunnerState()
}

// sendBroadcast broadcasts an alloc update.
func (r *AllocRunner) sendBroadcast(alloc *structs.Allocation) {
	// Try to send the alloc up to three times with a delay to allow recovery.
	sent := false
	for i := 0; i < 3; i++ {
		if sent = r.allocBroadcast.Send(alloc); sent {
			break
		}
		time.Sleep(500 * time.Millisecond)
	}
	if !sent {
		r.logger.Printf("[WARN] client: failed to broadcast update to allocation %q", r.allocID)
	}
}

// setStatus is used to update the allocation status
func (r *AllocRunner) setStatus(status, desc string) {
	r.allocLock.Lock()
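The new `sendBroadcast` helper retries `AllocBroadcaster.Send`, whose boolean result (together with the listener buffer raised from 0 to 8 in `NewAllocRunner` above) suggests a broadcaster that prefers dropping an update over blocking the runner. The toy broadcaster below is a simplified stand-in — not the real `cstructs.AllocBroadcaster` — showing one way such a non-blocking Send ends up returning false:

```go
package broadcast

import "sync"

type update struct{ AllocID string }

// broadcaster fans updates out to buffered listener channels without ever
// blocking the sender.
type broadcaster struct {
	mu        sync.Mutex
	buf       int
	listeners []chan *update
}

func newBroadcaster(buf int) *broadcaster { return &broadcaster{buf: buf} }

func (b *broadcaster) Listen() <-chan *update {
	b.mu.Lock()
	defer b.mu.Unlock()
	ch := make(chan *update, b.buf)
	b.listeners = append(b.listeners, ch)
	return ch
}

// Send reports whether every listener accepted the update; a full listener
// buffer makes it return false rather than block, which is the case the
// retry loop in sendBroadcast compensates for.
func (b *broadcaster) Send(u *update) bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	ok := true
	for _, ch := range b.listeners {
		select {
		case ch <- u:
		default:
			ok = false
		}
	}
	return ok
}
```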
@ -685,30 +728,27 @@ func (r *AllocRunner) Run() {
|
|||
alloc := r.Alloc()
|
||||
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
||||
if tg == nil {
|
||||
r.logger.Printf("[ERR] client: alloc '%s' for missing task group '%s'", alloc.ID, alloc.TaskGroup)
|
||||
r.logger.Printf("[ERR] client: alloc %q for missing task group %q", r.allocID, alloc.TaskGroup)
|
||||
r.setStatus(structs.AllocClientStatusFailed, fmt.Sprintf("missing task group '%s'", alloc.TaskGroup))
|
||||
return
|
||||
}
|
||||
|
||||
// Create the execution context
|
||||
r.allocDirLock.Lock()
|
||||
if r.allocDir == nil {
|
||||
// Build allocation directory
|
||||
r.allocDir = allocdir.NewAllocDir(r.logger, filepath.Join(r.config.AllocDir, r.alloc.ID))
|
||||
if err := r.allocDir.Build(); err != nil {
|
||||
r.logger.Printf("[WARN] client: failed to build task directories: %v", err)
|
||||
r.setStatus(structs.AllocClientStatusFailed, fmt.Sprintf("failed to build task dirs for '%s'", alloc.TaskGroup))
|
||||
r.allocDirLock.Unlock()
|
||||
return
|
||||
}
|
||||
// Build allocation directory (idempotent)
|
||||
if err := r.allocDir.Build(); err != nil {
|
||||
r.logger.Printf("[ERR] client: failed to build task directories: %v", err)
|
||||
r.setStatus(structs.AllocClientStatusFailed, fmt.Sprintf("failed to build task dirs for '%s'", alloc.TaskGroup))
|
||||
r.allocDirLock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if r.otherAllocDir != nil {
|
||||
if err := r.allocDir.Move(r.otherAllocDir, tg.Tasks); err != nil {
|
||||
r.logger.Printf("[ERROR] client: failed to move alloc dir into alloc %q: %v", r.alloc.ID, err)
|
||||
}
|
||||
if err := r.otherAllocDir.Destroy(); err != nil {
|
||||
r.logger.Printf("[ERROR] client: error destroying allocdir %v: %v", r.otherAllocDir.AllocDir, err)
|
||||
}
|
||||
if r.otherAllocDir != nil {
|
||||
if err := r.allocDir.Move(r.otherAllocDir, tg.Tasks); err != nil {
|
||||
r.logger.Printf("[ERR] client: failed to move alloc dir into alloc %q: %v", r.allocID, err)
|
||||
}
|
||||
if err := r.otherAllocDir.Destroy(); err != nil {
|
||||
r.logger.Printf("[ERR] client: error destroying allocdir %v: %v", r.otherAllocDir.AllocDir, err)
|
||||
}
|
||||
}
|
||||
r.allocDirLock.Unlock()
|
||||
|
@ -717,11 +757,11 @@ func (r *AllocRunner) Run() {
|
|||
// start any of the task runners and directly wait for the destroy signal to
|
||||
// clean up the allocation.
|
||||
if alloc.TerminalStatus() {
|
||||
r.logger.Printf("[DEBUG] client: alloc %q in terminal status, waiting for destroy", r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: alloc %q in terminal status, waiting for destroy", r.allocID)
|
||||
// mark this allocation as completed.
|
||||
r.setStatus(structs.AllocClientStatusComplete, "cancelled running tasks for allocation in terminal state")
|
||||
r.handleDestroy()
|
||||
r.logger.Printf("[DEBUG] client: terminating runner for alloc '%s'", r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: terminating runner for alloc '%s'", r.allocID)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -730,7 +770,7 @@ func (r *AllocRunner) Run() {
|
|||
go r.watchHealth(wCtx)
|
||||
|
||||
// Start the task runners
|
||||
r.logger.Printf("[DEBUG] client: starting task runners for alloc '%s'", r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: starting task runners for alloc '%s'", r.allocID)
|
||||
r.taskLock.Lock()
|
||||
for _, task := range tg.Tasks {
|
||||
if _, ok := r.restored[task.Name]; ok {
|
||||
|
@ -787,7 +827,8 @@ OUTER:
|
|||
}
|
||||
|
||||
if err := r.syncStatus(); err != nil {
|
||||
r.logger.Printf("[WARN] client: failed to sync status upon receiving alloc update: %v", err)
|
||||
r.logger.Printf("[WARN] client: failed to sync alloc %q status upon receiving alloc update: %v",
|
||||
r.allocID, err)
|
||||
}
|
||||
case <-r.ctx.Done():
|
||||
taskDestroyEvent = structs.NewTaskEvent(structs.TaskKilled)
|
||||
|
@ -804,7 +845,7 @@ OUTER:
|
|||
// Free up the context. It has likely exited already
|
||||
watcherCancel()
|
||||
|
||||
r.logger.Printf("[DEBUG] client: terminating runner for alloc '%s'", r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: terminating runner for alloc '%s'", r.allocID)
|
||||
}
|
||||
|
||||
// SetPreviousAllocDir sets the previous allocation directory of the current
|
||||
|
@ -816,26 +857,37 @@ func (r *AllocRunner) SetPreviousAllocDir(allocDir *allocdir.AllocDir) {
|
|||
// destroyTaskRunners destroys the task runners, waits for them to terminate and
|
||||
// then saves state.
|
||||
func (r *AllocRunner) destroyTaskRunners(destroyEvent *structs.TaskEvent) {
|
||||
runners := r.getTaskRunners()
|
||||
|
||||
// First destroy the leader
|
||||
for _, tr := range runners {
|
||||
if tr.task.Leader {
|
||||
r.logger.Printf("[DEBUG] client: destroying leader task %q of task group %q first", tr.task.Name, tr.alloc.TaskGroup)
|
||||
tr.Destroy(destroyEvent)
|
||||
<-tr.WaitCh()
|
||||
// First destroy the leader if one exists
|
||||
tg := r.alloc.Job.LookupTaskGroup(r.alloc.TaskGroup)
|
||||
leader := ""
|
||||
for _, task := range tg.Tasks {
|
||||
if task.Leader {
|
||||
leader = task.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
if leader != "" {
|
||||
r.taskLock.RLock()
|
||||
tr := r.tasks[leader]
|
||||
r.taskLock.RUnlock()
|
||||
|
||||
r.logger.Printf("[DEBUG] client: alloc %q destroying leader task %q of task group %q first",
|
||||
r.allocID, leader, r.alloc.TaskGroup)
|
||||
tr.Destroy(destroyEvent)
|
||||
<-tr.WaitCh()
|
||||
}
|
||||
|
||||
// Then destroy non-leader tasks concurrently
|
||||
for _, tr := range runners {
|
||||
if !tr.task.Leader {
|
||||
r.taskLock.RLock()
|
||||
for name, tr := range r.tasks {
|
||||
if name != leader {
|
||||
tr.Destroy(destroyEvent)
|
||||
}
|
||||
}
|
||||
r.taskLock.RUnlock()
|
||||
|
||||
// Wait for termination of the task runners
|
||||
for _, tr := range runners {
|
||||
for _, tr := range r.getTaskRunners() {
|
||||
<-tr.WaitCh()
|
||||
}
|
||||
}
|
||||
|
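The rewritten `destroyTaskRunners` above first resolves the leader task by name from the task group, destroys it and waits for it, and only then destroys the remaining tasks concurrently before waiting for them all. A stripped-down sketch of that ordering, using hypothetical types rather than Nomad's TaskRunner:

```go
package main

import (
	"fmt"
	"sync"
)

type task struct {
	Name   string
	Leader bool
}

type stopper struct {
	done chan struct{}
}

func (s *stopper) Destroy()                { close(s.done) }
func (s *stopper) WaitCh() <-chan struct{} { return s.done }

// destroyAll mirrors the ordering in the diff: stop the leader (if any) and
// wait for it, then stop everything else, then wait for the stragglers.
func destroyAll(mu *sync.RWMutex, runners map[string]*stopper, group []*task) {
	leader := ""
	for _, t := range group {
		if t.Leader {
			leader = t.Name
			break
		}
	}

	if leader != "" {
		mu.RLock()
		lr := runners[leader]
		mu.RUnlock()
		lr.Destroy()
		<-lr.WaitCh()
	}

	mu.RLock()
	for name, r := range runners {
		if name != leader {
			r.Destroy()
		}
	}
	for _, r := range runners {
		<-r.WaitCh()
	}
	mu.RUnlock()
}

func main() {
	runners := map[string]*stopper{
		"db":  {done: make(chan struct{})},
		"web": {done: make(chan struct{})},
	}
	var mu sync.RWMutex
	destroyAll(&mu, runners, []*task{{Name: "db", Leader: true}, {Name: "web"}})
	fmt.Println("all task runners stopped")
}
```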
@ -845,23 +897,35 @@ func (r *AllocRunner) destroyTaskRunners(destroyEvent *structs.TaskEvent) {
|
|||
func (r *AllocRunner) handleDestroy() {
|
||||
// Final state sync. We do this to ensure that the server has the correct
|
||||
// state as we wait for a destroy.
|
||||
r.syncStatus()
|
||||
alloc := r.Alloc()
|
||||
|
||||
//TODO(schmichael) updater can cause a GC which can block on this alloc
|
||||
// runner shutting down. Since handleDestroy can be called by Run() we
|
||||
// can't block shutdown here as it would cause a deadlock.
|
||||
go r.updater(alloc)
|
||||
|
||||
// Broadcast and persist state synchronously
|
||||
r.sendBroadcast(alloc)
|
||||
if err := r.saveAllocRunnerState(); err != nil {
|
||||
r.logger.Printf("[WARN] client: alloc %q unable to persist state but should be GC'd soon anyway:%v",
|
||||
r.allocID, err)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-r.ctx.Done():
|
||||
if err := r.DestroyContext(); err != nil {
|
||||
r.logger.Printf("[ERR] client: failed to destroy context for alloc '%s': %v",
|
||||
r.alloc.ID, err)
|
||||
r.allocID, err)
|
||||
}
|
||||
if err := r.DestroyState(); err != nil {
|
||||
r.logger.Printf("[ERR] client: failed to destroy state for alloc '%s': %v",
|
||||
r.alloc.ID, err)
|
||||
r.allocID, err)
|
||||
}
|
||||
|
||||
return
|
||||
case <-r.updateCh:
|
||||
r.logger.Printf("[DEBUG] client: dropping update to terminal alloc '%s'", r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: dropping update to terminal alloc '%s'", r.allocID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -907,7 +971,7 @@ func (r *AllocRunner) LatestAllocStats(taskFilter string) (*cstructs.AllocResour
|
|||
tr, ok := r.tasks[taskFilter]
|
||||
r.taskLock.RUnlock()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("allocation %q has no task %q", r.alloc.ID, taskFilter)
|
||||
return nil, fmt.Errorf("allocation %q has no task %q", r.allocID, taskFilter)
|
||||
}
|
||||
l := tr.LatestResourceUsage()
|
||||
if l != nil {
|
||||
|
@ -956,6 +1020,11 @@ func (r *AllocRunner) shouldUpdate(serverIndex uint64) bool {
|
|||
|
||||
// Destroy is used to indicate that the allocation context should be destroyed
|
||||
func (r *AllocRunner) Destroy() {
|
||||
// Lock when closing the context as that gives the save state code
|
||||
// serialization.
|
||||
r.allocStateLock.Lock()
|
||||
defer r.allocStateLock.Unlock()
|
||||
|
||||
r.exitFn()
|
||||
r.allocBroadcast.Close()
|
||||
}
|
||||
|
|
|
@ -24,6 +24,9 @@ func (r *AllocRunner) watchHealth(ctx context.Context) {
|
|||
if alloc.DeploymentID == "" {
|
||||
r.logger.Printf("[TRACE] client.alloc_watcher: exiting because alloc isn't part of a deployment")
|
||||
return
|
||||
} else if alloc.DeploymentStatus.IsHealthy() || alloc.DeploymentStatus.IsUnhealthy() {
|
||||
r.logger.Printf("[TRACE] client.alloc_watcher: exiting because alloc deployment health already determined")
|
||||
return
|
||||
}
|
||||
|
||||
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
||||
|
@ -67,9 +70,16 @@ func (r *AllocRunner) watchHealth(ctx context.Context) {
|
|||
latestTaskHealthy := time.Unix(0, 0)
|
||||
latestChecksHealthy := time.Unix(0, 0)
|
||||
healthyTimer := time.NewTimer(0)
|
||||
if !healthyTimer.Stop() {
|
||||
<-healthyTimer.C
|
||||
healthyTime := time.Time{}
|
||||
cancelHealthyTimer := func() {
|
||||
if !healthyTimer.Stop() {
|
||||
select {
|
||||
case <-healthyTimer.C:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
cancelHealthyTimer()
|
||||
|
||||
// Cleanup function
|
||||
defer func() {
|
||||
|
@ -166,6 +176,7 @@ OUTER:
|
|||
// If we should have checks and they aren't all healthy continue
|
||||
if len(checks) != desiredChecks {
|
||||
r.logger.Printf("[TRACE] client.alloc_watcher: continuing since all checks (want %d; got %d) haven't been registered for alloc %q", desiredChecks, len(checks), alloc.ID)
|
||||
cancelHealthyTimer()
|
||||
continue OUTER
|
||||
}
|
||||
|
||||
|
@ -174,6 +185,7 @@ OUTER:
|
|||
if check.Status != api.HealthPassing {
|
||||
r.logger.Printf("[TRACE] client.alloc_watcher: continuing since check %q isn't passing for alloc %q", check.CheckID, alloc.ID)
|
||||
latestChecksHealthy = time.Time{}
|
||||
cancelHealthyTimer()
|
||||
continue OUTER
|
||||
}
|
||||
}
|
||||
|
@ -193,26 +205,21 @@ OUTER:
|
|||
}
|
||||
}
|
||||
|
||||
// Don't need to set the timer if we are healthy and have marked
|
||||
// ourselves healthy.
|
||||
if alloc.DeploymentStatus != nil && alloc.DeploymentStatus.Healthy != nil && *alloc.DeploymentStatus.Healthy {
|
||||
continue OUTER
|
||||
}
|
||||
|
||||
// Determine when we can mark ourselves as healthy.
|
||||
totalHealthy := latestTaskHealthy
|
||||
if totalHealthy.Before(latestChecksHealthy) {
|
||||
totalHealthy = latestChecksHealthy
|
||||
}
|
||||
d := time.Until(totalHealthy.Add(u.MinHealthyTime))
|
||||
|
||||
if !healthyTimer.Stop() {
|
||||
select {
|
||||
case <-healthyTimer.C:
|
||||
default:
|
||||
}
|
||||
// Nothing to do since we are already waiting for the healthy timer to
|
||||
// fire at the same time.
|
||||
if totalHealthy.Equal(healthyTime) {
|
||||
continue OUTER
|
||||
}
|
||||
|
||||
healthyTime = totalHealthy
|
||||
cancelHealthyTimer()
|
||||
d := time.Until(totalHealthy.Add(u.MinHealthyTime))
|
||||
healthyTimer.Reset(d)
|
||||
r.logger.Printf("[TRACE] client.alloc_watcher: setting healthy timer to %v for alloc %q", d, alloc.ID)
|
||||
}
|
||||
|
|
|
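The health watcher above replaces a one-shot `Stop`/drain of `healthyTimer` with a `cancelHealthyTimer` closure plus a `healthyTime` marker, so the timer is only re-armed when the computed healthy deadline actually changes. The subtle part is draining a `time.Timer` without blocking; a small standalone sketch of that helper:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Create the timer fired-at-zero, as the watcher does with
	// time.NewTimer(0), then immediately cancel it.
	timer := time.NewTimer(0)

	cancel := func() {
		if !timer.Stop() {
			// The timer already fired; drain the buffered value if it is
			// still there, but never block if it was already consumed.
			select {
			case <-timer.C:
			default:
			}
		}
	}
	cancel()

	// Later, when the healthy deadline changes, cancel and re-arm.
	cancel()
	timer.Reset(50 * time.Millisecond)
	<-timer.C
	fmt.Println("healthy deadline reached")
}
```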
@ -6,6 +6,7 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
|
@ -16,20 +17,33 @@ import (
|
|||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
"github.com/kr/pretty"
|
||||
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
ctestutil "github.com/hashicorp/nomad/client/testutil"
|
||||
"github.com/hashicorp/nomad/client/vaultclient"
|
||||
)
|
||||
|
||||
type MockAllocStateUpdater struct {
|
||||
Count int
|
||||
Allocs []*structs.Allocation
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Update fulfills the TaskStateUpdater interface
|
||||
func (m *MockAllocStateUpdater) Update(alloc *structs.Allocation) {
|
||||
m.Count += 1
|
||||
m.mu.Lock()
|
||||
m.Allocs = append(m.Allocs, alloc)
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
// Last returns the total number of updates and the last alloc (or nil)
|
||||
func (m *MockAllocStateUpdater) Last() (int, *structs.Allocation) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
n := len(m.Allocs)
|
||||
if n == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return n, m.Allocs[n-1].Copy()
|
||||
}
|
||||
|
||||
func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
|
||||
|
@ -51,20 +65,25 @@ func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAl
|
|||
}
|
||||
|
||||
func testAllocRunner(restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
|
||||
return testAllocRunnerFromAlloc(mock.Alloc(), restarts)
|
||||
// Use mock driver
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
task.Config["run_for"] = "500ms"
|
||||
return testAllocRunnerFromAlloc(alloc, restarts)
|
||||
}
|
||||
|
||||
func TestAllocRunner_SimpleRun(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -76,7 +95,7 @@ func TestAllocRunner_SimpleRun(t *testing.T) {
|
|||
|
||||
// Test that the watcher will mark the allocation as unhealthy.
|
||||
func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
|
||||
// Ensure the task fails and restarts
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
@ -96,10 +115,10 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
|
||||
return false, fmt.Errorf("want deployment status unhealthy; got unset")
|
||||
} else if *last.DeploymentStatus.Healthy {
|
||||
|
@ -114,7 +133,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
|
|||
// Test that the watcher will mark the allocation as unhealthy if it hits its
|
||||
// deadline.
|
||||
func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
|
||||
// Ensure the task fails and restarts
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
@ -136,10 +155,10 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
|
||||
return false, fmt.Errorf("want deployment status unhealthy; got unset")
|
||||
} else if *last.DeploymentStatus.Healthy {
|
||||
|
@ -153,7 +172,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
|
|||
|
||||
// Test that the watcher will mark the allocation as healthy.
|
||||
func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
|
||||
// Ensure the task fails and restarts
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
@ -181,10 +200,10 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
|
||||
return false, fmt.Errorf("want deployment status unhealthy; got unset")
|
||||
} else if !*last.DeploymentStatus.Healthy {
|
||||
|
@ -201,7 +220,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
|
|||
|
||||
// Test that the watcher will mark the allocation as healthy with checks
|
||||
func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
|
||||
// Ensure the task fails and restarts
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
@ -249,10 +268,10 @@ func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
|
||||
return false, fmt.Errorf("want deployment status unhealthy; got unset")
|
||||
} else if !*last.DeploymentStatus.Healthy {
|
||||
|
@ -270,7 +289,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
|
|||
|
||||
// Test that the watcher will mark the allocation as healthy.
|
||||
func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
|
||||
// Ensure the task fails and restarts
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
@ -291,10 +310,10 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
|
||||
return false, fmt.Errorf("want deployment status unhealthy; got unset")
|
||||
} else if !*last.DeploymentStatus.Healthy {
|
||||
|
@ -306,17 +325,16 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
|
|||
})
|
||||
|
||||
// Mimick an update to a new deployment id
|
||||
oldCount := upd.Count
|
||||
last := upd.Allocs[oldCount-1].Copy()
|
||||
oldCount, last := upd.Last()
|
||||
last.DeploymentStatus = nil
|
||||
last.DeploymentID = structs.GenerateUUID()
|
||||
ar.Update(last)
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count <= oldCount {
|
||||
newCount, last := upd.Last()
|
||||
if newCount <= oldCount {
|
||||
return false, fmt.Errorf("No new updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
|
||||
return false, fmt.Errorf("want deployment status unhealthy; got unset")
|
||||
} else if !*last.DeploymentStatus.Healthy {
|
||||
|
@ -332,7 +350,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
|
|||
// retrying fetching an artifact, other tasks in the group should be able
|
||||
// to proceed.
|
||||
func TestAllocRunner_RetryArtifact(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
|
||||
alloc := mock.Alloc()
|
||||
alloc.Job.Type = structs.JobTypeBatch
|
||||
|
@ -351,7 +369,7 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
|
|||
badtask := alloc.Job.TaskGroups[0].Tasks[0].Copy()
|
||||
badtask.Name = "bad"
|
||||
badtask.Artifacts = []*structs.TaskArtifact{
|
||||
{GetterSource: "http://127.1.1.111:12315/foo/bar/baz"},
|
||||
{GetterSource: "http://127.0.0.1:0/foo/bar/baz"},
|
||||
}
|
||||
|
||||
alloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, badtask)
|
||||
|
@ -360,10 +378,10 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count < 6 {
|
||||
return false, fmt.Errorf("Not enough updates")
|
||||
count, last := upd.Last()
|
||||
if min := 6; count < min {
|
||||
return false, fmt.Errorf("Not enough updates (%d < %d)", count, min)
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
|
||||
// web task should have completed successfully while bad task
|
||||
// retries artififact fetching
|
||||
|
@ -390,6 +408,7 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
||||
// Ensure task takes some time
|
||||
|
@ -399,10 +418,10 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
|
|||
go ar.Run()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusRunning {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
|
||||
}
|
||||
|
@ -418,12 +437,12 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
|
|||
ar.Update(update)
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
return false, nil
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
|
||||
// Check the status has changed.
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -453,12 +472,12 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
|
|||
ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
return false, nil
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
|
||||
// Check the status has changed.
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -488,6 +507,7 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_Destroy(t *testing.T) {
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
||||
// Ensure task takes some time
|
||||
|
@ -504,12 +524,12 @@ func TestAllocRunner_Destroy(t *testing.T) {
|
|||
}()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
return false, nil
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
|
||||
// Check the status has changed.
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -543,8 +563,12 @@ func TestAllocRunner_Destroy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_Update(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, ar := testAllocRunner(false)
|
||||
|
||||
// Deep copy the alloc to avoid races when updating
|
||||
newAlloc := ar.Alloc().Copy()
|
||||
|
||||
// Ensure task takes some time
|
||||
task := ar.alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -553,8 +577,6 @@ func TestAllocRunner_Update(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
// Update the alloc definition
|
||||
newAlloc := new(structs.Allocation)
|
||||
*newAlloc = *ar.alloc
|
||||
newAlloc.Name = "FOO"
|
||||
newAlloc.AllocModifyIndex++
|
||||
ar.Update(newAlloc)
|
||||
|
@ -568,6 +590,7 @@ func TestAllocRunner_Update(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_SaveRestoreState(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -582,6 +605,8 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
|
|||
|
||||
// Snapshot state
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
ar.taskLock.RLock()
|
||||
defer ar.taskLock.RUnlock()
|
||||
return len(ar.tasks) == 1, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("task never started: %v", err)
|
||||
|
@ -608,14 +633,15 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
|
|||
return false, fmt.Errorf("Incorrect number of tasks")
|
||||
}
|
||||
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
return last.ClientStatus == structs.AllocClientStatusRunning, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar2.alloc.TaskStates["web"])
|
||||
_, last := upd.Last()
|
||||
t.Fatalf("err: %v %#v %#v", err, last, last.TaskStates["web"])
|
||||
})
|
||||
|
||||
// Destroy and wait
|
||||
|
@ -629,7 +655,8 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
|
|||
}
|
||||
return true, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
|
||||
_, last := upd.Last()
|
||||
t.Fatalf("err: %v %#v %#v", err, last, last.TaskStates)
|
||||
})
|
||||
|
||||
if time.Since(start) > time.Duration(testutil.TestMultiplier()*5)*time.Second {
|
||||
|
@ -638,6 +665,7 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
ar.logger = prefixedTestLogger("ar1: ")
|
||||
|
||||
|
@ -649,10 +677,11 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
|
||||
if last.ClientStatus != structs.AllocClientStatusRunning {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
|
||||
}
|
||||
|
@ -668,7 +697,7 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
|||
ar.Update(update)
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
return ar.alloc.DesiredStatus == structs.AllocDesiredStatusStop, nil
|
||||
return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v", err)
|
||||
})
|
||||
|
@ -713,7 +742,8 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
|||
|
||||
return true, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
|
||||
_, last := upd.Last()
|
||||
t.Fatalf("err: %v %#v %#v", err, last, last.TaskStates)
|
||||
})
|
||||
|
||||
// Send the destroy signal and ensure the AllocRunner cleans up.
|
||||
|
@ -721,12 +751,12 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
|||
ar2.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
return false, nil
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
|
||||
// Check the status has changed.
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -758,6 +788,7 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
|||
// TestAllocRunner_SaveRestoreState_Upgrade asserts that pre-0.6 exec tasks are
|
||||
// restarted on upgrade.
|
||||
func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -775,10 +806,11 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
|||
|
||||
// Snapshot state
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
|
||||
if last.ClientStatus != structs.AllocClientStatusRunning {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
|
||||
}
|
||||
|
@ -803,22 +835,19 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
|||
defer ar2.Destroy() // Just-in-case of failure before Destroy below
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if len(ar2.tasks) != 1 {
|
||||
return false, fmt.Errorf("Incorrect number of tasks")
|
||||
count, last := upd.Last()
|
||||
if min := 3; count < min {
|
||||
return false, fmt.Errorf("expected at least %d updates but found %d", min, count)
|
||||
}
|
||||
|
||||
if upd.Count < 3 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, ev := range ar2.taskStates["web"].Events {
|
||||
for _, ev := range last.TaskStates["web"].Events {
|
||||
if strings.HasSuffix(ev.RestartReason, pre06ScriptCheckReason) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("no restart with proper reason found")
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v\nAllocs: %#v\nWeb State: %#v", err, upd.Allocs, ar2.taskStates["web"])
|
||||
count, last := upd.Last()
|
||||
t.Fatalf("err: %v\nAllocs: %d\nweb state: % #v", err, count, pretty.Formatter(last.TaskStates["web"]))
|
||||
})
|
||||
|
||||
// Destroy and wait
|
||||
|
@ -832,7 +861,8 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
|||
}
|
||||
return true, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
|
||||
_, last := upd.Last()
|
||||
t.Fatalf("err: %v %#v %#v", err, last, last.TaskStates)
|
||||
})
|
||||
|
||||
if time.Since(start) > time.Duration(testutil.TestMultiplier()*5)*time.Second {
|
||||
|
@ -856,6 +886,7 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
|||
// "AllocID": "2a54fcff-fc44-8d4f-e025-53c48e9cbbbb"
|
||||
// }
|
||||
func TestAllocRunner_RestoreOldState(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -974,6 +1005,7 @@ func TestAllocRunner_RestoreOldState(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
||||
// Create two tasks in the task group
|
||||
|
@ -996,10 +1028,10 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusFailed {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusFailed)
|
||||
}
|
||||
|
@ -1041,6 +1073,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
||||
// Create two tasks in the task group
|
||||
|
@ -1061,12 +1094,13 @@ func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
|
|||
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
|
||||
ar.alloc.TaskResources[task2.Name] = task2.Resources
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -1113,6 +1147,7 @@ func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
|
|||
// TestAllocRunner_TaskLeader_StopTG asserts that when stopping a task group
|
||||
// with a leader the leader is stopped before other tasks.
|
||||
func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
|
||||
t.Parallel()
|
||||
upd, ar := testAllocRunner(false)
|
||||
|
||||
// Create 3 tasks in the task group
|
||||
|
@ -1142,33 +1177,62 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
|
|||
}
|
||||
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2, task3)
|
||||
ar.alloc.TaskResources[task2.Name] = task2.Resources
|
||||
defer ar.Destroy()
|
||||
|
||||
// Destroy before running so it shuts down the alloc runner right after
|
||||
// starting all tasks
|
||||
ar.Destroy()
|
||||
go ar.Run()
|
||||
select {
|
||||
case <-ar.WaitCh():
|
||||
case <-time.After(8 * time.Second):
|
||||
t.Fatalf("timed out waiting for alloc runner to exit")
|
||||
}
|
||||
|
||||
if len(upd.Allocs) != 1 {
|
||||
t.Fatalf("expected 1 alloc update but found %d", len(upd.Allocs))
|
||||
}
|
||||
// Wait for tasks to start
|
||||
oldCount, last := upd.Last()
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
oldCount, last = upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
if n := len(last.TaskStates); n != 3 {
|
||||
return false, fmt.Errorf("Not enough task states (want: 3; found %d)", n)
|
||||
}
|
||||
for name, state := range last.TaskStates {
|
||||
if state.State != structs.TaskStateRunning {
|
||||
return false, fmt.Errorf("Task %q is not running yet (it's %q)", name, state.State)
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v", err)
|
||||
})
|
||||
|
||||
a := upd.Allocs[0]
|
||||
if a.TaskStates["leader"].FinishedAt.UnixNano() >= a.TaskStates["follower1"].FinishedAt.UnixNano() {
|
||||
t.Fatalf("expected leader to finish before follower1: %s >= %s",
|
||||
a.TaskStates["leader"].FinishedAt, a.TaskStates["follower1"].FinishedAt)
|
||||
}
|
||||
if a.TaskStates["leader"].FinishedAt.UnixNano() >= a.TaskStates["follower2"].FinishedAt.UnixNano() {
|
||||
t.Fatalf("expected leader to finish before follower2: %s >= %s",
|
||||
a.TaskStates["leader"].FinishedAt, a.TaskStates["follower2"].FinishedAt)
|
||||
}
|
||||
// Stop alloc
|
||||
update := ar.Alloc()
|
||||
update.DesiredStatus = structs.AllocDesiredStatusStop
|
||||
ar.Update(update)
|
||||
|
||||
// Wait for tasks to stop
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
newCount, last := upd.Last()
|
||||
if newCount == oldCount {
|
||||
return false, fmt.Errorf("no new updates (count: %d)", newCount)
|
||||
}
|
||||
if last.TaskStates["leader"].FinishedAt.UnixNano() >= last.TaskStates["follower1"].FinishedAt.UnixNano() {
|
||||
return false, fmt.Errorf("expected leader to finish before follower1: %s >= %s",
|
||||
last.TaskStates["leader"].FinishedAt, last.TaskStates["follower1"].FinishedAt)
|
||||
}
|
||||
if last.TaskStates["leader"].FinishedAt.UnixNano() >= last.TaskStates["follower2"].FinishedAt.UnixNano() {
|
||||
return false, fmt.Errorf("expected leader to finish before follower2: %s >= %s",
|
||||
last.TaskStates["leader"].FinishedAt, last.TaskStates["follower2"].FinishedAt)
|
||||
}
|
||||
return true, nil
|
||||
}, func(err error) {
|
||||
count, last := upd.Last()
|
||||
t.Logf("Updates: %d", count)
|
||||
for name, state := range last.TaskStates {
|
||||
t.Logf("%s: %s", name, state.State)
|
||||
}
|
||||
t.Fatalf("err: %v", err)
|
||||
})
|
||||
}
|
||||
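These assertions all funnel through testutil.WaitForResult, which polls a condition function until it reports true or a deadline passes, then hands the last error to the failure callback. The real helper lives in nomad/testutil and scales its deadline by testutil.TestMultiplier(); the version below is only a rough sketch of that shape with hard-coded, illustrative timing.

package main

import "time"

// waitForResult is a simplified sketch of the polling helper used by these
// tests; the timing values are illustrative, not the real implementation.
func waitForResult(test func() (bool, error), onFail func(error)) {
	deadline := time.Now().Add(5 * time.Second)
	var err error
	for time.Now().Before(deadline) {
		var ok bool
		if ok, err = test(); ok {
			return
		}
		time.Sleep(25 * time.Millisecond)
	}
	onFail(err)
}

func main() {
	start := time.Now()
	waitForResult(func() (bool, error) {
		// Condition becomes true after roughly 100ms.
		return time.Since(start) > 100*time.Millisecond, nil
	}, func(err error) {
		panic(err)
	})
}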
|
||||
func TestAllocRunner_MoveAllocDir(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Create an alloc runner
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -1181,10 +1245,10 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
|
|||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
_, last := upd.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd.Allocs[upd.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
@ -1213,10 +1277,10 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
|
|||
defer ar1.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd1.Count == 0 {
|
||||
_, last := upd1.Last()
|
||||
if last == nil {
|
||||
return false, fmt.Errorf("No updates")
|
||||
}
|
||||
last := upd1.Allocs[upd1.Count-1]
|
||||
if last.ClientStatus != structs.AllocClientStatusComplete {
|
||||
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
|
||||
}
|
||||
|
|
|
@@ -98,6 +98,24 @@ func NewAllocDir(logger *log.Logger, allocDir string) *AllocDir {
 	}
 }

+// Copy an AllocDir and all of its TaskDirs. Returns nil if AllocDir is
+// nil.
+func (d *AllocDir) Copy() *AllocDir {
+	if d == nil {
+		return nil
+	}
+	dcopy := &AllocDir{
+		AllocDir:  d.AllocDir,
+		SharedDir: d.SharedDir,
+		TaskDirs:  make(map[string]*TaskDir, len(d.TaskDirs)),
+		logger:    d.logger,
+	}
+	for k, v := range d.TaskDirs {
+		dcopy.TaskDirs[k] = v.Copy()
+	}
+	return dcopy
+}
+
 // NewTaskDir creates a new TaskDir and adds it to the AllocDirs TaskDirs map.
 func (d *AllocDir) NewTaskDir(name string) *TaskDir {
 	td := newTaskDir(d.logger, d.AllocDir, name)
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
cstructs "github.com/hashicorp/nomad/client/structs"
|
||||
"github.com/hashicorp/nomad/client/testutil"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/kr/pretty"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@@ -50,7 +51,10 @@ var (
 )

 func testLogger() *log.Logger {
-	return log.New(os.Stderr, "", log.LstdFlags)
+	if testing.Verbose() {
+		return log.New(os.Stderr, "", log.LstdFlags)
+	}
+	return log.New(ioutil.Discard, "", log.LstdFlags)
 }
|
||||
|
||||
// Test that AllocDir.Build builds just the alloc directory.
|
||||
|
@@ -367,8 +371,10 @@ func TestAllocDir_SplitPath(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if len(info) != 6 {
-		t.Fatalf("expected: %v, actual: %v", 6, len(info))
+	// Testing that is 6 or more rather than 6 because on osx, the temp dir is
+	// randomized.
+	if len(info) < 6 {
+		t.Fatalf("expected more than: %v, actual: %v", 6, len(info))
 	}
 }
|
||||
|
||||
|
@@ -409,6 +415,25 @@ func TestAllocDir_CreateDir(t *testing.T) {
 	}
 }

+// TestAllocDir_Copy asserts that AllocDir.Copy does a deep copy of itself and
+// all TaskDirs.
+func TestAllocDir_Copy(t *testing.T) {
+	a := NewAllocDir(testLogger(), "foo")
+	a.NewTaskDir("bar")
+	a.NewTaskDir("baz")
+
+	b := a.Copy()
+	if diff := pretty.Diff(a, b); len(diff) > 0 {
+		t.Errorf("differences between copies: %# v", pretty.Formatter(diff))
+	}
+
+	// Make sure TaskDirs map is copied
+	a.NewTaskDir("new")
+	if b.TaskDirs["new"] != nil {
+		t.Errorf("TaskDirs map shared between copied")
+	}
+}
+
 func TestPathFuncs(t *testing.T) {
 	dir, err := ioutil.TempDir("", "nomadtest-pathfuncs")
 	if err != nil {
|
||||
|
|
|
@@ -57,6 +57,14 @@ func newTaskDir(logger *log.Logger, allocDir, taskName string) *TaskDir {
 	}
 }

+// Copy a TaskDir. Panics if TaskDir is nil as TaskDirs should never be nil.
+func (t *TaskDir) Copy() *TaskDir {
+	// No nested structures other than the logger which is safe to share,
+	// so just copy the struct
+	tcopy := *t
+	return &tcopy
+}
+
 // Build default directories and permissions in a task directory. chrootCreated
 // allows skipping chroot creation if the caller knows it has already been
 // done.
|
||||
|
|
|
@ -7,17 +7,18 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/client/fingerprint"
|
||||
"github.com/hashicorp/nomad/command/agent/consul"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/hashicorp/nomad/nomad"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -28,35 +29,19 @@ import (
|
|||
ctestutil "github.com/hashicorp/nomad/client/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
nextPort uint32 = 16000
|
||||
|
||||
osExecDriverSupport = map[string]bool{
|
||||
"linux": true,
|
||||
}
|
||||
)
|
||||
|
||||
 func getPort() int {
-	return int(atomic.AddUint32(&nextPort, 1))
+	return 1030 + int(rand.Int31n(6440))
 }
|
||||
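The hunk above swaps the old atomically incremented port counter for a random port in the 1030-7469 range, and further down wraps server creation in a retry loop for the cases where a randomly chosen port is already taken. A common alternative, shown here only as an aside and not something this diff does, is to let the kernel hand out a free port by listening on port 0:

package main

import (
	"fmt"
	"net"
)

// freePort asks the OS for an unused TCP port by binding to port 0 and
// reading back the address that was actually assigned.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	p, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("got port", p)
}

The drawback, and presumably why these tests stick with a random range plus retries instead, is that the probed port can be grabbed by another process between closing the probe listener and the server binding it.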
|
||||
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
|
||||
f := false
|
||||
|
||||
// Setup the default settings
|
||||
config := nomad.DefaultConfig()
|
||||
config.VaultConfig.Enabled = &f
|
||||
config.VaultConfig.Enabled = helper.BoolToPtr(false)
|
||||
config.Build = "unittest"
|
||||
config.DevMode = true
|
||||
config.RPCAddr = &net.TCPAddr{
|
||||
IP: []byte{127, 0, 0, 1},
|
||||
Port: getPort(),
|
||||
}
|
||||
config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
|
||||
|
||||
// Tighten the Serf timing
|
||||
config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
|
||||
config.SerfConfig.MemberlistConfig.BindPort = getPort()
|
||||
config.SerfConfig.MemberlistConfig.SuspicionMult = 2
|
||||
config.SerfConfig.MemberlistConfig.RetransmitMult = 2
|
||||
config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
|
||||
|
@ -70,33 +55,52 @@ func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
|
|||
config.RaftConfig.StartAsLeader = true
|
||||
config.RaftTimeout = 500 * time.Millisecond
|
||||
|
||||
logger := log.New(config.LogOutput, "", log.LstdFlags)
|
||||
catalog := consul.NewMockCatalog(logger)
|
||||
|
||||
// Invoke the callback if any
|
||||
if cb != nil {
|
||||
cb(config)
|
||||
}
|
||||
|
||||
logger := log.New(config.LogOutput, "", log.LstdFlags)
|
||||
catalog := consul.NewMockCatalog(logger)
|
||||
for i := 10; i >= 0; i-- {
|
||||
config.RPCAddr = &net.TCPAddr{
|
||||
IP: []byte{127, 0, 0, 1},
|
||||
Port: getPort(),
|
||||
}
|
||||
config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
|
||||
config.SerfConfig.MemberlistConfig.BindPort = getPort()
|
||||
|
||||
// Create server
|
||||
server, err := nomad.NewServer(config, catalog, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
// Create server
|
||||
server, err := nomad.NewServer(config, catalog, logger)
|
||||
if err == nil {
|
||||
return server, config.RPCAddr.String()
|
||||
} else if i == 0 {
|
||||
t.Fatalf("err: %v", err)
|
||||
} else {
|
||||
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
|
||||
time.Sleep(wait)
|
||||
}
|
||||
}
|
||||
return server, config.RPCAddr.String()
|
||||
return nil, ""
|
||||
}
|
||||
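The rewritten testServer above gives nomad.NewServer up to eleven attempts, re-rolling the RPC and Serf ports on each pass and sleeping a random amount before retrying, only calling t.Fatalf once the final attempt fails. Pulled out of the test, the shape is roughly the following; the helper name and parameters are made up for illustration and are not part of the Nomad code.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retryWithJitter runs f up to attempts times, sleeping a random duration of
// at most maxJitter between failed attempts. Illustrative only.
func retryWithJitter(attempts int, maxJitter time.Duration, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(time.Duration(rand.Int63n(int64(maxJitter))))
	}
	return err
}

func main() {
	calls := 0
	err := retryWithJitter(10, 100*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("port in use")
		}
		return nil
	})
	fmt.Println(calls, err)
}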
|
||||
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
|
||||
f := false
|
||||
|
||||
conf := config.DefaultConfig()
|
||||
conf.VaultConfig.Enabled = &f
|
||||
conf.VaultConfig.Enabled = helper.BoolToPtr(false)
|
||||
conf.DevMode = true
|
||||
conf.Node = &structs.Node{
|
||||
Reserved: &structs.Resources{
|
||||
DiskMB: 0,
|
||||
},
|
||||
}
|
||||
|
||||
// Tighten the fingerprinter timeouts
|
||||
if conf.Options == nil {
|
||||
conf.Options = make(map[string]string)
|
||||
}
|
||||
conf.Options[fingerprint.TightenNetworkTimeoutsConfig] = "true"
|
||||
|
||||
if cb != nil {
|
||||
cb(conf)
|
||||
}
|
||||
|
@ -113,6 +117,7 @@ func testClient(t *testing.T, cb func(c *config.Config)) *Client {
|
|||
}
|
||||
|
||||
func TestClient_StartStop(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := testClient(t, nil)
|
||||
if err := client.Shutdown(); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -120,6 +125,7 @@ func TestClient_StartStop(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RPC(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, addr := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
|
||||
|
@ -139,6 +145,7 @@ func TestClient_RPC(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RPC_Passthrough(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, _ := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
|
||||
|
@ -158,6 +165,7 @@ func TestClient_RPC_Passthrough(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Fingerprint(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, nil)
|
||||
defer c.Shutdown()
|
||||
|
||||
|
@ -172,6 +180,7 @@ func TestClient_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_HasNodeChanged(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, nil)
|
||||
defer c.Shutdown()
|
||||
|
||||
|
@ -203,6 +212,7 @@ func TestClient_HasNodeChanged(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Fingerprint_InWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
|
@ -220,6 +230,7 @@ func TestClient_Fingerprint_InWhitelist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Fingerprint_InBlacklist(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
|
@ -237,6 +248,7 @@ func TestClient_Fingerprint_InBlacklist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
|
@ -253,6 +265,7 @@ func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
|
@ -281,63 +294,46 @@ func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestClient_Drivers(t *testing.T) {
|
||||
c := testClient(t, nil)
|
||||
defer c.Shutdown()
|
||||
|
||||
node := c.Node()
|
||||
if node.Attributes["driver.exec"] == "" {
|
||||
if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
|
||||
t.Fatalf("missing exec driver")
|
||||
} else {
|
||||
t.Skipf("missing exec driver, no OS support")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClient_Drivers_InWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
}
|
||||
|
||||
// Weird spacing to test trimming
|
||||
c.Options["driver.whitelist"] = " exec , foo "
|
||||
c.Options["driver.raw_exec.enable"] = "1"
|
||||
c.Options["driver.whitelist"] = " raw_exec , foo "
|
||||
})
|
||||
defer c.Shutdown()
|
||||
|
||||
node := c.Node()
|
||||
if node.Attributes["driver.exec"] == "" {
|
||||
if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
|
||||
t.Fatalf("missing exec driver")
|
||||
} else {
|
||||
t.Skipf("missing exec driver, no OS support")
|
||||
}
|
||||
if node.Attributes["driver.raw_exec"] == "" {
|
||||
t.Fatalf("missing raw_exec driver")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClient_Drivers_InBlacklist(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
}
|
||||
|
||||
// Weird spacing to test trimming
|
||||
c.Options["driver.blacklist"] = " exec , foo "
|
||||
c.Options["driver.raw_exec.enable"] = "1"
|
||||
c.Options["driver.blacklist"] = " raw_exec , foo "
|
||||
})
|
||||
defer c.Shutdown()
|
||||
|
||||
node := c.Node()
|
||||
if node.Attributes["driver.exec"] != "" {
|
||||
if v, ok := osExecDriverSupport[runtime.GOOS]; !v && ok {
|
||||
t.Fatalf("exec driver loaded despite blacklist")
|
||||
} else {
|
||||
t.Skipf("missing exec driver, no OS support")
|
||||
}
|
||||
if node.Attributes["driver.raw_exec"] != "" {
|
||||
t.Fatalf("raw_exec driver loaded despite blacklist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
|
@ -354,6 +350,7 @@ func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := testClient(t, func(c *config.Config) {
|
||||
if c.Options == nil {
|
||||
c.Options = make(map[string]string)
|
||||
|
@ -379,6 +376,7 @@ func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
|
|||
// TestClient_MixedTLS asserts that when a server is running with TLS enabled
|
||||
// it will reject any RPC connections from clients that lack TLS. See #2525
|
||||
func TestClient_MixedTLS(t *testing.T) {
|
||||
t.Parallel()
|
||||
const (
|
||||
cafile = "../helper/tlsutil/testdata/ca.pem"
|
||||
foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
|
||||
|
@ -425,6 +423,7 @@ func TestClient_MixedTLS(t *testing.T) {
|
|||
// enabled -- but their certificates are signed by different CAs -- they're
|
||||
// unable to communicate.
|
||||
func TestClient_BadTLS(t *testing.T) {
|
||||
t.Parallel()
|
||||
const (
|
||||
cafile = "../helper/tlsutil/testdata/ca.pem"
|
||||
foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
|
||||
|
@ -479,6 +478,7 @@ func TestClient_BadTLS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Register(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, _ := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
|
@ -510,6 +510,7 @@ func TestClient_Register(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Heartbeat(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, _ := testServer(t, func(c *nomad.Config) {
|
||||
c.MinHeartbeatTTL = 50 * time.Millisecond
|
||||
})
|
||||
|
@ -543,6 +544,7 @@ func TestClient_Heartbeat(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_UpdateAllocStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, _ := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
|
@ -592,6 +594,7 @@ func TestClient_UpdateAllocStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_WatchAllocs(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctestutil.ExecCompatible(t)
|
||||
s1, _ := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
|
@ -690,6 +693,7 @@ func waitTilNodeReady(client *Client, t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_SaveRestoreState(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctestutil.ExecCompatible(t)
|
||||
s1, _ := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
|
@ -783,6 +787,7 @@ func TestClient_SaveRestoreState(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Init(t *testing.T) {
|
||||
t.Parallel()
|
||||
dir, err := ioutil.TempDir("", "nomad")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
@ -806,6 +811,7 @@ func TestClient_Init(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_BlockedAllocations(t *testing.T) {
|
||||
t.Parallel()
|
||||
s1, _ := testServer(t, nil)
|
||||
defer s1.Shutdown()
|
||||
testutil.WaitForLeader(t, s1.RPC)
|
||||
|
@ -911,6 +917,7 @@ func TestClient_BlockedAllocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_UnarchiveAllocDir(t *testing.T) {
|
||||
t.Parallel()
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@@ -136,7 +136,7 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
 	}

 	if vault {
-		harness.vault = testutil.NewTestVault(t).Start()
+		harness.vault = testutil.NewTestVault(t)
 		harness.config.VaultConfig = harness.vault.Config
 		harness.vaultToken = harness.vault.RootToken
 	}
|
||||
|
@ -178,6 +178,7 @@ func (h *testHarness) stop() {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Invalid(t *testing.T) {
|
||||
t.Parallel()
|
||||
hooks := NewMockTaskHooks()
|
||||
var tmpls []*structs.Template
|
||||
region := "global"
|
||||
|
@ -235,6 +236,7 @@ func TestTaskTemplateManager_Invalid(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_HostPath(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render immediately and write it to a tmp file
|
||||
f, err := ioutil.TempFile("", "")
|
||||
if err != nil {
|
||||
|
@ -288,6 +290,7 @@ func TestTaskTemplateManager_HostPath(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render immediately
|
||||
content := "hello, world!"
|
||||
file := "my.tmpl"
|
||||
|
@ -321,6 +324,7 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Permissions(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render immediately
|
||||
content := "hello, world!"
|
||||
file := "my.tmpl"
|
||||
|
@ -355,6 +359,7 @@ func TestTaskTemplateManager_Permissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render immediately
|
||||
content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}`
|
||||
expected := fmt.Sprintf("Hello Nomad Task: %s", TestTaskName)
|
||||
|
@ -389,6 +394,7 @@ func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render immediately
|
||||
content := "hello, world!"
|
||||
file := "my.tmpl"
|
||||
|
@ -429,6 +435,7 @@ func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render based on a key in Consul
|
||||
key := "foo"
|
||||
content := "barbaz"
|
||||
|
@ -477,6 +484,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render based on a key in Vault
|
||||
vaultPath := "secret/password"
|
||||
key := "password"
|
||||
|
@ -527,6 +535,7 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render immediately
|
||||
staticContent := "hello, world!"
|
||||
staticFile := "my.tmpl"
|
||||
|
@ -595,6 +604,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will render based on a key in Consul
|
||||
key := "foo"
|
||||
content1 := "bar"
|
||||
|
@ -666,6 +676,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that renders based on a key in Consul and sends SIGALRM
|
||||
key1 := "foo"
|
||||
content1_1 := "bar"
|
||||
|
@ -765,6 +776,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that renders based on a key in Consul and sends restart
|
||||
key1 := "bam"
|
||||
content1_1 := "cat"
|
||||
|
@ -831,6 +843,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that will have its destination interpolated
|
||||
content := "hello, world!"
|
||||
file := "${node.unique.id}.tmpl"
|
||||
|
@ -865,6 +878,7 @@ func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Signal_Error(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Make a template that renders based on a key in Consul and sends SIGALRM
|
||||
key1 := "foo"
|
||||
content1 := "bar"
|
||||
|
@ -916,6 +930,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
|
|||
// TestTaskTemplateManager_Env asserts templates with the env flag set are read
|
||||
// into the task's environment.
|
||||
func TestTaskTemplateManager_Env(t *testing.T) {
|
||||
t.Parallel()
|
||||
template := &structs.Template{
|
||||
EmbeddedTmpl: `
|
||||
# Comment lines are ok
|
||||
|
@ -958,6 +973,7 @@ ANYTHING_goes=Spaces are=ok!
|
|||
// TestTaskTemplateManager_Env_Missing asserts the core env
|
||||
// template processing function returns errors when files don't exist
|
||||
func TestTaskTemplateManager_Env_Missing(t *testing.T) {
|
||||
t.Parallel()
|
||||
d, err := ioutil.TempDir("", "ct_env_missing")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -992,6 +1008,7 @@ func TestTaskTemplateManager_Env_Missing(t *testing.T) {
|
|||
// template processing function returns combined env vars from multiple
|
||||
// templates correctly.
|
||||
func TestTaskTemplateManager_Env_Multi(t *testing.T) {
|
||||
t.Parallel()
|
||||
d, err := ioutil.TempDir("", "ct_env_missing")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -1038,6 +1055,7 @@ func TestTaskTemplateManager_Env_Multi(t *testing.T) {
|
|||
// TestTaskTemplateManager_Config_ServerName asserts the tls_server_name
|
||||
// setting is propogated to consul-template's configuration. See #2776
|
||||
func TestTaskTemplateManager_Config_ServerName(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := config.DefaultConfig()
|
||||
c.VaultConfig = &sconfig.VaultConfig{
|
||||
Enabled: helper.BoolToPtr(true),
|
||||
|
|
|
@@ -593,15 +593,18 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (*StartRespon
 		pluginClient.Kill()
 		return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err)
 	}

 	// InspectContainer to get all of the container metadata as
 	// much of the metadata (eg networking) isn't populated until
 	// the container is started
-	if container, err = client.InspectContainer(container.ID); err != nil {
+	runningContainer, err := client.InspectContainer(container.ID)
+	if err != nil {
+		err = fmt.Errorf("failed to inspect started container %s: %s", container.ID, err)
 		d.logger.Printf("[ERR] driver.docker: %v", err)
 		pluginClient.Kill()
 		return nil, structs.NewRecoverableError(err, true)
 	}
+	container = runningContainer
 	d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
 } else {
 	d.logger.Printf("[DEBUG] driver.docker: re-attaching to container %s with status %q",
|
||||
|
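The docker driver hunk above stops reusing the container variable for the post-start inspection and instead inspects into a fresh runningContainer, wrapping any failure as a recoverable error so the client can retry rather than fail the task outright. The re-inspection matters because details such as networking are only populated once the container is running. Below is a stripped-down sketch of that pattern; it assumes the fsouza/go-dockerclient package this driver already uses, and the helper name is invented.

package dockersketch

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// inspectStarted re-reads container metadata after the container has been
// started; fields like network settings are only filled in at that point.
func inspectStarted(client *docker.Client, id string) (*docker.Container, error) {
	c, err := client.InspectContainer(id)
	if err != nil {
		// The caller is expected to treat this as retryable rather than a
		// permanent task failure.
		return nil, fmt.Errorf("failed to inspect started container %s: %s", id, err)
	}
	return c, nil
}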
|
|
@ -44,6 +44,7 @@ func (m *mockImageClient) RemoveImage(id string) error {
|
|||
}
|
||||
|
||||
func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
|
||||
t.Parallel()
|
||||
image := "foo"
|
||||
imageID := structs.GenerateUUID()
|
||||
mapping := map[string]string{imageID: image}
|
||||
|
@ -69,7 +70,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
|
|||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
p := mock.pulled[image]
|
||||
if p != 1 {
|
||||
if p >= 10 {
|
||||
return false, fmt.Errorf("Wrong number of pulls: %d", p)
|
||||
}
|
||||
|
||||
|
@ -90,6 +91,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerCoordinator_Pull_Remove(t *testing.T) {
|
||||
t.Parallel()
|
||||
image := "foo"
|
||||
imageID := structs.GenerateUUID()
|
||||
mapping := map[string]string{imageID: image}
|
||||
|
@ -153,6 +155,7 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
|
||||
t.Parallel()
|
||||
image := "foo"
|
||||
imageID := structs.GenerateUUID()
|
||||
mapping := map[string]string{imageID: image}
|
||||
|
@ -200,6 +203,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerCoordinator_No_Cleanup(t *testing.T) {
|
||||
t.Parallel()
|
||||
image := "foo"
|
||||
imageID := structs.GenerateUUID()
|
||||
mapping := map[string]string{imageID: image}
|
||||
|
|
|
@ -42,8 +42,8 @@ func dockerIsRemote(t *testing.T) bool {
|
|||
|
||||
// Ports used by tests
|
||||
var (
|
||||
docker_reserved = 32768 + int(rand.Int31n(25000))
|
||||
docker_dynamic = 32768 + int(rand.Int31n(25000))
|
||||
docker_reserved = 2000 + int(rand.Int31n(10000))
|
||||
docker_dynamic = 2000 + int(rand.Int31n(10000))
|
||||
)
|
||||
|
||||
// Returns a task with a reserved and dynamic port. The ports are returned
|
||||
|
@ -158,6 +158,9 @@ func newTestDockerClient(t *testing.T) *docker.Client {
|
|||
|
||||
// This test should always pass, even if docker daemon is not available
|
||||
func TestDockerDriver_Fingerprint(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources})
|
||||
//ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
|
||||
defer ctx.AllocDir.Destroy()
|
||||
|
@ -181,6 +184,9 @@ func TestDockerDriver_Fingerprint(t *testing.T) {
|
|||
// TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set
|
||||
// the bridge network's IP as a node attribute. See #2785
|
||||
func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.Skip("requires Docker")
|
||||
}
|
||||
|
@ -213,6 +219,9 @@ func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_StartOpen_Wait(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
@ -264,6 +273,9 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Start_Wait(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task := &structs.Task{
|
||||
Name: "nc-demo",
|
||||
Driver: "docker",
|
||||
|
@ -303,6 +315,9 @@ func TestDockerDriver_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Start_LoadImage(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
@ -369,6 +384,9 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
@ -410,6 +428,9 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
// This test requires that the alloc dir be mounted into docker as a volume.
|
||||
// Because this cannot happen when docker is run remotely, e.g. when running
|
||||
// docker in a VM, we skip this when we detect Docker is being run remotely.
|
||||
|
@ -480,6 +501,9 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task := &structs.Task{
|
||||
Name: "nc-demo",
|
||||
Driver: "docker",
|
||||
|
@ -518,6 +542,9 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_StartN(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
@ -569,6 +596,9 @@ func TestDockerDriver_StartN(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_StartNVersions(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
@ -646,6 +676,9 @@ func waitForExist(t *testing.T, client *docker.Client, handle *DockerHandle) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_NetworkMode_Host(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
expected := "host"
|
||||
|
||||
task := &structs.Task{
|
||||
|
@ -685,6 +718,9 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
// Because go-dockerclient doesn't provide api for query network aliases, just check that
|
||||
// a container can be created with a 'network_aliases' property
|
||||
|
||||
|
@ -731,6 +767,9 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Labels(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["labels"] = []map[string]string{
|
||||
map[string]string{
|
||||
|
@ -759,6 +798,9 @@ func TestDockerDriver_Labels(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_ForcePull_IsInvalidConfig(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["force_pull"] = "nothing"
|
||||
|
||||
|
@ -773,6 +815,9 @@ func TestDockerDriver_ForcePull_IsInvalidConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_ForcePull(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["force_pull"] = "true"
|
||||
|
||||
|
@ -788,6 +833,9 @@ func TestDockerDriver_ForcePull(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_SecurityOpt(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["security_opt"] = []string{"seccomp=unconfined"}
|
||||
|
||||
|
@ -807,6 +855,9 @@ func TestDockerDriver_SecurityOpt(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_DNS(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["dns_servers"] = []string{"8.8.8.8", "8.8.4.4"}
|
||||
task.Config["dns_search_domains"] = []string{"example.com", "example.org", "example.net"}
|
||||
|
@ -831,6 +882,9 @@ func TestDockerDriver_DNS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_MACAddress(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["mac_address"] = "00:16:3e:00:00:00"
|
||||
|
||||
|
@ -850,6 +904,9 @@ func TestDockerDriver_MACAddress(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerWorkDir(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, _, _ := dockerTask()
|
||||
task.Config["work_dir"] = "/some/path"
|
||||
|
||||
|
@ -876,6 +933,9 @@ func inSlice(needle string, haystack []string) bool {
|
|||
}
|
||||
|
||||
func TestDockerDriver_PortsNoMap(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, res, dyn := dockerTask()
|
||||
|
||||
client, handle, cleanup := dockerSetup(t, task)
|
||||
|
@ -926,6 +986,9 @@ func TestDockerDriver_PortsNoMap(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_PortsMapping(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task, res, dyn := dockerTask()
|
||||
task.Config["port_map"] = []map[string]string{
|
||||
map[string]string{
|
||||
|
@ -971,7 +1034,7 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
|
|||
expectedEnvironment := map[string]string{
|
||||
"NOMAD_PORT_main": "8080",
|
||||
"NOMAD_PORT_REDIS": "6379",
|
||||
"NOMAD_HOST_PORT_main": strconv.Itoa(docker_reserved),
|
||||
"NOMAD_HOST_PORT_main": strconv.Itoa(res),
|
||||
}
|
||||
|
||||
sort.Strings(container.Config.Env)
|
||||
|
@ -984,6 +1047,9 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_User(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task := &structs.Task{
|
||||
Name: "redis-demo",
|
||||
User: "alice",
|
||||
|
@ -1033,6 +1099,9 @@ func TestDockerDriver_User(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_CleanupContainer(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task := &structs.Task{
|
||||
Name: "redis-demo",
|
||||
Driver: "docker",
|
||||
|
@ -1081,6 +1150,9 @@ func TestDockerDriver_CleanupContainer(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_Stats(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
Driver: "docker",
|
||||
|
@ -1189,6 +1261,9 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
|
|||
}
|
||||
|
||||
func TestDockerDriver_VolumesDisabled(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
cfg := testConfig()
|
||||
cfg.Options = map[string]string{
|
||||
dockerVolumesConfigOption: "false",
|
||||
|
@ -1259,6 +1334,9 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_VolumesEnabled(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
cfg := testConfig()
|
||||
|
||||
tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled")
|
||||
|
@ -1295,6 +1373,9 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) {
|
|||
|
||||
// TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images.
|
||||
func TestDockerDriver_Cleanup(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
@ -1356,6 +1437,9 @@ func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
|
|||
}
|
||||
|
||||
func TestDockerDriver_AuthConfiguration(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
path := "./test-resources/docker/auth.json"
|
||||
cases := []struct {
|
||||
Repo string
|
||||
|
|
|
@ -16,6 +16,9 @@ import (
|
|||
)
|
||||
|
||||
func TestDockerDriver_Signal(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
|
|
@ -209,6 +209,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
|
|||
"NOMAD_ALLOC_INDEX": "0",
|
||||
"NOMAD_ALLOC_NAME": alloc.Name,
|
||||
"NOMAD_TASK_NAME": task.Name,
|
||||
"NOMAD_GROUP_NAME": alloc.TaskGroup,
|
||||
"NOMAD_JOB_NAME": alloc.Job.Name,
|
||||
"NOMAD_DC": "dc1",
|
||||
"NOMAD_REGION": "global",
|
||||
|
@ -219,6 +220,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
|
|||
}
|
||||
|
||||
func TestDriver_GetTaskEnv_None(t *testing.T) {
|
||||
t.Parallel()
|
||||
taskDir, exp, act := setupTaskEnv(t, "raw_exec")
|
||||
|
||||
// raw_exec should use host alloc dir path
|
||||
|
@ -247,6 +249,7 @@ func TestDriver_GetTaskEnv_None(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDriver_GetTaskEnv_Chroot(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, exp, act := setupTaskEnv(t, "exec")
|
||||
|
||||
exp[env.AllocDir] = allocdir.SharedAllocContainerPath
|
||||
|
@ -276,6 +279,7 @@ func TestDriver_GetTaskEnv_Chroot(t *testing.T) {
|
|||
// TestDriver_TaskEnv_Image ensures host environment variables are not set
|
||||
// for image based drivers. See #2211
|
||||
func TestDriver_TaskEnv_Image(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, exp, act := setupTaskEnv(t, "docker")
|
||||
|
||||
exp[env.AllocDir] = allocdir.SharedAllocContainerPath
|
||||
|
@ -301,6 +305,7 @@ func TestDriver_TaskEnv_Image(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMapMergeStrInt(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := map[string]int{
|
||||
"cakes": 5,
|
||||
"cookies": 3,
|
||||
|
@ -325,6 +330,7 @@ func TestMapMergeStrInt(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMapMergeStrStr(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := map[string]string{
|
||||
"cake": "chocolate",
|
||||
"cookie": "caramel",
|
||||
|
@ -349,6 +355,7 @@ func TestMapMergeStrStr(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreatedResources_AddMerge(t *testing.T) {
|
||||
t.Parallel()
|
||||
res1 := NewCreatedResources()
|
||||
res1.Add("k1", "v1")
|
||||
res1.Add("k1", "v2")
|
||||
|
@ -388,6 +395,7 @@ func TestCreatedResources_AddMerge(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreatedResources_CopyRemove(t *testing.T) {
|
||||
t.Parallel()
|
||||
res1 := NewCreatedResources()
|
||||
res1.Add("k1", "v1")
|
||||
res1.Add("k1", "v2")
|
||||
|
|
8
client/driver/env/env.go
vendored
|
@ -44,6 +44,9 @@ const (
|
|||
// TaskName is the environment variable for passing the task name.
|
||||
TaskName = "NOMAD_TASK_NAME"
|
||||
|
||||
// GroupName is the environment variable for passing the task group name.
|
||||
GroupName = "NOMAD_GROUP_NAME"
|
||||
|
||||
// JobName is the environment variable for passing the job name.
|
||||
JobName = "NOMAD_JOB_NAME"
|
||||
|
||||
|
@ -208,6 +211,7 @@ type Builder struct {
|
|||
region string
|
||||
allocId string
|
||||
allocName string
|
||||
groupName string
|
||||
vaultToken string
|
||||
injectVaultToken bool
|
||||
jobName string
|
||||
|
@ -277,6 +281,9 @@ func (b *Builder) Build() *TaskEnv {
|
|||
if b.allocName != "" {
|
||||
envMap[AllocName] = b.allocName
|
||||
}
|
||||
if b.groupName != "" {
|
||||
envMap[GroupName] = b.groupName
|
||||
}
|
||||
if b.allocIndex != -1 {
|
||||
envMap[AllocIndex] = strconv.Itoa(b.allocIndex)
|
||||
}
|
||||
|
@ -380,6 +387,7 @@ func (b *Builder) setTask(task *structs.Task) *Builder {
|
|||
func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
|
||||
b.allocId = alloc.ID
|
||||
b.allocName = alloc.Name
|
||||
b.groupName = alloc.TaskGroup
|
||||
b.allocIndex = int(alloc.Index())
|
||||
b.jobName = alloc.Job.Name
|
||||
|
||||
|
|
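With the env.go changes above, tasks now see the task group name alongside the other identity variables the client already injects. A small standalone example of reading them inside a task; the printed values are whatever the Nomad client set, and nothing here is specific to this commit beyond the new NOMAD_GROUP_NAME key.

package main

import (
	"fmt"
	"os"
)

func main() {
	// These are injected by the Nomad client when it launches the task.
	fmt.Println("job:  ", os.Getenv("NOMAD_JOB_NAME"))
	fmt.Println("group:", os.Getenv("NOMAD_GROUP_NAME")) // new in this change
	fmt.Println("task: ", os.Getenv("NOMAD_TASK_NAME"))
	fmt.Println("alloc:", os.Getenv("NOMAD_ALLOC_NAME"))
}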
1
client/driver/env/env_test.go
vendored
|
@ -181,6 +181,7 @@ func TestEnvironment_AsList(t *testing.T) {
|
|||
"NOMAD_HOST_PORT_http=80",
|
||||
"NOMAD_HOST_PORT_https=8080",
|
||||
"NOMAD_TASK_NAME=web",
|
||||
"NOMAD_GROUP_NAME=web",
|
||||
"NOMAD_ADDR_ssh_other=192.168.0.100:1234",
|
||||
"NOMAD_ADDR_ssh_ssh=192.168.0.100:22",
|
||||
"NOMAD_IP_ssh_other=192.168.0.100",
|
||||
|
|
|
@ -20,6 +20,9 @@ import (
|
|||
)
|
||||
|
||||
func TestExecDriver_Fingerprint(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "foo",
|
||||
|
@ -47,6 +50,9 @@ func TestExecDriver_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecDriver_StartOpen_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
|
@ -88,6 +94,9 @@ func TestExecDriver_StartOpen_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecDriver_Start_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
|
@ -133,6 +142,9 @@ func TestExecDriver_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
|
||||
exp := []byte{'w', 'i', 'n'}
|
||||
|
@ -189,6 +201,9 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecDriver_Start_Kill_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
|
@ -237,6 +252,9 @@ func TestExecDriver_Start_Kill_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecDriverUser(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
|
@ -275,6 +293,9 @@ func TestExecDriverUser(t *testing.T) {
|
|||
// TestExecDriver_HandlerExec ensures the exec driver's handle properly
|
||||
// executes commands inside the container.
|
||||
func TestExecDriver_HandlerExec(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
|
|
|
@ -19,6 +19,9 @@ import (
|
|||
)
|
||||
|
||||
func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
|
@ -88,6 +91,9 @@ func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecDriver_Signal(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.ExecCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "signal",
|
||||
|
|
|
@ -60,6 +60,7 @@ func testExecutorContextWithChroot(t *testing.T) (*ExecutorContext, *allocdir.Al
|
|||
}
|
||||
|
||||
func TestExecutor_IsolationAndConstraints(t *testing.T) {
|
||||
t.Parallel()
|
||||
testutil.ExecCompatible(t)
|
||||
|
||||
execCmd := ExecCommand{Cmd: "/bin/ls", Args: []string{"-F", "/", "/etc/"}}
|
||||
|
@ -139,6 +140,7 @@ ld.so.conf.d/`
|
|||
}
|
||||
|
||||
func TestExecutor_ClientCleanup(t *testing.T) {
|
||||
t.Parallel()
|
||||
testutil.ExecCompatible(t)
|
||||
|
||||
ctx, allocDir := testExecutorContextWithChroot(t)
|
||||
|
|
|
@ -63,6 +63,7 @@ func testExecutorContext(t *testing.T) (*ExecutorContext, *allocdir.AllocDir) {
|
|||
}
|
||||
|
||||
func TestExecutor_Start_Invalid(t *testing.T) {
|
||||
t.Parallel()
|
||||
invalid := "/bin/foobar"
|
||||
execCmd := ExecCommand{Cmd: invalid, Args: []string{"1"}}
|
||||
ctx, allocDir := testExecutorContext(t)
|
||||
|
@ -79,7 +80,8 @@ func TestExecutor_Start_Invalid(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecutor_Start_Wait_Failure_Code(t *testing.T) {
|
||||
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"fail"}}
|
||||
t.Parallel()
|
||||
execCmd := ExecCommand{Cmd: "/bin/date", Args: []string{"fail"}}
|
||||
ctx, allocDir := testExecutorContext(t)
|
||||
defer allocDir.Destroy()
|
||||
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
|
||||
|
@ -106,6 +108,7 @@ func TestExecutor_Start_Wait_Failure_Code(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecutor_Start_Wait(t *testing.T) {
|
||||
t.Parallel()
|
||||
execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
|
||||
ctx, allocDir := testExecutorContext(t)
|
||||
defer allocDir.Destroy()
|
||||
|
@ -144,6 +147,7 @@ func TestExecutor_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecutor_WaitExitSignal(t *testing.T) {
|
||||
t.Parallel()
|
||||
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10000"}}
|
||||
ctx, allocDir := testExecutorContext(t)
|
||||
defer allocDir.Destroy()
|
||||
|
@@ -159,13 +163,13 @@ func TestExecutor_WaitExitSignal(t *testing.T) {
	}

	go func() {
		time.Sleep(3 * time.Second)
		time.Sleep(2 * time.Second)
		ru, err := executor.Stats()
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(ru.Pids) != 2 {
			t.Fatalf("expected number of pids: 2, actual: %v", len(ru.Pids))
		if len(ru.Pids) == 0 {
			t.Fatalf("expected pids")
		}
		proc, err := os.FindProcess(ps.Pid)
		if err != nil {
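The test change above relaxes a brittle assertion (an exact pid count after a fixed sleep) into a presence check. When a test has to wait on an asynchronous condition, a small retry loop is usually sturdier than a single sleep; the helper below is a generic sketch of that idea, not the project's own test utilities:

	package main

	import (
		"errors"
		"fmt"
		"time"
	)

	// waitFor polls cond until it returns true or the deadline passes.
	func waitFor(timeout, interval time.Duration, cond func() (bool, error)) error {
		deadline := time.Now().Add(timeout)
		for time.Now().Before(deadline) {
			ok, err := cond()
			if err != nil {
				return err
			}
			if ok {
				return nil
			}
			time.Sleep(interval)
		}
		return errors.New("condition not met before timeout")
	}

	func main() {
		start := time.Now()
		err := waitFor(3*time.Second, 100*time.Millisecond, func() (bool, error) {
			// Stand-in for a Stats() call: succeed once ~500ms have elapsed.
			return time.Since(start) > 500*time.Millisecond, nil
		})
		fmt.Println("err:", err)
	}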
@ -186,6 +190,7 @@ func TestExecutor_WaitExitSignal(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecutor_Start_Kill(t *testing.T) {
|
||||
t.Parallel()
|
||||
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10 && hello world"}}
|
||||
ctx, allocDir := testExecutorContext(t)
|
||||
defer allocDir.Destroy()
|
||||
|
@ -226,6 +231,7 @@ func TestExecutor_Start_Kill(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestExecutor_MakeExecutable(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Create a temp file
|
||||
f, err := ioutil.TempFile("", "")
|
||||
if err != nil {
|
||||
|
@ -259,6 +265,7 @@ func TestExecutor_MakeExecutable(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestScanPids(t *testing.T) {
|
||||
t.Parallel()
|
||||
p1 := NewFakeProcess(2, 5)
|
||||
p2 := NewFakeProcess(10, 2)
|
||||
p3 := NewFakeProcess(15, 6)
|
||||
|
|
|
@ -31,6 +31,9 @@ func javaLocated() bool {
|
|||
|
||||
// The fingerprinter test should always pass, even if Java is not installed.
|
||||
func TestJavaDriver_Fingerprint(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.JavaCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "foo",
|
||||
|
@ -67,6 +70,9 @@ func TestJavaDriver_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJavaDriver_StartOpen_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !javaLocated() {
|
||||
t.Skip("Java not found; skipping")
|
||||
}
|
||||
|
@ -120,6 +126,9 @@ func TestJavaDriver_StartOpen_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJavaDriver_Start_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !javaLocated() {
|
||||
t.Skip("Java not found; skipping")
|
||||
}
|
||||
|
@ -130,6 +139,7 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
|
|||
Driver: "java",
|
||||
Config: map[string]interface{}{
|
||||
"jar_path": "demoapp.jar",
|
||||
"args": []string{"1"},
|
||||
},
|
||||
LogConfig: &structs.LogConfig{
|
||||
MaxFiles: 10,
|
||||
|
@@ -154,15 +164,14 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
		t.Fatalf("err: %v", err)
	}

	// Task should terminate quickly
	// Task should terminate after 1 second
	select {
	case res := <-resp.Handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("err: %v", res)
			t.Fatalf("err: %v", res.String())
		}
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		// expect the timeout b/c it's a long lived process
		break
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}

	// Get the stdout of the process and assert that it's not empty
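The updated wait loop above bounds the test with a plain 5-second timeout instead of treating the timeout as the expected outcome. Waiting on a result channel with a deadline is a standard select pattern; a small, self-contained sketch:

	package main

	import (
		"fmt"
		"time"
	)

	// waitWithTimeout blocks until a result arrives or the deadline passes.
	func waitWithTimeout(resultCh <-chan error, timeout time.Duration) error {
		select {
		case err := <-resultCh:
			return err
		case <-time.After(timeout):
			return fmt.Errorf("timed out after %s", timeout)
		}
	}

	func main() {
		done := make(chan error, 1)
		go func() {
			time.Sleep(200 * time.Millisecond) // stand-in for the task finishing
			done <- nil
		}()
		fmt.Println("result:", waitWithTimeout(done, 5*time.Second))
	}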
|
@ -183,6 +192,9 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !javaLocated() {
|
||||
t.Skip("Java not found; skipping")
|
||||
}
|
||||
|
@ -242,6 +254,9 @@ func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJavaDriver_Signal(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !javaLocated() {
|
||||
t.Skip("Java not found; skipping")
|
||||
}
|
||||
|
@ -300,10 +315,16 @@ func TestJavaDriver_Signal(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestJavaDriverUser(t *testing.T) {
|
||||
func TestJavaDriver_User(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !javaLocated() {
|
||||
t.Skip("Java not found; skipping")
|
||||
}
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("Linux only test")
|
||||
}
|
||||
|
||||
ctestutils.JavaCompatible(t)
|
||||
task := &structs.Task{
|
||||
|
@ -339,6 +360,9 @@ func TestJavaDriverUser(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJavaDriver_Start_Wait_Class(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !javaLocated() {
|
||||
t.Skip("Java not found; skipping")
|
||||
}
|
||||
|
@ -350,6 +374,7 @@ func TestJavaDriver_Start_Wait_Class(t *testing.T) {
|
|||
Config: map[string]interface{}{
|
||||
"class_path": "${NOMAD_TASK_DIR}",
|
||||
"class": "Hello",
|
||||
"args": []string{"1"},
|
||||
},
|
||||
LogConfig: &structs.LogConfig{
|
||||
MaxFiles: 10,
|
||||
|
@@ -374,15 +399,14 @@ func TestJavaDriver_Start_Wait_Class(t *testing.T) {
		t.Fatalf("err: %v", err)
	}

	// Task should terminate quickly
	// Task should terminate after 1 second
	select {
	case res := <-resp.Handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("err: %v", res)
			t.Fatalf("err: %v", res.String())
		}
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		// expect the timeout b/c it's a long lived process
		break
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}

	// Get the stdout of the process and assert that it's not empty
|
|
@ -18,12 +18,14 @@ var (
|
|||
)
|
||||
|
||||
func TestFileRotator_IncorrectPath(t *testing.T) {
|
||||
t.Parallel()
|
||||
if _, err := NewFileRotator("/foo", baseFileName, 10, 10, logger); err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileRotator_CreateNewFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ioutil.TempDir("", pathPrefix); err != nil {
|
||||
|
@ -42,6 +44,7 @@ func TestFileRotator_CreateNewFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFileRotator_OpenLastFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ioutil.TempDir("", pathPrefix); err != nil {
|
||||
|
@ -69,6 +72,7 @@ func TestFileRotator_OpenLastFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFileRotator_WriteToCurrentFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ioutil.TempDir("", pathPrefix); err != nil {
|
||||
|
@ -106,6 +110,7 @@ func TestFileRotator_WriteToCurrentFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFileRotator_RotateFiles(t *testing.T) {
|
||||
t.Parallel()
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ioutil.TempDir("", pathPrefix); err != nil {
|
||||
|
@ -164,6 +169,7 @@ func TestFileRotator_RotateFiles(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFileRotator_WriteRemaining(t *testing.T) {
|
||||
t.Parallel()
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ioutil.TempDir("", pathPrefix); err != nil {
|
||||
|
@ -243,6 +249,7 @@ func TestFileRotator_WriteRemaining(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFileRotator_PurgeOldFiles(t *testing.T) {
|
||||
t.Parallel()
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ioutil.TempDir("", pathPrefix); err != nil {
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
)
|
||||
|
||||
func TestLogParser_Priority(t *testing.T) {
|
||||
t.Parallel()
|
||||
line := []byte("<30>2016-02-10T10:16:43-08:00 d-thinkpad docker/e2a1e3ebd3a3[22950]: 1:C 10 Feb 18:16:43.391 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf")
|
||||
d := NewDockerLogParser(log.New(os.Stdout, "", log.LstdFlags))
|
||||
p, _, err := d.parsePriority(line)
|
||||
|
@ -30,6 +31,7 @@ func TestLogParser_Priority(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLogParser_Priority_UnixFormatter(t *testing.T) {
|
||||
t.Parallel()
|
||||
line := []byte("<30>Feb 6, 10:16:43 docker/e2a1e3ebd3a3[22950]: 1:C 10 Feb 18:16:43.391 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf")
|
||||
d := NewDockerLogParser(log.New(os.Stdout, "", log.LstdFlags))
|
||||
p, _, err := d.parsePriority(line)
|
||||
|
|
|
@@ -37,11 +37,17 @@ func (s *SyslogServer) Start() {
	for {
		select {
		case <-s.doneCh:
			s.listener.Close()
			return
		default:
			connection, err := s.listener.Accept()
			if err != nil {
				s.doneLock.Lock()
				done := s.done
				s.doneLock.Unlock()
				if done {
					return
				}

				s.logger.Printf("[ERR] logcollector.server: error in accepting connection: %v", err)
				continue
			}

@@ -74,11 +80,12 @@ func (s *SyslogServer) read(connection net.Conn) {
// Shutdown shuts down the syslog server
func (s *SyslogServer) Shutdown() {
	s.doneLock.Lock()
	s.doneLock.Unlock()
	defer s.doneLock.Unlock()

	if !s.done {
		close(s.doneCh)
		close(s.messages)
		s.done = true
		s.listener.Close()
	}
}
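The Shutdown hunk replaces an immediate Unlock (which left the rest of the function unprotected) with a deferred Unlock, and closes the listener under the same lock so a second call is a no-op. A minimal sketch of that idempotent-shutdown pattern, independent of the real SyslogServer type:

	package main

	import (
		"fmt"
		"sync"
	)

	type server struct {
		mu     sync.Mutex
		done   bool
		doneCh chan struct{}
	}

	// Shutdown may be called more than once; only the first call closes anything.
	func (s *server) Shutdown() {
		s.mu.Lock()
		defer s.mu.Unlock() // hold the lock for the whole check-and-close sequence

		if !s.done {
			close(s.doneCh)
			s.done = true
		}
	}

	func main() {
		s := &server{doneCh: make(chan struct{})}
		s.Shutdown()
		s.Shutdown() // safe: the done flag prevents a double close
		fmt.Println("shut down:", s.done)
	}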
|
|
|
@@ -10,6 +10,7 @@ import (
)

func TestSyslogServer_Start_Shutdown(t *testing.T) {
	t.Parallel()
	dir, err := ioutil.TempDir("", "sock")
	if err != nil {
		t.Fatalf("Failed to create temporary directory: %v", err)
|
|
@@ -382,13 +382,16 @@ func (h *lxcDriverHandle) Exec(ctx context.Context, cmd string, args []string) (
}

func (h *lxcDriverHandle) Kill() error {
	h.logger.Printf("[INFO] driver.lxc: shutting down container %q", h.container.Name())
	name := h.container.Name()

	h.logger.Printf("[INFO] driver.lxc: shutting down container %q", name)
	if err := h.container.Shutdown(h.killTimeout); err != nil {
		h.logger.Printf("[INFO] driver.lxc: shutting down container %q failed: %v", h.container.Name(), err)
		h.logger.Printf("[INFO] driver.lxc: shutting down container %q failed: %v", name, err)
		if err := h.container.Stop(); err != nil {
			h.logger.Printf("[ERR] driver.lxc: error stopping container %q: %v", h.container.Name(), err)
			h.logger.Printf("[ERR] driver.lxc: error stopping container %q: %v", name, err)
		}
	}

	close(h.doneCh)
	return nil
}
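The Kill hunk captures the container name once and keeps the existing two-step teardown: try a graceful Shutdown within the kill timeout, then fall back to a hard Stop if that fails. A generic sketch of that shutdown-then-stop fallback, with a made-up interface standing in for the container handle:

	package main

	import (
		"fmt"
		"time"
	)

	// stoppable is a stand-in for a container handle that supports graceful and
	// forced termination; it is not the real lxc binding.
	type stoppable interface {
		Shutdown(timeout time.Duration) error
		Stop() error
	}

	// kill tries a graceful shutdown first and only forces a stop on failure.
	func kill(c stoppable, timeout time.Duration) error {
		if err := c.Shutdown(timeout); err != nil {
			fmt.Printf("graceful shutdown failed: %v; forcing stop\n", err)
			return c.Stop()
		}
		return nil
	}

	type fakeContainer struct{ healthy bool }

	func (f fakeContainer) Shutdown(time.Duration) error {
		if f.healthy {
			return nil
		}
		return fmt.Errorf("shutdown timed out")
	}
	func (f fakeContainer) Stop() error { return nil }

	func main() {
		fmt.Println(kill(fakeContainer{healthy: false}, 5*time.Second))
	}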
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
)
|
||||
|
||||
func TestLxcDriver_Fingerprint(t *testing.T) {
|
||||
t.Parallel()
|
||||
if !lxcPresent(t) {
|
||||
t.Skip("lxc not present")
|
||||
}
|
||||
|
@ -54,6 +55,9 @@ func TestLxcDriver_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLxcDriver_Start_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !lxcPresent(t) {
|
||||
t.Skip("lxc not present")
|
||||
}
|
||||
|
@ -127,6 +131,9 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLxcDriver_Open_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !lxcPresent(t) {
|
||||
t.Skip("lxc not present")
|
||||
}
|
||||
|
|
|
@@ -200,7 +200,7 @@ func (h *mockDriverHandle) ID() string {
		TaskName:    h.taskName,
		RunFor:      h.runFor,
		KillAfter:   h.killAfter,
		KillTimeout: h.killAfter,
		KillTimeout: h.killTimeout,
		ExitCode:    h.exitCode,
		ExitSignal:  h.exitSignal,
		ExitErr:     h.exitErr,

@@ -250,6 +250,7 @@ func (h *mockDriverHandle) Exec(ctx context.Context, cmd string, args []string)

// TODO Implement when we need it.
func (h *mockDriverHandle) Update(task *structs.Task) error {
	h.killTimeout = task.KillTimeout
	return nil
}
|
|
@ -9,12 +9,16 @@ import (
|
|||
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
|
||||
ctestutils "github.com/hashicorp/nomad/client/testutil"
|
||||
)
|
||||
|
||||
// The fingerprinter test should always pass, even if QEMU is not installed.
|
||||
func TestQemuDriver_Fingerprint(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.QemuCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "foo",
|
||||
|
@ -44,6 +48,9 @@ func TestQemuDriver_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestQemuDriver_StartOpen_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.QemuCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "linux",
|
||||
|
@ -110,6 +117,9 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestQemuDriverUser(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
ctestutils.QemuCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "linux",
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -19,6 +20,7 @@ import (
|
|||
)
|
||||
|
||||
func TestRawExecDriver_Fingerprint(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := &structs.Task{
|
||||
Name: "foo",
|
||||
Driver: "raw_exec",
|
||||
|
@ -60,6 +62,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRawExecDriver_StartOpen_Wait(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
Driver: "raw_exec",
|
||||
|
@ -106,6 +109,7 @@ func TestRawExecDriver_StartOpen_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRawExecDriver_Start_Wait(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
Driver: "raw_exec",
|
||||
|
@ -150,6 +154,7 @@ func TestRawExecDriver_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {
|
||||
t.Parallel()
|
||||
exp := []byte("win")
|
||||
file := "output.txt"
|
||||
outPath := fmt.Sprintf(`${%s}/%s`, env.AllocDir, file)
|
||||
|
@ -206,6 +211,7 @@ func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRawExecDriver_Start_Kill_Wait(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
Driver: "raw_exec",
|
||||
|
@ -255,6 +261,10 @@ func TestRawExecDriver_Start_Kill_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRawExecDriverUser(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("Linux only test")
|
||||
}
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
Driver: "raw_exec",
|
||||
|
@ -290,6 +300,7 @@ func TestRawExecDriverUser(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRawExecDriver_HandlerExec(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := &structs.Task{
|
||||
Name: "sleep",
|
||||
Driver: "raw_exec",
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
)
|
||||
|
||||
func TestRawExecDriver_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
task := &structs.Task{
|
||||
Name: "signal",
|
||||
Driver: "raw_exec",
|
||||
|
|
|
@@ -80,7 +80,7 @@ type RktDriverConfig struct {
	Net             []string            `mapstructure:"net"`              // Networks for the containers
	PortMapRaw      []map[string]string `mapstructure:"port_map"`         //
	PortMap         map[string]string   `mapstructure:"-"`                // A map of host port and the port name defined in the image manifest file
	Volumes         []string            `mapstructure:"volumes"`          // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container
	Volumes         []string            `mapstructure:"volumes"`          // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container[:readOnly]
	InsecureOptions []string            `mapstructure:"insecure_options"` // list of args for --insecure-options

	NoOverlay bool `mapstructure:"no_overlay"` // disable overlayfs for rkt run

@@ -319,11 +319,22 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
	}
	for i, rawvol := range driverConfig.Volumes {
		parts := strings.Split(rawvol, ":")
		if len(parts) != 2 {
		readOnly := "false"
		// job spec:
		// volumes = ["/host/path:/container/path[:readOnly]"]
		// the third parameter is optional, mount is read-write by default
		if len(parts) == 3 {
			if parts[2] == "readOnly" {
				d.logger.Printf("[DEBUG] Mounting %s:%s as readOnly", parts[0], parts[1])
				readOnly = "true"
			} else {
				d.logger.Printf("[WARN] Unknown volume parameter '%s' ignored for mount %s", parts[2], parts[0])
			}
		} else if len(parts) != 2 {
			return nil, fmt.Errorf("invalid rkt volume: %q", rawvol)
		}
		volName := fmt.Sprintf("%s-%s-%d", d.DriverContext.allocID, sanitizedName, i)
		cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", volName, parts[0]))
		cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s,readOnly=%s", volName, parts[0], readOnly))
		cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", volName, parts[1]))
	}
}
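The Start hunk above teaches the rkt driver to accept an optional third `readOnly` segment in a volume spec while keeping read-write as the default. A standalone sketch of that `host:container[:readOnly]` parsing, using only the standard library:

	package main

	import (
		"fmt"
		"strings"
	)

	// parseVolume splits "host:container[:readOnly]" into its parts. The third
	// segment is optional and the mount is read-write unless it says "readOnly".
	func parseVolume(raw string) (host, container string, readOnly bool, err error) {
		parts := strings.Split(raw, ":")
		switch len(parts) {
		case 2:
			return parts[0], parts[1], false, nil
		case 3:
			return parts[0], parts[1], parts[2] == "readOnly", nil
		default:
			return "", "", false, fmt.Errorf("invalid volume spec: %q", raw)
		}
	}

	func main() {
		for _, raw := range []string{"/data:/srv", "/etc/ssl:/ssl:readOnly", "bad"} {
			h, c, ro, err := parseVolume(raw)
			fmt.Println(h, c, ro, err)
		}
	}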
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
)
|
||||
|
||||
func TestRktVersionRegex(t *testing.T) {
|
||||
t.Parallel()
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("NOMAD_TEST_RKT unset, skipping")
|
||||
}
|
||||
|
@ -41,6 +42,7 @@ func TestRktVersionRegex(t *testing.T) {
|
|||
|
||||
// The fingerprinter test should always pass, even if rkt is not installed.
|
||||
func TestRktDriver_Fingerprint(t *testing.T) {
|
||||
t.Parallel()
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -70,6 +72,9 @@ func TestRktDriver_Fingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktDriver_Start_DNS(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -121,6 +126,9 @@ func TestRktDriver_Start_DNS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktDriver_Start_Wait(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -180,6 +188,9 @@ func TestRktDriver_Start_Wait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -233,6 +244,9 @@ func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -305,6 +319,9 @@ func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktDriverUser(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -349,6 +366,9 @@ func TestRktDriverUser(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktTrustPrefix(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -390,6 +410,7 @@ func TestRktTrustPrefix(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktTaskValidate(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctestutils.RktCompatible(t)
|
||||
task := &structs.Task{
|
||||
Name: "etcd",
|
||||
|
@ -415,6 +436,9 @@ func TestRktTaskValidate(t *testing.T) {
|
|||
|
||||
// TODO: Port Mapping test should be ran with proper ACI image and test the port access.
|
||||
func TestRktDriver_PortsMapping(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
@ -479,6 +503,9 @@ func TestRktDriver_PortsMapping(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRktDriver_HandlerExec(t *testing.T) {
|
||||
if !testutil.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if os.Getenv("NOMAD_TEST_RKT") == "" {
|
||||
t.Skip("skipping rkt tests")
|
||||
}
|
||||
|
|
BIN  client/driver/test-resources/java/Hello.class (Stored with Git LFS)
Binary file not shown.

@@ -1,12 +1,14 @@
public class Hello {
    public static void main(String[] args) {
        while (true) {
            System.out.println("Hello");
            try {
                Thread.sleep(1000); //1000 milliseconds is one second.
            } catch(InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        }
        System.out.println("Hello");
        int seconds = 5;
        if (args.length != 0) {
            seconds = Integer.parseInt(args[0]);
        }
        try {
            Thread.sleep(1000*seconds); //1000 milliseconds is one second.
        } catch(InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
    }
}
|
|
BIN  client/driver/test-resources/java/demoapp.jar (Stored with Git LFS)
Binary file not shown.
|
@ -6,6 +6,7 @@ import (
|
|||
)
|
||||
|
||||
func TestDriver_KillTimeout(t *testing.T) {
|
||||
t.Parallel()
|
||||
expected := 1 * time.Second
|
||||
max := 10 * time.Second
|
||||
|
||||
|
|
|
@ -16,9 +16,15 @@ import (
|
|||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
// This is where the AWS metadata server normally resides. We hardcode the
|
||||
// "instance" path as well since it's the only one we access here.
|
||||
const DEFAULT_AWS_URL = "http://169.254.169.254/latest/meta-data/"
|
||||
const (
|
||||
// This is where the AWS metadata server normally resides. We hardcode the
|
||||
// "instance" path as well since it's the only one we access here.
|
||||
DEFAULT_AWS_URL = "http://169.254.169.254/latest/meta-data/"
|
||||
|
||||
// AwsMetadataTimeout is the timeout used when contacting the AWS metadata
|
||||
// service
|
||||
AwsMetadataTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// map of instance type to approximate speed, in Mbits/s
|
||||
// Estimates from http://stackoverflow.com/a/35806587
|
||||
|
@ -44,16 +50,25 @@ var ec2InstanceSpeedMap = map[*regexp.Regexp]int{
|
|||
// EnvAWSFingerprint is used to fingerprint AWS metadata
|
||||
type EnvAWSFingerprint struct {
|
||||
StaticFingerprinter
|
||||
logger *log.Logger
|
||||
timeout time.Duration
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// NewEnvAWSFingerprint is used to create a fingerprint from AWS metadata
|
||||
func NewEnvAWSFingerprint(logger *log.Logger) Fingerprint {
|
||||
f := &EnvAWSFingerprint{logger: logger}
|
||||
f := &EnvAWSFingerprint{
|
||||
logger: logger,
|
||||
timeout: AwsMetadataTimeout,
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *EnvAWSFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
|
||||
// Check if we should tighten the timeout
|
||||
if cfg.ReadBoolDefault(TightenNetworkTimeoutsConfig, false) {
|
||||
f.timeout = 1 * time.Millisecond
|
||||
}
|
||||
|
||||
if !f.isAWS() {
|
||||
return false, nil
|
||||
}
|
||||
|
@ -71,9 +86,8 @@ func (f *EnvAWSFingerprint) Fingerprint(cfg *config.Config, node *structs.Node)
|
|||
metadataURL = DEFAULT_AWS_URL
|
||||
}
|
||||
|
||||
// assume 2 seconds is enough time for inside AWS network
|
||||
client := &http.Client{
|
||||
Timeout: 2 * time.Second,
|
||||
Timeout: f.timeout,
|
||||
Transport: cleanhttp.DefaultTransport(),
|
||||
}
|
||||
|
||||
|
@ -174,9 +188,8 @@ func (f *EnvAWSFingerprint) isAWS() bool {
|
|||
metadataURL = DEFAULT_AWS_URL
|
||||
}
|
||||
|
||||
// assume 2 seconds is enough time for inside AWS network
|
||||
client := &http.Client{
|
||||
Timeout: 2 * time.Second,
|
||||
Timeout: f.timeout,
|
||||
Transport: cleanhttp.DefaultTransport(),
|
||||
}
|
||||
|
||||
|
@ -217,9 +230,8 @@ func (f *EnvAWSFingerprint) linkSpeed() int {
|
|||
metadataURL = DEFAULT_AWS_URL
|
||||
}
|
||||
|
||||
// assume 2 seconds is enough time for inside AWS network
|
||||
client := &http.Client{
|
||||
Timeout: 2 * time.Second,
|
||||
Timeout: f.timeout,
|
||||
Transport: cleanhttp.DefaultTransport(),
|
||||
}
|
||||
|
||||
|
|
|
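The env_aws fingerprint hunks above (and the matching env_gce change that follows) move the hard-coded 2-second metadata timeout into a named constant and a field on the fingerprinter, so tests can shrink it instead of waiting out the full default when no metadata service is reachable. A hedged sketch of that shape, with simplified names rather than the real fingerprinter types:

	package main

	import (
		"fmt"
		"net/http"
		"time"
	)

	const metadataTimeout = 2 * time.Second // assumed default, mirroring the diff

	// metadataFingerprint keeps its timeout as a field so a test harness can
	// tighten it before any HTTP calls are made.
	type metadataFingerprint struct {
		timeout time.Duration
	}

	func newMetadataFingerprint() *metadataFingerprint {
		return &metadataFingerprint{timeout: metadataTimeout}
	}

	func (f *metadataFingerprint) client() *http.Client {
		return &http.Client{Timeout: f.timeout}
	}

	func main() {
		f := newMetadataFingerprint()
		// A test could set: f.timeout = time.Millisecond
		fmt.Println("metadata client timeout:", f.client().Timeout)
	}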
@ -18,9 +18,15 @@ import (
|
|||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
// This is where the GCE metadata server normally resides. We hardcode the
|
||||
// "instance" path as well since it's the only one we access here.
|
||||
const DEFAULT_GCE_URL = "http://169.254.169.254/computeMetadata/v1/instance/"
|
||||
const (
|
||||
// This is where the GCE metadata server normally resides. We hardcode the
|
||||
// "instance" path as well since it's the only one we access here.
|
||||
DEFAULT_GCE_URL = "http://169.254.169.254/computeMetadata/v1/instance/"
|
||||
|
||||
// GceMetadataTimeout is the timeout used when contacting the GCE metadata
|
||||
// service
|
||||
GceMetadataTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
type GCEMetadataNetworkInterface struct {
|
||||
AccessConfigs []struct {
|
||||
|
@ -64,7 +70,7 @@ func NewEnvGCEFingerprint(logger *log.Logger) Fingerprint {
|
|||
|
||||
// assume 2 seconds is enough time for inside GCE network
|
||||
client := &http.Client{
|
||||
Timeout: 2 * time.Second,
|
||||
Timeout: GceMetadataTimeout,
|
||||
Transport: cleanhttp.DefaultTransport(),
|
||||
}
|
||||
|
||||
|
@ -126,6 +132,11 @@ func checkError(err error, logger *log.Logger, desc string) error {
|
|||
}
|
||||
|
||||
func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
|
||||
// Check if we should tighten the timeout
|
||||
if cfg.ReadBoolDefault(TightenNetworkTimeoutsConfig, false) {
|
||||
f.client.Timeout = 1 * time.Millisecond
|
||||
}
|
||||
|
||||
if !f.isGCE() {
|
||||
return false, nil
|
||||
}
|
||||
|
|
|
@@ -13,6 +13,10 @@ import (
// EmptyDuration is to be used by fingerprinters that are not periodic.
const (
	EmptyDuration = time.Duration(0)

	// TightenNetworkTimeoutsConfig is a config key that can be used during
	// tests to tighten the timeouts for fingerprinters that make network calls.
	TightenNetworkTimeoutsConfig = "test.tighten_network_timeouts"
)

func init() {
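The new `TightenNetworkTimeoutsConfig` key above is read by the cloud fingerprinters with `cfg.ReadBoolDefault(...)` before they shrink their HTTP timeouts. A minimal sketch of that read-an-optional-boolean-option pattern over a plain options map (the real client config type is not reproduced here):

	package main

	import (
		"fmt"
		"strconv"
	)

	// options is a stand-in for the client config's free-form options map.
	type options map[string]string

	// readBoolDefault returns the parsed value for key, or def when the key is
	// missing or unparsable.
	func (o options) readBoolDefault(key string, def bool) bool {
		raw, ok := o[key]
		if !ok {
			return def
		}
		v, err := strconv.ParseBool(raw)
		if err != nil {
			return def
		}
		return v
	}

	func main() {
		o := options{"test.tighten_network_timeouts": "true"}
		if o.readBoolDefault("test.tighten_network_timeouts", false) {
			fmt.Println("using a tightened metadata timeout for tests")
		}
	}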
|
|
@@ -9,7 +9,7 @@ import (
)

func TestVaultFingerprint(t *testing.T) {
	tv := testutil.NewTestVault(t).Start()
	tv := testutil.NewTestVault(t)
	defer tv.Stop()

	fp := NewVaultFingerprint(testLogger())
|
@ -20,6 +20,7 @@ func gcConfig() *GCConfig {
|
|||
}
|
||||
|
||||
func TestIndexedGCAllocPQ(t *testing.T) {
|
||||
t.Parallel()
|
||||
pq := NewIndexedGCAllocPQ()
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
|
@ -100,6 +101,7 @@ func (m *MockStatsCollector) Stats() *stats.HostStats {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
|
@ -115,6 +117,7 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_Collect(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
|
@ -141,6 +144,7 @@ func TestAllocGarbageCollector_Collect(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_CollectAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
|
@ -163,6 +167,7 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -201,6 +206,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T)
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -240,6 +246,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -275,6 +282,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -309,6 +317,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T)
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) {
|
||||
t.Parallel()
|
||||
const (
|
||||
liveAllocs = 3
|
||||
maxAllocs = 6
|
||||
|
@ -346,6 +355,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -381,6 +391,7 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -418,6 +429,7 @@ func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MaxAllocsThreshold(t *testing.T) {
|
||||
t.Parallel()
|
||||
const (
|
||||
liveAllocs = 3
|
||||
maxAllocs = 6
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
)
|
||||
|
||||
func TestServerList(t *testing.T) {
|
||||
t.Parallel()
|
||||
s := newServerList()
|
||||
|
||||
// New lists should be empty
|
||||
|
@ -89,6 +90,7 @@ func TestServerList(t *testing.T) {
|
|||
// TestClient_ServerList tests client methods that interact with the internal
|
||||
// nomad server list.
|
||||
func TestClient_ServerList(t *testing.T) {
|
||||
t.Parallel()
|
||||
// manually create a mostly empty client to avoid spinning up a ton of
|
||||
// goroutines that complicate testing
|
||||
client := Client{servers: newServerList(), logger: log.New(os.Stderr, "", log.Ltime|log.Lshortfile)}
|
||||
|
|
|
@@ -29,26 +29,34 @@ type AllocListener struct {
	id int
}

// Send broadcasts a message to the channel.
// Sending on a closed channel causes a runtime panic.
func (b *AllocBroadcaster) Send(v *structs.Allocation) {
// Send broadcasts a message to the channel. Send returns whether the message
// was sent to all channels.
func (b *AllocBroadcaster) Send(v *structs.Allocation) bool {
	b.m.Lock()
	defer b.m.Unlock()
	if b.closed {
		return
		return false
	}
	sent := true
	for _, l := range b.listeners {
		select {
		case l <- v:
		default:
			sent = false
		}
	}

	return sent
}

// Close closes the channel, disabling the sending of further messages.
func (b *AllocBroadcaster) Close() {
	b.m.Lock()
	defer b.m.Unlock()
	if b.closed {
		return
	}

	b.closed = true
	for _, l := range b.listeners {
		close(l)
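Send above now reports whether every listener actually received the allocation: it does a non-blocking send per listener and clears a flag when any listener cannot accept the value. A self-contained sketch of that fan-out pattern:

	package main

	import "fmt"

	// broadcast does a non-blocking send to each listener and reports whether
	// every listener accepted the value.
	func broadcast(listeners []chan int, v int) bool {
		sent := true
		for _, l := range listeners {
			select {
			case l <- v:
			default:
				sent = false // listener is full or not receiving; drop rather than block
			}
		}
		return sent
	}

	func main() {
		a := make(chan int, 1)
		b := make(chan int) // unbuffered with nobody receiving: will be skipped
		fmt.Println(broadcast([]chan int{a, b}, 42)) // false
		fmt.Println(<-a)                             // 42
	}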
|
|
@@ -259,6 +259,14 @@ func (r *TaskRunner) WaitCh() <-chan struct{} {
	return r.waitCh
}

// getHandle returns the task's handle or nil
func (r *TaskRunner) getHandle() driver.DriverHandle {
	r.handleLock.Lock()
	h := r.handle
	r.handleLock.Unlock()
	return h
}

// pre060StateFilePath returns the path to our state file that would have been
// written pre v0.6.0
// COMPAT: Remove in 0.7.0
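Later hunks in this file replace the repeated lock/read/unlock of `r.handle` with calls to the new `getHandle` accessor, so every reader takes the same lock. A generic sketch of that accessor pattern:

	package main

	import (
		"fmt"
		"sync"
	)

	// runner holds a value that several goroutines read and one goroutine swaps.
	type runner struct {
		mu     sync.Mutex
		handle string // stand-in for driver.DriverHandle
	}

	// getHandle is the single, lock-protected way to read the handle.
	func (r *runner) getHandle() string {
		r.mu.Lock()
		h := r.handle
		r.mu.Unlock()
		return h
	}

	func (r *runner) setHandle(h string) {
		r.mu.Lock()
		r.handle = h
		r.mu.Unlock()
	}

	func main() {
		r := &runner{}
		r.setHandle("h-1234")
		fmt.Println("handle empty:", r.getHandle() == "")
	}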
@ -416,6 +424,13 @@ func pre06ScriptCheck(ver, driver string, services []*structs.Service) bool {
|
|||
|
||||
// SaveState is used to snapshot our state
|
||||
func (r *TaskRunner) SaveState() error {
|
||||
r.destroyLock.Lock()
|
||||
defer r.destroyLock.Unlock()
|
||||
if r.destroy {
|
||||
// Don't save state if already destroyed
|
||||
return nil
|
||||
}
|
||||
|
||||
r.persistLock.Lock()
|
||||
defer r.persistLock.Unlock()
|
||||
snap := taskRunnerState{
|
||||
|
@ -852,10 +867,12 @@ func (r *TaskRunner) updatedTokenHandler() {
|
|||
}
|
||||
|
||||
// prestart handles life-cycle tasks that occur before the task has started.
|
||||
func (r *TaskRunner) prestart(resultCh chan bool) {
|
||||
if r.task.Vault != nil {
|
||||
// Since it's run asynchronously with the main Run() loop the alloc & task are
|
||||
// passed in to avoid racing with updates.
|
||||
func (r *TaskRunner) prestart(alloc *structs.Allocation, task *structs.Task, resultCh chan bool) {
|
||||
if task.Vault != nil {
|
||||
// Wait for the token
|
||||
r.logger.Printf("[DEBUG] client: waiting for Vault token for task %v in alloc %q", r.task.Name, r.alloc.ID)
|
||||
r.logger.Printf("[DEBUG] client: waiting for Vault token for task %v in alloc %q", task.Name, alloc.ID)
|
||||
tokenCh := r.vaultFuture.Wait()
|
||||
select {
|
||||
case <-tokenCh:
|
||||
|
@ -863,16 +880,16 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
|
|||
resultCh <- false
|
||||
return
|
||||
}
|
||||
r.logger.Printf("[DEBUG] client: retrieved Vault token for task %v in alloc %q", r.task.Name, r.alloc.ID)
|
||||
r.envBuilder.SetVaultToken(r.vaultFuture.Get(), r.task.Vault.Env)
|
||||
r.logger.Printf("[DEBUG] client: retrieved Vault token for task %v in alloc %q", task.Name, alloc.ID)
|
||||
r.envBuilder.SetVaultToken(r.vaultFuture.Get(), task.Vault.Env)
|
||||
}
|
||||
|
||||
// If the job is a dispatch job and there is a payload write it to disk
|
||||
requirePayload := len(r.alloc.Job.Payload) != 0 &&
|
||||
requirePayload := len(alloc.Job.Payload) != 0 &&
|
||||
(r.task.DispatchPayload != nil && r.task.DispatchPayload.File != "")
|
||||
if !r.payloadRendered && requirePayload {
|
||||
renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchPayload.File)
|
||||
decoded, err := snappy.Decode(nil, r.alloc.Job.Payload)
|
||||
renderTo := filepath.Join(r.taskDir.LocalDir, task.DispatchPayload.File)
|
||||
decoded, err := snappy.Decode(nil, alloc.Job.Payload)
|
||||
if err != nil {
|
||||
r.setState(
|
||||
structs.TaskStateDead,
|
||||
|
@ -906,10 +923,10 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
|
|||
r.persistLock.Unlock()
|
||||
|
||||
// Download the task's artifacts
|
||||
if !downloaded && len(r.task.Artifacts) > 0 {
|
||||
if !downloaded && len(task.Artifacts) > 0 {
|
||||
r.setState(structs.TaskStatePending, structs.NewTaskEvent(structs.TaskDownloadingArtifacts))
|
||||
taskEnv := r.envBuilder.Build()
|
||||
for _, artifact := range r.task.Artifacts {
|
||||
for _, artifact := range task.Artifacts {
|
||||
if err := getter.GetArtifact(taskEnv, artifact, r.taskDir.Dir); err != nil {
|
||||
wrapped := fmt.Errorf("failed to download artifact %q: %v", artifact.GetterSource, err)
|
||||
r.logger.Printf("[DEBUG] client: %v", wrapped)
|
||||
|
@ -926,7 +943,7 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
|
|||
}
|
||||
|
||||
// We don't have to wait for any template
|
||||
if len(r.task.Templates) == 0 {
|
||||
if len(task.Templates) == 0 {
|
||||
// Send the start signal
|
||||
select {
|
||||
case r.startCh <- struct{}{}:
|
||||
|
@ -940,12 +957,12 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
|
|||
// Build the template manager
|
||||
if r.templateManager == nil {
|
||||
var err error
|
||||
r.templateManager, err = NewTaskTemplateManager(r, r.task.Templates,
|
||||
r.templateManager, err = NewTaskTemplateManager(r, task.Templates,
|
||||
r.config, r.vaultFuture.Get(), r.taskDir.Dir, r.envBuilder)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to build task's template manager: %v", err)
|
||||
r.setState(structs.TaskStateDead, structs.NewTaskEvent(structs.TaskSetupFailure).SetSetupError(err).SetFailsTask())
|
||||
r.logger.Printf("[ERR] client: alloc %q, task %q %v", r.alloc.ID, r.task.Name, err)
|
||||
r.logger.Printf("[ERR] client: alloc %q, task %q %v", alloc.ID, task.Name, err)
|
||||
resultCh <- false
|
||||
return
|
||||
}
|
||||
|
@ -996,9 +1013,7 @@ func (r *TaskRunner) run() {
|
|||
|
||||
// If we already have a handle, populate the stopCollection and handleWaitCh
|
||||
// to fix the invariant that it exists.
|
||||
r.handleLock.Lock()
|
||||
handleEmpty := r.handle == nil
|
||||
r.handleLock.Unlock()
|
||||
handleEmpty := r.getHandle() == nil
|
||||
|
||||
if !handleEmpty {
|
||||
stopCollection = make(chan struct{})
|
||||
|
@ -1009,7 +1024,7 @@ func (r *TaskRunner) run() {
|
|||
for {
|
||||
// Do the prestart activities
|
||||
prestartResultCh := make(chan bool, 1)
|
||||
go r.prestart(prestartResultCh)
|
||||
go r.prestart(r.alloc, r.task, prestartResultCh)
|
||||
|
||||
WAIT:
|
||||
for {
|
||||
|
@ -1024,9 +1039,7 @@ func (r *TaskRunner) run() {
|
|||
// Start the task if not yet started or it is being forced. This logic
|
||||
// is necessary because in the case of a restore the handle already
|
||||
// exists.
|
||||
r.handleLock.Lock()
|
||||
handleEmpty := r.handle == nil
|
||||
r.handleLock.Unlock()
|
||||
handleEmpty := r.getHandle() == nil
|
||||
if handleEmpty {
|
||||
startErr := r.startTask()
|
||||
r.restartTracker.SetStartError(startErr)
|
||||
|
@ -1131,7 +1144,8 @@ func (r *TaskRunner) run() {
|
|||
|
||||
// Remove from consul before killing the task so that traffic
|
||||
// can be rerouted
|
||||
r.consul.RemoveTask(r.alloc.ID, r.task)
|
||||
interpTask := interpolateServices(r.envBuilder.Build(), r.task)
|
||||
r.consul.RemoveTask(r.alloc.ID, interpTask)
|
||||
|
||||
// Store the task event that provides context on the task
|
||||
// destroy. The Killed event is set from the alloc_runner and
|
||||
|
@ -1179,7 +1193,8 @@ func (r *TaskRunner) run() {
|
|||
// stopping. Errors are logged.
|
||||
func (r *TaskRunner) cleanup() {
|
||||
// Remove from Consul
|
||||
r.consul.RemoveTask(r.alloc.ID, r.task)
|
||||
interpTask := interpolateServices(r.envBuilder.Build(), r.task)
|
||||
r.consul.RemoveTask(r.alloc.ID, interpTask)
|
||||
|
||||
drv, err := r.createDriver()
|
||||
if err != nil {
|
||||
|
@ -1240,7 +1255,8 @@ func (r *TaskRunner) shouldRestart() bool {
|
|||
}
|
||||
|
||||
// Unregister from Consul while waiting to restart.
|
||||
r.consul.RemoveTask(r.alloc.ID, r.task)
|
||||
interpTask := interpolateServices(r.envBuilder.Build(), r.task)
|
||||
r.consul.RemoveTask(r.alloc.ID, interpTask)
|
||||
|
||||
// Sleep but watch for destroy events.
|
||||
select {
|
||||
|
@ -1288,9 +1304,7 @@ func (r *TaskRunner) killTask(killingEvent *structs.TaskEvent) {
|
|||
// Mark that we received the kill event
|
||||
r.setState(structs.TaskStateRunning, event)
|
||||
|
||||
r.handleLock.Lock()
|
||||
handle := r.handle
|
||||
r.handleLock.Unlock()
|
||||
handle := r.getHandle()
|
||||
|
||||
// Kill the task using an exponential backoff in-case of failures.
|
||||
destroySuccess, err := r.handleDestroy(handle)
|
||||
|
@ -1385,14 +1399,15 @@ func (r *TaskRunner) registerServices(d driver.Driver, h driver.DriverHandle, n
|
|||
// Allow set the script executor if the driver supports it
|
||||
exec = h
|
||||
}
|
||||
interpolateServices(r.envBuilder.Build(), r.task)
|
||||
return r.consul.RegisterTask(r.alloc.ID, r.task, exec, n)
|
||||
interpolatedTask := interpolateServices(r.envBuilder.Build(), r.task)
|
||||
return r.consul.RegisterTask(r.alloc.ID, interpolatedTask, exec, n)
|
||||
}
|
||||
|
||||
// interpolateServices interpolates tags in a service and checks with values from the
|
||||
// task's environment.
|
||||
func interpolateServices(taskEnv *env.TaskEnv, task *structs.Task) {
|
||||
for _, service := range task.Services {
|
||||
func interpolateServices(taskEnv *env.TaskEnv, task *structs.Task) *structs.Task {
|
||||
taskCopy := task.Copy()
|
||||
for _, service := range taskCopy.Services {
|
||||
for _, check := range service.Checks {
|
||||
check.Name = taskEnv.ReplaceEnv(check.Name)
|
||||
check.Type = taskEnv.ReplaceEnv(check.Type)
|
||||
|
@ -1407,6 +1422,7 @@ func interpolateServices(taskEnv *env.TaskEnv, task *structs.Task) {
|
|||
service.PortLabel = taskEnv.ReplaceEnv(service.PortLabel)
|
||||
service.Tags = taskEnv.ParseAndReplace(service.Tags)
|
||||
}
|
||||
return taskCopy
|
||||
}
|
||||
|
||||
// buildTaskDir creates the task directory before driver.Prestart. It is safe
|
||||
|
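The registerServices and interpolateServices hunks above change interpolation to operate on a copy of the task and return it, instead of mutating the shared task in place, so Consul registration sees interpolated service and check fields without touching state other readers rely on. A simplified sketch of that copy-then-interpolate approach, with a trimmed-down task type rather than the real structs:

	package main

	import (
		"fmt"
		"strings"
	)

	type service struct {
		Name      string
		PortLabel string
		Tags      []string
	}

	type task struct {
		Services []*service
	}

	// copyTask makes a deep enough copy that interpolation never touches the original.
	func copyTask(t *task) *task {
		c := &task{}
		for _, s := range t.Services {
			sc := *s
			sc.Tags = append([]string(nil), s.Tags...)
			c.Services = append(c.Services, &sc)
		}
		return c
	}

	// interpolateServices replaces ${var} placeholders using env and returns the copy.
	func interpolateServices(env map[string]string, t *task) *task {
		c := copyTask(t)
		replace := func(s string) string {
			for k, v := range env {
				s = strings.ReplaceAll(s, "${"+k+"}", v)
			}
			return s
		}
		for _, s := range c.Services {
			s.Name = replace(s.Name)
			s.PortLabel = replace(s.PortLabel)
			for i, tag := range s.Tags {
				s.Tags[i] = replace(tag)
			}
		}
		return c
	}

	func main() {
		orig := &task{Services: []*service{{Name: "web-${NOMAD_GROUP_NAME}", Tags: []string{"v1"}}}}
		out := interpolateServices(map[string]string{"NOMAD_GROUP_NAME": "web"}, orig)
		fmt.Println(out.Services[0].Name, "| original untouched:", orig.Services[0].Name)
	}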
@ -1453,10 +1469,11 @@ func (r *TaskRunner) collectResourceUsageStats(stopCollection <-chan struct{}) {
|
|||
select {
|
||||
case <-next.C:
|
||||
next.Reset(r.config.StatsCollectionInterval)
|
||||
if r.handle == nil {
|
||||
handle := r.getHandle()
|
||||
if handle == nil {
|
||||
continue
|
||||
}
|
||||
ru, err := r.handle.Stats()
|
||||
ru, err := handle.Stats()
|
||||
|
||||
if err != nil {
|
||||
// Check if the driver doesn't implement stats
|
||||
|
@ -1567,11 +1584,12 @@ func (r *TaskRunner) updateServices(d driver.Driver, h driver.ScriptExecutor, ol
|
|||
// Allow set the script executor if the driver supports it
|
||||
exec = h
|
||||
}
|
||||
interpolateServices(r.envBuilder.Build(), new)
|
||||
newInterpolatedTask := interpolateServices(r.envBuilder.Build(), new)
|
||||
oldInterpolatedTask := interpolateServices(r.envBuilder.Build(), old)
|
||||
r.driverNetLock.Lock()
|
||||
net := r.driverNet.Copy()
|
||||
r.driverNetLock.Unlock()
|
||||
return r.consul.UpdateTask(r.alloc.ID, old, new, exec, net)
|
||||
return r.consul.UpdateTask(r.alloc.ID, oldInterpolatedTask, newInterpolatedTask, exec, net)
|
||||
}
|
||||
|
||||
// handleDestroy kills the task handle. In the case that killing fails,
|
||||
|
|
|
@ -22,8 +22,6 @@ import (
|
|||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
|
||||
ctestutil "github.com/hashicorp/nomad/client/testutil"
|
||||
)
|
||||
|
||||
func testLogger() *log.Logger {
|
||||
|
@ -68,7 +66,12 @@ func (ctx *taskRunnerTestCtx) Cleanup() {
|
|||
}
|
||||
|
||||
func testTaskRunner(t *testing.T, restarts bool) *taskRunnerTestCtx {
|
||||
return testTaskRunnerFromAlloc(t, restarts, mock.Alloc())
|
||||
// Use mock driver
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
task.Config["run_for"] = "500ms"
|
||||
return testTaskRunnerFromAlloc(t, restarts, alloc)
|
||||
}
|
||||
|
||||
// Creates a mock task runner using the first task in the first task group of
|
||||
|
@ -160,7 +163,7 @@ func testWaitForTaskToStart(t *testing.T, ctx *taskRunnerTestCtx) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_SimpleRun(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
ctx := testTaskRunner(t, false)
|
||||
ctx.tr.MarkReceived()
|
||||
go ctx.tr.Run()
|
||||
|
@ -198,6 +201,7 @@ func TestTaskRunner_SimpleRun(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -240,27 +244,22 @@ func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Destroy(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
ctx := testTaskRunner(t, true)
|
||||
ctx.tr.MarkReceived()
|
||||
//FIXME This didn't used to send a kill status update!!!???
|
||||
defer ctx.Cleanup()
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
task.Config = map[string]interface{}{
|
||||
"run_for": "1000s",
|
||||
}
|
||||
|
||||
// Change command to ensure we run for a bit
|
||||
ctx.tr.task.Config["command"] = "/bin/sleep"
|
||||
ctx.tr.task.Config["args"] = []string{"1000"}
|
||||
ctx := testTaskRunnerFromAlloc(t, true, alloc)
|
||||
ctx.tr.MarkReceived()
|
||||
go ctx.tr.Run()
|
||||
defer ctx.Cleanup()
|
||||
|
||||
// Wait for the task to start
|
||||
testWaitForTaskToStart(t, ctx)
|
||||
|
||||
// Make sure we are collecting a few stats
|
||||
time.Sleep(2 * time.Second)
|
||||
stats := ctx.tr.LatestResourceUsage()
|
||||
if len(stats.Pids) == 0 || stats.ResourceUsage == nil || stats.ResourceUsage.MemoryStats.RSS == 0 {
|
||||
t.Fatalf("expected task runner to have some stats")
|
||||
}
|
||||
|
||||
// Begin the tear down
|
||||
ctx.tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
|
||||
|
||||
|
@ -288,13 +287,17 @@ func TestTaskRunner_Destroy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Update(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
ctx := testTaskRunner(t, false)
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Services[0].Checks[0].Args[0] = "${NOMAD_META_foo}"
|
||||
task.Driver = "mock_driver"
|
||||
task.Config = map[string]interface{}{
|
||||
"run_for": "100s",
|
||||
}
|
||||
|
||||
// Change command to ensure we run for a bit
|
||||
ctx.tr.task.Config["command"] = "/bin/sleep"
|
||||
ctx.tr.task.Config["args"] = []string{"100"}
|
||||
ctx.tr.task.Services[0].Checks[0].Args[0] = "${NOMAD_META_foo}"
|
||||
ctx := testTaskRunnerFromAlloc(t, true, alloc)
|
||||
ctx.tr.MarkReceived()
|
||||
go ctx.tr.Run()
|
||||
defer ctx.Cleanup()
|
||||
|
||||
|
@ -314,18 +317,9 @@ func TestTaskRunner_Update(t *testing.T) {
|
|||
newTask.Meta["foo"] = "UPDATE"
|
||||
|
||||
// Update the kill timeout
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if ctx.tr.handle == nil {
|
||||
return false, fmt.Errorf("task not started")
|
||||
}
|
||||
return true, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("err: %v", err)
|
||||
})
|
||||
|
||||
testWaitForTaskToStart(t, ctx)
|
||||
oldHandle := ctx.tr.handle.ID()
|
||||
newTask.KillTimeout = time.Hour
|
||||
|
||||
ctx.tr.Update(updateAlloc)
|
||||
|
||||
// Wait for ctx.update to take place
|
||||
|
@ -364,6 +358,7 @@ func TestTaskRunner_Update(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_SaveRestoreState(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -378,7 +373,6 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
|
|||
ctx := testTaskRunnerFromAlloc(t, false, alloc)
|
||||
ctx.tr.MarkReceived()
|
||||
go ctx.tr.Run()
|
||||
//FIXME This test didn't used to defer destroy the allocidr ???!!!
|
||||
defer ctx.Cleanup()
|
||||
|
||||
// Wait for the task to be running and then snapshot the state
|
||||
|
@ -424,14 +418,18 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Download_List(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
|
||||
t.Parallel()
|
||||
ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
|
||||
defer ts.Close()
|
||||
|
||||
// Create an allocation that has a task with a list of artifacts.
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
task.Config = map[string]interface{}{
|
||||
"exit_code": "0",
|
||||
"run_for": "10s",
|
||||
}
|
||||
f1 := "task_runner_test.go"
|
||||
f2 := "task_runner.go"
|
||||
artifact1 := structs.TaskArtifact{
|
||||
|
@ -491,13 +489,17 @@ func TestTaskRunner_Download_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Download_Retries(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
|
||||
t.Parallel()
|
||||
// Create an allocation that has a task with bad artifacts.
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
task.Config = map[string]interface{}{
|
||||
"exit_code": "0",
|
||||
"run_for": "10s",
|
||||
}
|
||||
artifact := structs.TaskArtifact{
|
||||
GetterSource: "http://127.1.1.111:12315/foo/bar/baz",
|
||||
GetterSource: "http://127.0.0.1:0/foo/bar/baz",
|
||||
}
|
||||
task.Artifacts = []*structs.TaskArtifact{&artifact}
|
||||
|
||||
|
@ -564,8 +566,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) {
|
|||
// TestTaskRunner_UnregisterConsul_Retries asserts a task is unregistered from
|
||||
// Consul when waiting to be retried.
|
||||
func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
|
||||
t.Parallel()
|
||||
// Create an allocation that has a task with bad artifacts.
|
||||
alloc := mock.Alloc()
|
||||
|
||||
|
@ -609,7 +610,7 @@ func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
ctx := testTaskRunner(t, false)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
|
@ -636,6 +637,7 @@ func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_RestartTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -659,7 +661,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
|
|||
// Wait for the task to start again
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if len(ctx.upd.events) != 8 {
|
||||
t.Fatalf("task %q in alloc %q should have 8 ctx.updates: %#v", task.Name, alloc.ID, ctx.upd.events)
|
||||
return false, fmt.Errorf("task %q in alloc %q should have 8 ctx.updates: %#v", task.Name, alloc.ID, ctx.upd.events)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
@ -677,7 +679,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
|
|||
}
|
||||
|
||||
if len(ctx.upd.events) != 10 {
|
||||
t.Fatalf("should have 9 ctx.updates: %#v", ctx.upd.events)
|
||||
t.Fatalf("should have 10 ctx.updates: %#v", ctx.upd.events)
|
||||
}
|
||||
|
||||
if ctx.upd.state != structs.TaskStateDead {
|
||||
|
@ -725,6 +727,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_KillTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -783,6 +786,7 @@ func TestTaskRunner_KillTask(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_SignalFailure(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -806,6 +810,7 @@ func TestTaskRunner_SignalFailure(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_BlockForVault(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -913,6 +918,7 @@ func TestTaskRunner_BlockForVault(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -998,6 +1004,7 @@ func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1044,6 +1051,7 @@ func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Template_Block(t *testing.T) {
|
||||
t.Parallel()
|
||||
testRetryRate = 2 * time.Second
|
||||
defer func() {
|
||||
testRetryRate = 0
|
||||
|
@ -1125,6 +1133,7 @@ func TestTaskRunner_Template_Block(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Template_Artifact(t *testing.T) {
|
||||
t.Parallel()
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %v", err)
|
||||
|
@ -1204,6 +1213,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1282,6 +1292,7 @@ func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_VaultManager_Restart(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1357,6 +1368,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_VaultManager_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1418,6 +1430,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) {
|
|||
|
||||
// Test that the payload is written to disk
|
||||
func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1486,6 +1499,7 @@ func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
|
|||
// TestTaskRunner_CleanupEmpty ensures TaskRunner works when createdResources
|
||||
// is empty.
|
||||
func TestTaskRunner_CleanupEmpty(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1503,6 +1517,7 @@ func TestTaskRunner_CleanupEmpty(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_CleanupOK(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1528,6 +1543,7 @@ func TestTaskRunner_CleanupOK(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_CleanupFail(t *testing.T) {
|
||||
t.Parallel()
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "mock_driver"
|
||||
|
@ -1553,6 +1569,7 @@ func TestTaskRunner_CleanupFail(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Pre06ScriptCheck(t *testing.T) {
|
||||
t.Parallel()
|
||||
run := func(ver, driver, checkType string, exp bool) (string, func(t *testing.T)) {
|
||||
name := fmt.Sprintf("%s %s %s returns %t", ver, driver, checkType, exp)
|
||||
return name, func(t *testing.T) {
|
||||
|
@ -12,12 +12,13 @@ import (
 )
 
 func TestVaultClient_TokenRenewals(t *testing.T) {
-	v := testutil.NewTestVault(t).Start()
+	t.Parallel()
+	v := testutil.NewTestVault(t)
 	defer v.Stop()
 
 	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
 	v.Config.ConnectionRetryIntv = 100 * time.Millisecond
-	v.Config.TaskTokenTTL = "10s"
+	v.Config.TaskTokenTTL = "4s"
 	c, err := NewVaultClient(v.Config, logger, nil)
 	if err != nil {
 		t.Fatalf("failed to build vault client: %v", err)
@ -27,7 +28,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
 	defer c.Stop()
 
 	// Sleep a little while to ensure that the renewal loop is active
-	time.Sleep(3 * time.Second)
+	time.Sleep(time.Duration(testutil.TestMultiplier()) * time.Second)
 
 	tcr := &vaultapi.TokenCreateRequest{
 		Policies: []string{"foo", "bar"},
@ -66,7 +67,9 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
 		for {
 			select {
 			case err := <-errCh:
-				t.Fatalf("error while renewing the token: %v", err)
+				if err != nil {
+					t.Fatalf("error while renewing the token: %v", err)
+				}
 			}
 		}
 	}(errCh)
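The renewal-watcher change above means a nil value on the error channel no longer fails the test; only a real error does. A minimal sketch of that pattern, with hypothetical names standing in for the client's renewal API:

```go
package main

import "log"

// Sketch only: errCh stands in for the error channel the token-renewal API
// hands back, and fail stands in for t.Fatalf. Names are illustrative.
func watchRenewal(errCh <-chan error, fail func(string, ...interface{})) {
	for err := range errCh {
		// A nil value is a benign notification (e.g. the renewal loop
		// stopped cleanly); only a non-nil error should abort.
		if err != nil {
			fail("error while renewing the token: %v", err)
		}
	}
}

func main() {
	errCh := make(chan error, 1)
	errCh <- nil // benign: ignored by the watcher
	close(errCh)
	watchRenewal(errCh, log.Fatalf)
	log.Println("no renewal errors observed")
}
```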
@ -76,7 +79,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
|
|||
t.Fatalf("bad: heap length: expected: %d, actual: %d", num, c.heap.Length())
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(5*testutil.TestMultiplier()) * time.Second)
|
||||
time.Sleep(time.Duration(testutil.TestMultiplier()) * time.Second)
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
if err := c.StopRenewToken(tokens[i]); err != nil {
|
||||
|
@ -90,6 +93,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestVaultClient_Heap(t *testing.T) {
|
||||
t.Parallel()
|
||||
tr := true
|
||||
conf := config.DefaultConfig()
|
||||
conf.VaultConfig.Enabled = &tr
|
||||
|
@ -192,12 +192,14 @@ func convertServerConfig(agentConfig *Config, logOutput io.Writer) (*nomad.Confi
 		conf.DeploymentGCThreshold = dur
 	}
 
-	if heartbeatGrace := agentConfig.Server.HeartbeatGrace; heartbeatGrace != "" {
-		dur, err := time.ParseDuration(heartbeatGrace)
-		if err != nil {
-			return nil, err
-		}
-		conf.HeartbeatGrace = dur
+	if heartbeatGrace := agentConfig.Server.HeartbeatGrace; heartbeatGrace != 0 {
+		conf.HeartbeatGrace = heartbeatGrace
 	}
+	if min := agentConfig.Server.MinHeartbeatTTL; min != 0 {
+		conf.MinHeartbeatTTL = min
+	}
+	if maxHPS := agentConfig.Server.MaxHeartbeatsPerSecond; maxHPS != 0 {
+		conf.MaxHeartbeatsPerSecond = maxHPS
+	}
 
 	if *agentConfig.Consul.AutoAdvertise && agentConfig.Consul.ServerServiceName == "" {
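This conversion becomes a plain copy because the agent's `HeartbeatGrace` is now typed as `time.Duration` (see the `ServerConfig` and `parseServer` hunks later in this diff), so the string is parsed once at config-decode time. A rough before/after sketch with simplified stand-in types:

```go
package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the agent server config, not Nomad's real types.
type oldServerConfig struct{ HeartbeatGrace string }        // pre-change: raw string
type newServerConfig struct{ HeartbeatGrace time.Duration } // post-change: already parsed

func main() {
	// Before: every consumer parsed the string and handled errors itself.
	old := oldServerConfig{HeartbeatGrace: "30s"}
	if d, err := time.ParseDuration(old.HeartbeatGrace); err == nil {
		fmt.Println("old path:", d)
	}

	// After: the duration is typed at decode time; consumers just copy it.
	updated := newServerConfig{HeartbeatGrace: 30 * time.Second}
	fmt.Println("new path:", updated.HeartbeatGrace)
}
```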
@ -711,6 +713,8 @@ func (a *Agent) setupConsul(consulConfig *config.ConsulConfig) error {
|
|||
|
||||
// Create Consul Service client for service advertisement and checks.
|
||||
a.consulService = consul.NewServiceClient(client.Agent(), a.consulSupportsTLSSkipVerify, a.logger)
|
||||
|
||||
// Run the Consul service client's sync'ing main loop
|
||||
go a.consulService.Run()
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -13,7 +13,8 @@ import (
|
|||
)
|
||||
|
||||
func TestHTTP_AgentSelf(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/agent/self", nil)
|
||||
if err != nil {
|
||||
|
@ -39,7 +40,9 @@ func TestHTTP_AgentSelf(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentJoin(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
// TODO(alexdadgar)
|
||||
// t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Determine the join address
|
||||
member := s.Agent.Server().LocalMember()
|
||||
addr := fmt.Sprintf("%s:%d", member.Addr, member.Port)
|
||||
|
@ -70,7 +73,8 @@ func TestHTTP_AgentJoin(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentMembers(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/agent/members", nil)
|
||||
if err != nil {
|
||||
|
@ -93,7 +97,8 @@ func TestHTTP_AgentMembers(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentForceLeave(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("PUT", "/v1/agent/force-leave?node=foo", nil)
|
||||
if err != nil {
|
||||
|
@ -110,7 +115,8 @@ func TestHTTP_AgentForceLeave(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentSetServers(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Establish a baseline number of servers
|
||||
req, err := http.NewRequest("GET", "/v1/agent/servers", nil)
|
||||
if err != nil {
|
||||
|
@ -183,11 +189,13 @@ func TestHTTP_AgentSetServers(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentListKeys(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
key1 := "HS5lJ+XuTlYKWaeGYyG+/A=="
|
||||
|
||||
httpTest(t, func(c *Config) {
|
||||
c.Server.EncryptKey = key1
|
||||
}, func(s *TestServer) {
|
||||
}, func(s *TestAgent) {
|
||||
req, err := http.NewRequest("GET", "/v1/agent/keyring/list", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
@ -206,12 +214,15 @@ func TestHTTP_AgentListKeys(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentInstallKey(t *testing.T) {
|
||||
// TODO(alexdadgar)
|
||||
// t.Parallel()
|
||||
|
||||
key1 := "HS5lJ+XuTlYKWaeGYyG+/A=="
|
||||
key2 := "wH1Bn9hlJ0emgWB1JttVRA=="
|
||||
|
||||
httpTest(t, func(c *Config) {
|
||||
c.Server.EncryptKey = key1
|
||||
}, func(s *TestServer) {
|
||||
}, func(s *TestAgent) {
|
||||
b, err := json.Marshal(&structs.KeyringRequest{Key: key2})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -244,12 +255,15 @@ func TestHTTP_AgentInstallKey(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AgentRemoveKey(t *testing.T) {
|
||||
// TODO(alexdadgar)
|
||||
// t.Parallel()
|
||||
|
||||
key1 := "HS5lJ+XuTlYKWaeGYyG+/A=="
|
||||
key2 := "wH1Bn9hlJ0emgWB1JttVRA=="
|
||||
|
||||
httpTest(t, func(c *Config) {
|
||||
c.Server.EncryptKey = key1
|
||||
}, func(s *TestServer) {
|
||||
}, func(s *TestAgent) {
|
||||
b, err := json.Marshal(&structs.KeyringRequest{Key: key2})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/hashicorp/nomad/nomad"
|
||||
sconfig "github.com/hashicorp/nomad/nomad/structs/config"
|
||||
)
|
||||
|
||||
|
@ -38,61 +37,9 @@ func tmpDir(t testing.TB) string {
|
|||
return dir
|
||||
}
|
||||
|
||||
func makeAgent(t testing.TB, cb func(*Config)) (string, *Agent) {
|
||||
dir := tmpDir(t)
|
||||
conf := DevConfig()
|
||||
|
||||
// Customize the server configuration
|
||||
config := nomad.DefaultConfig()
|
||||
conf.NomadConfig = config
|
||||
|
||||
// Set the data_dir
|
||||
conf.DataDir = dir
|
||||
conf.NomadConfig.DataDir = dir
|
||||
|
||||
// Bind and set ports
|
||||
conf.BindAddr = "127.0.0.1"
|
||||
conf.Ports = &Ports{
|
||||
HTTP: getPort(),
|
||||
RPC: getPort(),
|
||||
Serf: getPort(),
|
||||
}
|
||||
conf.NodeName = fmt.Sprintf("Node %d", conf.Ports.RPC)
|
||||
conf.Consul = sconfig.DefaultConsulConfig()
|
||||
conf.Vault.Enabled = new(bool)
|
||||
|
||||
// Tighten the Serf timing
|
||||
config.SerfConfig.MemberlistConfig.SuspicionMult = 2
|
||||
config.SerfConfig.MemberlistConfig.RetransmitMult = 2
|
||||
config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
|
||||
config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
|
||||
config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
|
||||
|
||||
// Tighten the Raft timing
|
||||
config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
|
||||
config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
|
||||
config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
|
||||
config.RaftConfig.StartAsLeader = true
|
||||
config.RaftTimeout = 500 * time.Millisecond
|
||||
|
||||
if cb != nil {
|
||||
cb(conf)
|
||||
}
|
||||
|
||||
if err := conf.normalizeAddrs(); err != nil {
|
||||
t.Fatalf("error normalizing config: %v", err)
|
||||
}
|
||||
agent, err := NewAgent(conf, os.Stderr)
|
||||
if err != nil {
|
||||
os.RemoveAll(dir)
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
return dir, agent
|
||||
}
|
||||
|
||||
 func TestAgent_RPCPing(t *testing.T) {
-	dir, agent := makeAgent(t, nil)
-	defer os.RemoveAll(dir)
+	t.Parallel()
+	agent := NewTestAgent(t.Name(), nil)
 	defer agent.Shutdown()
 
 	var out struct{}
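The hand-rolled makeAgent/tmpDir helpers are replaced throughout by the TestAgent wrapper. A hypothetical test sketching that lifecycle, assuming TestAgent exposes the wrapped agent as `.Agent` (as other hunks in this diff do, e.g. `s.Agent.RPC`):

```go
package agent

import "testing"

// Hypothetical test: build a TestAgent by test name, optionally tweak the
// config in a callback, and always defer Shutdown.
func TestExample_AgentLifecycle(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t.Name(), func(c *Config) {
		c.Client.Enabled = false // illustrative tweak
	})
	defer a.Shutdown()

	var out struct{}
	if err := a.Agent.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
```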
@ -102,6 +49,7 @@ func TestAgent_RPCPing(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_ServerConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
conf := DefaultConfig()
|
||||
conf.DevMode = true // allow localhost for advertise addrs
|
||||
a := &Agent{config: conf}
|
||||
|
@ -233,24 +181,24 @@ func TestAgent_ServerConfig(t *testing.T) {
|
|||
t.Fatalf("expect 10s, got: %s", threshold)
|
||||
}
|
||||
|
||||
conf.Server.HeartbeatGrace = "42g"
|
||||
if err := conf.normalizeAddrs(); err != nil {
|
||||
t.Fatalf("error normalizing config: %v", err)
|
||||
}
|
||||
out, err = a.serverConfig()
|
||||
if err == nil || !strings.Contains(err.Error(), "unknown unit") {
|
||||
t.Fatalf("expected unknown unit error, got: %#v", err)
|
||||
}
|
||||
|
||||
conf.Server.HeartbeatGrace = "37s"
|
||||
if err := conf.normalizeAddrs(); err != nil {
|
||||
t.Fatalf("error normalizing config: %v", err)
|
||||
}
|
||||
conf.Server.HeartbeatGrace = 37 * time.Second
|
||||
out, err = a.serverConfig()
|
||||
if threshold := out.HeartbeatGrace; threshold != time.Second*37 {
|
||||
t.Fatalf("expect 37s, got: %s", threshold)
|
||||
}
|
||||
|
||||
conf.Server.MinHeartbeatTTL = 37 * time.Second
|
||||
out, err = a.serverConfig()
|
||||
if min := out.MinHeartbeatTTL; min != time.Second*37 {
|
||||
t.Fatalf("expect 37s, got: %s", min)
|
||||
}
|
||||
|
||||
conf.Server.MaxHeartbeatsPerSecond = 11.0
|
||||
out, err = a.serverConfig()
|
||||
if max := out.MaxHeartbeatsPerSecond; max != 11.0 {
|
||||
t.Fatalf("expect 11, got: %v", max)
|
||||
}
|
||||
|
||||
// Defaults to the global bind addr
|
||||
conf.Addresses.RPC = ""
|
||||
conf.Addresses.Serf = ""
|
||||
|
@ -320,6 +268,7 @@ func TestAgent_ServerConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_ClientConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
conf := DefaultConfig()
|
||||
conf.Client.Enabled = true
|
||||
|
||||
|
@ -365,6 +314,7 @@ func TestAgent_ClientConfig(t *testing.T) {
|
|||
// TestAgent_HTTPCheck asserts Agent.agentHTTPCheck properly alters the HTTP
|
||||
// API health check depending on configuration.
|
||||
func TestAgent_HTTPCheck(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := log.New(ioutil.Discard, "", 0)
|
||||
if testing.Verbose() {
|
||||
logger = log.New(os.Stdout, "[TestAgent_HTTPCheck] ", log.Lshortfile)
|
||||
|
@ -455,6 +405,7 @@ func TestAgent_HTTPCheck(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_ConsulSupportsTLSSkipVerify(t *testing.T) {
|
||||
t.Parallel()
|
||||
assertSupport := func(expected bool, blob string) {
|
||||
self := map[string]map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte("{"+blob+"}"), &self); err != nil {
|
||||
|
@ -561,6 +512,7 @@ func TestAgent_ConsulSupportsTLSSkipVerify(t *testing.T) {
|
|||
// TestAgent_HTTPCheckPath asserts clients and servers use different endpoints
|
||||
// for healthchecks.
|
||||
func TestAgent_HTTPCheckPath(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Agent.agentHTTPCheck only needs a config and logger
|
||||
a := &Agent{
|
||||
config: DevConfig(),
|
||||
|
|
|
@ -13,7 +13,8 @@ import (
|
|||
)
|
||||
|
||||
func TestHTTP_AllocsList(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
alloc1 := mock.Alloc()
|
||||
|
@ -59,7 +60,8 @@ func TestHTTP_AllocsList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocsPrefixList(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
|
||||
|
@ -118,7 +120,8 @@ func TestHTTP_AllocsPrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocQuery(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
alloc := mock.Alloc()
|
||||
|
@ -164,7 +167,8 @@ func TestHTTP_AllocQuery(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocQuery_Payload(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
alloc := mock.Alloc()
|
||||
|
@ -220,7 +224,8 @@ func TestHTTP_AllocQuery_Payload(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocStats(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/client/allocation/123/foo", nil)
|
||||
if err != nil {
|
||||
|
@ -237,7 +242,8 @@ func TestHTTP_AllocStats(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocSnapshot(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/client/allocation/123/snapshot", nil)
|
||||
if err != nil {
|
||||
|
@ -254,7 +260,8 @@ func TestHTTP_AllocSnapshot(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocGC(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/client/allocation/123/gc", nil)
|
||||
if err != nil {
|
||||
|
@ -271,7 +278,8 @@ func TestHTTP_AllocGC(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_AllocAllGC(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Make the HTTP request
|
||||
req, err := http.NewRequest("GET", "/v1/client/gc", nil)
|
||||
if err != nil {
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"github.com/hashicorp/nomad/nomad/structs/config"
|
||||
"github.com/hashicorp/scada-client/scada"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/posener/complete"
|
||||
)
|
||||
|
||||
// gracefulTimeout controls how long we wait before forcefully terminating
|
||||
|
@ -408,6 +409,20 @@ func (c *Command) checkpointResults(results *checkpoint.CheckResponse, err error
 	}
 }
 
+func (c *Command) AutocompleteFlags() complete.Flags {
+	configFilePredictor := complete.PredictOr(
+		complete.PredictFiles("*.json"),
+		complete.PredictFiles("*.hcl"))
+
+	return map[string]complete.Predictor{
+		"-config": configFilePredictor,
+	}
+}
+
+func (c *Command) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
 func (c *Command) Run(args []string) int {
 	c.Ui = &cli.PrefixedUi{
 		OutputPrefix: "==> ",
|
|||
info["log level"] = config.LogLevel
|
||||
info["server"] = strconv.FormatBool(config.Server.Enabled)
|
||||
info["region"] = fmt.Sprintf("%s (DC: %s)", config.Region, config.Datacenter)
|
||||
if config.Atlas != nil && config.Atlas.Infrastructure != "" {
|
||||
info["atlas"] = fmt.Sprintf("(Infrastructure: '%s' Join: %v)",
|
||||
config.Atlas.Infrastructure, config.Atlas.Join)
|
||||
} else {
|
||||
info["atlas"] = "<disabled>"
|
||||
}
|
||||
|
||||
// Sort the keys for output
|
||||
infoKeys := make([]string, 0, len(info))
|
||||
|
|
|
@ -12,10 +12,12 @@ import (
|
|||
)
|
||||
|
||||
func TestCommand_Implements(t *testing.T) {
|
||||
t.Parallel()
|
||||
var _ cli.Command = &Command{}
|
||||
}
|
||||
|
||||
func TestCommand_Args(t *testing.T) {
|
||||
t.Parallel()
|
||||
tmpDir, err := ioutil.TempDir("", "nomad")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
@ -75,9 +77,10 @@ func TestCommand_Args(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
+// TODO Why is this failing
 func TestRetryJoin(t *testing.T) {
-	dir, agent := makeAgent(t, nil)
-	defer os.RemoveAll(dir)
+	t.Parallel()
+	agent := NewTestAgent(t.Name(), nil)
 	defer agent.Shutdown()
 
 	doneCh := make(chan struct{})
@ -97,14 +100,11 @@ func TestRetryJoin(t *testing.T) {
 		},
 	}
 
-	serfAddr := fmt.Sprintf(
-		"%s:%d",
-		agent.config.BindAddr,
-		agent.config.Ports.Serf)
+	serfAddr := agent.Config.normalizedAddrs.Serf
 
 	args := []string{
 		"-dev",
-		"-node", fmt.Sprintf(`"Node %d"`, getPort()),
+		"-node", "foo",
 		"-retry-join", serfAddr,
 		"-retry-interval", "1s",
 	}
|
|
@ -73,6 +73,8 @@ server {
|
|||
eval_gc_threshold = "12h"
|
||||
deployment_gc_threshold = "12h"
|
||||
heartbeat_grace = "30s"
|
||||
min_heartbeat_ttl = "33s"
|
||||
max_heartbeats_per_second = 11.0
|
||||
retry_join = [ "1.1.1.1", "2.2.2.2" ]
|
||||
start_join = [ "1.1.1.1", "2.2.2.2" ]
|
||||
retry_max = 3
|
||||
|
|
|
@ -277,7 +277,16 @@ type ServerConfig struct {
|
|||
|
||||
// HeartbeatGrace is the grace period beyond the TTL to account for network,
|
||||
// processing delays and clock skew before marking a node as "down".
|
||||
HeartbeatGrace string `mapstructure:"heartbeat_grace"`
|
||||
HeartbeatGrace time.Duration `mapstructure:"heartbeat_grace"`
|
||||
|
||||
// MinHeartbeatTTL is the minimum time between heartbeats. This is used as
|
||||
// a floor to prevent excessive updates.
|
||||
MinHeartbeatTTL time.Duration `mapstructure:"min_heartbeat_ttl"`
|
||||
|
||||
// MaxHeartbeatsPerSecond is the maximum target rate of heartbeats
|
||||
// being processed per second. This allows the TTL to be increased
|
||||
// to meet the target rate.
|
||||
MaxHeartbeatsPerSecond float64 `mapstructure:"max_heartbeats_per_second"`
|
||||
|
||||
// StartJoin is a list of addresses to attempt to join when the
|
||||
// agent starts. If Serf is unable to communicate with any of these
|
||||
|
@ -765,7 +774,7 @@ func (c *Config) normalizeAddrs() error {
|
|||
|
||||
addr, err = normalizeAdvertise(c.AdvertiseAddrs.HTTP, c.Addresses.HTTP, c.Ports.HTTP, c.DevMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse HTTP advertise address: %v", err)
|
||||
return fmt.Errorf("Failed to parse HTTP advertise address (%v, %v, %v, %v): %v", c.AdvertiseAddrs.HTTP, c.Addresses.HTTP, c.Ports.HTTP, c.DevMode, err)
|
||||
}
|
||||
c.AdvertiseAddrs.HTTP = addr
|
||||
|
||||
|
@ -924,9 +933,15 @@ func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
|
|||
if b.DeploymentGCThreshold != "" {
|
||||
result.DeploymentGCThreshold = b.DeploymentGCThreshold
|
||||
}
|
||||
if b.HeartbeatGrace != "" {
|
||||
if b.HeartbeatGrace != 0 {
|
||||
result.HeartbeatGrace = b.HeartbeatGrace
|
||||
}
|
||||
if b.MinHeartbeatTTL != 0 {
|
||||
result.MinHeartbeatTTL = b.MinHeartbeatTTL
|
||||
}
|
||||
if b.MaxHeartbeatsPerSecond != 0.0 {
|
||||
result.MaxHeartbeatsPerSecond = b.MaxHeartbeatsPerSecond
|
||||
}
|
||||
if b.RetryMaxAttempts != 0 {
|
||||
result.RetryMaxAttempts = b.RetryMaxAttempts
|
||||
}
|
||||
|
|
|
@ -506,6 +506,8 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
|
|||
"job_gc_threshold",
|
||||
"deployment_gc_threshold",
|
||||
"heartbeat_grace",
|
||||
"min_heartbeat_ttl",
|
||||
"max_heartbeats_per_second",
|
||||
"start_join",
|
||||
"retry_join",
|
||||
"retry_max",
|
||||
|
@ -523,7 +525,15 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
 	}
 
 	var config ServerConfig
-	if err := mapstructure.WeakDecode(m, &config); err != nil {
+	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+		WeaklyTypedInput: true,
+		Result:           &config,
+	})
+	if err != nil {
+		return err
+	}
+	if err := dec.Decode(m); err != nil {
 		return err
 	}
 
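The decoder swap matters because `heartbeat_grace` is now a `time.Duration` field: with `StringToTimeDurationHookFunc`, a string like `"30s"` decodes straight into it. A self-contained sketch with a simplified stand-in struct:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// serverish is a stand-in with just the relevant field, not Nomad's ServerConfig.
type serverish struct {
	HeartbeatGrace time.Duration `mapstructure:"heartbeat_grace"`
}

func main() {
	// The HCL parser hands parseServer a generic map; the hook converts the
	// string value into a typed duration during decoding.
	m := map[string]interface{}{"heartbeat_grace": "30s"}

	var out serverish
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		WeaklyTypedInput: true,
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(m); err != nil {
		panic(err)
	}
	fmt.Println(out.HeartbeatGrace) // 30s
}
```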
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
)
|
||||
|
||||
func TestConfig_Parse(t *testing.T) {
|
||||
t.Parallel()
|
||||
cases := []struct {
|
||||
File string
|
||||
Result *Config
|
||||
|
@ -82,23 +83,25 @@ func TestConfig_Parse(t *testing.T) {
|
|||
NoHostUUID: helper.BoolToPtr(false),
|
||||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: true,
|
||||
BootstrapExpect: 5,
|
||||
DataDir: "/tmp/data",
|
||||
ProtocolVersion: 3,
|
||||
NumSchedulers: 2,
|
||||
EnabledSchedulers: []string{"test"},
|
||||
NodeGCThreshold: "12h",
|
||||
EvalGCThreshold: "12h",
|
||||
JobGCThreshold: "12h",
|
||||
DeploymentGCThreshold: "12h",
|
||||
HeartbeatGrace: "30s",
|
||||
RetryJoin: []string{"1.1.1.1", "2.2.2.2"},
|
||||
StartJoin: []string{"1.1.1.1", "2.2.2.2"},
|
||||
RetryInterval: "15s",
|
||||
RejoinAfterLeave: true,
|
||||
RetryMaxAttempts: 3,
|
||||
EncryptKey: "abc",
|
||||
Enabled: true,
|
||||
BootstrapExpect: 5,
|
||||
DataDir: "/tmp/data",
|
||||
ProtocolVersion: 3,
|
||||
NumSchedulers: 2,
|
||||
EnabledSchedulers: []string{"test"},
|
||||
NodeGCThreshold: "12h",
|
||||
EvalGCThreshold: "12h",
|
||||
JobGCThreshold: "12h",
|
||||
DeploymentGCThreshold: "12h",
|
||||
HeartbeatGrace: 30 * time.Second,
|
||||
MinHeartbeatTTL: 33 * time.Second,
|
||||
MaxHeartbeatsPerSecond: 11.0,
|
||||
RetryJoin: []string{"1.1.1.1", "2.2.2.2"},
|
||||
StartJoin: []string{"1.1.1.1", "2.2.2.2"},
|
||||
RetryInterval: "15s",
|
||||
RejoinAfterLeave: true,
|
||||
RetryMaxAttempts: 3,
|
||||
EncryptKey: "abc",
|
||||
},
|
||||
Telemetry: &Telemetry{
|
||||
StatsiteAddr: "127.0.0.1:1234",
|
||||
|
|
|
@ -90,13 +90,15 @@ func TestConfig_Merge(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: false,
|
||||
BootstrapExpect: 1,
|
||||
DataDir: "/tmp/data1",
|
||||
ProtocolVersion: 1,
|
||||
NumSchedulers: 1,
|
||||
NodeGCThreshold: "1h",
|
||||
HeartbeatGrace: "30s",
|
||||
Enabled: false,
|
||||
BootstrapExpect: 1,
|
||||
DataDir: "/tmp/data1",
|
||||
ProtocolVersion: 1,
|
||||
NumSchedulers: 1,
|
||||
NodeGCThreshold: "1h",
|
||||
HeartbeatGrace: 30 * time.Second,
|
||||
MinHeartbeatTTL: 30 * time.Second,
|
||||
MaxHeartbeatsPerSecond: 30.0,
|
||||
},
|
||||
Ports: &Ports{
|
||||
HTTP: 4646,
|
||||
|
@ -220,19 +222,21 @@ func TestConfig_Merge(t *testing.T) {
|
|||
GCInodeUsageThreshold: 86,
|
||||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: true,
|
||||
BootstrapExpect: 2,
|
||||
DataDir: "/tmp/data2",
|
||||
ProtocolVersion: 2,
|
||||
NumSchedulers: 2,
|
||||
EnabledSchedulers: []string{structs.JobTypeBatch},
|
||||
NodeGCThreshold: "12h",
|
||||
HeartbeatGrace: "2m",
|
||||
RejoinAfterLeave: true,
|
||||
StartJoin: []string{"1.1.1.1"},
|
||||
RetryJoin: []string{"1.1.1.1"},
|
||||
RetryInterval: "10s",
|
||||
retryInterval: time.Second * 10,
|
||||
Enabled: true,
|
||||
BootstrapExpect: 2,
|
||||
DataDir: "/tmp/data2",
|
||||
ProtocolVersion: 2,
|
||||
NumSchedulers: 2,
|
||||
EnabledSchedulers: []string{structs.JobTypeBatch},
|
||||
NodeGCThreshold: "12h",
|
||||
HeartbeatGrace: 2 * time.Minute,
|
||||
MinHeartbeatTTL: 2 * time.Minute,
|
||||
MaxHeartbeatsPerSecond: 200.0,
|
||||
RejoinAfterLeave: true,
|
||||
StartJoin: []string{"1.1.1.1"},
|
||||
RetryJoin: []string{"1.1.1.1"},
|
||||
RetryInterval: "10s",
|
||||
retryInterval: time.Second * 10,
|
||||
},
|
||||
Ports: &Ports{
|
||||
HTTP: 20000,
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
|
@ -115,6 +116,10 @@ type ServiceClient struct {
|
|||
agentServices map[string]struct{}
|
||||
agentChecks map[string]struct{}
|
||||
agentLock sync.Mutex
|
||||
|
||||
// seen is 1 if Consul has ever been seen; otherise 0. Accessed with
|
||||
// atomics.
|
||||
seen int64
|
||||
}
|
||||
|
||||
// NewServiceClient creates a new Consul ServiceClient from an existing Consul API
|
||||
|
@ -139,6 +144,21 @@ func NewServiceClient(consulClient AgentAPI, skipVerifySupport bool, logger *log
 	}
 }
 
+// seen is used by markSeen and hasSeen
+const seen = 1
+
+// markSeen marks Consul as having been seen (meaning at least one operation
+// has succeeded).
+func (c *ServiceClient) markSeen() {
+	atomic.StoreInt64(&c.seen, seen)
+}
+
+// hasSeen returns true if any Consul operation has ever succeeded. Useful to
+// squelch errors if Consul isn't running.
+func (c *ServiceClient) hasSeen() bool {
+	return atomic.LoadInt64(&c.seen) == seen
+}
+
 // Run the Consul main loop which retries operations against Consul. It should
 // be called exactly once.
 func (c *ServiceClient) Run() {
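The new seen flag is a set-once boolean stored in an int64 so it can be read and written without a mutex. A small self-contained sketch of the same pattern, outside the Consul client:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// onceSeen is a goroutine-safe, set-once boolean implemented with an int64.
type onceSeen struct {
	seen int64
}

func (o *onceSeen) markSeen()     { atomic.StoreInt64(&o.seen, 1) }
func (o *onceSeen) hasSeen() bool { return atomic.LoadInt64(&o.seen) == 1 }

func main() {
	var o onceSeen
	fmt.Println(o.hasSeen()) // false: nothing has succeeded yet
	o.markSeen()             // first successful operation flips the flag
	fmt.Println(o.hasSeen()) // true, and it stays true
}
```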
@ -336,6 +356,9 @@ func (c *ServiceClient) sync() error {
|
|||
}
|
||||
}
|
||||
|
||||
// A Consul operation has succeeded, mark Consul as having been seen
|
||||
c.markSeen()
|
||||
|
||||
c.logger.Printf("[DEBUG] consul.sync: registered %d services, %d checks; deregistered %d services, %d checks",
|
||||
sreg, creg, sdereg, cdereg)
|
||||
return nil
|
||||
|
@ -648,29 +671,14 @@ func (c *ServiceClient) Shutdown() error {
|
|||
// Serialize Shutdown calls with RegisterAgent to prevent leaking agent
|
||||
// entries.
|
||||
c.agentLock.Lock()
|
||||
defer c.agentLock.Unlock()
|
||||
select {
|
||||
case <-c.shutdownCh:
|
||||
return nil
|
||||
default:
|
||||
close(c.shutdownCh)
|
||||
}
|
||||
|
||||
// Deregister Nomad agent Consul entries before closing shutdown.
|
||||
ops := operations{}
|
||||
for id := range c.agentServices {
|
||||
ops.deregServices = append(ops.deregServices, id)
|
||||
}
|
||||
for id := range c.agentChecks {
|
||||
ops.deregChecks = append(ops.deregChecks, id)
|
||||
}
|
||||
c.commit(&ops)
|
||||
|
||||
// Then signal shutdown
|
||||
close(c.shutdownCh)
|
||||
|
||||
// Safe to unlock after shutdownCh closed as RegisterAgent will check
|
||||
// shutdownCh before committing.
|
||||
c.agentLock.Unlock()
|
||||
|
||||
// Give run loop time to sync, but don't block indefinitely
|
||||
deadline := time.After(c.shutdownWait)
|
||||
|
||||
|
@ -679,7 +687,24 @@ func (c *ServiceClient) Shutdown() error {
|
|||
case <-c.exitCh:
|
||||
case <-deadline:
|
||||
// Don't wait forever though
|
||||
return fmt.Errorf("timed out waiting for Consul operations to complete")
|
||||
}
|
||||
|
||||
// If Consul was never seen nothing could be written so exit early
|
||||
if !c.hasSeen() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Always attempt to deregister Nomad agent Consul entries, even if
|
||||
// deadline was reached
|
||||
for id := range c.agentServices {
|
||||
if err := c.client.ServiceDeregister(id); err != nil {
|
||||
c.logger.Printf("[ERR] consul.sync: error deregistering agent service (id: %q): %v", id, err)
|
||||
}
|
||||
}
|
||||
for id := range c.agentChecks {
|
||||
if err := c.client.CheckDeregister(id); err != nil {
|
||||
c.logger.Printf("[ERR] consul.sync: error deregistering agent service (id: %q): %v", id, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Give script checks time to exit (no need to lock as Run() has exited)
|
||||
|
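The reworked Shutdown above deregisters the agent's own Consul services and checks directly, and only if Consul was ever seen, logging failures instead of aborting. A rough sketch of that best-effort cleanup loop, using stand-in types rather than Nomad's ServiceClient:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// fakeConsul stands in for the Consul agent API used at shutdown.
type fakeConsul struct{}

func (fakeConsul) ServiceDeregister(id string) error { return nil }
func (fakeConsul) CheckDeregister(id string) error   { return fmt.Errorf("check %q not found", id) }

func main() {
	logger := log.New(os.Stderr, "", log.LstdFlags)
	client := fakeConsul{}
	agentServices := map[string]struct{}{"_nomad-server-nomad-http": {}}
	agentChecks := map[string]struct{}{"_nomad-check-abc": {}}

	// Best effort: attempt every deregistration and log errors rather than
	// returning early, so one failure does not leak the rest.
	for id := range agentServices {
		if err := client.ServiceDeregister(id); err != nil {
			logger.Printf("[ERR] consul.sync: error deregistering agent service (id: %q): %v", id, err)
		}
	}
	for id := range agentChecks {
		if err := client.CheckDeregister(id); err != nil {
			logger.Printf("[ERR] consul.sync: error deregistering agent check (id: %q): %v", id, err)
		}
	}
}
```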
@ -779,7 +804,9 @@ func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host
 }
 
 // isNomadService returns true if the ID matches the pattern of a Nomad managed
-// service.
+// service. Agent services return false as independent client and server agents
+// may be running on the same machine. #2827
 func isNomadService(id string) bool {
-	return strings.HasPrefix(id, nomadServicePrefix)
+	const prefix = nomadServicePrefix + "-executor"
+	return strings.HasPrefix(id, prefix)
 }
|
|
|
@ -4,9 +4,7 @@ import (
|
|||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -39,11 +37,6 @@ func TestConsul_Integration(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("-short set; skipping")
|
||||
}
|
||||
if runtime.GOOS != "windows" {
|
||||
if u, err := user.Current(); err == nil && u.Uid != "0" {
|
||||
t.Skip("Must be run as root")
|
||||
}
|
||||
}
|
||||
// Create an embedded Consul server
|
||||
testconsul, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
|
||||
// If -v wasn't specified squelch consul logging
|
||||
|
|
|
@ -36,6 +36,7 @@ func newBlockingScriptExec() *blockingScriptExec {
|
|||
func (b *blockingScriptExec) Exec(ctx context.Context, _ string, _ []string) ([]byte, int, error) {
|
||||
b.running <- struct{}{}
|
||||
cmd := exec.CommandContext(ctx, testtask.Path(), "sleep", "9000h")
|
||||
testtask.SetCmdEnv(cmd)
|
||||
err := cmd.Run()
|
||||
code := 0
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
|
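The blocking script-exec stub above now launches a real child process through `exec.CommandContext`, so cancelling the context kills the sleeping command. A standalone sketch of that behavior; the command here is a plain sleep, not Nomad's test task helper:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// The child is killed as soon as the context is cancelled, so even a
	// "sleep forever" command returns promptly.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	cmd := exec.CommandContext(ctx, "sleep", "9000h")
	err := cmd.Run()
	fmt.Println("command returned:", err) // killed when the context expired
}
```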
|
|
@ -1231,3 +1231,27 @@ func TestConsul_DriverNetwork_Change(t *testing.T) {
|
|||
|
||||
syncAndAssertPort(net.PortMap["x"])
|
||||
}
|
||||
|
||||
// TestIsNomadService asserts the isNomadService helper returns true for Nomad
|
||||
// task IDs and false for unknown IDs and Nomad agent IDs (see #2827).
|
||||
func TestIsNomadService(t *testing.T) {
|
||||
tests := []struct {
|
||||
id string
|
||||
result bool
|
||||
}{
|
||||
{"_nomad-client-nomad-client-http", false},
|
||||
{"_nomad-server-nomad-serf", false},
|
||||
{"_nomad-executor-abc", true},
|
||||
{"_nomad-executor", true},
|
||||
{"not-nomad", false},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.id, func(t *testing.T) {
|
||||
actual := isNomadService(test.id)
|
||||
if actual != test.result {
|
||||
t.Errorf("%q should be %t but found %t", test.id, test.result, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,8 +11,9 @@ import (
|
|||
)
|
||||
|
||||
func TestHTTP_DeploymentList(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
d1 := mock.Deployment()
|
||||
|
@ -41,8 +42,9 @@ func TestHTTP_DeploymentList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentPrefixList(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
d1 := mock.Deployment()
|
||||
|
@ -74,8 +76,9 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentAllocations(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
j := mock.Job()
|
||||
|
@ -112,8 +115,9 @@ func TestHTTP_DeploymentAllocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentQuery(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
d := mock.Deployment()
|
||||
|
@ -140,8 +144,9 @@ func TestHTTP_DeploymentQuery(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentPause(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
j := mock.Job()
|
||||
|
@ -177,8 +182,9 @@ func TestHTTP_DeploymentPause(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentPromote(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
j := mock.Job()
|
||||
|
@ -214,8 +220,9 @@ func TestHTTP_DeploymentPromote(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentAllocHealth(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
j := mock.Job()
|
||||
|
@ -255,8 +262,9 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_DeploymentFail(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
j := mock.Job()
|
||||
|
|
|
@ -10,7 +10,8 @@ import (
|
|||
)
|
||||
|
||||
func TestHTTP_EvalList(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
eval1 := mock.Eval()
|
||||
|
@ -54,7 +55,8 @@ func TestHTTP_EvalList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_EvalPrefixList(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
eval1 := mock.Eval()
|
||||
|
@ -105,7 +107,8 @@ func TestHTTP_EvalPrefixList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_EvalAllocations(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
alloc1 := mock.Alloc()
|
||||
|
@ -153,7 +156,8 @@ func TestHTTP_EvalAllocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_EvalQuery(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Directly manipulate the state
|
||||
state := s.Agent.server.State()
|
||||
eval := mock.Eval()
|
||||
|
|
|
@ -25,7 +25,8 @@ import (
|
|||
)
|
||||
|
||||
func TestAllocDirFS_List_MissingParams(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
req, err := http.NewRequest("GET", "/v1/client/fs/ls/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -40,7 +41,8 @@ func TestAllocDirFS_List_MissingParams(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocDirFS_Stat_MissingParams(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
req, err := http.NewRequest("GET", "/v1/client/fs/stat/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -67,7 +69,8 @@ func TestAllocDirFS_Stat_MissingParams(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocDirFS_ReadAt_MissingParams(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
req, err := http.NewRequest("GET", "/v1/client/fs/readat/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -500,7 +503,8 @@ func TestStreamFramer_Order_PlainText(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_Stream_MissingParams(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
req, err := http.NewRequest("GET", "/v1/client/fs/stream/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -560,7 +564,8 @@ func (n nopWriteCloser) Close() error {
|
|||
}
|
||||
|
||||
func TestHTTP_Stream_NoFile(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
@ -576,7 +581,8 @@ func TestHTTP_Stream_NoFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_Stream_Modify(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
@ -651,7 +657,8 @@ func TestHTTP_Stream_Modify(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_Stream_Truncate(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
@ -760,7 +767,8 @@ func TestHTTP_Stream_Truncate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_Stream_Delete(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
@ -842,7 +850,8 @@ func TestHTTP_Stream_Delete(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_Logs_NoFollow(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Get a temp alloc dir and create the log dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
@ -923,7 +932,8 @@ func TestHTTP_Logs_NoFollow(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_Logs_Follow(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Get a temp alloc dir and create the log dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
@ -1029,7 +1039,7 @@ func BenchmarkHTTP_Logs_Follow(t *testing.B) {
|
|||
runtime.MemProfileRate = 1
|
||||
|
||||
s := makeHTTPServer(t, nil)
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
testutil.WaitForLeader(t, s.Agent.RPC)
|
||||
|
||||
// Get a temp alloc dir and create the log dir
|
||||
|
|
|
@ -35,7 +35,7 @@ type HTTPServer struct {
 	mux      *http.ServeMux
 	listener net.Listener
 	logger   *log.Logger
-	addr     string
+	Addr     string
 }
 
 // NewHTTPServer starts new HTTP server over the agent
@ -76,7 +76,7 @@ func NewHTTPServer(agent *Agent, config *Config) (*HTTPServer, error) {
 		mux:      mux,
 		listener: ln,
 		logger:   agent.logger,
-		addr:     ln.Addr().String(),
+		Addr:     ln.Addr().String(),
 	}
 	srv.registerHandlers(config.EnableDebug)
 
@ -97,7 +97,7 @@ func newScadaHttp(agent *Agent, list net.Listener) *HTTPServer {
 		mux:      mux,
 		listener: list,
 		logger:   agent.logger,
-		addr:     scadaHTTPAddr,
+		Addr:     scadaHTTPAddr,
 	}
 	srv.registerHandlers(false) // Never allow debug for SCADA
 
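Exporting the field as `Addr` lets code outside the HTTP server's own methods (test agents, helpers) read the bound address and build request URLs from it. A trivial sketch with a stand-in struct rather than Nomad's type:

```go
package main

import "fmt"

// HTTPServerish is illustrative only, carrying just the exported address.
type HTTPServerish struct {
	Addr string
}

func main() {
	srv := HTTPServerish{Addr: "127.0.0.1:4646"}
	fmt.Printf("http://%s/v1/agent/self\n", srv.Addr)
}
```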
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -23,41 +22,17 @@ import (
|
|||
"github.com/hashicorp/nomad/testutil"
|
||||
)
|
||||
|
||||
type TestServer struct {
|
||||
T testing.TB
|
||||
Dir string
|
||||
Agent *Agent
|
||||
Server *HTTPServer
|
||||
}
|
||||
|
||||
func (s *TestServer) Cleanup() {
|
||||
s.Server.Shutdown()
|
||||
s.Agent.Shutdown()
|
||||
os.RemoveAll(s.Dir)
|
||||
}
|
||||
|
||||
// makeHTTPServer returns a test server whose logs will be written to
|
||||
// the passed writer. If the writer is nil, the logs are written to stderr.
|
||||
func makeHTTPServer(t testing.TB, cb func(c *Config)) *TestServer {
|
||||
dir, agent := makeAgent(t, cb)
|
||||
srv, err := NewHTTPServer(agent, agent.config)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
s := &TestServer{
|
||||
T: t,
|
||||
Dir: dir,
|
||||
Agent: agent,
|
||||
Server: srv,
|
||||
}
|
||||
return s
|
||||
func makeHTTPServer(t testing.TB, cb func(c *Config)) *TestAgent {
|
||||
return NewTestAgent(t.Name(), cb)
|
||||
}
|
||||
|
||||
func BenchmarkHTTPRequests(b *testing.B) {
|
||||
s := makeHTTPServer(b, func(c *Config) {
|
||||
c.Client.Enabled = false
|
||||
})
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
|
||||
job := mock.Job()
|
||||
var allocs []*structs.Allocation
|
||||
|
@ -85,6 +60,7 @@ func BenchmarkHTTPRequests(b *testing.B) {
|
|||
}
|
||||
|
||||
func TestSetIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := httptest.NewRecorder()
|
||||
setIndex(resp, 1000)
|
||||
header := resp.Header().Get("X-Nomad-Index")
|
||||
|
@ -98,6 +74,7 @@ func TestSetIndex(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetKnownLeader(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := httptest.NewRecorder()
|
||||
setKnownLeader(resp, true)
|
||||
header := resp.Header().Get("X-Nomad-KnownLeader")
|
||||
|
@ -113,6 +90,7 @@ func TestSetKnownLeader(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetLastContact(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := httptest.NewRecorder()
|
||||
setLastContact(resp, 123456*time.Microsecond)
|
||||
header := resp.Header().Get("X-Nomad-LastContact")
|
||||
|
@ -122,6 +100,7 @@ func TestSetLastContact(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetMeta(t *testing.T) {
|
||||
t.Parallel()
|
||||
meta := structs.QueryMeta{
|
||||
Index: 1000,
|
||||
KnownLeader: true,
|
||||
|
@ -144,9 +123,10 @@ func TestSetMeta(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetHeaders(t *testing.T) {
|
||||
t.Parallel()
|
||||
s := makeHTTPServer(t, nil)
|
||||
s.Agent.config.HTTPAPIResponseHeaders = map[string]string{"foo": "bar"}
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
|
@ -164,8 +144,9 @@ func TestSetHeaders(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestContentTypeIsJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
s := makeHTTPServer(t, nil)
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
|
||||
|
@ -184,20 +165,23 @@ func TestContentTypeIsJSON(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPrettyPrint(t *testing.T) {
|
||||
t.Parallel()
|
||||
testPrettyPrint("pretty=1", true, t)
|
||||
}
|
||||
|
||||
func TestPrettyPrintOff(t *testing.T) {
|
||||
t.Parallel()
|
||||
testPrettyPrint("pretty=0", false, t)
|
||||
}
|
||||
|
||||
func TestPrettyPrintBare(t *testing.T) {
|
||||
t.Parallel()
|
||||
testPrettyPrint("pretty", true, t)
|
||||
}
|
||||
|
||||
func testPrettyPrint(pretty string, prettyFmt bool, t *testing.T) {
|
||||
s := makeHTTPServer(t, nil)
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
|
||||
r := &structs.Job{Name: "foo"}
|
||||
|
||||
|
@ -228,6 +212,7 @@ func testPrettyPrint(pretty string, prettyFmt bool, t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseWait(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := httptest.NewRecorder()
|
||||
var b structs.QueryOptions
|
||||
|
||||
|
@ -250,6 +235,7 @@ func TestParseWait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseWait_InvalidTime(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := httptest.NewRecorder()
|
||||
var b structs.QueryOptions
|
||||
|
||||
|
@ -269,6 +255,7 @@ func TestParseWait_InvalidTime(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseWait_InvalidIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
resp := httptest.NewRecorder()
|
||||
var b structs.QueryOptions
|
||||
|
||||
|
@ -288,6 +275,7 @@ func TestParseWait_InvalidIndex(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseConsistency(t *testing.T) {
|
||||
t.Parallel()
|
||||
var b structs.QueryOptions
|
||||
|
||||
req, err := http.NewRequest("GET",
|
||||
|
@ -315,8 +303,9 @@ func TestParseConsistency(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseRegion(t *testing.T) {
|
||||
t.Parallel()
|
||||
s := makeHTTPServer(t, nil)
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
|
||||
req, err := http.NewRequest("GET",
|
||||
"/v1/jobs?region=foo", nil)
|
||||
|
@ -345,6 +334,7 @@ func TestParseRegion(t *testing.T) {
|
|||
// TestHTTP_VerifyHTTPSClient asserts that a client certificate signed by the
|
||||
// appropriate CA is required when VerifyHTTPSClient=true.
|
||||
func TestHTTP_VerifyHTTPSClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
const (
|
||||
cafile = "../../helper/tlsutil/testdata/ca.pem"
|
||||
foocert = "../../helper/tlsutil/testdata/nomad-foo.pem"
|
||||
|
@ -360,7 +350,7 @@ func TestHTTP_VerifyHTTPSClient(t *testing.T) {
|
|||
KeyFile: fookey,
|
||||
}
|
||||
})
|
||||
defer s.Cleanup()
|
||||
defer s.Shutdown()
|
||||
|
||||
reqURL := fmt.Sprintf("https://%s/v1/agent/self", s.Agent.config.AdvertiseAddrs.HTTP)
|
||||
|
||||
|
@ -492,9 +482,9 @@ func getIndex(t *testing.T, resp *httptest.ResponseRecorder) uint64 {
 	return uint64(val)
 }
 
-func httpTest(t testing.TB, cb func(c *Config), f func(srv *TestServer)) {
+func httpTest(t testing.TB, cb func(c *Config), f func(srv *TestAgent)) {
 	s := makeHTTPServer(t, cb)
-	defer s.Cleanup()
+	defer s.Shutdown()
 	testutil.WaitForLeader(t, s.Agent.RPC)
 	f(s)
 }
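With the new signature, test bodies receive a `*TestAgent` and can opt into `t.Parallel()`. A hypothetical endpoint test using the helper; it mirrors the TestHTTP_AgentSelf pattern earlier in this diff and assumes TestAgent exposes the HTTP server as `s.Server`, as the unchanged test bodies do:

```go
package agent

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// Hypothetical test sketching the new httpTest shape.
func TestHTTP_ExampleAgentSelf(t *testing.T) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		req, err := http.NewRequest("GET", "/v1/agent/self", nil)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		respW := httptest.NewRecorder()

		if _, err := s.Server.AgentSelfRequest(respW, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}
```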
|
|
@ -18,7 +18,8 @@ import (
|
|||
)
|
||||
|
||||
func TestHTTP_JobsList(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
for i := 0; i < 3; i++ {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
|
@ -70,7 +71,8 @@ func TestHTTP_PrefixJobsList(t *testing.T) {
|
|||
"aabbbbbb-e8f7-fd38-c855-ab94ceb89706",
|
||||
"aabbcccc-e8f7-fd38-c855-ab94ceb89706",
|
||||
}
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
for i := 0; i < 3; i++ {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
|
@ -119,7 +121,8 @@ func TestHTTP_PrefixJobsList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobsRegister(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := api.MockJob()
|
||||
args := api.JobRegisterRequest{
|
||||
|
@ -169,7 +172,8 @@ func TestHTTP_JobsRegister(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := api.MockJob()
|
||||
|
||||
|
@ -226,7 +230,8 @@ func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobQuery(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -271,7 +276,8 @@ func TestHTTP_JobQuery(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobQuery_Payload(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
|
||||
|
@ -324,7 +330,8 @@ func TestHTTP_JobQuery_Payload(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobUpdate(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := api.MockJob()
|
||||
args := api.JobRegisterRequest{
|
||||
|
@ -374,7 +381,8 @@ func TestHTTP_JobUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobDelete(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -466,7 +474,8 @@ func TestHTTP_JobDelete(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobForceEvaluate(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -505,7 +514,8 @@ func TestHTTP_JobForceEvaluate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobEvaluations(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -552,7 +562,8 @@ func TestHTTP_JobEvaluations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobAllocations(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
alloc1 := mock.Alloc()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -605,7 +616,8 @@ func TestHTTP_JobAllocations(t *testing.T) {
|
|||
|
||||
func TestHTTP_JobDeployments(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
j := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -643,7 +655,8 @@ func TestHTTP_JobDeployments(t *testing.T) {
|
|||
|
||||
func TestHTTP_JobDeployment(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
j := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -680,7 +693,8 @@ func TestHTTP_JobDeployment(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobVersions(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := mock.Job()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -751,7 +765,8 @@ func TestHTTP_JobVersions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_PeriodicForce(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create and register a periodic job.
|
||||
job := mock.PeriodicJob()
|
||||
args := structs.JobRegisterRequest{
|
||||
|
@ -790,7 +805,8 @@ func TestHTTP_PeriodicForce(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobPlan(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job
|
||||
job := api.MockJob()
|
||||
args := api.JobPlanRequest{
|
||||
|
@ -826,7 +842,8 @@ func TestHTTP_JobPlan(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobDispatch(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the parameterized job
|
||||
job := mock.Job()
|
||||
job.Type = "batch"
|
||||
|
@ -874,7 +891,8 @@ func TestHTTP_JobDispatch(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobRevert(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job and register it twice
|
||||
job := mock.Job()
|
||||
regReq := structs.JobRegisterRequest{
|
||||
|
@ -926,7 +944,8 @@ func TestHTTP_JobRevert(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTP_JobStable(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
t.Parallel()
|
||||
httpTest(t, nil, func(s *TestAgent) {
|
||||
// Create the job and register it twice
|
||||
job := mock.Job()
|
||||
regReq := structs.JobRegisterRequest{
|
||||
|
|
|
@ -9,11 +9,11 @@ import (
|
|||
)
|
||||
|
||||
func TestAgent_LoadKeyrings(t *testing.T) {
|
||||
t.Parallel()
|
||||
key := "tbLJg26ZJyJ9pK3qhc9jig=="
|
||||
|
||||
// Should be no configured keyring file by default
|
||||
dir1, agent1 := makeAgent(t, nil)
|
||||
defer os.RemoveAll(dir1)
|
||||
agent1 := NewTestAgent(t.Name(), nil)
|
||||
defer agent1.Shutdown()
|
||||
|
||||
c := agent1.server.GetConfig()
|
||||
|
@ -24,14 +24,12 @@ func TestAgent_LoadKeyrings(t *testing.T) {
|
|||
t.Fatalf("keyring should not be loaded")
|
||||
}
|
||||
|
||||
// Server should auto-load LAN and WAN keyring files
|
||||
dir2, agent2 := makeAgent(t, func(c *Config) {
|
||||
file := filepath.Join(c.DataDir, serfKeyring)
|
||||
if err := initKeyring(file, key); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
})
|
||||
defer os.RemoveAll(dir2)
|
||||
// Server should auto-load WAN keyring files
|
||||
agent2 := &TestAgent{
|
||||
Name: t.Name() + "2",
|
||||
Key: key,
|
||||
}
|
||||
agent2.Start()
|
||||
defer agent2.Shutdown()
|
||||
|
||||
c = agent2.server.GetConfig()
|
||||
|
@ -44,6 +42,7 @@ func TestAgent_LoadKeyrings(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAgent_InitKeyring(t *testing.T) {
|
||||
t.Parallel()
|
||||
key1 := "tbLJg26ZJyJ9pK3qhc9jig=="
|
||||
key2 := "4leC33rgtXKIVUr9Nr0snQ=="
|
||||
expected := fmt.Sprintf(`["%s"]`, key1)
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
)
|
||||
|
||||
func TestLevelFilter(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
filt := LevelFilter()
|
||||
filt.Levels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"}
|
||||
|
|
|
@ -13,6 +13,7 @@ func (m *MockLogHandler) HandleLog(l string) {
|
|||
}
|
||||
|
||||
func TestLogWriter(t *testing.T) {
|
||||
t.Parallel()
|
||||
h := &MockLogHandler{}
|
||||
w := NewLogWriter(4)
|
||||
|
||||
|
|
|
@@ -10,7 +10,8 @@ import (
)

func TestHTTP_NodesList(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		for i := 0; i < 3; i++ {
			// Create the node
			node := mock.Node()
@@ -57,7 +58,8 @@ func TestHTTP_NodesList(t *testing.T) {
}

func TestHTTP_NodesPrefixList(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		ids := []string{
			"12345678-abcd-efab-cdef-123456789abc",
			"12345678-aaaa-efab-cdef-123456789abc",
@@ -113,7 +115,8 @@ func TestHTTP_NodesPrefixList(t *testing.T) {
}

func TestHTTP_NodeForceEval(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Create the node
		node := mock.Node()
		args := structs.NodeRegisterRequest{
@@ -164,7 +167,8 @@ func TestHTTP_NodeForceEval(t *testing.T) {
}

func TestHTTP_NodeAllocations(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Create the job
		node := mock.Node()
		args := structs.NodeRegisterRequest{
@@ -221,7 +225,8 @@ func TestHTTP_NodeAllocations(t *testing.T) {
}

func TestHTTP_NodeDrain(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Create the node
		node := mock.Node()
		args := structs.NodeRegisterRequest{
@@ -272,7 +277,8 @@ func TestHTTP_NodeDrain(t *testing.T) {
}

func TestHTTP_NodeQuery(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Create the job
		node := mock.Node()
		args := structs.NodeRegisterRequest{
@@ -11,7 +11,8 @@ import (
)

func TestHTTP_OperatorRaftConfiguration(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		body := bytes.NewBuffer(nil)
		req, err := http.NewRequest("GET", "/v1/operator/raft/configuration", body)
		if err != nil {
@@ -39,7 +40,8 @@ func TestHTTP_OperatorRaftConfiguration(t *testing.T) {
}

func TestHTTP_OperatorRaftPeer(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		body := bytes.NewBuffer(nil)
		req, err := http.NewRequest("DELETE", "/v1/operator/raft/peer?address=nope", body)
		if err != nil {
@@ -7,7 +7,8 @@ import (
)

func TestHTTP_RegionList(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Make the HTTP request
		req, err := http.NewRequest("GET", "/v1/regions", nil)
		if err != nil {
@@ -7,7 +7,8 @@ import (
)

func TestClientStatsRequest(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		req, err := http.NewRequest("GET", "/v1/client/stats/?since=foo", nil)
		if err != nil {
			t.Fatalf("err: %v", err)
@@ -7,7 +7,8 @@ import (
)

func TestHTTP_StatusLeader(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Make the HTTP request
		req, err := http.NewRequest("GET", "/v1/status/leader", nil)
		if err != nil {
@@ -29,7 +30,8 @@ func TestHTTP_StatusLeader(t *testing.T) {
}

func TestHTTP_StatusPeers(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Make the HTTP request
		req, err := http.NewRequest("GET", "/v1/status/peers", nil)
		if err != nil {
@@ -2,6 +2,7 @@ package agent

import (
	"bytes"

	"github.com/hashicorp/go-syslog"
	"github.com/hashicorp/logutils"
)
@@ -10,6 +10,7 @@ import (
)

func TestSyslogFilter(t *testing.T) {
	t.Parallel()
	if runtime.GOOS == "windows" {
		t.Skip("Syslog not supported on Windows")
	}
@@ -7,7 +7,8 @@ import (
)

func TestHTTP_SystemGarbageCollect(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Make the HTTP request
		req, err := http.NewRequest("PUT", "/v1/system/gc", nil)
		if err != nil {
@@ -23,7 +24,8 @@ func TestHTTP_SystemGarbageCollect(t *testing.T) {
}

func TestHTTP_ReconcileJobSummaries(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Make the HTTP request
		req, err := http.NewRequest("PUT", "/v1/system/reconcile/summaries", nil)
		if err != nil {
293
command/agent/testagent.go
Normal file
@@ -0,0 +1,293 @@
package agent

import (
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/client/fingerprint"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/structs"
	sconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
)

func init() {
	rand.Seed(time.Now().UnixNano()) // seed random number generator
}

// TempDir defines the base dir for temporary directories.
var TempDir = os.TempDir()

// TestAgent encapsulates an Agent with a default configuration and
// startup procedure suitable for testing. It panics if there are errors
// during creation or startup instead of returning errors. It manages a
// temporary data directory which is removed after shutdown.
type TestAgent struct {
	// Name is an optional name of the agent.
	Name string

	// ConfigCallback is an optional callback that allows modification of the
	// configuration before the agent is started.
	ConfigCallback func(*Config)

	// Config is the agent configuration. If Config is nil then
	// TestConfig() is used. If Config.DataDir is set then it is
	// the caller's responsibility to clean up the data directory.
	// Otherwise, a temporary data directory is created and removed
	// when Shutdown() is called.
	Config *Config

	// LogOutput is the sink for the logs. If nil, logs are written
	// to os.Stderr.
	LogOutput io.Writer

	// DataDir is the data directory which is used when Config.DataDir
	// is not set. It is created automatically and removed when
	// Shutdown() is called.
	DataDir string

	// Key is the optional encryption key for the keyring.
	Key string

	// Server is a reference to the started HTTP endpoint.
	// It is valid after Start().
	Server *HTTPServer

	// Agent is the embedded Nomad agent.
	// It is valid after Start().
	*Agent
}

// NewTestAgent returns a started agent with the given name and
// configuration. It panics if the agent could not be started. The
// caller should call Shutdown() to stop the agent and remove temporary
// directories.
func NewTestAgent(name string, configCallback func(*Config)) *TestAgent {
	a := &TestAgent{Name: name, ConfigCallback: configCallback}
	a.Start()
	return a
}
// Start starts a test agent. It panics if the agent could not be started.
func (a *TestAgent) Start() *TestAgent {
	if a.Agent != nil {
		panic("TestAgent already started")
	}
	if a.Config == nil {
		a.Config = a.config()
	}
	if a.Config.DataDir == "" {
		name := "agent"
		if a.Name != "" {
			name = a.Name + "-agent"
		}
		name = strings.Replace(name, "/", "_", -1)
		d, err := ioutil.TempDir(TempDir, name)
		if err != nil {
			panic(fmt.Sprintf("Error creating data dir %s: %s", filepath.Join(TempDir, name), err))
		}
		a.DataDir = d
		a.Config.DataDir = d
		a.Config.NomadConfig.DataDir = d
	}

	for i := 10; i >= 0; i-- {
		pickRandomPorts(a.Config)
		if a.Config.NodeName == "" {
			a.Config.NodeName = fmt.Sprintf("Node %d", a.Config.Ports.RPC)
		}

		// write the keyring
		if a.Key != "" {
			writeKey := func(key, filename string) {
				path := filepath.Join(a.Config.DataDir, filename)
				if err := initKeyring(path, key); err != nil {
					panic(fmt.Sprintf("Error creating keyring %s: %s", path, err))
				}
			}
			writeKey(a.Key, serfKeyring)
		}

		// we need the err var in the next exit condition
		if agent, err := a.start(); err == nil {
			a.Agent = agent
			break
		} else if i == 0 {
			fmt.Println(a.Name, "Error starting agent:", err)
			runtime.Goexit()
		} else {
			if agent != nil {
				agent.Shutdown()
			}
			wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
			fmt.Println(a.Name, "retrying in", wait)
			time.Sleep(wait)
		}

		// Clean out the data dir if we are responsible for it before we
		// try again, since the old ports may have gotten written to
		// the data dir, such as in the Raft configuration.
		if a.DataDir != "" {
			if err := os.RemoveAll(a.DataDir); err != nil {
				fmt.Println(a.Name, "Error resetting data dir:", err)
				runtime.Goexit()
			}
		}
	}

	if a.Config.NomadConfig.Bootstrap && a.Config.Server.Enabled {
		testutil.WaitForResult(func() (bool, error) {
			args := &structs.GenericRequest{}
			var leader string
			err := a.RPC("Status.Leader", args, &leader)
			return leader != "", err
		}, func(err error) {
			panic(fmt.Sprintf("failed to find leader: %v", err))
		})
	} else {
		testutil.WaitForResult(func() (bool, error) {
			req, _ := http.NewRequest("GET", "/v1/agent/self", nil)
			resp := httptest.NewRecorder()
			_, err := a.Server.AgentSelfRequest(resp, req)
			return err == nil && resp.Code == 200, err
		}, func(err error) {
			panic(fmt.Sprintf("failed OK response: %v", err))
		})
	}
	return a
}
func (a *TestAgent) start() (*Agent, error) {
	if a.LogOutput == nil {
		a.LogOutput = os.Stderr
	}

	agent, err := NewAgent(a.Config, a.LogOutput)
	if err != nil {
		return nil, err
	}

	// Setup the HTTP server
	http, err := NewHTTPServer(agent, a.Config)
	if err != nil {
		return agent, err
	}

	a.Server = http
	return agent, nil
}

// Shutdown stops the agent and removes the data directory if it is
// managed by the test agent.
func (a *TestAgent) Shutdown() error {
	defer func() {
		if a.DataDir != "" {
			os.RemoveAll(a.DataDir)
		}
	}()

	// shutdown agent before endpoints
	a.Server.Shutdown()
	return a.Agent.Shutdown()
}

func (a *TestAgent) HTTPAddr() string {
	if a.Server == nil {
		return ""
	}
	return "http://" + a.Server.Addr
}

func (a *TestAgent) Client() *api.Client {
	conf := api.DefaultConfig()
	conf.Address = a.HTTPAddr()
	c, err := api.NewClient(conf)
	if err != nil {
		panic(fmt.Sprintf("Error creating Nomad API client: %s", err))
	}
	return c
}

// FivePorts returns the first port number of a block of
// five random ports.
func FivePorts() int {
	return 1030 + int(rand.Int31n(6440))*5
}

// pickRandomPorts selects random ports from fixed size random blocks of
// ports. This does not eliminate the chance for port conflict but
// reduces it significantly with little overhead. Furthermore, asking
// the kernel for a random port by binding to port 0 prolongs the test
// execution (in our case +20sec) while also not fully eliminating the
// chance of port conflicts for concurrently executed test binaries.
// Instead of relying on one set of ports to be sufficient we retry
// starting the agent with different ports on port conflict.
func pickRandomPorts(c *Config) {
	port := FivePorts()
	c.Ports.HTTP = port + 1
	c.Ports.RPC = port + 2
	c.Ports.Serf = port + 3

	if err := c.normalizeAddrs(); err != nil {
		panic(fmt.Sprintf("error normalizing config: %v", err))
	}
}
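// For example: if rand.Int31n(6440) returns 100, FivePorts yields
// 1030 + 100*5 = 1530 and pickRandomPorts assigns HTTP=1531, RPC=1532 and
// Serf=1533, leaving 1530 and 1534 as padding inside the block. Block starts
// range from 1030 to 33225, so two concurrently started agents only conflict
// when they draw the same block, and Start() retries with new ports then.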
// TestConfig returns a unique default configuration for testing an
// agent.
func (a *TestAgent) config() *Config {
	conf := DevConfig()

	// Customize the server configuration
	config := nomad.DefaultConfig()
	conf.NomadConfig = config

	// Set the name
	conf.NodeName = a.Name

	// Bind and set ports
	conf.BindAddr = "127.0.0.1"

	conf.Consul = sconfig.DefaultConsulConfig()
	conf.Vault.Enabled = new(bool)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
	config.RaftConfig.StartAsLeader = true
	config.RaftTimeout = 500 * time.Millisecond

	// Bootstrap ourselves
	config.Bootstrap = true
	config.BootstrapExpect = 1

	// Tighten the fingerprinter timeouts
	if conf.Client.Options == nil {
		conf.Client.Options = make(map[string]string)
	}
	conf.Client.Options[fingerprint.TightenNetworkTimeoutsConfig] = "true"

	if a.ConfigCallback != nil {
		a.ConfigCallback(conf)
	}

	return conf
}
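// Illustrative usage sketch, not part of this diff: how the converted tests
// above drive the new TestAgent, either directly or via the httpTest helper.
// TestExampleEndpoint is a hypothetical name; NewTestAgent, Shutdown, httpTest
// and AgentSelfRequest all appear elsewhere in this change, and the usual
// net/http, net/http/httptest and testing imports of this package are assumed.
func TestExampleEndpoint(t *testing.T) {
	t.Parallel()

	// Direct use: start an agent by name and let Shutdown stop it and remove
	// its temporary data directory.
	a := NewTestAgent(t.Name(), nil)
	defer a.Shutdown()

	// Endpoint tests instead receive a started *TestAgent from httpTest and
	// call handlers on its embedded HTTP server.
	httpTest(t, nil, func(s *TestAgent) {
		req, err := http.NewRequest("GET", "/v1/agent/self", nil)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		respW := httptest.NewRecorder()
		if _, err := s.Server.AgentSelfRequest(respW, req); err != nil {
			t.Fatalf("err: %v", err)
		}
		if respW.Code != 200 {
			t.Fatalf("expected 200, got %d", respW.Code)
		}
	})
}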
@@ -8,12 +8,14 @@ import (
)

func TestAgentInfoCommand_Implements(t *testing.T) {
	t.Parallel()
	var _ cli.Command = &AgentInfoCommand{}
}

func TestAgentInfoCommand_Run(t *testing.T) {
	srv, _, url := testServer(t, nil)
	defer srv.Stop()
	t.Parallel()
	srv, _, url := testServer(t, false, nil)
	defer srv.Shutdown()

	ui := new(cli.MockUi)
	cmd := &AgentInfoCommand{Meta: Meta{Ui: ui}}
@@ -25,6 +27,7 @@ func TestAgentInfoCommand_Run(t *testing.T) {
}

func TestAgentInfoCommand_Fails(t *testing.T) {
	t.Parallel()
	ui := new(cli.MockUi)
	cmd := &AgentInfoCommand{Meta: Meta{Ui: ui}}
@@ -199,7 +199,7 @@ func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength
		fmt.Sprintf("Name|%s", alloc.Name),
		fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
		fmt.Sprintf("Job ID|%s", alloc.JobID),
		fmt.Sprintf("Job Version|%d", *alloc.Job.Version),
		fmt.Sprintf("Job Version|%d", getVersion(alloc.Job)),
		fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
		fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
		fmt.Sprintf("Desired Status|%s", alloc.DesiredStatus),
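// The getVersion helper used above is referenced but not shown in this hunk.
// A nil-safe accessor along the lines of this sketch (illustrative, not
// necessarily the exact helper in the change) is what avoids the panic that
// dereferencing *alloc.Job.Version causes when an older server leaves the
// field unset:
func getVersion(job *api.Job) uint64 {
	// Assumed shape: api.Job.Version is a *uint64 in the 0.6 API; default to 0
	// when the job or its version is missing.
	if job == nil || job.Version == nil {
		return 0
	}
	return *job.Version
}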
@@ -11,12 +11,14 @@ import (
)

func TestAllocStatusCommand_Implements(t *testing.T) {
	t.Parallel()
	var _ cli.Command = &AllocStatusCommand{}
}

func TestAllocStatusCommand_Fails(t *testing.T) {
	srv, _, url := testServer(t, nil)
	defer srv.Stop()
	t.Parallel()
	srv, _, url := testServer(t, false, nil)
	defer srv.Shutdown()

	ui := new(cli.MockUi)
	cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}}
@@ -76,10 +78,9 @@ func TestAllocStatusCommand_Fails(t *testing.T) {
}

func TestAllocStatusCommand_Run(t *testing.T) {
	srv, client, url := testServer(t, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer srv.Stop()
	t.Parallel()
	srv, client, url := testServer(t, true, nil)
	defer srv.Shutdown()

	// Wait for a node to be ready
	testutil.WaitForResult(func() (bool, error) {
Some files were not shown because too many files have changed in this diff.