Merge branch 'master' into f-bolt-db

Alex Dadgar 2017-05-09 11:06:05 -07:00
commit ba70cc4f01
81 changed files with 4506 additions and 760 deletions

View File

@@ -7,7 +7,7 @@ services:
language: go
go:
- - 1.8
+ - 1.8.x
branches:
only:

View File

@@ -4,10 +4,14 @@ IMPROVEMENTS:
* core: Back-pressure when evaluations are nacked and ensure scheduling
  progress on evaluation failures [GH-2555]
* core: Track multiple job versions and add a stopped state for jobs [GH-2566]
* api: Add `verify_https_client` to require certificates from HTTP clients
[GH-2587]
* api/job: Ability to revert job to older versions [GH-2575]
* client: Fingerprint all routable addresses on an interface including IPv6
  addresses [GH-2536]
* client: Hash host ID so it's stable and well distributed [GH-2541]
* client: Environment variables for client DC and Region [GH-2507]
* config: Support Unix socket addresses for Consul [GH-2622]
* driver/docker: Allow specifying extra hosts [GH-2547]
* driver/docker: Allow setting container IP with user defined networks
  [GH-2535]
@@ -22,6 +26,7 @@ BUG FIXES:
* client/artifact: Handle tars where file in directory is listed before
  directory [GH-2524]
* driver/exec: Properly set file/dir ownership in chroots [GH-2552]
* driver/docker: Fix panic in Docker driver on Windows [GH-2614]
* server: Reject non-TLS clients when TLS enabled [GH-2525]
* server: Fix a panic in plan evaluation with partial failures and all_at_once
  set [GH-2544]

Vagrantfile (vendored, 3 lines changed)
View File

@@ -20,7 +20,8 @@ sudo DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential curl git-
liblxc1 lxc-dev lxc-templates \
gcc-5-aarch64-linux-gnu binutils-aarch64-linux-gnu \
libc6-dev-i386 linux-libc-dev:i386 \
- gcc-5-arm-linux-gnueabihf gcc-5-multilib-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
+ gcc-5-arm-linux-gnueabihf gcc-5-multilib-arm-linux-gnueabihf binutils-arm-linux-gnueabihf \
gcc-mingw-w64 binutils-mingw-w64
# Setup go, for development of Nomad
SRCROOT="/opt/go"

View File

@@ -276,8 +276,8 @@ func (r *AllocRunner) RestoreState() error {
continue
}
- if err := tr.RestoreState(); err != nil {
- r.logger.Printf("[ERR] client: failed to restore state for alloc %s task '%s': %v", r.alloc.ID, name, err)
+ if restartReason, err := tr.RestoreState(); err != nil {
+ r.logger.Printf("[ERR] client: failed to restore state for alloc %s task %q: %v", r.alloc.ID, name, err)
mErr.Errors = append(mErr.Errors, err)
} else if !r.alloc.TerminalStatus() {
// Only start if the alloc isn't in a terminal status.
@@ -288,6 +288,11 @@ func (r *AllocRunner) RestoreState() error {
r.logger.Printf("[WARN] client: initial save state for alloc %s task %s failed: %v", r.alloc.ID, name, err)
}
}
// Restart task runner if RestoreState gave a reason
if restartReason != "" {
tr.Restart("upgrade", restartReason)
}
}
}

View File

@@ -34,6 +34,7 @@ func (m *MockAllocStateUpdater) Update(alloc *structs.Allocation) {
func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
logger := testLogger()
conf := config.DefaultConfig()
conf.Node = mock.Node()
conf.StateDir = os.TempDir()
conf.AllocDir = os.TempDir()
tmp, _ := ioutil.TempFile("", "state-db")
@@ -502,6 +503,84 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
})
}
// TestAllocRunner_SaveRestoreState_Upgrade asserts that pre-0.6 exec tasks are
// restarted on upgrade.
func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"exit_code": "0",
"run_for": "10s",
}
upd, ar := testAllocRunnerFromAlloc(alloc, false)
// Hack in old version to cause an upgrade on RestoreState
origConfig := ar.config.Copy()
ar.config.Version = "0.5.6"
go ar.Run()
// Snapshot state
testutil.WaitForResult(func() (bool, error) {
return len(ar.tasks) == 1, nil
}, func(err error) {
t.Fatalf("task never started: %v", err)
})
err := ar.SaveState()
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a new alloc runner
l2 := prefixedTestLogger("----- ar2: ")
ar2 := NewAllocRunner(l2, origConfig, upd.Update,
&structs.Allocation{ID: ar.alloc.ID}, ar.vaultClient,
ar.consulClient)
err = ar2.RestoreState()
if err != nil {
t.Fatalf("err: %v", err)
}
go ar2.Run()
testutil.WaitForResult(func() (bool, error) {
if len(ar2.tasks) != 1 {
return false, fmt.Errorf("Incorrect number of tasks")
}
if upd.Count < 3 {
return false, nil
}
for _, ev := range ar2.alloc.TaskStates["web"].Events {
if strings.HasSuffix(ev.RestartReason, pre06ScriptCheckReason) {
return true, nil
}
}
return false, fmt.Errorf("no restart with proper reason found")
}, func(err error) {
t.Fatalf("err: %v\nAllocs: %#v\nWeb State: %#v", err, upd.Allocs, ar2.alloc.TaskStates["web"])
})
// Destroy and wait
ar2.Destroy()
start := time.Now()
testutil.WaitForResult(func() (bool, error) {
alloc := ar2.Alloc()
if alloc.ClientStatus != structs.AllocClientStatusComplete {
return false, fmt.Errorf("Bad client status; got %v; want %v", alloc.ClientStatus, structs.AllocClientStatusComplete)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
})
if time.Since(start) > time.Duration(testutil.TestMultiplier()*5)*time.Second {
t.Fatalf("took too long to terminate")
}
}
// Ensure pre-#2132 state files containing the Context struct are properly
// migrated to the new format.
//

View File

@@ -121,7 +121,10 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
harness.taskDir = d
if consul {
- harness.consul = ctestutil.NewTestServer(t)
+ harness.consul, err = ctestutil.NewTestServer()
if err != nil {
t.Fatalf("error starting test Consul server: %v", err)
}
harness.config.ConsulConfig = &sconfig.ConsulConfig{
Addr: harness.consul.HTTPAddr,
}
@@ -445,7 +448,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
}
// Write the key to Consul
- harness.consul.SetKV(key, []byte(content))
+ harness.consul.SetKV(t, key, []byte(content))
// Wait for the unblock
select {
@@ -563,7 +566,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
}
// Write the key to Consul
- harness.consul.SetKV(consulKey, []byte(consulContent))
+ harness.consul.SetKV(t, consulKey, []byte(consulContent))
// Wait for the unblock
select {
@@ -612,7 +615,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
}
// Write the key to Consul
- harness.consul.SetKV(key, []byte(content1))
+ harness.consul.SetKV(t, key, []byte(content1))
// Wait for the unblock
select {
@@ -633,7 +636,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
}
// Update the key in Consul
- harness.consul.SetKV(key, []byte(content2))
+ harness.consul.SetKV(t, key, []byte(content2))
select {
case <-harness.mockHooks.RestartCh:
@@ -697,8 +700,8 @@ func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
}
// Write the key to Consul
- harness.consul.SetKV(key1, []byte(content1_1))
- harness.consul.SetKV(key2, []byte(content2_1))
+ harness.consul.SetKV(t, key1, []byte(content1_1))
+ harness.consul.SetKV(t, key2, []byte(content2_1))
// Wait for the unblock
select {
@@ -712,8 +715,8 @@ func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
}
// Update the keys in Consul
- harness.consul.SetKV(key1, []byte(content1_2))
- harness.consul.SetKV(key2, []byte(content2_2))
+ harness.consul.SetKV(t, key1, []byte(content1_2))
+ harness.consul.SetKV(t, key2, []byte(content2_2))
// Wait for signals
timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second)
@@ -782,7 +785,7 @@ func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
}
// Write the key to Consul
- harness.consul.SetKV(key1, []byte(content1_1))
+ harness.consul.SetKV(t, key1, []byte(content1_1))
// Wait for the unblock
select {
@@ -792,7 +795,7 @@ func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
}
// Update the keys in Consul
- harness.consul.SetKV(key1, []byte(content1_2))
+ harness.consul.SetKV(t, key1, []byte(content1_2))
// Wait for restart
timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second)
@@ -878,7 +881,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
harness.mockHooks.SignalError = fmt.Errorf("test error")
// Write the key to Consul
- harness.consul.SetKV(key1, []byte(content1))
+ harness.consul.SetKV(t, key1, []byte(content1))
// Wait a little
select {
@@ -888,7 +891,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
}
// Write the key to Consul
- harness.consul.SetKV(key1, []byte(content2))
+ harness.consul.SetKV(t, key1, []byte(content2))
// Wait for kill channel
select {

View File

@@ -309,6 +309,8 @@ func GetTaskEnv(taskDir *allocdir.TaskDir, node *structs.Node,
env := env.NewTaskEnvironment(node).
SetTaskMeta(alloc.Job.CombinedTaskMeta(alloc.TaskGroup, task.Name)).
SetJobName(alloc.Job.Name).
SetDatacenterName(node.Datacenter).
SetRegionName(conf.Region).
SetEnvvars(task.Env).
SetTaskName(task.Name)

View File

@@ -74,6 +74,8 @@ func testConfig() *config.Config {
conf.StateDir = os.TempDir()
conf.AllocDir = os.TempDir()
conf.MaxKillTimeout = 10 * time.Second
conf.Region = "global"
conf.Node = mock.Node()
return conf
}
@@ -88,6 +90,7 @@ type testContext struct {
// It is up to the caller to call AllocDir.Destroy to cleanup.
func testDriverContexts(t *testing.T, task *structs.Task) *testContext {
cfg := testConfig()
cfg.Node = mock.Node()
allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(cfg.AllocDir, structs.GenerateUUID()))
if err := allocDir.Build(); err != nil {
t.Fatalf("AllocDir.Build() failed: %v", err)
@@ -162,7 +165,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
conf := testConfig()
allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID))
taskDir := allocDir.NewTaskDir(task.Name)
- env, err := GetTaskEnv(taskDir, nil, task, alloc, conf, "")
+ env, err := GetTaskEnv(taskDir, conf.Node, task, alloc, conf, "")
if err != nil {
t.Fatalf("GetTaskEnv() failed: %v", err)
}
@@ -209,6 +212,8 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
"NOMAD_ALLOC_NAME": alloc.Name,
"NOMAD_TASK_NAME": task.Name,
"NOMAD_JOB_NAME": alloc.Job.Name,
"NOMAD_DC": "dc1",
"NOMAD_REGION": "global",
}
act := env.EnvMap()

View File

@@ -48,6 +48,12 @@ const (
// AllocIndex is the environment variable for passing the allocation index.
AllocIndex = "NOMAD_ALLOC_INDEX"
// Datacenter is the environment variable for passing the datacenter in which the alloc is running.
Datacenter = "NOMAD_DC"
// Region is the environment variable for passing the region in which the alloc is running.
Region = "NOMAD_REGION"
// AddrPrefix is the prefix for passing both dynamic and static port
// allocations to tasks.
// E.g $NOMAD_ADDR_http=127.0.0.1:80
@@ -74,6 +80,7 @@ const (
const (
nodeIdKey = "node.unique.id"
nodeDcKey = "node.datacenter"
nodeRegionKey = "node.region"
nodeNameKey = "node.unique.name"
nodeClassKey = "node.class"
@@ -94,6 +101,8 @@ type TaskEnvironment struct {
MemLimit int
TaskName string
AllocIndex int
Datacenter string
Region string
AllocId string
AllocName string
Node *structs.Node
@@ -195,6 +204,12 @@ func (t *TaskEnvironment) Build() *TaskEnvironment {
if t.JobName != "" {
t.TaskEnv[JobName] = t.JobName
}
if t.Datacenter != "" {
t.TaskEnv[Datacenter] = t.Datacenter
}
if t.Region != "" {
t.TaskEnv[Region] = t.Region
}
// Build the addr of the other tasks
if t.Alloc != nil {
@@ -227,6 +242,7 @@ func (t *TaskEnvironment) Build() *TaskEnvironment {
// Set up the node values.
t.NodeValues[nodeIdKey] = t.Node.ID
t.NodeValues[nodeDcKey] = t.Node.Datacenter
t.NodeValues[nodeRegionKey] = t.Region
t.NodeValues[nodeNameKey] = t.Node.Name
t.NodeValues[nodeClassKey] = t.Node.NodeClass
@@ -488,21 +504,41 @@ func (t *TaskEnvironment) SetTaskName(name string) *TaskEnvironment {
return t
}
func (t *TaskEnvironment) SetJobName(name string) *TaskEnvironment {
t.JobName = name
return t
}
func (t *TaskEnvironment) ClearTaskName() *TaskEnvironment {
t.TaskName = ""
return t
}
func (t *TaskEnvironment) SetJobName(name string) *TaskEnvironment {
t.JobName = name
return t
}
func (t *TaskEnvironment) ClearJobName() *TaskEnvironment {
t.JobName = ""
return t
}
func (t *TaskEnvironment) SetDatacenterName(name string) *TaskEnvironment {
t.Datacenter = name
return t
}
func (t *TaskEnvironment) ClearDatacenterName() *TaskEnvironment {
t.Datacenter = ""
return t
}
func (t *TaskEnvironment) SetRegionName(name string) *TaskEnvironment {
t.Region = name
return t
}
func (t *TaskEnvironment) ClearRegionName() *TaskEnvironment {
t.Region = ""
return t
}
func (t *TaskEnvironment) SetVaultToken(token string, inject bool) *TaskEnvironment {
t.VaultToken = token
t.InjectVaultToken = inject
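
For context, the two variables added in this file are injected into every task's environment alongside the existing NOMAD_* variables once Build() runs. A minimal sketch of a task reading them at runtime, assuming only the variable names defined by the constants above; everything else is illustrative:

package main

import (
	"fmt"
	"os"
)

func main() {
	// NOMAD_DC and NOMAD_REGION are populated by the client from the node's
	// datacenter and the agent's region, per the Build() additions above.
	fmt.Printf("running in datacenter %q, region %q\n",
		os.Getenv("NOMAD_DC"), os.Getenv("NOMAD_REGION"))
}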

View File

@@ -259,7 +259,12 @@ func (h *execHandle) Update(task *structs.Task) error {
}
func (h *execHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error) {
- return execChroot(ctx, h.taskDir.Dir, cmd, args)
+ deadline, ok := ctx.Deadline()
if !ok {
// No deadline set on context; default to 1 minute
deadline = time.Now().Add(time.Minute)
}
return h.executor.Exec(deadline, cmd, args)
}
func (h *execHandle) Signal(s os.Signal) error {
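
For context, the deadline handling above converts the caller's context deadline into the executor's absolute deadline, falling back to one minute when the context carries none. A minimal caller-side sketch; ScriptRunner is an illustrative interface and not part of this diff, but the concrete handles changed here (execHandle, javaHandle, rawExecHandle, rktHandle) all expose this Exec signature:

import (
	"context"
	"time"
)

// ScriptRunner is an illustrative stand-in for the driver handles above.
type ScriptRunner interface {
	Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error)
}

// execWithTimeout bounds the command to ten seconds; if the context carried
// no deadline, the exec and java handles above would fall back to one minute.
func execWithTimeout(h ScriptRunner, cmd string, args []string) ([]byte, int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return h.Exec(ctx, cmd, args)
}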

View File

@@ -283,7 +283,8 @@ func TestExecDriverUser(t *testing.T) {
}
}
- // TestExecDriver_HandlerExec ensures the exec driver's handle properly executes commands inside the chroot.
+ // TestExecDriver_HandlerExec ensures the exec driver's handle properly
+ // executes commands inside the container.
func TestExecDriver_HandlerExec(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
@@ -315,20 +316,60 @@
t.Fatalf("missing handle")
}
- // Exec a command that should work
- out, code, err := handle.Exec(context.TODO(), "/usr/bin/stat", []string{"/alloc"})
+ // Exec a command that should work and dump the environment
+ out, code, err := handle.Exec(context.Background(), "/bin/sh", []string{"-c", "env | grep NOMAD"})
if err != nil {
t.Fatalf("error exec'ing stat: %v", err)
}
if code != 0 {
t.Fatalf("expected `stat /alloc` to succeed but exit code was: %d", code)
}
- if expected := 100; len(out) < expected {
- t.Fatalf("expected at least %d bytes of output but found %d:\n%s", expected, len(out), out)
+ // Assert exec'd commands are run in a task-like environment
scriptEnv := make(map[string]string)
for _, line := range strings.Split(string(out), "\n") {
if line == "" {
continue
}
parts := strings.SplitN(string(line), "=", 2)
if len(parts) != 2 {
t.Fatalf("Invalid env var: %q", line)
}
scriptEnv[parts[0]] = parts[1]
}
if v, ok := scriptEnv["NOMAD_SECRETS_DIR"]; !ok || v != "/secrets" {
t.Errorf("Expected NOMAD_SECRETS_DIR=/secrets but found=%t value=%q", ok, v)
}
if v, ok := scriptEnv["NOMAD_ALLOC_ID"]; !ok || v != ctx.DriverCtx.allocID {
t.Errorf("Expected NOMAD_SECRETS_DIR=%q but found=%t value=%q", ok, v)
}
// Assert cgroup membership
out, code, err = handle.Exec(context.Background(), "/bin/cat", []string{"/proc/self/cgroup"})
if err != nil {
t.Fatalf("error exec'ing cat /proc/self/cgroup: %v", err)
}
if code != 0 {
t.Fatalf("expected `cat /proc/self/cgroup` to succeed but exit code was: %d", code)
}
found := false
for _, line := range strings.Split(string(out), "\n") {
// Every cgroup entry should be /nomad/$ALLOC_ID
if line == "" {
continue
}
if !strings.Contains(line, ":/nomad/") {
t.Errorf("Not a member of the alloc's cgroup: expected=...:/nomad/... -- found=%q", line)
continue
}
found = true
}
if !found {
t.Errorf("exec'd command isn't in the task's cgroup")
}
// Exec a command that should fail
- out, code, err = handle.Exec(context.TODO(), "/usr/bin/stat", []string{"lkjhdsaflkjshowaisxmcvnlia"})
+ out, code, err = handle.Exec(context.Background(), "/usr/bin/stat", []string{"lkjhdsaflkjshowaisxmcvnlia"})
if err != nil {
t.Fatalf("error exec'ing stat: %v", err)
}

View File

@@ -1,6 +1,7 @@
package executor
import (
"context"
"fmt"
"io/ioutil"
"log"
@@ -15,6 +16,7 @@ import (
"syscall"
"time"
"github.com/armon/circbuf"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/go-ps"
"github.com/shirou/gopsutil/process"
@@ -57,6 +59,7 @@ type Executor interface {
Version() (*ExecutorVersion, error)
Stats() (*cstructs.TaskResourceUsage, error)
Signal(s os.Signal) error
Exec(deadline time.Time, cmd string, args []string) ([]byte, int, error)
}
// ExecutorContext holds context to configure the command user
@@ -203,8 +206,8 @@ func (e *UniversalExecutor) SetContext(ctx *ExecutorContext) error {
return nil
}
- // LaunchCmd launches a process and returns it's state. It also configures an
- // applies isolation on certain platforms.
+ // LaunchCmd launches the main process and returns its state. It also
+ // configures and applies isolation on certain platforms.
func (e *UniversalExecutor) LaunchCmd(command *ExecCommand) (*ProcessState, error) {
e.logger.Printf("[DEBUG] executor: launching command %v %v", command.Cmd, strings.Join(command.Args, " "))
@@ -283,6 +286,51 @@
return &ProcessState{Pid: e.cmd.Process.Pid, ExitCode: -1, IsolationConfig: ic, Time: time.Now()}, nil
}
// Exec a command inside a container for exec and java drivers.
func (e *UniversalExecutor) Exec(deadline time.Time, name string, args []string) ([]byte, int, error) {
ctx, cancel := context.WithDeadline(context.Background(), deadline)
defer cancel()
return ExecScript(ctx, e.cmd.Dir, e.ctx.TaskEnv, e.cmd.SysProcAttr, name, args)
}
// ExecScript executes cmd with args and returns the output, exit code, and
// error. Output is truncated to client/driver/structs.CheckBufSize
func ExecScript(ctx context.Context, dir string, env *env.TaskEnvironment, attrs *syscall.SysProcAttr,
name string, args []string) ([]byte, int, error) {
name = env.ReplaceEnv(name)
cmd := exec.CommandContext(ctx, name, env.ParseAndReplace(args)...)
// Copy runtime environment from the main command
cmd.SysProcAttr = attrs
cmd.Dir = dir
cmd.Env = env.EnvList()
// Capture output
buf, _ := circbuf.NewBuffer(int64(dstructs.CheckBufSize))
cmd.Stdout = buf
cmd.Stderr = buf
if err := cmd.Run(); err != nil {
exitErr, ok := err.(*exec.ExitError)
if !ok {
// Non-exit error, return it and let the caller treat
// it as a critical failure
return nil, 0, err
}
// Some kind of error happened; default to critical
exitCode := 2
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
exitCode = status.ExitStatus()
}
// Don't return the exitError as the caller only needs the
// output and code.
return buf.Bytes(), exitCode, nil
}
return buf.Bytes(), 0, nil
}
// configureLoggers sets up the standard out/error file rotators
func (e *UniversalExecutor) configureLoggers() error {
e.rotatorLock.Lock()

View File

@@ -6,6 +6,7 @@ import (
"net/rpc"
"os"
"syscall"
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/driver/executor"
@@ -33,6 +34,17 @@ type LaunchCmdArgs struct {
Cmd *executor.ExecCommand
}
type ExecCmdArgs struct {
Deadline time.Time
Name string
Args []string
}
type ExecCmdReturn struct {
Output []byte
Code int
}
func (e *ExecutorRPC) LaunchCmd(cmd *executor.ExecCommand) (*executor.ProcessState, error) {
var ps *executor.ProcessState
err := e.client.Call("Plugin.LaunchCmd", LaunchCmdArgs{Cmd: cmd}, &ps)
@@ -91,6 +103,20 @@ func (e *ExecutorRPC) Signal(s os.Signal) error {
return e.client.Call("Plugin.Signal", &s, new(interface{}))
}
func (e *ExecutorRPC) Exec(deadline time.Time, name string, args []string) ([]byte, int, error) {
req := ExecCmdArgs{
Deadline: deadline,
Name: name,
Args: args,
}
var resp *ExecCmdReturn
err := e.client.Call("Plugin.Exec", req, &resp)
if resp == nil {
return nil, 0, err
}
return resp.Output, resp.Code, err
}
type ExecutorRPCServer struct {
Impl executor.Executor
logger *log.Logger
@@ -165,6 +191,16 @@ func (e *ExecutorRPCServer) Signal(args os.Signal, resp *interface{}) error {
return e.Impl.Signal(args)
}
func (e *ExecutorRPCServer) Exec(args ExecCmdArgs, result *ExecCmdReturn) error {
out, code, err := e.Impl.Exec(args.Deadline, args.Name, args.Args)
ret := &ExecCmdReturn{
Output: out,
Code: code,
}
*result = *ret
return err
}
type ExecutorPlugin struct {
logger *log.Logger
Impl *ExecutorRPCServer

View File

@@ -390,7 +390,12 @@ func (h *javaHandle) Update(task *structs.Task) error {
}
func (h *javaHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error) {
- return execChroot(ctx, h.taskDir, cmd, args)
+ deadline, ok := ctx.Deadline()
if !ok {
// No deadline set on context; default to 1 minute
deadline = time.Now().Add(time.Minute)
}
return h.executor.Exec(deadline, cmd, args)
}
func (h *javaHandle) Signal(s os.Signal) error {

View File

@@ -10,7 +10,9 @@ import (
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
@@ -48,6 +50,8 @@ type rawExecHandle struct {
logger *log.Logger
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
taskEnv *env.TaskEnvironment
taskDir *allocdir.TaskDir
}
// NewRawExecDriver is used to create a new raw exec driver
@@ -165,6 +169,8 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandl
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
taskEnv: d.taskEnv,
taskDir: ctx.TaskDir,
}
go h.run()
return h, nil
@@ -212,6 +218,8 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, e
version: id.Version,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
taskEnv: d.taskEnv,
taskDir: ctx.TaskDir,
}
go h.run()
return h, nil
@@ -247,7 +255,7 @@ func (h *rawExecHandle) Update(task *structs.Task) error {
}
func (h *rawExecHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error) {
- return execChroot(ctx, "", cmd, args)
+ return executor.ExecScript(ctx, h.taskDir.Dir, h.taskEnv, nil, cmd, args)
}
func (h *rawExecHandle) Signal(s os.Signal) error {

View File

@@ -22,6 +22,7 @@ import (
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
cstructs "github.com/hashicorp/nomad/client/structs"
@@ -87,6 +88,8 @@ type RktDriverConfig struct {
// rktHandle is returned from Start/Open as a handle to the PID
type rktHandle struct {
uuid string
env *env.TaskEnvironment
taskDir *allocdir.TaskDir
pluginClient *plugin.Client
executorPid int
executor executor.Executor
@@ -474,6 +477,8 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
maxKill := d.DriverContext.config.MaxKillTimeout
h := &rktHandle{
uuid: uuid,
env: d.taskEnv,
taskDir: ctx.TaskDir,
pluginClient: pluginClient,
executor: execIntf,
executorPid: ps.Pid,
@@ -514,6 +519,8 @@ func (d *RktDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error
// Return a driver handle
h := &rktHandle{
uuid: id.UUID,
env: d.taskEnv,
taskDir: ctx.TaskDir,
pluginClient: pluginClient,
executorPid: id.ExecutorPid,
executor: exec,
@@ -566,7 +573,7 @@ func (h *rktHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte
enterArgs[1] = h.uuid
enterArgs[2] = cmd
copy(enterArgs[3:], args)
- return execChroot(ctx, "", rktCmd, enterArgs)
+ return executor.ExecScript(ctx, h.taskDir.Dir, h.env, nil, rktCmd, enterArgs)
}
func (h *rktHandle) Signal(s os.Signal) error {

View File

@@ -22,7 +22,7 @@ import (
func TestRktVersionRegex(t *testing.T) {
if os.Getenv("NOMAD_TEST_RKT") == "" {
- t.Skip("skipping rkt tests")
+ t.Skip("NOMAD_TEST_RKT unset, skipping")
}
input_rkt := "rkt version 0.8.1"

View File

@@ -1,7 +1,6 @@
package driver
import (
"context"
"encoding/json"
"fmt"
"io"
@@ -9,10 +8,8 @@ import (
"os/exec"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/armon/circbuf"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/config"
@@ -181,36 +178,3 @@ func getExecutorUser(task *structs.Task) string {
}
return task.User
}
// execChroot executes cmd with args inside chroot if set and returns the
// output, exit code, and error. If chroot is an empty string the command is
// executed on the host.
func execChroot(ctx context.Context, chroot, name string, args []string) ([]byte, int, error) {
buf, _ := circbuf.NewBuffer(int64(cstructs.CheckBufSize))
cmd := exec.CommandContext(ctx, name, args...)
cmd.Dir = "/"
cmd.Stdout = buf
cmd.Stderr = buf
if chroot != "" {
setChroot(cmd, chroot)
}
if err := cmd.Run(); err != nil {
exitErr, ok := err.(*exec.ExitError)
if !ok {
// Non-exit error, return it and let the caller treat
// it as a critical failure
return nil, 0, err
}
// Some kind of error happened; default to critical
exitCode := 2
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
exitCode = status.ExitStatus()
}
// Don't return the exitError as the caller only needs the
// output and code.
return buf.Bytes(), exitCode, nil
}
return buf.Bytes(), 0, nil
}

View File

@@ -19,6 +19,7 @@ import (
"github.com/golang/snappy"
"github.com/hashicorp/consul-template/signals"
"github.com/hashicorp/go-multierror"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver"
@@ -262,8 +263,11 @@ func (r *TaskRunner) pre060StateFilePath() string {
return filepath.Join(r.config.StateDir, "alloc", r.alloc.ID, dirName, "state.json")
}
- // RestoreState is used to restore our state
- func (r *TaskRunner) RestoreState() error {
+ // RestoreState is used to restore our state. If a non-empty string is returned
+ // the task is restarted with the string as the reason. This is useful for
+ // backwards incompatible upgrades that need to restart tasks with a new
+ // executor.
+ func (r *TaskRunner) RestoreState() (string, error) {
// COMPAT: Remove in 0.7.0
// 0.6.0 transitioned from individual state files to a single bolt-db.
// The upgrade path is to:
@@ -280,7 +284,7 @@ func (r *TaskRunner) RestoreState() error {
os.RemoveAll(oldPath)
} else if !os.IsNotExist(err) {
// Something corrupt in the old state file
- return err
+ return "", err
} else {
// We are doing a normal restore
err := r.stateDB.View(func(tx *bolt.Tx) error {
@@ -295,7 +299,7 @@ func (r *TaskRunner) RestoreState() error {
return nil
})
if err != nil {
- return err
+ return "", err
}
}
@@ -307,7 +311,7 @@ func (r *TaskRunner) RestoreState() error {
r.setCreatedResources(snap.CreatedResources)
if err := r.setTaskEnv(); err != nil {
- return fmt.Errorf("client: failed to create task environment for task %q in allocation %q: %v",
+ return "", fmt.Errorf("client: failed to create task environment for task %q in allocation %q: %v",
r.task.Name, r.alloc.ID, err)
}
@@ -317,7 +321,7 @@ func (r *TaskRunner) RestoreState() error {
data, err := ioutil.ReadFile(tokenPath)
if err != nil {
if !os.IsNotExist(err) {
- return fmt.Errorf("failed to read token for task %q in alloc %q: %v", r.task.Name, r.alloc.ID, err)
+ return "", fmt.Errorf("failed to read token for task %q in alloc %q: %v", r.task.Name, r.alloc.ID, err)
}
// Token file doesn't exist
@@ -328,10 +332,11 @@ func (r *TaskRunner) RestoreState() error {
}
// Restore the driver
restartReason := ""
if snap.HandleID != "" {
d, err := r.createDriver()
if err != nil {
- return err
+ return "", err
}
ctx := driver.NewExecContext(r.taskDir)
@@ -341,7 +346,11 @@ func (r *TaskRunner) RestoreState() error {
if err != nil {
r.logger.Printf("[ERR] client: failed to open handle to task %q for alloc %q: %v",
r.task.Name, r.alloc.ID, err)
- return nil
+ return "", nil
}
if pre06ScriptCheck(snap.Version, r.task.Driver, r.task.Services) {
restartReason = pre06ScriptCheckReason
}
if err := r.registerServices(d, handle); err != nil {
@@ -360,8 +369,40 @@ func (r *TaskRunner) RestoreState() error {
r.running = true
r.runningLock.Unlock()
}
return restartReason, nil
}
- return nil
+ // ver06 is used for checking for pre-0.6 script checks
var ver06 = version.Must(version.NewVersion("0.6.0dev"))
// pre06ScriptCheckReason is the restart reason given when a pre-0.6 script
// check is found on an exec/java task.
const pre06ScriptCheckReason = "upgrading pre-0.6 script checks"
// pre06ScriptCheck returns true if version is prior to 0.6.0dev, has a script
// check, and uses exec or java drivers.
func pre06ScriptCheck(ver, driver string, services []*structs.Service) bool {
if driver != "exec" && driver != "java" && driver != "mock_driver" {
// Only exec and java are affected
return false
}
v, err := version.NewVersion(ver)
if err != nil {
// Treat it as old
return true
}
if !v.LessThan(ver06) {
// >= 0.6.0dev
return false
}
for _, service := range services {
for _, check := range service.Checks {
if check.Type == "script" {
return true
}
}
}
return false
}
// SaveState is used to snapshot our state
@@ -1118,6 +1159,10 @@ func (r *TaskRunner) run() {
return
}
// Remove from consul before killing the task so that traffic
// can be rerouted
r.consul.RemoveTask(r.alloc.ID, r.task)
// Store the task event that provides context on the task
// destroy. The Killed event is set from the alloc_runner and
// doesn't add detail

View File

@@ -75,6 +75,7 @@ func testTaskRunner(t *testing.T, restarts bool) *taskRunnerTestCtx {
func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocation) *taskRunnerTestCtx {
logger := testLogger()
conf := config.DefaultConfig()
conf.Node = mock.Node()
conf.StateDir = os.TempDir()
conf.AllocDir = os.TempDir()
@@ -380,7 +381,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
tr2 := NewTaskRunner(ctx.tr.logger, ctx.tr.config, ctx.tr.stateDB, ctx.upd.Update,
ctx.tr.taskDir, ctx.tr.alloc, task2, ctx.tr.vaultClient, ctx.tr.consul)
tr2.restartTracker = noRestartsTracker()
- if err := tr2.RestoreState(); err != nil {
+ if _, err := tr2.RestoreState(); err != nil {
t.Fatalf("err: %v", err)
}
go tr2.Run()
@@ -1531,3 +1532,49 @@ func TestTaskRunner_CleanupFail(t *testing.T) {
t.Fatalf("expected %#v but found: %#v", expected, ctx.tr.createdResources.Resources)
}
}
func TestTaskRunner_Pre06ScriptCheck(t *testing.T) {
run := func(ver, driver, checkType string, exp bool) (string, func(t *testing.T)) {
name := fmt.Sprintf("%s %s %s returns %t", ver, driver, checkType, exp)
return name, func(t *testing.T) {
services := []*structs.Service{
{
Checks: []*structs.ServiceCheck{
{
Type: checkType,
},
},
},
}
if act := pre06ScriptCheck(ver, driver, services); act != exp {
t.Errorf("expected %t received %t", exp, act)
}
}
}
t.Run(run("0.5.6", "exec", "script", true))
t.Run(run("0.5.6", "java", "script", true))
t.Run(run("0.5.6", "mock_driver", "script", true))
t.Run(run("0.5.9", "exec", "script", true))
t.Run(run("0.5.9", "java", "script", true))
t.Run(run("0.5.9", "mock_driver", "script", true))
t.Run(run("0.6.0dev", "exec", "script", false))
t.Run(run("0.6.0dev", "java", "script", false))
t.Run(run("0.6.0dev", "mock_driver", "script", false))
t.Run(run("0.6.0", "exec", "script", false))
t.Run(run("0.6.0", "java", "script", false))
t.Run(run("0.6.0", "mock_driver", "script", false))
t.Run(run("1.0.0", "exec", "script", false))
t.Run(run("1.0.0", "java", "script", false))
t.Run(run("1.0.0", "mock_driver", "script", false))
t.Run(run("0.5.6", "rkt", "script", false))
t.Run(run("0.5.6", "docker", "script", false))
t.Run(run("0.5.6", "qemu", "script", false))
t.Run(run("0.5.6", "raw_exec", "script", false))
t.Run(run("0.5.6", "invalid", "script", false))
t.Run(run("0.5.6", "exec", "tcp", false))
t.Run(run("0.5.6", "java", "tcp", false))
t.Run(run("0.5.6", "mock_driver", "tcp", false))
}

View File

@@ -351,43 +351,22 @@ func (a *Agent) setupServer() error {
a.server = server
// Consul check addresses default to bind but can be toggled to use advertise
httpCheckAddr := a.config.normalizedAddrs.HTTP
rpcCheckAddr := a.config.normalizedAddrs.RPC
serfCheckAddr := a.config.normalizedAddrs.Serf
if *a.config.Consul.ChecksUseAdvertise {
httpCheckAddr = a.config.AdvertiseAddrs.HTTP
rpcCheckAddr = a.config.AdvertiseAddrs.RPC
serfCheckAddr = a.config.AdvertiseAddrs.Serf
}
// Create the Nomad Server services for Consul
// TODO re-introduce HTTP/S checks when Consul 0.7.1 comes out
if *a.config.Consul.AutoAdvertise {
httpServ := &structs.Service{
Name: a.config.Consul.ServerServiceName,
PortLabel: a.config.AdvertiseAddrs.HTTP,
Tags: []string{consul.ServiceTagHTTP},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
Name: "Nomad Server HTTP Check",
Type: "http",
Path: "/v1/status/peers",
Protocol: "http",
Interval: serverHttpCheckInterval,
Timeout: serverHttpCheckTimeout,
PortLabel: httpCheckAddr,
},
},
}
if conf.TLSConfig.EnableHTTP {
if a.consulSupportsTLSSkipVerify {
httpServ.Checks[0].Protocol = "https"
httpServ.Checks[0].TLSSkipVerify = true
} else {
// No TLSSkipVerify support, don't register https check
a.logger.Printf("[WARN] agent: not registering Nomad HTTPS Health Check because it requires Consul>=0.7.2")
httpServ.Checks = []*structs.ServiceCheck{}
}
if check := a.agentHTTPCheck(); check != nil {
httpServ.Checks = []*structs.ServiceCheck{check}
}
rpcServ := &structs.Service{
Name: a.config.Consul.ServerServiceName,
@@ -482,39 +461,15 @@ func (a *Agent) setupClient() error {
}
a.client = client
// Resolve the http check address
httpCheckAddr := a.config.normalizedAddrs.HTTP
if *a.config.Consul.ChecksUseAdvertise {
httpCheckAddr = a.config.AdvertiseAddrs.HTTP
}
// Create the Nomad Client services for Consul
if *a.config.Consul.AutoAdvertise {
httpServ := &structs.Service{
Name: a.config.Consul.ClientServiceName,
PortLabel: a.config.AdvertiseAddrs.HTTP,
Tags: []string{consul.ServiceTagHTTP},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
Name: "Nomad Client HTTP Check",
Type: "http",
Path: "/v1/agent/servers",
Protocol: "http",
Interval: clientHttpCheckInterval,
Timeout: clientHttpCheckTimeout,
PortLabel: httpCheckAddr,
},
},
}
if conf.TLSConfig.EnableHTTP {
if a.consulSupportsTLSSkipVerify {
httpServ.Checks[0].Protocol = "https"
httpServ.Checks[0].TLSSkipVerify = true
} else {
// No TLSSkipVerify support, don't register https check
a.logger.Printf("[WARN] agent: not registering Nomad HTTPS Health Check because it requires Consul>=0.7.2")
httpServ.Checks = []*structs.ServiceCheck{}
}
if check := a.agentHTTPCheck(); check != nil {
httpServ.Checks = []*structs.ServiceCheck{check}
}
if err := a.consulService.RegisterAgent(consulRoleClient, []*structs.Service{httpServ}); err != nil {
return err
@@ -524,6 +479,42 @@
return nil
}
// agentHTTPCheck returns a health check for the agent's HTTP API if possible.
// If no HTTP health check can be supported nil is returned.
func (a *Agent) agentHTTPCheck() *structs.ServiceCheck {
// Resolve the http check address
httpCheckAddr := a.config.normalizedAddrs.HTTP
if *a.config.Consul.ChecksUseAdvertise {
httpCheckAddr = a.config.AdvertiseAddrs.HTTP
}
check := structs.ServiceCheck{
Name: "Nomad Client HTTP Check",
Type: "http",
Path: "/v1/agent/servers",
Protocol: "http",
Interval: clientHttpCheckInterval,
Timeout: clientHttpCheckTimeout,
PortLabel: httpCheckAddr,
}
if !a.config.TLSConfig.EnableHTTP {
// No HTTPS, return a plain http check
return &check
}
if !a.consulSupportsTLSSkipVerify {
a.logger.Printf("[WARN] agent: not registering Nomad HTTPS Health Check because it requires Consul>=0.7.2")
return nil
}
if a.config.TLSConfig.VerifyHTTPSClient {
a.logger.Printf("[WARN] agent: not registering Nomad HTTPS Health Check because verify_https_client enabled")
return nil
}
// HTTPS enabled; skip verification
check.Protocol = "https"
check.TLSSkipVerify = true
return &check
}
// reservePortsForClient reserves a range of ports for the client to use when
// it creates various plugins for log collection, executors, drivers, etc
func (a *Agent) reservePortsForClient(conf *clientconfig.Config) error {
@@ -691,7 +682,7 @@ func (a *Agent) setupConsul(consulConfig *config.ConsulConfig) error {
}
// Determine version for TLSSkipVerify
- if self, err := client.Agent().Self(); err != nil {
+ if self, err := client.Agent().Self(); err == nil {
a.consulSupportsTLSSkipVerify = consulSupportsTLSSkipVerify(self)
}

View File

@@ -4,12 +4,14 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"strings"
"testing"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad"
sconfig "github.com/hashicorp/nomad/nomad/structs/config"
)
@@ -360,6 +362,98 @@ func TestAgent_ClientConfig(t *testing.T) {
}
}
// TestAgent_HTTPCheck asserts Agent.agentHTTPCheck properly alters the HTTP
// API health check depending on configuration.
func TestAgent_HTTPCheck(t *testing.T) {
logger := log.New(ioutil.Discard, "", 0)
if testing.Verbose() {
logger = log.New(os.Stdout, "[TestAgent_HTTPCheck] ", log.Lshortfile)
}
agent := func() *Agent {
return &Agent{
logger: logger,
config: &Config{
AdvertiseAddrs: &AdvertiseAddrs{HTTP: "advertise:4646"},
normalizedAddrs: &Addresses{HTTP: "normalized:4646"},
Consul: &sconfig.ConsulConfig{
ChecksUseAdvertise: helper.BoolToPtr(false),
},
TLSConfig: &sconfig.TLSConfig{EnableHTTP: false},
},
}
}
t.Run("Plain HTTP Check", func(t *testing.T) {
a := agent()
check := a.agentHTTPCheck()
if check == nil {
t.Fatalf("expected non-nil check")
}
if check.Type != "http" {
t.Errorf("expected http check not: %q", check.Type)
}
if expected := "/v1/agent/servers"; check.Path != expected {
t.Errorf("expected %q path not: %q", expected, check.Path)
}
if check.Protocol != "http" {
t.Errorf("expected http proto not: %q", check.Protocol)
}
if expected := a.config.normalizedAddrs.HTTP; check.PortLabel != expected {
t.Errorf("expected normalized addr not %q", check.PortLabel)
}
})
t.Run("Plain HTTP + ChecksUseAdvertise", func(t *testing.T) {
a := agent()
a.config.Consul.ChecksUseAdvertise = helper.BoolToPtr(true)
check := a.agentHTTPCheck()
if check == nil {
t.Fatalf("expected non-nil check")
}
if expected := a.config.AdvertiseAddrs.HTTP; check.PortLabel != expected {
t.Errorf("expected advertise addr not %q", check.PortLabel)
}
})
t.Run("HTTPS + consulSupportsTLSSkipVerify", func(t *testing.T) {
a := agent()
a.consulSupportsTLSSkipVerify = true
a.config.TLSConfig.EnableHTTP = true
check := a.agentHTTPCheck()
if check == nil {
t.Fatalf("expected non-nil check")
}
if !check.TLSSkipVerify {
t.Errorf("expected tls skip verify")
}
if check.Protocol != "https" {
t.Errorf("expected https not: %q", check.Protocol)
}
})
t.Run("HTTPS w/o TLSSkipVerify", func(t *testing.T) {
a := agent()
a.consulSupportsTLSSkipVerify = false
a.config.TLSConfig.EnableHTTP = true
if check := a.agentHTTPCheck(); check != nil {
t.Fatalf("expected nil check not: %#v", check)
}
})
t.Run("HTTPS + VerifyHTTPSClient", func(t *testing.T) {
a := agent()
a.consulSupportsTLSSkipVerify = true
a.config.TLSConfig.EnableHTTP = true
a.config.TLSConfig.VerifyHTTPSClient = true
if check := a.agentHTTPCheck(); check != nil {
t.Fatalf("expected nil check not: %#v", check)
}
})
}
func TestAgent_ConsulSupportsTLSSkipVerify(t *testing.T) {
assertSupport := func(expected bool, blob string) {
self := map[string]map[string]interface{}{}

View File

@@ -138,4 +138,5 @@ tls {
ca_file = "foo"
cert_file = "bar"
key_file = "pipe"
verify_https_client = true
}

View File

@@ -689,6 +689,7 @@ func parseTLSConfig(result **config.TLSConfig, list *ast.ObjectList) error {
"ca_file",
"cert_file",
"key_file",
"verify_https_client",
}
if err := checkHCLKeys(listVal, valid); err != nil {

View File

@@ -154,6 +154,7 @@ func TestConfig_Parse(t *testing.T) {
CAFile: "foo",
CertFile: "bar",
KeyFile: "pipe",
VerifyHTTPSClient: true,
},
HTTPAPIResponseHeaders: map[string]string{
"Access-Control-Allow-Origin": "*",

View File

@@ -45,16 +45,20 @@ func TestConsul_Integration(t *testing.T) {
}
}
// Create an embedded Consul server
- testconsul := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
+ testconsul, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
conf := config.DefaultConfig()
conf.Node = mock.Node()
conf.ConsulConfig.Addr = testconsul.HTTPAddr
consulConfig, err := conf.ConsulConfig.ApiConfig()
if err != nil {

View File

@@ -53,7 +53,7 @@ func NewHTTPServer(agent *Agent, config *Config) (*HTTPServer, error) {
// If TLS is enabled, wrap the listener with a TLS listener
if config.TLSConfig.EnableHTTP {
tlsConf := &tlsutil.Config{
- VerifyIncoming: false,
+ VerifyIncoming: config.TLSConfig.VerifyHTTPSClient,
VerifyOutgoing: true,
VerifyServerHostname: config.TLSConfig.VerifyServerHostname,
CAFile: config.TLSConfig.CAFile,

View File

@@ -2,12 +2,16 @@ package agent
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strconv"
"testing"
@@ -15,6 +19,7 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/testutil"
)
@@ -337,6 +342,126 @@ func TestParseRegion(t *testing.T) {
}
}
// TestHTTP_VerifyHTTPSClient asserts that a client certificate signed by the
// appropriate CA is required when VerifyHTTPSClient=true.
func TestHTTP_VerifyHTTPSClient(t *testing.T) {
const (
cafile = "../../helper/tlsutil/testdata/ca.pem"
foocert = "../../helper/tlsutil/testdata/nomad-foo.pem"
fookey = "../../helper/tlsutil/testdata/nomad-foo-key.pem"
)
s := makeHTTPServer(t, func(c *Config) {
c.Region = "foo" // match the region on foocert
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
VerifyHTTPSClient: true,
CAFile: cafile,
CertFile: foocert,
KeyFile: fookey,
}
})
defer s.Cleanup()
reqURL := fmt.Sprintf("https://%s/v1/agent/self", s.Agent.config.AdvertiseAddrs.HTTP)
// FAIL: Requests that expect 127.0.0.1 as the name should fail
resp, err := http.Get(reqURL)
if err == nil {
resp.Body.Close()
t.Fatalf("expected non-nil error but received: %v", resp.StatusCode)
}
urlErr, ok := err.(*url.Error)
if !ok {
t.Fatalf("expected a *url.Error but received: %T -> %v", err, err)
}
hostErr, ok := urlErr.Err.(x509.HostnameError)
if !ok {
t.Fatalf("expected a x509.HostnameError but received: %T -> %v", urlErr.Err, urlErr.Err)
}
if expected := "127.0.0.1"; hostErr.Host != expected {
t.Fatalf("expected hostname on error to be %q but found %q", expected, hostErr.Host)
}
// FAIL: Requests that specify a valid hostname but not the CA should
// fail
tlsConf := &tls.Config{
ServerName: "client.regionFoo.nomad",
}
transport := &http.Transport{TLSClientConfig: tlsConf}
client := &http.Client{Transport: transport}
req, err := http.NewRequest("GET", reqURL, nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
resp, err = client.Do(req)
if err == nil {
resp.Body.Close()
t.Fatalf("expected non-nil error but received: %v", resp.StatusCode)
}
urlErr, ok = err.(*url.Error)
if !ok {
t.Fatalf("expected a *url.Error but received: %T -> %v", err, err)
}
_, ok = urlErr.Err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("expected a x509.UnknownAuthorityError but received: %T -> %v", urlErr.Err, urlErr.Err)
}
// FAIL: Requests that specify a valid hostname and CA cert but lack a
// client certificate should fail
cacertBytes, err := ioutil.ReadFile(cafile)
if err != nil {
t.Fatalf("error reading cacert: %v", err)
}
tlsConf.RootCAs = x509.NewCertPool()
tlsConf.RootCAs.AppendCertsFromPEM(cacertBytes)
req, err = http.NewRequest("GET", reqURL, nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
resp, err = client.Do(req)
if err == nil {
resp.Body.Close()
t.Fatalf("expected non-nil error but received: %v", resp.StatusCode)
}
urlErr, ok = err.(*url.Error)
if !ok {
t.Fatalf("expected a *url.Error but received: %T -> %v", err, err)
}
opErr, ok := urlErr.Err.(*net.OpError)
if !ok {
t.Fatalf("expected a *net.OpErr but received: %T -> %v", urlErr.Err, urlErr.Err)
}
const badCertificate = "tls: bad certificate" // from crypto/tls/alert.go:52 and RFC 5246 § A.3
if opErr.Err.Error() != badCertificate {
t.Fatalf("expected tls.alert bad_certificate but received: %q", opErr.Err.Error())
}
// PASS: Requests that specify a valid hostname, CA cert, and client
// certificate succeed.
tlsConf.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
c, err := tls.LoadX509KeyPair(foocert, fookey)
if err != nil {
return nil, err
}
return &c, nil
}
transport = &http.Transport{TLSClientConfig: tlsConf}
client = &http.Client{Transport: transport}
req, err = http.NewRequest("GET", reqURL, nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
resp, err = client.Do(req)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
t.Fatalf("expected 200 status code but got: %d", resp.StatusCode)
}
}
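
For reference, a client talking to an agent with verify_https_client enabled needs the same three pieces the passing case above uses: a ServerName that matches the server certificate, the CA, and a client key pair. A condensed, hedged sketch outside the test harness (certificate paths and the address are placeholders):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	caPEM, err := ioutil.ReadFile("ca.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	cert, err := tls.LoadX509KeyPair("client.pem", "client-key.pem") // placeholder paths
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				ServerName:   "client.regionFoo.nomad", // must match a name in the server cert
				RootCAs:      pool,
				Certificates: []tls.Certificate{cert},
			},
		},
	}

	resp, err := client.Get("https://127.0.0.1:4646/v1/agent/self") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
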
// assertIndex tests that X-Nomad-Index is set and non-zero // assertIndex tests that X-Nomad-Index is set and non-zero
func assertIndex(t *testing.T, resp *httptest.ResponseRecorder) { func assertIndex(t *testing.T, resp *httptest.ResponseRecorder) {
header := resp.Header().Get("X-Nomad-Index") header := resp.Header().Get("X-Nomad-Index")

View File

@ -143,7 +143,7 @@ func (a *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig {
return result return result
} }
// ApiConfig() returns a usable Consul config that can be passed directly to // ApiConfig returns a usable Consul config that can be passed directly to
// hashicorp/consul/api. NOTE: datacenter is not set // hashicorp/consul/api. NOTE: datacenter is not set
func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { func (c *ConsulConfig) ApiConfig() (*consul.Config, error) {
config := consul.DefaultConfig() config := consul.DefaultConfig()

View File

@ -28,6 +28,9 @@ type TLSConfig struct {
// KeyFile is used to provide a TLS key that is used for serving TLS connections. // KeyFile is used to provide a TLS key that is used for serving TLS connections.
// Must be provided to serve TLS connections. // Must be provided to serve TLS connections.
KeyFile string `mapstructure:"key_file"` KeyFile string `mapstructure:"key_file"`
// VerifyHTTPSClient requires and verifies a client certificate for connections to the HTTPS API
VerifyHTTPSClient bool `mapstructure:"verify_https_client"`
} }
// Merge is used to merge two TLS configs together // Merge is used to merge two TLS configs together
@ -52,6 +55,8 @@ func (t *TLSConfig) Merge(b *TLSConfig) *TLSConfig {
if b.KeyFile != "" { if b.KeyFile != "" {
result.KeyFile = b.KeyFile result.KeyFile = b.KeyFile
} }
if b.VerifyHTTPSClient {
result.VerifyHTTPSClient = true
}
return &result return &result
} }
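
One detail worth noting in the Merge hunk above: the new boolean is sticky, so once any merged config fragment sets verify_https_client it stays enabled in the result. A tiny sketch of that semantics (import path as used earlier in this diff):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs/config"
)

func main() {
	base := &config.TLSConfig{EnableHTTP: true}
	override := &config.TLSConfig{VerifyHTTPSClient: true}

	merged := base.Merge(override)
	// Prints true: a later fragment cannot switch the flag back off.
	fmt.Println(merged.VerifyHTTPSClient)
}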

View File

@ -64,11 +64,15 @@ for target in $targets; do
;; ;;
"windows_386") "windows_386")
echo "==> Building windows 386..." echo "==> Building windows 386..."
CGO_ENABLED=1 GOARCH="386" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_386/nomad.exe" CGO_ENABLED=0 GOARCH="386" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_386/nomad.exe"
# Use the following if CGO is required
#CGO_ENABLED=1 CXX=i686-w64-mingw32-g++ CC=i686-w64-mingw32-gcc GOARCH="386" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_386/nomad.exe"
;; ;;
"windows_amd64") "windows_amd64")
echo "==> Building windows amd64..." echo "==> Building windows amd64..."
CGO_ENABLED=1 GOARCH="amd64" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_amd64/nomad.exe" CGO_ENABLED=0 GOARCH="amd64" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_amd64/nomad.exe"
# Use the following if CGO is required
#CGO_ENABLED=1 CXX=x86_64-w64-mingw32-g++ CC=x86_64-w64-mingw32-gcc GOARCH="amd64" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_amd64/nomad.exe"
;; ;;
"darwin_amd64") "darwin_amd64")
echo "==> Building darwin amd64..." echo "==> Building darwin amd64..."

View File

@ -185,7 +185,6 @@ type BackupFileReader struct {
// Read will attempt to read the security descriptor of the file. // Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
r := &BackupFileReader{f, includeSecurity, 0} r := &BackupFileReader{f, includeSecurity, 0}
runtime.SetFinalizer(r, func(r *BackupFileReader) { r.Close() })
return r return r
} }
@ -196,6 +195,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
if err != nil { if err != nil {
return 0, &os.PathError{"BackupRead", r.f.Name(), err} return 0, &os.PathError{"BackupRead", r.f.Name(), err}
} }
runtime.KeepAlive(r.f)
if bytesRead == 0 { if bytesRead == 0 {
return 0, io.EOF return 0, io.EOF
} }
@ -207,6 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
func (r *BackupFileReader) Close() error { func (r *BackupFileReader) Close() error {
if r.ctx != 0 { if r.ctx != 0 {
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
runtime.KeepAlive(r.f)
r.ctx = 0 r.ctx = 0
} }
return nil return nil
@ -223,7 +224,6 @@ type BackupFileWriter struct {
// Write() will attempt to restore the security descriptor from the stream. // Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
w := &BackupFileWriter{f, includeSecurity, 0} w := &BackupFileWriter{f, includeSecurity, 0}
runtime.SetFinalizer(w, func(w *BackupFileWriter) { w.Close() })
return w return w
} }
@ -234,6 +234,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
if err != nil { if err != nil {
return 0, &os.PathError{"BackupWrite", w.f.Name(), err} return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
} }
runtime.KeepAlive(w.f)
if int(bytesWritten) != len(b) { if int(bytesWritten) != len(b) {
return int(bytesWritten), errors.New("not all bytes could be written") return int(bytesWritten), errors.New("not all bytes could be written")
} }
@ -245,6 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
func (w *BackupFileWriter) Close() error { func (w *BackupFileWriter) Close() error {
if w.ctx != 0 { if w.ctx != 0 {
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
runtime.KeepAlive(w.f)
w.ctx = 0 w.ctx = 0
} }
return nil return nil
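
The pattern in the two hunks above, dropping the SetFinalizer calls and adding runtime.KeepAlive after each use of f.Fd(), guards against the *os.File becoming unreachable (and its finalizer closing the handle) while the raw handle is still in use inside a system call. A minimal, Windows-only sketch of the general idiom, independent of this package:

package main

import (
	"fmt"
	"os"
	"runtime"
	"syscall"
)

// readByHandle shows the shape of the fix: grab the raw handle, make the
// system call, then keep the owning *os.File reachable until the call returns.
func readByHandle(f *os.File, buf []byte) (int, error) {
	var done uint32
	err := syscall.ReadFile(syscall.Handle(f.Fd()), buf, &done, nil)
	// Without this, f could be collected right after the Fd() call above and
	// its finalizer could close the handle while ReadFile still uses it.
	runtime.KeepAlive(f)
	return int(done), err
}

func main() {
	f, err := os.Open("example.txt") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	buf := make([]byte, 64)
	n, err := readByHandle(f, buf)
	fmt.Println(n, err)
}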

View File

@ -7,6 +7,7 @@ import (
"io" "io"
"runtime" "runtime"
"sync" "sync"
"sync/atomic"
"syscall" "syscall"
"time" "time"
) )
@ -17,6 +18,12 @@ import (
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod //sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod
type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
const ( const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
@ -33,6 +40,8 @@ func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true } func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true } func (e *timeoutError) Temporary() bool { return true }
type timeoutChan chan struct{}
var ioInitOnce sync.Once var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle var ioCompletionPort syscall.Handle
@ -63,8 +72,16 @@ type win32File struct {
handle syscall.Handle handle syscall.Handle
wg sync.WaitGroup wg sync.WaitGroup
closing bool closing bool
readDeadline time.Time readDeadline deadlineHandler
writeDeadline time.Time writeDeadline deadlineHandler
}
type deadlineHandler struct {
setLock sync.Mutex
channel timeoutChan
channelLock sync.RWMutex
timer *time.Timer
timedout atomicBool
} }
// makeWin32File makes a new win32File from an existing file handle // makeWin32File makes a new win32File from an existing file handle
@ -79,7 +96,8 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
runtime.SetFinalizer(f, (*win32File).closeHandle) f.readDeadline.channel = make(timeoutChan)
f.writeDeadline.channel = make(timeoutChan)
return f, nil return f, nil
} }
@ -103,7 +121,6 @@ func (f *win32File) closeHandle() {
// Close closes a win32File. // Close closes a win32File.
func (f *win32File) Close() error { func (f *win32File) Close() error {
f.closeHandle() f.closeHandle()
runtime.SetFinalizer(f, nil)
return nil return nil
} }
@ -136,47 +153,47 @@ func ioCompletionProcessor(h syscall.Handle) {
// asyncIo processes the return value from ReadFile or WriteFile, blocking until // asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed. // the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, deadline time.Time, bytes uint32, err error) (int, error) { func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING { if err != syscall.ERROR_IO_PENDING {
f.wg.Done() f.wg.Done()
return int(bytes), err return int(bytes), err
} else { }
var r ioResult
wait := true
timedout := false
if f.closing { if f.closing {
cancelIoEx(f.handle, &c.o) cancelIoEx(f.handle, &c.o)
} else if !deadline.IsZero() { }
now := time.Now()
if !deadline.After(now) { var timeout timeoutChan
timedout = true if d != nil {
} else { d.channelLock.Lock()
timeout := time.After(deadline.Sub(now)) timeout = d.channel
d.channelLock.Unlock()
}
var r ioResult
select { select {
case r = <-c.ch: case r = <-c.ch:
wait = false
case <-timeout:
timedout = true
}
}
}
if timedout {
cancelIoEx(f.handle, &c.o)
}
if wait {
r = <-c.ch
}
err = r.err err = r.err
if err == syscall.ERROR_OPERATION_ABORTED { if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing { if f.closing {
err = ErrFileClosed err = ErrFileClosed
} else if timedout { }
}
case <-timeout:
cancelIoEx(f.handle, &c.o)
r = <-c.ch
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
err = ErrTimeout err = ErrTimeout
} }
} }
// runtime.KeepAlive is needed because c is passed via native
// code to ioCompletionProcessor; c must remain alive
// until the channel read is complete.
runtime.KeepAlive(c)
f.wg.Done() f.wg.Done()
return int(r.bytes), err return int(r.bytes), err
}
} }
// Read reads from a file handle. // Read reads from a file handle.
@ -185,9 +202,15 @@ func (f *win32File) Read(b []byte) (int, error) {
if err != nil { if err != nil {
return 0, err return 0, err
} }
if f.readDeadline.timedout.isSet() {
return 0, ErrTimeout
}
var bytes uint32 var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o) err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, f.readDeadline, bytes, err) n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
runtime.KeepAlive(b)
// Handle EOF conditions. // Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 { if err == nil && n == 0 && len(b) != 0 {
@ -205,17 +228,66 @@ func (f *win32File) Write(b []byte) (int, error) {
if err != nil { if err != nil {
return 0, err return 0, err
} }
if f.writeDeadline.timedout.isSet() {
return 0, ErrTimeout
}
var bytes uint32 var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o) err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
return f.asyncIo(c, f.writeDeadline, bytes, err) n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
runtime.KeepAlive(b)
return n, err
} }
func (f *win32File) SetReadDeadline(t time.Time) error { func (f *win32File) SetReadDeadline(deadline time.Time) error {
f.readDeadline = t return f.readDeadline.set(deadline)
return nil
} }
func (f *win32File) SetWriteDeadline(t time.Time) error { func (f *win32File) SetWriteDeadline(deadline time.Time) error {
f.writeDeadline = t return f.writeDeadline.set(deadline)
}
func (f *win32File) Flush() error {
return syscall.FlushFileBuffers(f.handle)
}
func (d *deadlineHandler) set(deadline time.Time) error {
d.setLock.Lock()
defer d.setLock.Unlock()
if d.timer != nil {
if !d.timer.Stop() {
<-d.channel
}
d.timer = nil
}
d.timedout.setFalse()
select {
case <-d.channel:
d.channelLock.Lock()
d.channel = make(chan struct{})
d.channelLock.Unlock()
default:
}
if deadline.IsZero() {
return nil
}
timeoutIO := func() {
d.timedout.setTrue()
close(d.channel)
}
now := time.Now()
duration := deadline.Sub(now)
if deadline.After(now) {
// Deadline is in the future, set a timer to wait
d.timer = time.AfterFunc(duration, timeoutIO)
} else {
// Deadline is in the past. Cancel all pending IO now.
timeoutIO()
}
return nil return nil
} }
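
Taken together, the deadline handler gives win32File net.Conn-style deadline semantics: an outstanding read is cancelled when the timer fires and returns ErrTimeout, and further reads keep failing until the deadline is reset. A hedged usage sketch against the package's public pipe API (the pipe name is a placeholder and assumes a server is already listening; import path as vendored upstream):

package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Placeholder pipe name; DialPipe with a nil timeout uses the default.
	conn, err := winio.DialPipe(`\\.\pipe\demo`, nil)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	// Arm the read deadline; a Read blocked past this point is cancelled.
	conn.SetReadDeadline(time.Now().Add(50 * time.Millisecond))

	buf := make([]byte, 16)
	if _, err := conn.Read(buf); err != nil {
		fmt.Println("read failed (expected to be ErrTimeout here):", err)
	}

	// A zero deadline clears the timeout so later reads can block again.
	conn.SetReadDeadline(time.Time{})
}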

View File

@ -4,6 +4,7 @@ package winio
import ( import (
"os" "os"
"runtime"
"syscall" "syscall"
"unsafe" "unsafe"
) )
@ -28,6 +29,7 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
} }
runtime.KeepAlive(f)
return bi, nil return bi, nil
} }
@ -36,6 +38,7 @@ func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
} }
runtime.KeepAlive(f)
return nil return nil
} }
@ -52,5 +55,6 @@ func GetFileID(f *os.File) (*FileIDInfo, error) {
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
} }
runtime.KeepAlive(f)
return fileID, nil return fileID, nil
} }

View File

@ -18,10 +18,12 @@ import (
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW //sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys copyMemory(dst uintptr, src uintptr, length uint32) = RtlCopyMemory
type securityAttributes struct { type securityAttributes struct {
Length uint32 Length uint32
SecurityDescriptor *byte SecurityDescriptor uintptr
InheritHandle uint32 InheritHandle uint32
} }
@ -87,7 +89,11 @@ func (f *win32MessageBytePipe) CloseWrite() error {
if f.writeClosed { if f.writeClosed {
return errPipeWriteClosed return errPipeWriteClosed
} }
_, err := f.win32File.Write(nil) err := f.win32File.Flush()
if err != nil {
return err
}
_, err = f.win32File.Write(nil)
if err != nil { if err != nil {
return err return err
} }
@ -227,12 +233,15 @@ func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig,
mode |= cPIPE_TYPE_MESSAGE mode |= cPIPE_TYPE_MESSAGE
} }
var sa securityAttributes sa := &securityAttributes{}
sa.Length = uint32(unsafe.Sizeof(sa)) sa.Length = uint32(unsafe.Sizeof(*sa))
if securityDescriptor != nil { if securityDescriptor != nil {
sa.SecurityDescriptor = &securityDescriptor[0] len := uint32(len(securityDescriptor))
sa.SecurityDescriptor = localAlloc(0, len)
defer localFree(sa.SecurityDescriptor)
copyMemory(sa.SecurityDescriptor, uintptr(unsafe.Pointer(&securityDescriptor[0])), len)
} }
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, &sa) h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
if err != nil { if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err} return 0, &os.PathError{Op: "open", Path: path, Err: err}
} }
@ -359,7 +368,7 @@ func connectPipe(p *win32File) error {
return err return err
} }
err = connectNamedPipe(p.handle, &c.o) err = connectNamedPipe(p.handle, &c.o)
_, err = p.asyncIo(c, time.Time{}, 0, err) _, err = p.asyncIo(c, nil, 0, err)
if err != nil && err != cERROR_PIPE_CONNECTED { if err != nil && err != cERROR_PIPE_CONNECTED {
return err return err
} }

View File

@ -11,6 +11,31 @@ import (
var _ unsafe.Pointer var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
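
The point of the generated errnoErr helper above is to hand back a pre-boxed error for the hot-path ERROR_IO_PENDING value instead of allocating a fresh interface value on every overlapped I/O call. A tiny standalone sketch of the difference, using only the standard library:

package main

import (
	"fmt"
	"syscall"
)

// Boxed once at package init, so returning it later allocates nothing.
var errIOPending error = syscall.Errno(997) // 997 == ERROR_IO_PENDING

func boxed(e syscall.Errno) error {
	if e == 997 {
		return errIOPending // hot path: reuse the cached interface value
	}
	return e // cold path: converting Errno to error boxes a new value
}

func main() {
	// Same underlying value, but the hot path reuses one allocation forever.
	fmt.Println(boxed(syscall.Errno(997)) == errIOPending) // true
}
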
var ( var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modwinmm = windows.NewLazySystemDLL("winmm.dll") modwinmm = windows.NewLazySystemDLL("winmm.dll")
@ -27,6 +52,8 @@ var (
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procRtlCopyMemory = modkernel32.NewProc("RtlCopyMemory")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@ -51,7 +78,7 @@ func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -64,7 +91,7 @@ func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintpt
newport = syscall.Handle(r0) newport = syscall.Handle(r0)
if newport == 0 { if newport == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -76,7 +103,7 @@ func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr,
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -88,7 +115,7 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -106,7 +133,7 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -128,7 +155,7 @@ func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances
handle = syscall.Handle(r0) handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle { if handle == syscall.InvalidHandle {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -150,7 +177,7 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttribute
handle = syscall.Handle(r0) handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle { if handle == syscall.InvalidHandle {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -171,7 +198,7 @@ func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -183,7 +210,7 @@ func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSiz
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -195,7 +222,7 @@ func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *u
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -203,6 +230,17 @@ func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *u
return return
} }
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
ptr = uintptr(r0)
return
}
func copyMemory(dst uintptr, src uintptr, length uint32) {
syscall.Syscall(procRtlCopyMemory.Addr(), 3, uintptr(dst), uintptr(src), uintptr(length))
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16 var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName) _p0, err = syscall.UTF16PtrFromString(accountName)
@ -216,7 +254,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -228,7 +266,7 @@ func convertSidToStringSid(sid *byte, str **uint16) (err error) {
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -249,7 +287,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -261,7 +299,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -284,7 +322,7 @@ func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte,
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -296,7 +334,7 @@ func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, si
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -315,7 +353,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
success = r0 != 0 success = r0 != 0
if true { if true {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -327,7 +365,7 @@ func impersonateSelf(level uint32) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -339,7 +377,7 @@ func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -357,7 +395,7 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool,
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -389,7 +427,7 @@ func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -410,7 +448,7 @@ func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -431,7 +469,7 @@ func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint1
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -459,7 +497,7 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }
@ -487,7 +525,7 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 { if r1 == 0 {
if e1 != 0 { if e1 != 0 {
err = error(e1) err = errnoErr(e1)
} else { } else {
err = syscall.EINVAL err = syscall.EINVAL
} }

872 vendor/github.com/hashicorp/consul/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,872 @@
## 0.8.1 (April 17, 2017)
FEATURES:
IMPROVEMENTS:
* agent: Node IDs derived from host information are now hashed to prevent things like common server hardware from generating IDs with a common prefix across nodes. [GH-2884]
* agent: Added new `-disable-host-node-id` CLI flag and `disable_host_node_id` config option to the Consul agent to prevent it from using information from the host when generating a node ID. This will result in a random node ID, which is useful when running multiple Consul agents on the same host for testing purposes. Having this built-in eases configuring a random node ID when running in containers. [GH-2877]
* agent: Removed useless "==> Caught signal: broken pipe" logging since that often results from problems sending telemetry or broken incoming client connections; operators don't need to be alerted to these. [GH-2768]
* cli: Added TLS options for setting the client/CA certificates to use when communicating with Consul. These can be provided through environment variables or command line flags. [GH-2914]
* build: Consul is now built with Go 1.8.1. [GH-2888]
* ui: Updates Consul assets to new branding. [GH-2898]
BUG FIXES:
* api: Added missing Raft index fields to AgentService and Node structures. [GH-2882]
* server: Fixed an issue where flood joins would not work with IPv6 addresses. [GH-2878]
* server: Fixed an issue where electing a 0.8.x leader during an upgrade would cause a panic in older servers. [GH-2889]
* server: Fixed an issue where tracking of leadership changes could become incorrect when changes occurred very rapidly. This could manifest as a panic in Autopilot, but could have caused other issues with multiple leader management routines running simultaneously. [GH-2896]
* server: Fixed a panic when checking ACLs on a session that doesn't exist. [GH-2624]
## 0.8.0 (April 5, 2017)
BREAKING CHANGES:
* **Command-Line Interface RPC Deprecation:** The RPC client interface has been removed. All CLI commands that used RPC and the `-rpc-addr` flag to communicate with Consul have been converted to use the HTTP API and the appropriate flags for it, and the `rpc` field has been removed from the port and address binding configs. You will need to remove these fields from your config files and update any scripts that passed a custom `-rpc-addr` to the following commands: `force-leave`, `info`, `join`, `keyring`, `leave`, `members`, `monitor`, `reload`
* **Version 8 ACLs Are Now Opt-Out:** The [`acl_enforce_version_8`](https://www.consul.io/docs/agent/options.html#acl_enforce_version_8) configuration now defaults to `true` to enable [full version 8 ACL support](https://www.consul.io/docs/internals/acl.html#version_8_acls) by default. If you are upgrading an existing cluster with ACLs enabled, you will need to set this to `false` during the upgrade on **both Consul agents and Consul servers**. Version 8 ACLs were also changed so that [`acl_datacenter`](https://www.consul.io/docs/agent/options.html#acl_datacenter) must be set on agents in order to enable the agent-side enforcement of ACLs. This makes for a smoother experience in clusters where ACLs aren't enabled at all, but where the agents would have to wait to contact a Consul server before learning that. [GH-2844]
* **Remote Exec Is Now Opt-In:** The default for [`disable_remote_exec`](https://www.consul.io/docs/agent/options.html#disable_remote_exec) was changed to "true", so now operators need to opt-in to having agents support running commands remotely via [`consul exec`](/docs/commands/exec.html). [GH-2854]
* **Raft Protocol Compatibility:** When upgrading to Consul 0.8.0 from a version lower than 0.7.0, users will need to
set the [`-raft-protocol`](https://www.consul.io/docs/agent/options.html#_raft_protocol) option to 1 in order to maintain backwards compatibility with the old servers during the upgrade. See [Upgrading Specific Versions](https://www.consul.io/docs/upgrade-specific.html) guide for more details.
FEATURES:
* **Autopilot:** A set of features has been added to allow for automatic operator-friendly management of Consul servers. For more information about Autopilot, see the [Autopilot Guide](https://www.consul.io/docs/guides/autopilot.html).
- **Dead Server Cleanup:** Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections.
- **Server Health Checking:** An internal health check has been added to track the stability of servers. The thresholds of this health check are tunable as part of the [Autopilot configuration](https://www.consul.io/docs/agent/options.html#autopilot) and the status can be viewed through the [`/v1/operator/autopilot/health`](https://www.consul.io/docs/agent/http/operator.html#autopilot-health) HTTP endpoint.
- **New Server Stabilization:** When a new server is added to the cluster, there will be a waiting period where it must be healthy and stable for a certain amount of time before being promoted to a full, voting member. This threshold can be configured using the new [`server_stabilization_time`](https://www.consul.io/docs/agent/options.html#server_stabilization_time) setting.
- **Advanced Redundancy:** (Consul Enterprise) A new [`-non-voting-server`](https://www.consul.io/docs/agent/options.html#_non_voting_server) option flag has been added for Consul servers to configure a server that does not participate in the Raft quorum. This can be used to add read scalability to a cluster in cases where a high volume of reads to servers are needed, but non-voting servers can be lost without causing an outage. There's also a new [`redundancy_zone_tag`](https://www.consul.io/docs/agent/options.html#redundancy_zone_tag) configuration that allows Autopilot to manage separating servers into zones for redundancy. Only one server in each zone can be a voting member at one time. This helps when Consul servers are managed with automatic replacement with a system like a resource scheduler or auto-scaling group. Extra non-voting servers in each zone will be available as hot standbys (that help with read-scaling) that can be quickly promoted into service when the voting server in a zone fails.
- **Upgrade Orchestration:** (Consul Enterprise) Autopilot will automatically orchestrate an upgrade strategy for Consul servers where it will initially add newer versions of Consul servers as non-voters, wait for a full set of newer versioned servers to be added, and then gradually swap into service as voters and swap out older versioned servers to non-voters. This allows operators to safely bring up new servers, wait for the upgrade to be complete, and then terminate the old servers.
* **Network Areas:** (Consul Enterprise) A new capability has been added which allows operators to define network areas that join together two Consul datacenters. Unlike Consul's WAN feature, network areas use just the server RPC port for communication, and pairwise relationships can be made between arbitrary datacenters, so not all servers need to be fully connected. This allows for complex topologies among Consul datacenters like hub/spoke and more general trees. See the [Network Areas Guide](https://www.consul.io/docs/guides/areas.html) for more details.
* **WAN Soft Fail:** Request routing between servers in the WAN is now more robust by treating Serf failures as advisory but not final. This means that if there are issues between some subset of the servers in the WAN, Consul will still be able to route RPC requests as long as RPCs are actually still working. Prior to WAN Soft Fail, any datacenters having connectivity problems on the WAN would mean that all DCs might potentially stop sending RPCs to those datacenters. [GH-2801]
* **WAN Join Flooding:** A new routine was added that looks for Consul servers in the LAN and makes sure that they are joined into the WAN as well. This catches up newly-added servers onto the WAN as soon as they join the LAN, keeping them in sync automatically. [GH-2801]
* **Validate command:** To provide consistency across our products, the `configtest` command has been deprecated and replaced with the `validate` command (to match Nomad and Terraform). The `configtest` command will be removed in Consul 0.9. [GH-2732]
IMPROVEMENTS:
* agent: Fixed a missing case where gossip would stop flowing to dead nodes for a short while. [GH-2722]
* agent: Changed agent to seed Go's random number generator. [GH-2722]
* agent: Serf snapshots no longer have the executable bit set on the file. [GH-2722]
* agent: Consul is now built with Go 1.8. [GH-2752]
* agent: Updated aws-sdk-go version (used for EC2 auto join) for Go 1.8 compatibility. [GH-2755]
* agent: User-supplied node IDs are now normalized to lower-case. [GH-2798]
* agent: Added checks to enforce uniqueness of agent node IDs at cluster join time and when registering with the catalog. [GH-2832]
* cli: Standardized handling of CLI options for connecting to the Consul agent. This makes sure that the same set of flags and environment variables works in all CLI commands (see https://www.consul.io/docs/commands/index.html#environment-variables). [GH-2717]
* cli: Updated go-cleanhttp library for better HTTP connection handling between CLI commands and the Consul agent (tunes reuse settings). [GH-2735]
* cli: The `operator raft` subcommand has had its two modes split into the `list-peers` and `remove-peer` subcommands. The old flags for these will continue to work for backwards compatibility, but will be removed in Consul 0.9.
* cli: Added an `-id` flag to the `operator raft remove-peer` command to allow removing a peer by ID. [GH-2847]
* dns: Allows the `.service` tag to be optional in RFC 2782 lookups. [GH-2690]
* server: Changed the internal `EnsureRegistration` RPC endpoint to prevent registering checks that aren't associated with the top-level node being registered. [GH-2846]
BUG FIXES:
* agent: Fixed an issue with `consul watch` not working when http was listening on a unix socket. [GH-2385]
* agent: Fixed an issue where checks and services could not sync deregister operations back to the catalog when version 8 ACL support is enabled. [GH-2818]
* agent: Fixed an issue where agents could use the ACL token registered with a service when registering checks for the same service that were registered with a different ACL token. [GH-2829]
* cli: Fixed `consul kv` commands not reading the `CONSUL_HTTP_TOKEN` environment variable. [GH-2566]
* cli: Fixed an issue where prefixing an address with a protocol (such as 'http://' or 'https://') in `-http-addr` or `CONSUL_HTTP_ADDR` would give an error.
* cli: Fixed an issue where error messages would get printed to stdout instead of stderr. [GH-2548]
* server: Fixed an issue with version 8 ACLs where servers couldn't deregister nodes from the catalog during reconciliation. [GH-2792] This fix was generalized and applied to registering nodes as well. [GH-2826]
* server: Fixed an issue where servers could temporarily roll back changes to a node's metadata or tagged addresses when making updates to the node's health checks. [GH-2826]
* server: Fixed an issue where the service name `consul` was not subject to service ACL policies with version 8 ACLs enabled. [GH-2816]
## 0.7.5 (February 15, 2017)
BUG FIXES:
* server: Fixed a rare but serious issue where Consul servers could panic when performing a large delete operation followed by a specific sequence of other updates to related parts of the state store (affects KV, sessions, prepared queries, and the catalog). [GH-2724]
## 0.7.4 (February 6, 2017)
IMPROVEMENTS:
* agent: Integrated gopsutil library to use built in host UUID as node ID, if available, instead of a randomly generated UUID. This makes it easier for other applications on the same host to generate the same node ID without coordinating with Consul. [GH-2697]
* agent: Added a configuration option, `tls_min_version`, for setting the minimum allowed TLS version used for the HTTP API and RPC. [GH-2699]
* agent: Added a `relay-factor` option to keyring operations to allow nodes to relay their response through N randomly-chosen other nodes in the cluster. [GH-2704]
* build: Consul is now built with Go 1.7.5. [GH-2682]
* dns: Add ability to lookup Consul agents by either their Node ID or Node Name through the node interface (e.g. DNS `(node-id|node-name).node.consul`). [GH-2702]
BUG FIXES:
* dns: Fixed an issue where SRV lookups for services on a node registered with non-IP addresses were missing the CNAME record in the additional section of the response. [GH-2695]
## 0.7.3 (January 26, 2017)
FEATURES:
* **KV Import/Export CLI:** `consul kv export` and `consul kv import` can be used to move parts of the KV tree between disconnected consul clusters, using JSON as the intermediate representation. [GH-2633]
* **Node Metadata:** Support for assigning user-defined metadata key/value pairs to nodes has been added. This can be viewed when looking up node info, and can be used to filter the results of various catalog and health endpoints. For more information, see the [Catalog](https://www.consul.io/docs/agent/http/catalog.html), [Health](https://www.consul.io/docs/agent/http/health.html), and [Prepared Query](https://www.consul.io/docs/agent/http/query.html) endpoint documentation, as well as the [Node Meta](https://www.consul.io/docs/agent/options.html#_node_meta) section of the agent configuration. [GH-2654]
* **Node Identifiers:** Consul agents can now be configured with a unique identifier, or they will generate one at startup that will persist across agent restarts. This identifier is designed to represent a node across all time, even if the name or address of the node changes. Identifiers are currently only exposed in node-related endpoints, but they will be used in future versions of Consul to help manage Consul servers and the Raft quorum in a more robust manner, as the quorum is currently tracked via addresses, which can change. [GH-2661]
* **Improved Blocking Queries:** Consul's [blocking query](https://www.consul.io/api/index.html#blocking-queries) implementation was improved to provide a much more fine-grained mechanism for detecting changes. For example, in previous versions of Consul blocking to wait on a change to a specific service would result in a wake up if any service changed. Now, wake ups are scoped to the specific service being watched, if possible. This support has been added to all endpoints that support blocking queries, nothing new is required to take advantage of this feature. [GH-2671]
* **GCE auto-discovery:** New `-retry-join-gce` configuration options added to allow bootstrapping by automatically discovering Google Cloud instances with a given tag at startup. [GH-2570]
IMPROVEMENTS:
* build: Consul is now built with Go 1.7.4. [GH-2676]
* cli: `consul kv get` now has a `-base64` flag to base 64 encode the value. [GH-2631]
* cli: `consul kv put` now has a `-base64` flag for setting values which are base 64 encoded. [GH-2632]
* ui: Added a notice that JS is required when viewing the web UI with JS disabled. [GH-2636]
BUG FIXES:
* agent: Redacted the AWS access key and secret key ID from the /v1/agent/self output so they are not disclosed. [GH-2677]
* agent: Fixed a rare startup panic due to a Raft/Serf race condition. [GH-1899]
* cli: Fixed a panic when an empty quoted argument was given to `consul kv put`. [GH-2635]
* tests: Fixed a race condition with check mock's map usage. [GH-2578]
## 0.7.2 (December 19, 2016)
FEATURES:
* **Keyring API:** A new `/v1/operator/keyring` HTTP endpoint was added that allows for performing operations such as list, install, use, and remove on the encryption keys in the gossip keyring. See the [Keyring Endpoint](https://www.consul.io/docs/agent/http/operator.html#keyring) for more details. [GH-2509]
* **Monitor API:** A new `/v1/agent/monitor` HTTP endpoint was added to allow for viewing streaming log output from the agent, similar to the `consul monitor` command. See the [Monitor Endpoint](https://www.consul.io/docs/agent/http/agent.html#agent_monitor) for more details. [GH-2511]
* **Reload API:** A new `/v1/agent/reload` HTTP endpoint was added for triggering a reload of the agent's configuration. See the [Reload Endpoint](https://www.consul.io/docs/agent/http/agent.html#agent_reload) for more details. [GH-2516]
* **Leave API:** A new `/v1/agent/leave` HTTP endpoint was added for causing an agent to gracefully shutdown and leave the cluster (previously, only `force-leave` was present in the HTTP API). See the [Leave Endpoint](https://www.consul.io/docs/agent/http/agent.html#agent_leave) for more details. [GH-2516]
* **Bind Address Templates (beta):** Consul agents now allow [go-sockaddr/template](https://godoc.org/github.com/hashicorp/go-sockaddr/template) syntax to be used for any bind address configuration (`advertise_addr`, `bind_addr`, `client_addr`, and others). This allows for easy creation of immutable images for Consul that can fetch their own address based on an interface name, network CIDR, address family from an actual RFC number, and many other possible schemes. This feature is in beta and we may tweak the template syntax before final release, but we encourage the community to try this and provide feedback. [GH-2563]
* **Complete ACL Coverage (beta):** Consul 0.8 will feature complete ACL coverage for all of Consul. To ease the transition to the new policies, a beta version of complete ACL support was added to help with testing and migration to the new features. Please see the [ACLs Internals Guide](https://www.consul.io/docs/internals/acl.html#version_8_acls) for more details. [GH-2594, GH-2592, GH-2590]
IMPROVEMENTS:
* agent: Defaults to `?pretty` JSON for HTTP API requests when in `-dev` mode. [GH-2518]
* agent: Updated Circonus metrics library and added new Circonus configuration options for Consul for customizing check display name and tags. [GH-2555]
* agent: Added a checksum to UDP gossip messages to guard against packet corruption. [GH-2574]
* agent: Check whether a snapshot needs to be taken more often (every 5 seconds instead of 2 minutes) to keep the raft file smaller and to avoid doing huge truncations when writing lots of entries very quickly. [GH-2591]
* agent: Allow gossiping to suspected/recently dead nodes. [GH-2593]
* agent: Changed the gossip suspicion timeout to grow smoothly as the number of nodes grows. [GH-2593]
* agent: Added a deprecation notice for Atlas features to the CLI and docs. [GH-2597]
* agent: Give a better error message when the given data-dir is not a directory. [GH-2529]
BUG FIXES:
* agent: Fixed a panic when SIGPIPE signal was received. [GH-2404]
* api: Added missing Raft index fields to `CatalogService` structure. [GH-2366]
* api: Added missing notes field to `AgentServiceCheck` structure. [GH-2336]
* api: Changed type of `AgentServiceCheck.TLSSkipVerify` from `string` to `bool`. [GH-2530]
* api: Added new `HealthChecks.AggregatedStatus()` method that makes it easy to get an overall health status from a list of checks. [GH-2544]
* api: Changed type of `KVTxnOp.Verb` from `string` to `KVOp`. [GH-2531]
* cli: Fixed an issue with the `consul kv put` command where a negative value would be interpreted as an argument to read from standard input. [GH-2526]
* ui: Fixed an issue where extra commas would be shown around service tags. [GH-2340]
* ui: Customized Bootstrap config to avoid missing font file references. [GH-2485]
* ui: Removed "Deregister" button as removing nodes from the catalog isn't a common operation and leads to lots of user confusion. [GH-2541]
## 0.7.1 (November 10, 2016)
BREAKING CHANGES:
* Child process reaping support has been removed, along with the `reap` configuration option. Reaping is also done via [dumb-init](https://github.com/Yelp/dumb-init) in the [Consul Docker image](https://github.com/hashicorp/docker-consul), so removing it from Consul itself simplifies the code and eases future maintenance for Consul. If you are running Consul as PID 1 in a container you will need to arrange for a wrapper process to reap child processes. [GH-1988]
* The default for `max_stale` has been increased to a near-indefinite threshold (10 years) to allow DNS queries to continue to be served in the event of a long outage with no leader. A new telemetry counter has also been added at `consul.dns.stale_queries` to track when agents serve DNS queries that are over a certain staleness (>5 seconds). [GH-2481]
* The api package's `PreparedQuery.Delete()` method now takes `WriteOptions` instead of `QueryOptions`. [GH-2417]
FEATURES:
* **Key/Value Store Command Line Interface:** New `consul kv` commands were added for easy access to all basic key/value store operations. [GH-2360]
* **Snapshot/Restore:** A new /v1/snapshot HTTP endpoint and corresponding set of `consul snapshot` commands were added for easy point-in-time snapshots for disaster recovery. Snapshots include all state managed by Consul's Raft [consensus protocol](/docs/internals/consensus.html), including Key/Value Entries, Service Catalog, Prepared Queries, Sessions, and ACLs. Snapshots can be restored on the fly into a completely fresh cluster. [GH-2396]
* **AWS auto-discovery:** New `-retry-join-ec2` configuration options added to allow bootstrapping by automatically discovering AWS instances with a given tag key/value at startup. [GH-2459]
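The `consul kv` commands sit on top of the same key/value operations the Go `api` package already exposes. A minimal sketch of the equivalent programmatic calls, assuming a locally reachable agent and a made-up key:
```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Write a value (key and value are illustrative), like `consul kv put`.
	if _, err := kv.Put(&api.KVPair{Key: "app/config/max_conns", Value: []byte("100")}, nil); err != nil {
		log.Fatal(err)
	}

	// Read it back, like `consul kv get`; pair is nil if the key is absent.
	pair, _, err := kv.Get("app/config/max_conns", nil)
	if err != nil {
		log.Fatal(err)
	}
	if pair != nil {
		fmt.Printf("%s = %s\n", pair.Key, pair.Value)
	}
}
```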
IMPROVEMENTS:
* api: All session options can now be set when using `api.Lock()`. [GH-2372]
* agent: Added the ability to bind Serf WAN and LAN to different interfaces than the general bind address. [GH-2007]
* agent: Added a new `tls_skip_verify` configuration option for HTTP checks. [GH-1984]
* build: Consul is now built with Go 1.7.3. [GH-2281]
BUG FIXES:
* agent: Fixed a Go race issue with log buffering at startup. [GH-2262]
* agent: Fixed a panic during anti-entropy sync for services and checks. [GH-2125]
* agent: Fixed an issue on Windows where "wsarecv" errors were logged when CLI commands accessed the RPC interface. [GH-2356]
* agent: Syslog initialization will now retry on errors for up to 60 seconds to avoid a race condition at system startup. [GH-1610]
* agent: Fixed a panic when both -dev and -bootstrap-expect flags were provided. [GH-2464]
* agent: Added a retry with backoff when a session fails to invalidate after expiring. [GH-2435]
* agent: Fixed an issue where Consul would fail to start because of leftover malformed check/service state files. [GH-1221]
* agent: Fixed agent crashes on macOS Sierra by upgrading Go. [GH-2407, GH-2281]
* agent: Log a warning instead of success when attempting to deregister a nonexistent service. [GH-2492]
* api: Trim leading slashes from keys/prefixes when querying KV endpoints to avoid a bug with redirects in Go 1.7 (golang/go#4800). [GH-2403]
* dns: Fixed external services that pointed to consul addresses (CNAME records) not resolving to A-records. [GH-1228]
* dns: Fixed an issue with SRV lookups where the service address was different from the node's. [GH-832]
* dns: Fixed an issue where truncated records from a recursor query were improperly reported as errors. [GH-2384]
* server: Fixed the port numbers in the sample JSON inside peers.info. [GH-2391]
* server: Squashes ACL datacenter name to lower case and checks for proper formatting at startup. [GH-2059, GH-1778, GH-2478]
* ui: Fixed an XSS issue with the display of sessions and ACLs in the web UI. [GH-2456]
## 0.7.0 (September 14, 2016)
BREAKING CHANGES:
* The default behavior of `leave_on_terminate` and `skip_leave_on_interrupt` is now dependent on whether or not the agent is acting as a server or client. When Consul is started as a server the defaults for these are `false` and `true`, respectively, which means that you have to explicitly configure a server to leave the cluster. When Consul is started as a client the defaults are the opposite, which means by default, clients will leave the cluster if shut down or interrupted. [GH-1909] [GH-2320]
* The `allow_stale` configuration for DNS queries to the Consul agent now defaults to `true`, allowing for better utilization of available Consul servers and higher throughput at the expense of weaker consistency. This is almost always an acceptable tradeoff for DNS queries, but this can be reconfigured to use the old default behavior if desired. [GH-2315]
* Output from HTTP checks is truncated to 4k when stored on the servers, similar to script check output. [GH-1952]
* Consul's Go API client will now send ACL tokens using HTTP headers instead of query parameters, requiring Consul 0.6.0 or later. [GH-2233]
* Removed support for protocol version 1, so Consul 0.7 is no longer compatible with Consul versions prior to 0.3. [GH-2259]
* The Raft peers information in `consul info` has changed format and includes information about the suffrage of a server, which will be used in future versions of Consul. [GH-2222]
* New [`translate_wan_addrs`](https://www.consul.io/docs/agent/options.html#translate_wan_addrs) behavior from [GH-2118] translates addresses in HTTP responses and could break clients that are expecting local addresses. A new `X-Consul-Translate-Addresses` header was added to allow clients to detect if translation is enabled for HTTP responses, and a "lan" tag was added to `TaggedAddresses` for clients that need the local address regardless of translation (see the Go sketch after this list). [GH-2280]
* The behavior of the `peers.json` file is different in this version of Consul. This file won't normally be present and is used only during outage recovery. Be sure to read the updated [Outage Recovery Guide](https://www.consul.io/docs/guides/outage.html) for details. [GH-2222]
* Consul's default Raft timing is now set to work more reliably on lower-performance servers, which allows small clusters to use lower cost compute at the expense of reduced performance for failed leader detection and leader elections. You will need to configure Consul to get the same performance as before. See the new [Server Performance](https://www.consul.io/docs/guides/performance.html) guide for more details. [GH-2303]
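A minimal Go sketch of reading the catalog's `TaggedAddresses` directly, for clients that want a specific address regardless of translation; the "lan" and "wan" tag names follow the note above, and a locally reachable agent is assumed:
```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List catalog nodes and print the tagged addresses; the "lan" and
	// "wan" entries remain available even when translation is enabled.
	nodes, _, err := client.Catalog().Nodes(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Printf("%s lan=%s wan=%s\n", n.Node, n.TaggedAddresses["lan"], n.TaggedAddresses["wan"])
	}
}
```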
FEATURES:
* **Transactional Key/Value API:** A new `/v1/txn` API was added that allows for atomic updates to and fetches from multiple entries in the key/value store inside of an atomic transaction. This includes conditional updates based on obtaining locks, and all other key/value store operations. See the [Key/Value Store Endpoint](https://www.consul.io/docs/agent/http/kv.html#txn) for more details. [GH-2028]
* **Native ACL Replication:** Added a built-in full replication capability for ACLs. Non-ACL datacenters can now replicate the complete ACL set locally to their state store and fall back to that if there's an outage. Additionally, this provides a good way to make a backup ACL datacenter, or to migrate the ACL datacenter to a different one. See the [ACL Internals Guide](https://www.consul.io/docs/internals/acl.html#replication) for more details. [GH-2237]
* **Server Connection Rebalancing:** Consul agents will now periodically reconnect to available Consul servers in order to redistribute their RPC query load. Consul clients will, by default, attempt to establish a new connection every 120s to 180s unless the size of the cluster is sufficiently large. The rate at which agents begin to query new servers is proportional to the size of the Consul cluster (servers should never receive more than 64 new connections per second per Consul server as a result of rebalancing). Clusters in stable environments that use `allow_stale` should see a more even distribution of query load across all of their Consul servers. [GH-1743]
* **Raft Updates and Consul Operator Interface:** This version of Consul upgrades to "stage one" of the v2 HashiCorp Raft library. This version offers improved handling of cluster membership changes and recovery after a loss of quorum. This version also provides a foundation for new features that will appear in future Consul versions once the remainder of the v2 library is complete. [GH-2222] <br> Consul's default Raft timing is now set to work more reliably on lower-performance servers, which allows small clusters to use lower cost compute at the expense of reduced performance for failed leader detection and leader elections. You will need to configure Consul to get the same performance as before. See the new [Server Performance](https://www.consul.io/docs/guides/performance.html) guide for more details. [GH-2303] <br> Servers will now abort bootstrapping if they detect an existing cluster with configured Raft peers. This will help prevent safe but spurious leader elections when introducing new nodes with `bootstrap_expect` enabled into an existing cluster. [GH-2319] <br> Added new `consul operator` command, HTTP endpoint, and associated ACL to allow Consul operators to view and update the Raft configuration. This allows a stale server to be removed from the Raft peers without requiring downtime and peers.json recovery file use. See the new [Consul Operator Command](https://www.consul.io/docs/commands/operator.html) and the [Consul Operator Endpoint](https://www.consul.io/docs/agent/http/operator.html) for details, as well as the updated [Outage Recovery Guide](https://www.consul.io/docs/guides/outage.html) (a Go sketch of the operator API follows this list). [GH-2312]
* **Serf Lifeguard Updates:** Implemented a new set of feedback controls for the gossip layer that help prevent degraded nodes that can't meet the soft real-time requirements from erroneously causing `serfHealth` flapping in other, healthy nodes. This feature tunes itself automatically and requires no configuration. [GH-2101]
* **Prepared Query Near Parameter:** Prepared queries support baking in a new `Near` sorting parameter. This allows results to be sorted by network round trip time based on a static node, or based on the round trip time from the Consul agent where the request originated. This can be used to find a co-located service instance if one is available, with a transparent fallback to the next best alternate instance otherwise. [GH-2137]
* **Automatic Service Deregistration:** Added a new `deregister_critical_service_after` timeout field for health checks which will cause the service associated with that check to get deregistered if the check is critical for longer than the timeout. This is useful for cleanup of health checks registered natively by applications, or in other situations where services may not always be cleanly shutdown. [GH-679]
* **WAN Address Translation Everywhere:** Extended the [`translate_wan_addrs`](https://www.consul.io/docs/agent/options.html#translate_wan_addrs) config option to also translate node addresses in HTTP responses, making it easy to use this feature from non-DNS clients. [GH-2118]
* **RPC Retries:** Consul will now retry RPC calls that result in "no leader" errors for up to 5 seconds. This allows agents to ride out leader elections with a delayed response vs. an error. [GH-2175]
* **Circonus Telemetry Support:** Added support for Circonus as a telemetry destination. [GH-2193]
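A minimal Go sketch of the operator API mentioned above for inspecting the Raft configuration; `RaftGetConfiguration` and the `RaftServer` fields appear in the vendored `api` package later in this diff, and a locally reachable agent is assumed:
```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Read the current Raft peer set; with ACLs enabled this requires
	// operator read privileges.
	cfg, err := client.Operator().RaftGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range cfg.Servers {
		fmt.Printf("%s %s leader=%v voter=%v\n", s.Node, s.Address, s.Leader, s.Voter)
	}
}
```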
IMPROVEMENTS:
* agent: Reap time for failed nodes is now configurable via new `reconnect_timeout` and `reconnect_timeout_wan` config options ([use with caution](https://www.consul.io/docs/agent/options.html#reconnect_timeout)). [GH-1935]
* agent: Joins based on a DNS lookup will use TCP and attempt to join with the full list of returned addresses. [GH-2101]
* agent: Consul will now refuse to start with a helpful message if the same UNIX socket is used for more than one listening endpoint. [GH-1910]
* agent: Removed an obsolete warning message when Consul starts on Windows. [GH-1920]
* agent: Defaults bind address to 127.0.0.1 when running in `-dev` mode. [GH-1878]
* agent: Added version information to the log when Consul starts up. [GH-1404]
* agent: Added timing metrics for HTTP requests in the form of `consul.http.<verb>.<path>`. [GH-2256]
* build: Updated all vendored dependencies. [GH-2258]
* build: Consul releases are now built with Go 1.6.3. [GH-2260]
* checks: Script checks now support an optional `timeout` parameter. [GH-1762]
* checks: HTTP health checks limit saved output to 4K to avoid performance issues. [GH-1952]
* cli: Added a `-stale` mode for watchers to allow them to pull data from any Consul server, not just the leader. [GH-2045] [GH-917]
* dns: Consul agents can now limit the number of UDP answers returned via the DNS interface. The default number of UDP answers is `3`, however by adjusting the `dns_config.udp_answer_limit` configuration parameter, it is now possible to limit the results down to `1`. This tunable provides environments where RFC3484 section 6, rule 9 is enforced with an important workaround in order to preserve the desired behavior of randomized DNS results. Most modern environments will not need to adjust this setting as this RFC was made obsolete by RFC 6724. See the [agent options](https://www.consul.io/docs/agent/options.html#udp_answer_limit) documentation for additional details for when this should be used. [GH-1712]
* dns: Consul now compresses all DNS responses by default. This prevents issues when recursing records that were originally compressed, where Consul would sometimes generate an invalid, uncompressed response that was too large. [GH-2266]
* dns: Added a new `recursor_timeout` configuration option to set the timeout for Consul's internal DNS client that's used for recursing queries to upstream DNS servers. [GH-2321]
* dns: Added a new `-dns-port` command line option so this can be set without a config file. [GH-2263]
* ui: Added a new network tomography visualization to the UI. [GH-2046]
BUG FIXES:
* agent: Fixed an issue where a health check's output never updates if the check status doesn't change after the Consul agent starts. [GH-1934]
* agent: External services can now be registered with ACL tokens. [GH-1738]
* agent: Fixed an issue where large events affecting many nodes could cause infinite intent rebroadcasts, leading to many log messages about intent queue overflows. [GH-1062]
* agent: Gossip encryption keys are now validated before being made persistent in the keyring, avoiding delayed feedback at runtime. [GH-1299]
* dns: Fixed an issue where DNS requests for SRV records could be incorrectly trimmed, resulting in an ADDITIONAL section that was out of sync with the ANSWER. [GH-1931]
* dns: Fixed two issues where DNS requests for SRV records on a prepared query that failed over would report the wrong domain and fail to translate addresses. [GH-2218] [GH-2220]
* server: Fixed a deadlock related to sorting the list of available datacenters by round trip time. [GH-2130]
* server: Fixed an issue with the state store's immutable radix tree that would prevent it from using cached modified objects during transactions, leading to extra copies and increased memory / GC pressure. [GH-2106]
* server: Upgraded Bolt DB to v1.2.1 to fix an issue on Windows where Consul would sometimes fail to start due to open user-mapped sections. [GH-2203]
OTHER CHANGES:
* build: Switched from Godep to govendor. [GH-2252]
## 0.6.4 (March 16, 2016)
BACKWARDS INCOMPATIBILITIES:
* Added a new `query` ACL type to manage prepared query names, and stopped capturing
ACL tokens by default when prepared queries are created. This won't affect existing
queries and how they are executed, but this will affect how they are managed. Now
management of prepared queries can be delegated within an organization. If you use
prepared queries, you'll need to read the
[Consul 0.6.4 upgrade instructions](https://www.consul.io/docs/upgrade-specific.html)
before upgrading to this version of Consul. [GH-1748]
* Consul's Go API client now pools connections by default, and requires you to manually
opt-out of this behavior. Previously, idle connections were supported and their
lifetime was managed by a finalizer, but this wasn't reliable in certain situations.
If you reuse an API client object during the lifetime of your application, then there's
nothing to do. If you have short-lived API client objects, you may need to configure them
using the new `api.DefaultNonPooledConfig()` method to avoid leaking idle connections (see the sketch after this list). [GH-1825]
* Consul's Go API client's `agent.UpdateTTL()` function was updated in a way that will
only work with Consul 0.6.4 and later. The `agent.PassTTL()`, `agent.WarnTTL()`, and
`agent.FailTTL()` functions were not affected and will continue to work with older
versions of Consul. [GH-1794]
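A minimal sketch of the opt-out described above; `api.DefaultConfig()` stays pooled, while short-lived clients can use `api.DefaultNonPooledConfig()`:
```
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Long-lived clients can keep the pooled default configuration.
	if _, err := api.NewClient(api.DefaultConfig()); err != nil {
		log.Fatal(err)
	}

	// Short-lived clients can opt out of pooling so idle connections are
	// not leaked once the client object is discarded.
	if _, err := api.NewClient(api.DefaultNonPooledConfig()); err != nil {
		log.Fatal(err)
	}
}
```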
FEATURES:
* Added new template prepared queries which allow you to define a prefix (possibly even
an empty prefix) to apply prepared query features like datacenter failover to multiple
services with a single query definition. This makes it easy to apply a common policy to
multiple services without having to manage many prepared queries. See
[Prepared Query Templates](https://www.consul.io/docs/agent/http/query.html#templates)
for more details. [GH-1764]
* Added a new ability to translate address lookups when doing queries of nodes in
remote datacenters via DNS using a new `translate_wan_addrs` configuration
option. This allows the node to be reached within its own datacenter using its
local address, and reached from other datacenters using its WAN address, which is
useful in hybrid setups with mixed networks. [GH-1698]
IMPROVEMENTS:
* Added a new `disable_hostname` configuration option to control whether Consul's
runtime telemetry gets prepended with the host name. All of the telemetry
configuration has also been moved to a `telemetry` nested structure, but the old
format is currently still supported. [GH-1284]
* Consul's Go dependencies are now vendored using Godep. [GH-1714]
* Added support for `EnableTagOverride` for the catalog in the Go API client. [GH-1726]
* Consul now ships built from Go 1.6. [GH-1735]
* Added a new `/v1/agent/check/update/<check id>` API for updating TTL checks which
makes it easier to send large check output as part of a PUT body and not a query
parameter. [GH-1785].
* Added a default set of `Accept` headers for HTTP checks. [GH-1819]
* Added support for RHEL7/Systemd in Terraform example. [GH-1629]
BUG FIXES:
* Updated the internal web UI (`-ui` option) to latest released build, fixing
an ACL-related issue and the broken settings icon. [GH-1619]
* Fixed an issue where blocking KV reads could miss updates and return stale data
when another key whose name is a prefix of the watched key was updated. [GH-1632]
* Fixed the redirect from `/` to `/ui` when the internal web UI (`-ui` option) is
enabled. [GH-1713]
* Updated memberlist to pull in a fix for leaking goroutines when performing TCP
fallback pings. This affected users with frequent UDP connectivity problems. [GH-1802]
* Added a fix to trim UDP DNS responses so they don't exceed 512 bytes. [GH-1813]
* Updated go-dockerclient to fix Docker health checks with Docker 1.10. [GH-1706]
* Removed the fixed-height display of nodes and services in the UI, which led to broken
  displays when a node has a lot of services. [GH-2055]
## 0.6.3 (January 15, 2016)
BUG FIXES:
* Fixed an issue when running Consul as PID 1 in a Docker container where
it could consume CPU and show spurious failures for health checks, watch
handlers, and `consul exec` commands [GH-1592]
## 0.6.2 (January 13, 2016)
SECURITY:
* Build against Go 1.5.3 to mitigate a security vulnerability introduced
in Go 1.5. For more information, please see https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4
This is a security-only release; other than the version number and building
against Go 1.5.3, there are no changes from 0.6.1.
## 0.6.1 (January 6, 2016)
BACKWARDS INCOMPATIBILITIES:
* The new `-monitor-retry` option to `consul lock` defaults to 3. This
will cause the lock monitor to retry up to 3 times, waiting 1s between
each attempt if it gets a 500 error from the Consul servers. For the
vast majority of use cases this is desirable to prevent the lock from
being given up during a brief period of Consul unavailability. If you
want to get the previous default behavior you will need to set the
`-monitor-retry=0` option.
IMPROVEMENTS:
* Consul is now built with Go 1.5.2
* Added source IP address and port information to RPC-related log error
messages and HTTP access logs [GH-1513] [GH-1448]
* API clients configured for insecure SSL now use an HTTP transport that's
set up the same way as the Go default transport [GH-1526]
* Added new per-host telemetry on DNS requests [GH-1537]
* Added support for reaping child processes which is useful when running
Consul as PID 1 in Docker containers [GH-1539]
* Added new `-ui` command line and `ui` config option that enables a built-in
Consul web UI, making deployment much simpler [GH-1543]
* Added new `-dev` command line option that creates a completely in-memory
standalone Consul server for development
* Added a Solaris build, now that dependencies have been updated to support
it [GH-1568]
* Added new `-try` option to `consul lock` to allow it to timeout with an error
if it doesn't acquire the lock [GH-1567]
* Added a new `-monitor-retry` option to `consul lock` to help ride out brief
periods of Consul unavailability without causing the lock to be given up [GH-1567]
BUG FIXES:
* Fixed broken settings icon in web UI [GH-1469]
* Fixed a web UI bug where the supplied token wasn't being passed into
the internal endpoint, breaking some pages when multiple datacenters
were present [GH-1071]
## 0.6.0 (December 3, 2015)
BACKWARDS INCOMPATIBILITIES:
* A KV lock acquisition operation will now allow the lock holder to
update the key's contents without giving up the lock by doing another
PUT with `?acquire=<session>` and providing the same session that
is holding the lock. Previously, this operation would fail (see the Go sketch after this list).
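A minimal Go sketch of the new behavior, assuming a locally reachable agent; the key name and values are illustrative:
```
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a session to hold the lock.
	sess, _, err := client.Session().Create(&api.SessionEntry{TTL: "15s"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire the lock on an illustrative key.
	kv := client.KV()
	pair := &api.KVPair{Key: "service/leader", Value: []byte("v1"), Session: sess}
	if ok, _, err := kv.Acquire(pair, nil); err != nil || !ok {
		log.Fatal("failed to acquire lock")
	}

	// As of 0.6.0, acquiring again with the same session updates the value
	// while keeping the lock held; previously this call would fail.
	pair.Value = []byte("v2")
	if _, _, err := kv.Acquire(pair, nil); err != nil {
		log.Fatal(err)
	}
}
```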
FEATURES:
* Service ACLs now apply to service discovery [GH-1024]
* Added event ACLs to guard firing user events [GH-1046]
* Added keyring ACLs for gossip encryption keyring operations [GH-1090]
* Added a new TCP check type that does a connect as a check [GH-1130]
* Added new "tag override" feature that lets catalog updates to a
service's tags flow down to agents [GH-1187]
* Ported in-memory database from LMDB to an immutable radix tree to improve
read throughput, reduce garbage collection pressure, and make Consul 100%
pure Go [GH-1291]
* Added support for sending telemetry to DogStatsD [GH-1293]
* Added new network tomography subsystem that estimates the network
round trip times between nodes and exposes that in raw APIs, as well
as in existing APIs (find the service node nearest node X); also
includes a new `consul rtt` command to query interactively [GH-1331]
* Consul now builds under Go 1.5.1 by default [GH-1345]
* Added built-in support for running health checks inside Docker containers
[GH-1343]
* Added prepared queries which support service health queries with rich
features such as filters for multiple tags and failover to remote datacenters
based on network coordinates; these are available via HTTP as well as the
DNS interface [GH-1389]
BUG FIXES:
* Fixed expired certificates in unit tests [GH-979]
* Allow services with `/` characters in the UI [GH-988]
* Added SOA/NXDOMAIN records to negative DNS responses per RFC2308 [GH-995]
[GH-1142] [GH-1195] [GH-1217]
* Token hiding in HTTP logs bug fixed [GH-1020]
* RFC6598 addresses are accepted as private IPs [GH-1050]
* Fixed reverse DNS lookups to recursor [GH-1137]
* Removes the trailing `/` added by the `consul lock` command [GH-1145]
* Fixed bad lock handler execution during shutdown [GH-1080] [GH-1158] [GH-1214]
* Added missing support for AAAA queries for nodes [GH-1222]
* Tokens passed from the CLI or API work for maint mode [GH-1230]
* Fixed service deregister/reregister flaps that could happen during
`consul reload` [GH-1235]
* Fixed the Go API client to properly distinguish between expired sessions
and sessions that don't exist [GH-1041]
* Fixed the KV section of the UI to work on Safari [GH-1321]
* Cleaned up JavaScript for built-in UI with bug fixes [GH-1338]
IMPROVEMENTS:
* Added sorting of `consul members` command output [GH-969]
* Updated AWS templates for RHEL6, CentOS6 [GH-992] [GH-1002]
* Advertised gossip/rpc addresses can now be configured [GH-1004]
* Failed lock acquisition handling now responds based on type of failure
[GH-1006]
* Agents now remember check state across restarts [GH-1009]
* Always run ACL tests by default in API tests [GH-1030]
* Consul now refuses to start if there are multiple private IPs [GH-1099]
* Improved efficiency of servers managing incoming connections from agents
[GH-1170]
* Added logging of the DNS client addresses in error messages [GH-1166]
* Added `-http-port` option to change the HTTP API port number [GH-1167]
* Atlas integration options are reload-able via SIGHUP [GH-1199]
* Atlas endpoint is a configurable option and CLI arg [GH-1201]
* Added `-pass-stdin` option to `consul lock` command [GH-1200]
* Enables the `/v1/internal/ui/*` endpoints, even if `-ui-dir` isn't set
[GH-1215]
* Added HTTP method to Consul's log output for better debugging [GH-1270]
* Lock holders can `?acquire=<session>` a key again with the same session
that holds the lock to update a key's contents without releasing the
lock [GH-1291]
* Improved an O(n^2) algorithm in the agent's catalog sync code [GH-1296]
* Switched to net-rpc-msgpackrpc to reduce RPC overhead [GH-1307]
* Removed all uses of the http package's default client and transport in
Consul to avoid conflicts with other packages [GH-1310] [GH-1327]
* Added new `X-Consul-Token` HTTP header option to avoid passing tokens
in the query string (see the Go sketch after this list) [GH-1318]
* Increased session TTL max to 24 hours (use with caution, see note added
to the Session HTTP endpoint documentation) [GH-1412]
* Added support to the API client for retrying lock monitoring when Consul
is unavailable, helping prevent false indications of lost locks (eg. apps
like Vault can avoid failing over when a Consul leader election occurs)
[GH-1457]
* Added reap of receive buffer space for idle streams in the connection
pool [GH-1452]
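A minimal Go sketch of the per-request token option in the Go client, which recent client versions send as the `X-Consul-Token` header rather than a query parameter; the token value is a placeholder:
```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Setting Token on QueryOptions attaches the ACL token to this request
	// via the X-Consul-Token header instead of the query string.
	pair, _, err := client.KV().Get("app/config/max_conns", &api.QueryOptions{
		Token: "REPLACE-WITH-ACL-TOKEN",
	})
	if err != nil {
		log.Fatal(err)
	}
	if pair != nil {
		fmt.Printf("%s = %s\n", pair.Key, pair.Value)
	}
}
```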
MISC:
* Lots of docs fixes
* Lots of Vagrantfile cleanup
* Data migrator utility removed to eliminate cgo dependency [GH-1309]
UPGRADE NOTES:
* Consul will refuse to start if the data directory contains an "mdb" folder.
This folder was used in versions of Consul up to 0.5.1. Consul version 0.5.2
included a baked-in utility to automatically upgrade the data format, but
this has been removed in Consul 0.6 to eliminate the dependency on cgo.
* New service read, event firing, and keyring ACLs may require special steps to
perform during an upgrade if ACLs are enabled and set to deny by default.
* Consul will refuse to start if there are multiple private IPs available, so
if this is the case you will need to configure Consul's advertise or bind
addresses before upgrading.
See https://www.consul.io/docs/upgrade-specific.html for detailed upgrade
instructions.
## 0.5.2 (May 18, 2015)
FEATURES:
* Include datacenter in the `members` output
* HTTP Health Check sets user agent "Consul Health Check" [GH-951]
BUG FIXES:
* Fixed memory leak caused by blocking query [GH-939]
MISC:
* Remove unused constant [GH-941]
## 0.5.1 (May 13, 2015)
FEATURES:
* Ability to configure minimum session TTL. [GH-821]
* Ability to set the initial state of a health check when registering [GH-859]
* New `configtest` sub-command to verify config validity [GH-904]
* ACL enforcement is prefix based for service names [GH-905]
* ACLs support upsert for simpler restore and external generation [GH-909]
* ACL tokens can be provided per-service during registration [GH-891]
* Support for distinct LAN and WAN advertise addresses [GH-816]
* Migrating Raft log from LMDB to BoltDB [GH-857]
* `session_ttl_min` is now configurable to reduce the minimum TTL [GH-821]
* Adding `verify_server_hostname` to protect against server forging [GH-927]
BUG FIXES:
* Datacenter is lowercased, fixes DNS lookups [GH-761]
* Deregister all checks when service is deregistered [GH-918]
* Fixing issues with updates of persisted services [GH-910]
* Chained CNAME resolution fixes [GH-862]
* Tokens are filtered out of log messages [GH-860]
* Fixing anti-entropy issue if servers rollback Raft log [GH-850]
* Datacenter name is case insensitive for DNS lookups
* Queries for invalid datacenters do not leak sockets [GH-807]
IMPROVEMENTS:
* HTTP health checks more reliable, avoid KeepAlives [GH-824]
* Improved protection against a passive cluster merge
* SIGTERM is properly handled for graceful shutdown [GH-827]
* Better staggering of deferred updates to checks [GH-884]
* Configurable stats prefix [GH-902]
* Raft uses BoltDB as the backend store. [GH-857]
* API RenewPeriodic more resilient to transient errors [GH-912]
## 0.5.0 (February 19, 2015)
FEATURES:
* Key rotation support for gossip layer. This allows the `encrypt` key
to be changed globally. See "keyring" command. [GH-336]
* Options to join the WAN pool on start (`start_join_wan`, `retry_join_wan`) [GH-477]
* Optional HTTPS interface [GH-478]
* Ephemeral keys via "delete" session behavior. This allows keys to be deleted when
a session is invalidated instead of having the lock released. Adds new "Behavior"
field to Session which is configurable. [GH-487]
* Reverse DNS lookups via PTR for IPv4 and IPv6 [GH-475]
* Checks and services added via the API are persisted. This means services and
  checks will survive a crash or restart. [GH-497]
* ACLs can now protect service registration. Users in blacklist mode should
allow registrations before upgrading to prevent a service disruption. [GH-506] [GH-465]
* Sessions support a heartbeat failure detector via use of TTLs. This adds a new
"TTL" field to Sessions and a `/v1/session/renew` endpoint. Heartbeats act like a
failure detector (health check), but are managed by the servers. [GH-524] [GH-172]
* Support for service specific IP addresses. This allows the service to advertise an
address that is different from the agent. [GH-229] [GH-570]
* Support KV Delete with Check-And-Set [GH-589]
* Merge `armon/consul-api` into `api` as official Go client.
* Support for distributed locks and semaphores in API client [GH-594] [GH-600]
* Support for native HTTP health checks (see the Go sketch after this list) [GH-592]
* Support for node and service maintenance modes [GH-606]
* Added new "consul maint" command to easily toggle maintenance modes [GH-625]
* Added new "consul lock" command for simple highly-available deployments.
This lets Consul manage the leader election and easily handle N+1 deployments
without the applications being Consul aware. [GH-619]
* Multiple checks can be associated with a service [GH-591] [GH-230]
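A minimal Go sketch combining a few of the features above: registering a service with its own address and a native HTTP health check. The service name, address, port, and check URL are illustrative, and a locally reachable agent is assumed:
```
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Register a service that advertises its own address (distinct from
	// the agent) together with a native HTTP health check.
	reg := &api.AgentServiceRegistration{
		Name:    "web",
		Address: "10.0.0.5",
		Port:    8080,
		Check: &api.AgentServiceCheck{
			HTTP:     "http://10.0.0.5:8080/health",
			Interval: "10s",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}
```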
BUG FIXES:
* Fixed X-Consul-Index calculation for KV ListKeys
* Fixed errors under extremely high read parallelism
* Fixed issue causing event watches to not fire reliably [GH-479]
* Fixed non-monotonic X-Consul-Index with key deletion [GH-577] [GH-195]
* Fixed use of default instead of custom TLD in some DNS responses [GH-582]
* Fixed memory leaks in API client when an error response is returned [GH-608]
* Fixed issues with graceful leave in single-node bootstrap cluster [GH-621]
* Fixed issue preventing node reaping [GH-371]
* Fixed gossip stability at very large scale
* Fixed nested "rpc error: rpc error: ..." strings in "no known leader" errors. [GH-611]
* Fixed panic in `exec` during cancellation
* Fixed health check state reset caused by SIGHUP [GH-693]
* Fixed bug in UI when multiple datacenters exist.
IMPROVEMENTS:
* Support "consul exec" in foreign datacenter [GH-584]
* Improved K/V blocking query performance [GH-578]
* CLI respects the CONSUL_RPC_ADDR environment variable for the RPC address [GH-542]
* Added support for multiple DNS recursors [GH-448]
* Added support for defining multiple services per configuration file [GH-433]
* Added support for defining multiple checks per configuration file [GH-433]
* Allow mixing of service and check definitions in a configuration file [GH-433]
* Allow notes for checks in service definition file [GH-449]
* Random stagger for agent checks to prevent thundering herd [GH-546]
* More useful metrics are sent to statsd/statsite
* Added configuration to set custom HTTP headers (CORS) [GH-558]
* Reject invalid configurations to simplify validation [GH-576]
* Guard against accidental cluster mixing [GH-580] [GH-260]
* Added option to filter DNS results on warning [GH-595]
* Improve write throughput with raft log caching [GH-604]
* Added ability to bind RPC and HTTP listeners to UNIX sockets [GH-587] [GH-612]
* K/V HTTP endpoint returns 400 on conflicting flags [GH-634] [GH-432]
MISC:
* UI confirms before deleting key sub-tree [GH-520]
* More useful output in "consul version" [GH-480]
* Many documentation improvements
* Reduce log messages when a quorum member lags [GH-566]
UPGRADE NOTES:
* If `acl_default_policy` is "deny", ensure tokens are updated to enable
service registration to avoid a service disruption. The new ACL policy
can be submitted with 0.4 before upgrading to 0.5 where it will be
enforced.
* Servers running 0.5.X cannot be mixed with older servers. (Any client
version is fine). There is a 15 minute upgrade window where mixed
versions are allowed before older servers will panic due to an unsupported
internal command. This is due to the new KV tombstones which are internal
to servers.
## 0.4.1 (October 20, 2014)
FEATURES:
* Adding flags for `-retry-join` to attempt a join with
configurable retry behavior. [GH-395]
BUG FIXES:
* Fixed ACL token in UI
* Fixed ACL reloading in UI [GH-323]
* Fixed long session names in UI [GH-353]
* Fixed exit code from remote exec [GH-346]
* Fixing only a single watch being run by an agent [GH-337]
* Fixing potential race in connection multiplexing
* Fixing issue with Session ID and ACL ID generation. [GH-391]
* Fixing multiple headers for /v1/event/list endpoint [GH-361]
* Fixing graceful leave of leader causing invalid Raft peers [GH-360]
* Fixing bug with closing TLS connection on error
* Fixing issue with node reaping [GH-371]
* Fixing aggressive deadlock time [GH-389]
* Fixing syslog filter level [GH-272]
* Serf snapshot compaction works on Windows [GH-332]
* Raft snapshots work on Windows [GH-265]
* Consul service entry cleanup by clients is now possible
* Fixing improper deserialization
IMPROVEMENTS:
* Use "critical" health state instead of "unknown" [GH-341]
* Consul service can be targeted for exec [GH-344]
* Provide debug logging for session invalidation [GH-390]
* Added "Deregister" button to UI [GH-364]
* Added `enable_truncate` DNS configuration flag [GH-376]
* Reduce mmap() size on 32bit systems [GH-265]
* Temporary state is cleaned after an abort [GH-338] [GH-178]
MISC:
* Health state "unknown" being deprecated
## 0.4.0 (September 5, 2014)
FEATURES:
* Fine-grained ACL system to restrict access to KV store. Clients
use tokens which can be restricted to (read, write, deny) permissions
using longest-prefix matches.
* Watch mechanisms added to invoke a handler when data changes in consul.
Used with the `consul watch` command, or by specifying `watches` in
an agent configuration.
* Event system added to support custom user events. Events are fired using
the `consul event` command. They are handled using a standard watch.
* Remote execution using `consul exec`. This allows for command execution on remote
instances mediated through Consul.
* RFC-2782 style DNS lookups supported
* UI improvements, including support for ACLs.
IMPROVEMENTS:
* DNS case-insensitivity [GH-189]
* Support for HTTP `?pretty` parameter to pretty format JSON output.
* Use $SHELL when invoking handlers. [GH-237]
* Agent takes the `-encrypt` CLI Flag [GH-245]
* New `statsd_add` config for Statsd support. [GH-247]
* New `addresses` config for providing an override to `client_addr` for
DNS, HTTP, or RPC endpoints. [GH-301] [GH-253]
* Support [Checkpoint](http://checkpoint.hashicorp.com) for security bulletins
and update announcements.
BUG FIXES:
* Fixed race condition in `-bootstrap-expect` [GH-254]
* Require PUT to /v1/session/destroy [GH-285]
* Fixed registration race condition [GH-300] [GH-279]
UPGRADE NOTES:
* ACL support should not be enabled until all server nodes are running
Consul 0.4. Mixed server versions with ACL support enabled may result in
panics.
## 0.3.1 (July 21, 2014)
FEATURES:
* Improved bootstrapping process, thanks to @robxu9
BUG FIXES:
* Fixed issue with service re-registration [GH-216]
* Fixed handling of `-rejoin` flag
* Restored 0.2 TLS behavior, thanks to @nelhage [GH-233]
* Fix the statsite flags, thanks to @nelhage [GH-243]
* Fixed filters on critical / non-passing checks [GH-241]
* Fixed initial log compaction crash [GH-297]
IMPROVEMENTS:
* UI Improvements
* Improved handling of Serf snapshot data
* Increase reliability of failure detector
* More useful logging messages
## 0.3.0 (June 13, 2014)
FEATURES:
* Better, faster, cleaner UI [GH-194] [GH-196]
* Sessions, which act as a binding layer between
nodes, checks and KV data. [GH-162]
* Key locking. KV data integrates with sessions to
enable distributed locking. [GH-162]
* DNS lookups can do stale reads and TTLs. [GH-200]
* Added new /v1/agent/self endpoint [GH-173]
* `reload` command can be used to trigger configuration
reload from the CLI [GH-142]
IMPROVEMENTS:
* `members` has a much cleaner output format [GH-143]
* `info` includes build version information
* Sorted results for datacenter list [GH-198]
* Switch multiplexing to yamux
* Allow multiple CA certs in ca_file [GH-174]
* Enable logging to syslog. [GH-105]
* Allow raw key value lookup [GH-150]
* Log encryption enabled [GH-151]
* Support `-rejoin` to rejoin a cluster after a previous leave. [GH-110]
* Support the "any" wildcard for v1/health/state/ [GH-152]
* Defer sync of health check output [GH-157]
* Provide output for serfHealth check [GH-176]
* Datacenter name is validated [GH-169]
* Configurable syslog facilities [GH-170]
* Pipelining replication of writes
* Raft group commits
* Increased stability of leader terms
* Prevent previously left nodes from causing re-elections
BUG FIXES:
* Fixed memory leak in in-memory stats system
* Fixing race between RPC and Raft init [GH-160]
* Server-local RPC avoids the network [GH-148]
* Fixing builds for older OSX [GH-147]
MISC:
* Fixed missing prefixes on some log messages
* Removed the `-role` filter of `members` command
* Lots of docs fixes
## 0.2.1 (May 20, 2014)
IMPROVEMENTS:
* Improved the URL formatting for the key/value editor in the Web UI.
Importantly, the editor now allows editing keys with dashes in the
name. [GH-119]
* The web UI now has cancel and delete folder actions in the key/value
editor. [GH-124], [GH-122]
* Add flag to agent to write pid to a file. [GH-106]
* Time out commands if Raft exceeds command enqueue timeout
* Adding support for the `-advertise` CLI flag. [GH-156]
* Fixing potential name conflicts on the WAN gossip ring [GH-158]
* /v1/catalog/services returns an empty slice instead of null. [GH-145]
* `members` command returns exit code 2 if no results. [GH-116]
BUG FIXES:
* Renaming "separator" to "separator". This is the correct spelling,
but both spellings are respected for backwards compatibility. [GH-101]
* Private IP is properly found on Windows clients.
* Windows agents won't show "failed to decode" errors on every RPC
request.
* Fixed memory leak with RPC clients. [GH-149]
* Serf name conflict resolution disabled. [GH-97]
* Raft deadlock possibility fixed. [GH-141]
MISC:
* Updating to latest version of LMDB
* Reduced the limit of KV entries to 512KB. [GH-123].
* Warn if any Raft log exceeds 1MB
* Lots of docs fixes
## 0.2.0 (May 1, 2014)
FEATURES:
* Adding Web UI for Consul. This is enabled by providing the `-ui-dir` flag
with the path to the web directory. The UI is visited at the standard HTTP
address (Defaults to http://127.0.0.1:8500/). There is a demo
[available here](http://demo.consul.io).
* Adding new read consistency modes. `?consistent` can be used for strongly
consistent reads without caveats. `?stale` can be used for stale reads to
allow for higher throughput and read scalability. [GH-68]
* /v1/health/service/ endpoint can take an optional `?passing` flag
to filter to only nodes with passing results. [GH-57]
* The KV endpoint supports listing keys with the `?keys` query parameter,
and limited up to a separator using `?separator=`.
IMPROVEMENTS:
* Health check output goes into separate `Output` field instead
of overriding `Notes`. [GH-59]
* Adding a minimum check interval to prevent checks with extremely
low intervals from fork bombing. [GH-64]
* Raft peer set cleared on leave. [GH-69]
* Case insensitive parsing checks. [GH-78]
* Increase limit of DB size and Raft log on 64bit systems. [GH-81]
* Output of health checks limited to 4K. [GH-83]
* More warnings if GOMAXPROCS == 1 [GH-87]
* Added runtime information to `consul info`
BUG FIXES:
* Fixed 404 on /v1/agent/service/deregister and
/v1/agent/check/deregister. [GH-95]
* Fixed JSON parsing for /v1/agent/check/register [GH-60]
* DNS parser can handle a period in a tag name. [GH-39]
* "application/json" content-type is sent on HTTP requests. [GH-45]
* Work around for LMDB delete issue. [GH-85]
* Fixed tag gossip propagation for rapid restart. [GH-86]
MISC:
* More conservative timing values for Raft
* Provide a warning if attempting to commit a very large Raft entry
* Improved timeliness of registration when server is in bootstrap mode. [GH-72]
## 0.1.0 (April 17, 2014)
* Initial release

69
vendor/github.com/hashicorp/consul/GNUmakefile generated vendored Normal file
View File

@ -0,0 +1,69 @@
GOTOOLS = \
github.com/elazarl/go-bindata-assetfs/... \
github.com/jteeuwen/go-bindata/... \
github.com/mitchellh/gox \
golang.org/x/tools/cmd/cover \
golang.org/x/tools/cmd/stringer
TEST ?= ./...
GOTAGS ?= consul
GOFILES ?= $(shell go list $(TEST) | grep -v /vendor/)
# all builds binaries for all targets
all: bin
bin: tools
@mkdir -p bin/
@GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
# dev creates binaries for testing locally - these are put into ./bin and $GOPATH
dev: format
@CONSUL_DEV=1 GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
# dist builds binaries for all platforms and packages them for distribution
dist:
@GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
cov:
gocov test ${GOFILES} | gocov-html > /tmp/coverage.html
open /tmp/coverage.html
test:
@./scripts/verify_no_uuid.sh
@env \
GOTAGS="${GOTAGS}" \
GOFILES="${GOFILES}" \
TESTARGS="${TESTARGS}" \
sh -c "'$(CURDIR)/scripts/test.sh'"
cover:
go test ${GOFILES} --cover
format:
@echo "--> Running go fmt"
@go fmt ${GOFILES}
vet:
@echo "--> Running go vet"
@go vet ${GOFILES}; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
exit 1; \
fi
# build the static web ui and build static assets inside a Docker container, the
# same way a release build works
ui:
@sh -c "'$(CURDIR)/scripts/ui.sh'"
# generates the static web ui that's compiled into the binary
static-assets:
@echo "--> Generating static assets"
@go-bindata-assetfs -pkg agent -prefix pkg ./pkg/web_ui/...
@mv bindata_assetfs.go command/agent
$(MAKE) format
tools:
go get -u -v $(GOTOOLS)
.PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools

35
vendor/github.com/hashicorp/consul/ISSUE_TEMPLATE.md generated vendored Normal file
View File

@ -0,0 +1,35 @@
If you have a question, please direct it to the
[consul mailing list](https://www.consul.io/community.html) if it hasn't been
addressed in either the [FAQ](https://www.consul.io/docs/faq.html) or in one
of the [Consul Guides](https://www.consul.io/docs/guides/index.html).
When filing a bug, please include the following:
### `consul version` for both Client and Server
Client: `[client version here]`
Server: `[server version here]`
### `consul info` for both Client and Server
Client:
```
[Client `consul info` here]
```
Server:
```
[Server `consul info` here]
```
### Operating system and Environment details
### Description of the Issue (and unexpected/desired result)
### Reproduction steps
### Log Fragments or Link to [gist](https://gist.github.com/)
Include appropriate Client or Server log fragments. If the log is longer
than a few dozen lines, please include the URL to the
[gist](https://gist.github.com/).
TIP: Use `-log-level=TRACE` on the client and server to capture the maximum log detail.

87
vendor/github.com/hashicorp/consul/README.md generated vendored Normal file
View File

@ -0,0 +1,87 @@
# Consul [![Build Status](https://travis-ci.org/hashicorp/consul.svg?branch=master)](https://travis-ci.org/hashicorp/consul) [![Join the chat at https://gitter.im/hashicorp-consul/Lobby](https://badges.gitter.im/hashicorp-consul/Lobby.svg)](https://gitter.im/hashicorp-consul/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
* Website: https://www.consul.io
* Chat: [Gitter](https://gitter.im/hashicorp-consul/Lobby)
* Mailing list: [Google Groups](https://groups.google.com/group/consul-tool/)
Consul is a tool for service discovery and configuration. Consul is
distributed, highly available, and extremely scalable.
Consul provides several key features:
* **Service Discovery** - Consul makes it simple for services to register
themselves and to discover other services via a DNS or HTTP interface.
External services such as SaaS providers can be registered as well.
* **Health Checking** - Health Checking enables Consul to quickly alert
operators about any issues in a cluster. The integration with service
discovery prevents routing traffic to unhealthy hosts and enables service
level circuit breakers.
* **Key/Value Storage** - A flexible key/value store enables storing
dynamic configuration, feature flagging, coordination, leader election and
more. The simple HTTP API makes it easy to use anywhere.
* **Multi-Datacenter** - Consul is built to be datacenter aware, and can
support any number of regions without complex configuration.
Consul runs on Linux, Mac OS X, FreeBSD, Solaris, and Windows.
## Quick Start
An extensive quick start is viewable on the Consul website:
https://www.consul.io/intro/getting-started/install.html
## Documentation
Full, comprehensive documentation is viewable on the Consul website:
https://www.consul.io/docs
## Developing Consul
If you wish to work on Consul itself, you'll first need [Go](https://golang.org)
installed (version 1.8+ is _required_). Make sure you have Go properly installed,
including setting up your [GOPATH](https://golang.org/doc/code.html#GOPATH).
Next, clone this repository into `$GOPATH/src/github.com/hashicorp/consul` and
then just type `make`. In a few moments, you'll have a working `consul` executable:
```
$ make
...
$ bin/consul
...
```
*Note: `make` will also place a copy of the binary in the first part of your `$GOPATH`.*
You can run tests by typing `make test`.
If you make any changes to the code, run `make format` in order to automatically
format the code according to Go standards.
### Building Consul on Windows
Make sure Go 1.8+ is installed on your system and that the Go command is in your
%PATH%.
For building Consul on Windows, you also need to have MinGW installed.
[TDM-GCC](http://tdm-gcc.tdragon.net/) is a simple bundle installer which has all
the required tools for building Consul with MinGW.
Install TDM-GCC and make sure it has been added to your %PATH%.
If all goes well, you should be able to build Consul by running `make.bat` from a
command prompt.
See also [golang/winstrap](https://github.com/golang/winstrap) and
[golang/wiki/WindowsBuild](https://github.com/golang/go/wiki/WindowsBuild)
for more information on how to set up a general Go build environment on Windows
with MinGW.
## Vendoring
Consul currently uses [govendor](https://github.com/kardianos/govendor) for
vendoring.

View File

@ -25,6 +25,8 @@ type AgentService struct {
Port int Port int
Address string Address string
EnableTagOverride bool EnableTagOverride bool
CreateIndex uint64
ModifyIndex uint64
} }
// AgentMember represents a cluster member known to the agent // AgentMember represents a cluster member known to the agent

View File

@ -2,12 +2,11 @@ package api
import ( import (
"bytes" "bytes"
"context"
"crypto/tls" "crypto/tls"
"crypto/x509"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"net" "net"
"net/http" "net/http"
@ -18,6 +17,7 @@ import (
"time" "time"
"github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
) )
const ( const (
@ -37,6 +37,26 @@ const (
// whether or not to use HTTPS. // whether or not to use HTTPS.
HTTPSSLEnvName = "CONSUL_HTTP_SSL" HTTPSSLEnvName = "CONSUL_HTTP_SSL"
// HTTPCAFile defines an environment variable name which sets the
// CA file to use for talking to Consul over TLS.
HTTPCAFile = "CONSUL_CACERT"
// HTTPCAPath defines an environment variable name which sets the
// path to a directory of CA certs to use for talking to Consul over TLS.
HTTPCAPath = "CONSUL_CAPATH"
// HTTPClientCert defines an environment variable name which sets the
// client cert file to use for talking to Consul over TLS.
HTTPClientCert = "CONSUL_CLIENT_CERT"
// HTTPClientKey defines an environment variable name which sets the
// client key file to use for talking to Consul over TLS.
HTTPClientKey = "CONSUL_CLIENT_KEY"
// HTTPTLSServerName defines an environment variable name which sets the
// server name to use as the SNI host when connecting via TLS
HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME"
// HTTPSSLVerifyEnvName defines an environment variable name which sets // HTTPSSLVerifyEnvName defines an environment variable name which sets
// whether or not to disable certificate checking. // whether or not to disable certificate checking.
HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
@ -79,6 +99,11 @@ type QueryOptions struct {
// metadata key/value pairs. Currently, only one key/value pair can // metadata key/value pairs. Currently, only one key/value pair can
// be provided for filtering. // be provided for filtering.
NodeMeta map[string]string NodeMeta map[string]string
// RelayFactor is used in keyring operations to cause responses to be
// relayed back to the sender through N other random nodes. Must be
// a value from 0 to 5 (inclusive).
RelayFactor uint8
} }
// WriteOptions are used to parameterize a write // WriteOptions are used to parameterize a write
@ -90,6 +115,11 @@ type WriteOptions struct {
// Token is used to provide a per-request ACL token // Token is used to provide a per-request ACL token
// which overrides the agent's default token. // which overrides the agent's default token.
Token string Token string
// RelayFactor is used in keyring operations to cause responses to be
// relayed back to the sender through N other random nodes. Must be
// a value from 0 to 5 (inclusive).
RelayFactor uint8
} }
// QueryMeta is used to return meta data about a query // QueryMeta is used to return meta data about a query
@ -152,6 +182,8 @@ type Config struct {
// Token is used to provide a per-request ACL token // Token is used to provide a per-request ACL token
// which overrides the agent's default token. // which overrides the agent's default token.
Token string Token string
TLSConfig TLSConfig
} }
// TLSConfig is used to generate a TLSClientConfig that's useful for talking to // TLSConfig is used to generate a TLSClientConfig that's useful for talking to
@ -166,6 +198,10 @@ type TLSConfig struct {
// communication, defaults to the system bundle if not specified. // communication, defaults to the system bundle if not specified.
CAFile string CAFile string
// CAPath is the optional path to a directory of CA certificates to use for
// Consul communication, defaults to the system bundle if not specified.
CAPath string
// CertFile is the optional path to the certificate for Consul // CertFile is the optional path to the certificate for Consul
// communication. If this is set then you need to also set KeyFile. // communication. If this is set then you need to also set KeyFile.
CertFile string CertFile string
@ -243,27 +279,28 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
} }
} }
if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" { if v := os.Getenv(HTTPTLSServerName); v != "" {
doVerify, err := strconv.ParseBool(verify) config.TLSConfig.Address = v
}
if v := os.Getenv(HTTPCAFile); v != "" {
config.TLSConfig.CAFile = v
}
if v := os.Getenv(HTTPCAPath); v != "" {
config.TLSConfig.CAPath = v
}
if v := os.Getenv(HTTPClientCert); v != "" {
config.TLSConfig.CertFile = v
}
if v := os.Getenv(HTTPClientKey); v != "" {
config.TLSConfig.KeyFile = v
}
if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
doVerify, err := strconv.ParseBool(v)
if err != nil { if err != nil {
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
} }
if !doVerify { if !doVerify {
tlsClientConfig, err := SetupTLSConfig(&TLSConfig{ config.TLSConfig.InsecureSkipVerify = true
InsecureSkipVerify: true,
})
// We don't expect this to fail given that we aren't
// parsing any of the input, but we panic just in case
// since this doesn't have an error return.
if err != nil {
panic(err)
}
transport := transportFn()
transport.TLSClientConfig = tlsClientConfig
config.HttpClient.Transport = transport
} }
} }
@ -298,17 +335,12 @@ func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
tlsClientConfig.Certificates = []tls.Certificate{tlsCert} tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
} }
if tlsConfig.CAFile != "" { rootConfig := &rootcerts.Config{
data, err := ioutil.ReadFile(tlsConfig.CAFile) CAFile: tlsConfig.CAFile,
if err != nil { CAPath: tlsConfig.CAPath,
return nil, fmt.Errorf("failed to read CA file: %v", err)
} }
if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
caPool := x509.NewCertPool() return nil, err
if !caPool.AppendCertsFromPEM(data) {
return nil, fmt.Errorf("failed to parse CA certificate")
}
tlsClientConfig.RootCAs = caPool
} }
return tlsClientConfig, nil return tlsClientConfig, nil
@ -336,14 +368,58 @@ func NewClient(config *Config) (*Client, error) {
config.HttpClient = defConfig.HttpClient config.HttpClient = defConfig.HttpClient
} }
if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 { if config.TLSConfig.Address == "" {
config.TLSConfig.Address = defConfig.TLSConfig.Address
}
if config.TLSConfig.CAFile == "" {
config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
}
if config.TLSConfig.CAPath == "" {
config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
}
if config.TLSConfig.CertFile == "" {
config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
}
if config.TLSConfig.KeyFile == "" {
config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile
}
if !config.TLSConfig.InsecureSkipVerify {
config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
}
tlsClientConfig, err := SetupTLSConfig(&config.TLSConfig)
// We don't expect this to fail given that we aren't
// parsing any of the input, but we panic just in case
// since this doesn't have an error return.
if err != nil {
return nil, err
}
config.HttpClient.Transport.(*http.Transport).TLSClientConfig = tlsClientConfig
parts := strings.SplitN(config.Address, "://", 2)
if len(parts) == 2 {
switch parts[0] {
case "http":
case "https":
config.Scheme = "https"
case "unix":
trans := cleanhttp.DefaultTransport() trans := cleanhttp.DefaultTransport()
trans.Dial = func(_, _ string) (net.Conn, error) { trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", parts[1]) return net.Dial("unix", parts[1])
} }
config.HttpClient = &http.Client{ config.HttpClient = &http.Client{
Transport: trans, Transport: trans,
} }
default:
return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
}
config.Address = parts[1] config.Address = parts[1]
} }
@ -396,6 +472,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
r.params.Add("node-meta", key+":"+value) r.params.Add("node-meta", key+":"+value)
} }
} }
if q.RelayFactor != 0 {
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
}
} }
// durToMsec converts a duration to a millisecond specified string. If the // durToMsec converts a duration to a millisecond specified string. If the
@ -437,6 +516,9 @@ func (r *request) setWriteOptions(q *WriteOptions) {
if q.Token != "" { if q.Token != "" {
r.header.Set("X-Consul-Token", q.Token) r.header.Set("X-Consul-Token", q.Token)
} }
if q.RelayFactor != 0 {
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
}
} }
// toHTTP converts the request to an HTTP request // toHTTP converts the request to an HTTP request

View File

@ -6,6 +6,8 @@ type Node struct {
Address string Address string
TaggedAddresses map[string]string TaggedAddresses map[string]string
Meta map[string]string Meta map[string]string
CreateIndex uint64
ModifyIndex uint64
} }
type CatalogService struct { type CatalogService struct {

View File

@ -10,10 +10,11 @@ type CoordinateEntry struct {
Coord *coordinate.Coordinate Coord *coordinate.Coordinate
} }
// CoordinateDatacenterMap represents a datacenter and its associated WAN // CoordinateDatacenterMap has the coordinates for servers in a given datacenter
// nodes and their associates coordinates. // and area. Network coordinates are only compatible within the same area.
type CoordinateDatacenterMap struct { type CoordinateDatacenterMap struct {
Datacenter string Datacenter string
AreaID string
Coordinates []CoordinateEntry Coordinates []CoordinateEntry
} }

View File

@ -9,155 +9,3 @@ type Operator struct {
func (c *Client) Operator() *Operator { func (c *Client) Operator() *Operator {
return &Operator{c} return &Operator{c}
} }
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Consul.
ID string
// Node is the node name of the server, as known by Consul, or this
// will be set to "(unknown)" otherwise.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address string
// Leader is true if this server is the current cluster leader.
Leader bool
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Consul.
Voter bool
}
// RaftConfiguration is returned when querying for the current Raft configuration.
type RaftConfiguration struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// keyringRequest is used for performing Keyring operations
type keyringRequest struct {
Key string
}
// KeyringResponse is returned when listing the gossip encryption keys
type KeyringResponse struct {
// Whether this response is for a WAN ring
WAN bool
// The datacenter name this request corresponds to
Datacenter string
// A map of the encryption keys to the number of nodes they're installed on
Keys map[string]int
// The total number of nodes in this ring
NumNodes int
}
// RaftGetConfiguration is used to query the current Raft peer set.
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out RaftConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
// TODO (slackpad) Currently we made address a query parameter. Once
// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
r.params.Set("address", string(address))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringInstall is used to install a new gossip encryption key into the cluster
func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
r := op.c.newRequest("POST", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringList is used to list the gossip keys installed in the cluster
func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
r := op.c.newRequest("GET", "/v1/operator/keyring")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*KeyringResponse
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// KeyringRemove is used to remove a gossip encryption key from the cluster
func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringUse is used to change the active gossip encryption key
func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}

168
vendor/github.com/hashicorp/consul/api/operator_area.go generated vendored Normal file
View File

@ -0,0 +1,168 @@
// The /v1/operator/area endpoints are available only in Consul Enterprise and
// interact with its network area subsystem. Network areas are used to link
// together Consul servers in different Consul datacenters. With network areas,
// Consul datacenters can be linked together in ways other than a fully-connected
// mesh, as is required for Consul's WAN.
package api
import (
"net"
"time"
)
// Area defines a network area.
type Area struct {
// ID is the identifier for an area (a UUID). This must be left empty
// when creating a new area.
ID string
// PeerDatacenter is the peer Consul datacenter that will make up the
// other side of this network area. Network areas always involve a pair
// of datacenters: the datacenter where the area was created, and the
// peer datacenter. This is required.
PeerDatacenter string
// RetryJoin specifies the addresses of Consul servers to join, such as
// IPs or hostnames with an optional port number. This is optional.
RetryJoin []string
}
// AreaJoinResponse is returned when a join occurs and gives the result for each
// address.
type AreaJoinResponse struct {
// The address that was joined.
Address string
// Whether or not the join was a success.
Joined bool
// If we couldn't join, this is the message with information.
Error string
}
// SerfMember is a generic structure for reporting information about members in
// a Serf cluster. This is only used by the area endpoints right now, but this
// could be expanded to other endpoints in the future.
type SerfMember struct {
// ID is the node identifier (a UUID).
ID string
// Name is the node name.
Name string
// Addr has the IP address.
Addr net.IP
// Port is the RPC port.
Port uint16
// Datacenter is the DC name.
Datacenter string
// Role is "client", "server", or "unknown".
Role string
// Build has the version of the Consul agent.
Build string
// Protocol is the protocol of the Consul agent.
Protocol int
// Status is the Serf health status "none", "alive", "leaving", "left",
// or "failed".
Status string
// RTT is the estimated round trip time from the server handling the
// request to this member. This will be negative if no RTT estimate
// is available.
RTT time.Duration
}
// AreaCreate will create a new network area. The ID in the given structure must
// be empty and a generated ID will be returned on success.
func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
r := op.c.newRequest("POST", "/v1/operator/area")
r.setWriteOptions(q)
r.obj = area
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// AreaGet returns a single network area.
func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area
qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// AreaList returns all the available network areas.
func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area
qm, err := op.c.query("/v1/operator/area", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// AreaDelete deletes the given network area.
func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// AreaJoin attempts to join the given set of join addresses to the given
// network area. See the Area structure for details about join addresses.
func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
r.setWriteOptions(q)
r.obj = addresses
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out []*AreaJoinResponse
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, wm, nil
}
// AreaMembers lists the Serf information about the members in the given area.
func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
var out []*SerfMember
qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
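A hedged usage sketch combining the area helpers defined above (the server-side feature is Consul Enterprise only); the peer datacenter name and join address are placeholders:

```go
package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Create an area pairing the local datacenter with "dc2" (example name).
	areaID, _, err := op.AreaCreate(&consulapi.Area{PeerDatacenter: "dc2"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Join a server in the peer datacenter; the address is illustrative.
	joins, _, err := op.AreaJoin(areaID, []string{"10.0.0.10"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, j := range joins {
		fmt.Printf("%s joined=%v err=%q\n", j.Address, j.Joined, j.Error)
	}

	// List the Serf members visible through the area.
	members, _, err := op.AreaMembers(areaID, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members {
		fmt.Println(m.Name, m.Status, m.RTT)
	}
}
```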

View File

@ -0,0 +1,215 @@
package api
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
// Autopilot helps manage operator tasks related to Consul servers like removing
// failed servers from the Raft quorum.
type AutopilotConfiguration struct {
// CleanupDeadServers controls whether to remove dead servers from the Raft
// peer list when a new server joins
CleanupDeadServers bool
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold *ReadableDuration
// MaxTrailingLogs is the amount of entries in the Raft Log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime *ReadableDuration
// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
// servers into zones for redundancy. If left blank, this feature will be disabled.
RedundancyZoneTag string
// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
// strategy of waiting until enough newer-versioned servers have been added to the
// cluster before promoting them to voters.
DisableUpgradeMigration bool
// CreateIndex holds the index corresponding the creation of this configuration.
// This is a read-only field.
CreateIndex uint64
// ModifyIndex will be set to the index of the last update when retrieving the
// Autopilot configuration. Resubmitting a configuration with
// AutopilotCASConfiguration will perform a check-and-set operation which ensures
// there hasn't been a subsequent update since the configuration was retrieved.
ModifyIndex uint64
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
ID string
// Name is the node name of the server.
Name string
// Address is the address of the server.
Address string
// The status of the SerfHealth check for the server.
SerfStatus string
// Version is the Consul version of the server.
Version string
// Leader is whether this server is currently the leader.
Leader bool
// LastContact is the time since this node's last contact with the leader.
LastContact *ReadableDuration
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
// Healthy is whether or not the server is healthy according to the current
// Autopilot config.
Healthy bool
// Voter is whether this is a voting server.
Voter bool
// StableSince is the last time this server's Healthy value changed.
StableSince time.Time
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
Healthy bool
// FailureTolerance is the number of healthy servers that could be lost without
// an outage occurring.
FailureTolerance int
// Servers holds the health of each server.
Servers []ServerHealth
}
// ReadableDuration is a duration type that is serialized to JSON in human readable format.
type ReadableDuration time.Duration
func NewReadableDuration(dur time.Duration) *ReadableDuration {
d := ReadableDuration(dur)
return &d
}
func (d *ReadableDuration) String() string {
return d.Duration().String()
}
func (d *ReadableDuration) Duration() time.Duration {
if d == nil {
return time.Duration(0)
}
return time.Duration(*d)
}
func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
}
func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
if d == nil {
return fmt.Errorf("cannot unmarshal to nil pointer")
}
str := string(raw)
if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
return fmt.Errorf("must be enclosed with quotes: %s", str)
}
dur, err := time.ParseDuration(str[1 : len(str)-1])
if err != nil {
return err
}
*d = ReadableDuration(dur)
return nil
}
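As a small illustration of the ReadableDuration behaviour defined above, values marshal to quoted Go duration strings and parse back via time.ParseDuration; this is only a sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	// Marshals as the human readable string `"15s"`.
	d := consulapi.NewReadableDuration(15 * time.Second)
	out, _ := json.Marshal(d)
	fmt.Println(string(out))

	// Unmarshals back into the equivalent duration.
	var back consulapi.ReadableDuration
	if err := json.Unmarshal([]byte(`"1m30s"`), &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Duration()) // 1m30s
}
```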
// AutopilotGetConfiguration is used to query the current Autopilot configuration.
func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out AutopilotConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// AutopilotSetConfiguration is used to set the current Autopilot configuration.
func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
r.setWriteOptions(q)
r.obj = conf
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
// Autopilot configuration. The ModifyIndex value will be respected. Returns
// true on success or false on failures.
func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
r.setWriteOptions(q)
r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
r.obj = conf
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return false, err
}
defer resp.Body.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(string(buf.Bytes()), "true")
return res, nil
}
// AutopilotServerHealth is used to query the health of the servers in the cluster.
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out OperatorHealthReply
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
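A sketch of the read-modify-write pattern these Autopilot methods support, using the check-and-set variant so a concurrent update is not silently overwritten; error handling is minimal:

```go
package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Read the current configuration, including its ModifyIndex.
	conf, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Flip a setting and write it back with check-and-set semantics;
	// ok is false if someone else updated the config in the meantime.
	conf.CleanupDeadServers = true
	ok, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("applied:", ok)
}
```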

View File

@ -0,0 +1,83 @@
package api
// keyringRequest is used for performing Keyring operations
type keyringRequest struct {
Key string
}
// KeyringResponse is returned when listing the gossip encryption keys
type KeyringResponse struct {
// Whether this response is for a WAN ring
WAN bool
// The datacenter name this request corresponds to
Datacenter string
// A map of the encryption keys to the number of nodes they're installed on
Keys map[string]int
// The total number of nodes in this ring
NumNodes int
}
// KeyringInstall is used to install a new gossip encryption key into the cluster
func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
r := op.c.newRequest("POST", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringList is used to list the gossip keys installed in the cluster
func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
r := op.c.newRequest("GET", "/v1/operator/keyring")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*KeyringResponse
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// KeyringRemove is used to remove a gossip encryption key from the cluster
func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringUse is used to change the active gossip encryption key
func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
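The keyring helpers above compose into the usual key-rotation flow; the key values below are placeholders standing in for output of `consul keygen`:

```go
package main

import (
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	oldKey := "3lg9DxVfKNzI8O2l0veTTQ==" // placeholder gossip key
	newKey := "Tr1VLOvtjV65VO+V0+mVfA==" // placeholder gossip key

	// Install the new key alongside the old one, switch the cluster to
	// encrypt with it, then retire the old key.
	if err := op.KeyringInstall(newKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := op.KeyringUse(newKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := op.KeyringRemove(oldKey, nil); err != nil {
		log.Fatal(err)
	}
}
```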

View File

@ -0,0 +1,86 @@
package api
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Consul.
ID string
// Node is the node name of the server, as known by Consul, or this
// will be set to "(unknown)" otherwise.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address string
// Leader is true if this server is the current cluster leader.
Leader bool
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Consul.
Voter bool
}
// RaftConfiguration is returned when querying for the current Raft configuration.
type RaftConfiguration struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// RaftGetConfiguration is used to query the current Raft peer set.
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out RaftConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
r.params.Set("address", string(address))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by ID.
func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
r.params.Set("id", string(id))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
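For completeness, a sketch showing how the Raft helpers above might be combined to inspect the peer set and remove a stale server; the address is illustrative:

```go
package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Dump the current Raft configuration.
	conf, err := op.RaftGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range conf.Servers {
		fmt.Printf("%s %s leader=%v voter=%v\n", s.Node, s.Address, s.Leader, s.Voter)
	}

	// Remove a peer that is still in the Raft configuration but no longer
	// part of the cluster (example address).
	if err := op.RaftRemovePeerByAddress("10.0.0.99:8300", nil); err != nil {
		log.Fatal(err)
	}
}
```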

368
vendor/github.com/hashicorp/consul/commands.go generated vendored Normal file
View File

@ -0,0 +1,368 @@
package main
import (
"os"
"os/signal"
"syscall"
"github.com/hashicorp/consul/command"
"github.com/hashicorp/consul/command/agent"
"github.com/hashicorp/consul/command/base"
"github.com/hashicorp/consul/version"
"github.com/mitchellh/cli"
)
// Commands is the mapping of all the available Consul commands.
var Commands map[string]cli.CommandFactory
func init() {
ui := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}
Commands = map[string]cli.CommandFactory{
"agent": func() (cli.Command, error) {
return &agent.Command{
Command: base.Command{
Flags: base.FlagSetNone,
Ui: ui,
},
Revision: version.GitCommit,
Version: version.Version,
VersionPrerelease: version.VersionPrerelease,
HumanVersion: version.GetHumanVersion(),
ShutdownCh: make(chan struct{}),
}, nil
},
"configtest": func() (cli.Command, error) {
return &command.ConfigTestCommand{
Command: base.Command{
Flags: base.FlagSetNone,
Ui: ui,
},
}, nil
},
"event": func() (cli.Command, error) {
return &command.EventCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"exec": func() (cli.Command, error) {
return &command.ExecCommand{
ShutdownCh: makeShutdownCh(),
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"force-leave": func() (cli.Command, error) {
return &command.ForceLeaveCommand{
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"info": func() (cli.Command, error) {
return &command.InfoCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetClientHTTP,
},
}, nil
},
"join": func() (cli.Command, error) {
return &command.JoinCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetClientHTTP,
},
}, nil
},
"keygen": func() (cli.Command, error) {
return &command.KeygenCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetNone,
},
}, nil
},
"keyring": func() (cli.Command, error) {
return &command.KeyringCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetClientHTTP,
},
}, nil
},
"kv": func() (cli.Command, error) {
return &command.KVCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetNone,
},
}, nil
},
"kv delete": func() (cli.Command, error) {
return &command.KVDeleteCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetHTTP,
},
}, nil
},
"kv get": func() (cli.Command, error) {
return &command.KVGetCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetHTTP,
},
}, nil
},
"kv put": func() (cli.Command, error) {
return &command.KVPutCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetHTTP,
},
}, nil
},
"kv export": func() (cli.Command, error) {
return &command.KVExportCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetHTTP,
},
}, nil
},
"kv import": func() (cli.Command, error) {
return &command.KVImportCommand{
Command: base.Command{
Ui: ui,
Flags: base.FlagSetHTTP,
},
}, nil
},
"leave": func() (cli.Command, error) {
return &command.LeaveCommand{
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"lock": func() (cli.Command, error) {
return &command.LockCommand{
ShutdownCh: makeShutdownCh(),
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"maint": func() (cli.Command, error) {
return &command.MaintCommand{
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"members": func() (cli.Command, error) {
return &command.MembersCommand{
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"monitor": func() (cli.Command, error) {
return &command.MonitorCommand{
ShutdownCh: makeShutdownCh(),
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"operator": func() (cli.Command, error) {
return &command.OperatorCommand{
Command: base.Command{
Flags: base.FlagSetNone,
Ui: ui,
},
}, nil
},
"operator autopilot": func() (cli.Command, error) {
return &command.OperatorAutopilotCommand{
Command: base.Command{
Flags: base.FlagSetNone,
Ui: ui,
},
}, nil
},
"operator autopilot get-config": func() (cli.Command, error) {
return &command.OperatorAutopilotGetCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"operator autopilot set-config": func() (cli.Command, error) {
return &command.OperatorAutopilotSetCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"operator raft": func() (cli.Command, error) {
return &command.OperatorRaftCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"operator raft list-peers": func() (cli.Command, error) {
return &command.OperatorRaftListCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"operator raft remove-peer": func() (cli.Command, error) {
return &command.OperatorRaftRemoveCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"reload": func() (cli.Command, error) {
return &command.ReloadCommand{
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"rtt": func() (cli.Command, error) {
return &command.RTTCommand{
Command: base.Command{
Flags: base.FlagSetClientHTTP,
Ui: ui,
},
}, nil
},
"snapshot": func() (cli.Command, error) {
return &command.SnapshotCommand{
Ui: ui,
}, nil
},
"snapshot restore": func() (cli.Command, error) {
return &command.SnapshotRestoreCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"snapshot save": func() (cli.Command, error) {
return &command.SnapshotSaveCommand{
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
"snapshot inspect": func() (cli.Command, error) {
return &command.SnapshotInspectCommand{
Command: base.Command{
Flags: base.FlagSetNone,
Ui: ui,
},
}, nil
},
"validate": func() (cli.Command, error) {
return &command.ValidateCommand{
Command: base.Command{
Flags: base.FlagSetNone,
Ui: ui,
},
}, nil
},
"version": func() (cli.Command, error) {
return &command.VersionCommand{
HumanVersion: version.GetHumanVersion(),
Ui: ui,
}, nil
},
"watch": func() (cli.Command, error) {
return &command.WatchCommand{
ShutdownCh: makeShutdownCh(),
Command: base.Command{
Flags: base.FlagSetHTTP,
Ui: ui,
},
}, nil
},
}
}
// makeShutdownCh returns a channel that can be used for shutdown
// notifications for commands. This channel will send a message for every
// interrupt or SIGTERM received.
func makeShutdownCh() <-chan struct{} {
resultCh := make(chan struct{})
signalCh := make(chan os.Signal, 4)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
go func() {
for {
<-signalCh
resultCh <- struct{}{}
}
}()
return resultCh
}

View File

@ -1,9 +1,44 @@
package structs
import (
"time"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
// AutopilotConfig holds the Autopilot configuration for a cluster.
type AutopilotConfig struct {
// CleanupDeadServers controls whether to remove dead servers when a new
// server is added to the Raft peers.
CleanupDeadServers bool
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold time.Duration
// MaxTrailingLogs is the amount of entries in the Raft Log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime time.Duration
// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
// servers into zones for redundancy. If left blank, this feature will be disabled.
RedundancyZoneTag string
// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
// strategy of waiting until enough newer-versioned servers have been added to the
// cluster before promoting them to voters.
DisableUpgradeMigration bool
// RaftIndex stores the create/modify indexes of this configuration.
RaftIndex
}
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
@ -38,20 +73,149 @@ type RaftConfigurationResponse struct {
Index uint64
}
// RaftRemovePeerRequest is used by the Operator endpoint to apply a Raft
// operation on a specific Raft peer by address in the form of "IP:port".
type RaftRemovePeerRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// Address is the peer to remove, in the form "IP:port".
Address raft.ServerAddress
// ID is the peer ID to remove.
ID raft.ServerID
// WriteRequest holds the ACL token to go along with this request.
WriteRequest
}
// RequestDatacenter returns the datacenter for a given request.
func (op *RaftRemovePeerRequest) RequestDatacenter() string {
return op.Datacenter
}
// AutopilotSetConfigRequest is used by the Operator endpoint to update the
// current Autopilot configuration of the cluster.
type AutopilotSetConfigRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// Config is the new Autopilot configuration to use.
Config AutopilotConfig
// CAS controls whether to use check-and-set semantics for this request.
CAS bool
// WriteRequest holds the ACL token to go along with this request.
WriteRequest
}
// RequestDatacenter returns the datacenter for a given request.
func (op *AutopilotSetConfigRequest) RequestDatacenter() string {
return op.Datacenter
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
ID string
// Name is the node name of the server.
Name string
// Address is the address of the server.
Address string
// The status of the SerfHealth check for the server.
SerfStatus serf.MemberStatus
// Version is the Consul version of the server.
Version string
// Leader is whether this server is currently the leader.
Leader bool
// LastContact is the time since this node's last contact with the leader.
LastContact time.Duration
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
// Healthy is whether or not the server is healthy according to the current
// Autopilot config.
Healthy bool
// Voter is whether this is a voting server.
Voter bool
// StableSince is the last time this server's Healthy value changed.
StableSince time.Time
}
// IsHealthy determines whether this ServerHealth is considered healthy
// based on the given Autopilot config
func (h *ServerHealth) IsHealthy(lastTerm uint64, leaderLastIndex uint64, autopilotConf *AutopilotConfig) bool {
if h.SerfStatus != serf.StatusAlive {
return false
}
if h.LastContact > autopilotConf.LastContactThreshold || h.LastContact < 0 {
return false
}
if h.LastTerm != lastTerm {
return false
}
if leaderLastIndex > autopilotConf.MaxTrailingLogs && h.LastIndex < leaderLastIndex-autopilotConf.MaxTrailingLogs {
return false
}
return true
}
// IsStable returns true if the ServerHealth is in a stable, passing state
// according to the given AutopilotConfig
func (h *ServerHealth) IsStable(now time.Time, conf *AutopilotConfig) bool {
if h == nil {
return false
}
if !h.Healthy {
return false
}
if now.Sub(h.StableSince) < conf.ServerStabilizationTime {
return false
}
return true
}
// ServerStats holds miscellaneous Raft metrics for a server
type ServerStats struct {
// LastContact is the time since this node's last contact with the leader.
LastContact string
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
Healthy bool
// FailureTolerance is the number of healthy servers that could be lost without
// an outage occurring.
FailureTolerance int
// Servers holds the health of each server.
Servers []ServerHealth
}
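To make the health thresholds above concrete, a hypothetical check of a single server against an AutopilotConfig; the numbers are arbitrary and this is not how the Consul leader drives these methods internally:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/consul/structs"
	"github.com/hashicorp/serf/serf"
)

func main() {
	conf := &structs.AutopilotConfig{
		LastContactThreshold:    200 * time.Millisecond,
		MaxTrailingLogs:         250,
		ServerStabilizationTime: 10 * time.Second,
	}

	health := &structs.ServerHealth{
		SerfStatus:  serf.StatusAlive,
		LastContact: 50 * time.Millisecond,
		LastTerm:    2,
		LastIndex:   1000,
		Healthy:     true,
		StableSince: time.Now().Add(-30 * time.Second),
	}

	// Healthy: alive, recent leader contact, same term, and within 250
	// entries of the leader's last index (1100 - 1000 <= 250).
	fmt.Println(health.IsHealthy(2, 1100, conf))

	// Stable: has been Healthy for longer than ServerStabilizationTime.
	fmt.Println(health.IsStable(time.Now(), conf))
}
```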

View File

@ -40,6 +40,8 @@ const (
CoordinateBatchUpdateType
PreparedQueryRequestType
TxnRequestType
AutopilotRequestType
AreaRequestType
)
const (
@ -199,6 +201,14 @@ type RegisterRequest struct {
Service *NodeService
Check *HealthCheck
Checks HealthChecks
// SkipNodeUpdate can be used when a register request is intended for
// updating a service and/or checks, but doesn't want to overwrite any
// node information if the node is already registered. If the node
// doesn't exist, it will still be created, but if the node exists, any
// node portion of this update will not apply.
SkipNodeUpdate bool
WriteRequest
}
@ -215,6 +225,12 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool {
return true
}
// If we've been asked to skip the node update, then say there are no
// changes.
if r.SkipNodeUpdate {
return false
}
// Check if any of the node-level fields are being changed.
if r.ID != node.ID ||
r.Node != node.Node ||
@ -899,9 +915,11 @@ type IndexedCoordinates struct {
}
// DatacenterMap is used to represent a list of nodes with their raw coordinates,
// associated with a datacenter. Coordinates are only compatible between nodes in
// the same area.
type DatacenterMap struct {
Datacenter string
AreaID types.AreaID
Coordinates Coordinates
}

28
vendor/github.com/hashicorp/consul/lib/rtt.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
package lib
import (
"math"
"time"
"github.com/hashicorp/serf/coordinate"
)
// ComputeDistance returns the distance between the two network coordinates in
// seconds. If either of the coordinates is nil then this will return positive
// infinity.
func ComputeDistance(a *coordinate.Coordinate, b *coordinate.Coordinate) float64 {
if a == nil || b == nil {
return math.Inf(1.0)
}
return a.DistanceTo(b).Seconds()
}
// GenerateCoordinate creates a new coordinate with the given distance from the
// origin. This should only be used for tests.
func GenerateCoordinate(rtt time.Duration) *coordinate.Coordinate {
coord := coordinate.NewCoordinate(coordinate.DefaultConfig())
coord.Vec[0] = rtt.Seconds()
coord.Height = 0
return coord
}
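A short sketch exercising these helpers: two synthetic coordinates generated at known offsets from the origin yield a distance equal to the difference of their RTTs (GenerateCoordinate zeroes the height, so only the first axis contributes):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/lib"
)

func main() {
	// Coordinates 10ms and 35ms from the origin along the same axis.
	a := lib.GenerateCoordinate(10 * time.Millisecond)
	b := lib.GenerateCoordinate(35 * time.Millisecond)

	// Distance is reported in seconds: 0.025 here.
	fmt.Println(lib.ComputeDistance(a, b))

	// A nil coordinate yields +Inf, signalling "no estimate available".
	fmt.Println(lib.ComputeDistance(a, nil))
}
```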

61
vendor/github.com/hashicorp/consul/main.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
package main
import (
"fmt"
"github.com/mitchellh/cli"
"io/ioutil"
"log"
"os"
"github.com/hashicorp/consul/lib"
)
func init() {
lib.SeedMathRand()
}
func main() {
os.Exit(realMain())
}
func realMain() int {
log.SetOutput(ioutil.Discard)
// Get the command line args. We shortcut "--version" and "-v" to
// just show the version.
args := os.Args[1:]
for _, arg := range args {
if arg == "--" {
break
}
if arg == "-v" || arg == "--version" {
newArgs := make([]string, len(args)+1)
newArgs[0] = "version"
copy(newArgs[1:], args)
args = newArgs
break
}
}
// Filter out the configtest command from the help display
var included []string
for command := range Commands {
if command != "configtest" {
included = append(included, command)
}
}
cli := &cli.CLI{
Args: args,
Commands: Commands,
HelpFunc: cli.FilteredHelpFunc(included, cli.BasicHelpFunc("consul")),
}
exitCode, err := cli.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error())
return 1
}
return exitCode
}

82
vendor/github.com/hashicorp/consul/make.bat generated vendored Normal file
View File

@ -0,0 +1,82 @@
@echo off
setlocal
set _EXITCODE=0
set _DEPSFILE=%TEMP%\consul-deps.txt
go list -f "{{range .TestImports}}{{.}} {{end}}" .\... >%_DEPSFILE%
set _PKGSFILE=%TEMP%\consul-pkgs.txt
go list .\... >%_PKGSFILE%
set _VETARGS=-asmdecl -atomic -bool -buildtags -copylocks -methods^
-nilfunc -printf -rangeloops -shift -structtags -unsafeptr
if defined VETARGS set _VETARGS=%VETARGS%
:deps
echo --^> Installing build dependencies
for /f "delims=" %%d in (%_DEPSFILE%) do go get -d -v .\... %%d
if [%1]==[] goto all
if x%1==xdeps goto end
goto args
:args
for %%a in (all,cover,test,vet,updatedeps) do (if x%1==x%%a goto %%a)
echo.
echo Unknown make target: %1
echo Expected one of "all", "cover", "deps", "test", "vet", or "updatedeps".
set _EXITCODE=1
goto end
:all
md bin 2>NUL
call .\scripts\windows\build.bat %CD%
if not errorlevel 1 goto end
echo.
echo BUILD FAILED
set _EXITCODE=%ERRORLEVEL%
goto end
:cover
set _COVER=--cover
go tool cover 2>NUL
if %ERRORLEVEL% EQU 3 go get golang.org/x/tools/cmd/cover
goto test
:test
call .\scripts\windows\verify_no_uuid.bat %CD%
if %ERRORLEVEL% EQU 0 goto _test
echo.
echo UUID verification failed.
set _EXITCODE=%ERRORLEVEL%
goto end
:_test
for /f "delims=" %%p in (%_PKGSFILE%) do (
go test %_COVER% %%p
if errorlevel 1 set _TESTFAIL=1
)
if x%_TESTFAIL%==x1 set _EXITCODE=1 && goto end
goto vet
:vet
go tool vet 2>NUL
if %ERRORLEVEL% EQU 3 go get golang.org/x/tools/cmd/vet
echo --^> Running go tool vet %_VETARGS%
go tool vet %_VETARGS% .
echo.
if %ERRORLEVEL% EQU 0 echo ALL TESTS PASSED && goto end
echo Vet found suspicious constructs. Please check the reported constructs
echo and fix them if necessary before submitting the code for review.
set _EXITCODE=%ERRORLEVEL%
goto end
:updatedeps
echo --^> Updating build dependencies
for /f "delims=" %%d in (%_DEPSFILE%) do go get -d -f -u .\... %%d
goto end
:end
del /F %_DEPSFILE% %_PKGSFILE% 2>NUL
exit /B %_EXITCODE%

View File

@ -25,41 +25,51 @@ import (
"github.com/hashicorp/consul/testutil" "github.com/hashicorp/consul/testutil"
) )
func TestMain(t *testing.T) { func TestFoo_bar(t *testing.T) {
// Create a test Consul server // Create a test Consul server
srv1 := testutil.NewTestServer(t) srv1, err := testutil.NewTestServer()
if err != nil {
t.Fatal(err)
}
defer srv1.Stop() defer srv1.Stop()
// Create a secondary server, passing in configuration // Create a secondary server, passing in configuration
// to avoid bootstrapping as we are forming a cluster. // to avoid bootstrapping as we are forming a cluster.
srv2 := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) { srv2, err := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
c.Bootstrap = false c.Bootstrap = false
}) })
if err != nil {
t.Fatal(err)
}
defer srv2.Stop() defer srv2.Stop()
// Join the servers together // Join the servers together
srv1.JoinLAN(srv2.LANAddr) srv1.JoinLAN(t, srv2.LANAddr)
// Create a test key/value pair // Create a test key/value pair
srv1.SetKV("foo", []byte("bar")) srv1.SetKV(t, "foo", []byte("bar"))
// Create lots of test key/value pairs // Create lots of test key/value pairs
srv1.PopulateKV(map[string][]byte{ srv1.PopulateKV(t, map[string][]byte{
"bar": []byte("123"), "bar": []byte("123"),
"baz": []byte("456"), "baz": []byte("456"),
}) })
// Create a service // Create a service
srv1.AddService("redis", structs.HealthPassing, []string{"master"}) srv1.AddService(t, "redis", structs.HealthPassing, []string{"master"})
// Create a service check // Create a service check
srv1.AddCheck("service:redis", "redis", structs.HealthPassing) srv1.AddCheck(t, "service:redis", "redis", structs.HealthPassing)
// Create a node check // Create a node check
srv1.AddCheck("mem", "", structs.HealthCritical) srv1.AddCheck(t, "mem", "", structs.HealthCritical)
// The HTTPAddr field contains the address of the Consul // The HTTPAddr field contains the address of the Consul
// API on the new test server instance. // API on the new test server instance.
println(srv1.HTTPAddr) println(srv1.HTTPAddr)
// All functions also have a wrapper method to limit the passing of "t"
wrap := srv1.Wrap(t)
wrap.SetKV("foo", []byte("bar"))
} }
``` ```

View File

@ -12,8 +12,7 @@ package testutil
// otherwise cause an import cycle.
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
@ -25,8 +24,9 @@ import (
"strconv"
"strings"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-uuid"
"github.com/pkg/errors"
)
// TestPerformanceConfig configures the performance parameters.
@ -39,10 +39,13 @@ type TestPerformanceConfig struct {
type TestPortConfig struct {
DNS int `json:"dns,omitempty"`
HTTP int `json:"http,omitempty"`
HTTPS int `json:"https,omitempty"`
SerfLan int `json:"serf_lan,omitempty"`
SerfWan int `json:"serf_wan,omitempty"`
Server int `json:"server,omitempty"`
// Deprecated
RPC int `json:"rpc,omitempty"`
}
// TestAddressConfig contains the bind addresses for various
@ -54,6 +57,7 @@ type TestAddressConfig struct {
// TestServerConfig is the main server configuration struct.
type TestServerConfig struct {
NodeName string `json:"node_name"`
NodeID string `json:"node_id"`
NodeMeta map[string]string `json:"node_meta,omitempty"`
Performance *TestPerformanceConfig `json:"performance,omitempty"`
Bootstrap bool `json:"bootstrap,omitempty"`
@ -65,10 +69,17 @@ type TestServerConfig struct {
Bind string `json:"bind_addr,omitempty"`
Addresses *TestAddressConfig `json:"addresses,omitempty"`
Ports *TestPortConfig `json:"ports,omitempty"`
RaftProtocol int `json:"raft_protocol,omitempty"`
ACLMasterToken string `json:"acl_master_token,omitempty"`
ACLDatacenter string `json:"acl_datacenter,omitempty"`
ACLDefaultPolicy string `json:"acl_default_policy,omitempty"`
ACLEnforceVersion8 bool `json:"acl_enforce_version_8"`
Encrypt string `json:"encrypt,omitempty"`
CAFile string `json:"ca_file,omitempty"`
CertFile string `json:"cert_file,omitempty"`
KeyFile string `json:"key_file,omitempty"`
VerifyIncoming bool `json:"verify_incoming,omitempty"`
VerifyOutgoing bool `json:"verify_outgoing,omitempty"`
Stdout, Stderr io.Writer `json:"-"`
Args []string `json:"-"`
}
@ -80,8 +91,14 @@ type ServerConfigCallback func(c *TestServerConfig)
// defaultServerConfig returns a new TestServerConfig struct
// with all of the listen ports incremented by one.
func defaultServerConfig() *TestServerConfig {
nodeID, err := uuid.GenerateUUID()
if err != nil {
panic(err)
}
return &TestServerConfig{
NodeName: fmt.Sprintf("node%d", randomPort()),
NodeID: nodeID,
DisableCheckpoint: true,
Performance: &TestPerformanceConfig{
RaftMultiplier: 1,
@ -94,10 +111,11 @@ func defaultServerConfig() *TestServerConfig {
Ports: &TestPortConfig{
DNS: randomPort(),
HTTP: randomPort(),
HTTPS: randomPort(),
SerfLan: randomPort(),
SerfWan: randomPort(),
Server: randomPort(),
RPC: randomPort(),
},
}
}
@ -129,15 +147,6 @@ type TestCheck struct {
TTL string `json:",omitempty"`
}
// TestingT is an interface wrapper around TestingT
type TestingT interface {
Logf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Fatal(args ...interface{})
Skip(args ...interface{})
}
// TestKVResponse is what we use to decode KV data.
type TestKVResponse struct {
Value string
@ -147,9 +156,9 @@ type TestKVResponse struct {
type TestServer struct {
cmd *exec.Cmd
Config *TestServerConfig
t TestingT
HTTPAddr string
HTTPSAddr string
LANAddr string
WANAddr string
@ -158,27 +167,29 @@ type TestServer struct {
// NewTestServer is an easy helper method to create a new Consul
// test server with the most basic configuration.
func NewTestServer() (*TestServer, error) {
return NewTestServerConfig(nil)
}
// NewTestServerConfig creates a new TestServer, and makes a call to an optional
// callback function to modify the configuration. If there is an error
// configuring or starting the server, the server will NOT be running when the
// function returns (thus you do not need to stop it).
func NewTestServerConfig(cb ServerConfigCallback) (*TestServer, error) {
if path, err := exec.LookPath("consul"); err != nil || path == "" {
return nil, fmt.Errorf("consul not found on $PATH - download and install " +
"consul or skip this test")
}
dataDir, err := ioutil.TempDir("", "consul")
if err != nil {
return nil, errors.Wrap(err, "failed creating tempdir")
}
configFile, err := ioutil.TempFile(dataDir, "config")
if err != nil {
defer os.RemoveAll(dataDir)
return nil, errors.Wrap(err, "failed creating temp config")
}
consulConfig := defaultServerConfig()
@ -190,11 +201,13 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer {
configContent, err := json.Marshal(consulConfig)
if err != nil {
return nil, errors.Wrap(err, "failed marshaling json")
}
if _, err := configFile.Write(configContent); err != nil {
defer configFile.Close()
defer os.RemoveAll(dataDir)
return nil, errors.Wrap(err, "failed writing config content")
}
configFile.Close()
@ -215,7 +228,7 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer {
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Start(); err != nil {
return nil, errors.Wrap(err, "failed starting command")
}
var httpAddr string
@ -223,7 +236,7 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer {
if strings.HasPrefix(consulConfig.Addresses.HTTP, "unix://") {
httpAddr = consulConfig.Addresses.HTTP
trans := cleanhttp.DefaultTransport()
trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", httpAddr[7:])
}
client = &http.Client{
@ -237,9 +250,9 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer {
server := &TestServer{
Config: consulConfig,
cmd: cmd,
t: t,
HTTPAddr: httpAddr,
HTTPSAddr: fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.HTTPS),
LANAddr: fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.SerfLan),
WANAddr: fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.SerfWan),
@ -247,65 +260,77 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer {
}
// Wait for the server to be ready
var startErr error
if consulConfig.Bootstrap {
startErr = server.waitForLeader()
} else {
startErr = server.waitForAPI()
}
if startErr != nil {
defer server.Stop()
return nil, errors.Wrap(startErr, "failed waiting for server to start")
}
return server, nil
}
// Stop stops the test Consul server, and removes the Consul data
// directory once we are done.
func (s *TestServer) Stop() error {
defer os.RemoveAll(s.Config.DataDir)
if s.cmd != nil {
if s.cmd.Process != nil {
if err := s.cmd.Process.Kill(); err != nil {
return errors.Wrap(err, "failed to kill consul server")
}
} }
// wait for the process to exit to be sure that the data dir can be
// deleted on all platforms.
return s.cmd.Wait()
}
// There was no process
return nil
}
// waitForAPI waits for only the agent HTTP endpoint to start
// responding. This is an indication that the agent has started,
// but will likely return before a leader is elected.
func (s *TestServer) waitForAPI() error {
if err := WaitForResult(func() (bool, error) {
resp, err := s.HttpClient.Get(s.url("/v1/agent/self"))
if err != nil {
return false, errors.Wrap(err, "failed http get")
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
return false, errors.Wrap(err, "failed OK response")
}
return true, nil
}); err != nil {
return errors.Wrap(err, "failed waiting for API")
}
return nil
}
// waitForLeader waits for the Consul server's HTTP API to become
// available, and then waits for a known leader and an index of
// 1 or more to be observed to confirm leader election is done.
// It then waits to ensure the anti-entropy sync has completed.
func (s *TestServer) waitForLeader() error {
var index int64
if err := WaitForResult(func() (bool, error) {
// Query the API and check the status code.
url := s.url(fmt.Sprintf("/v1/catalog/nodes?index=%d&wait=2s", index))
resp, err := s.HttpClient.Get(url)
if err != nil {
return false, errors.Wrap(err, "failed http get")
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
return false, errors.Wrap(err, "failed OK response")
}
// Ensure we have a leader and a node registration.
@ -314,10 +339,10 @@ func (s *TestServer) waitForLeader() {
}
index, err = strconv.ParseInt(resp.Header.Get("X-Consul-Index"), 10, 64)
if err != nil {
return false, errors.Wrap(err, "bad consul index")
}
if index == 0 {
return false, fmt.Errorf("consul index is 0")
}
// Watch for the anti-entropy sync to finish.
@ -337,192 +362,8 @@ func (s *TestServer) waitForLeader() {
return false, fmt.Errorf("No lan tagged addresses")
}
return true, nil
}); err != nil {
return errors.Wrap(err, "failed waiting for leader")
s.t.Fatalf("err: %s", err)
})
}
// url is a helper function which takes a relative URL and
// makes it into a proper URL against the local Consul server.
func (s *TestServer) url(path string) string {
return fmt.Sprintf("http://127.0.0.1:%d%s", s.Config.Ports.HTTP, path)
}
// requireOK checks the HTTP response code and ensures it is acceptable.
func (s *TestServer) requireOK(resp *http.Response) error {
if resp.StatusCode != 200 {
return fmt.Errorf("Bad status code: %d", resp.StatusCode)
} }
return nil
}
// put performs a new HTTP PUT request.
func (s *TestServer) put(path string, body io.Reader) *http.Response {
req, err := http.NewRequest("PUT", s.url(path), body)
if err != nil {
s.t.Fatalf("err: %s", err)
}
resp, err := s.HttpClient.Do(req)
if err != nil {
s.t.Fatalf("err: %s", err)
}
if err := s.requireOK(resp); err != nil {
defer resp.Body.Close()
s.t.Fatal(err)
}
return resp
}
// get performs a new HTTP GET request.
func (s *TestServer) get(path string) *http.Response {
resp, err := s.HttpClient.Get(s.url(path))
if err != nil {
s.t.Fatalf("err: %s", err)
}
if err := s.requireOK(resp); err != nil {
defer resp.Body.Close()
s.t.Fatal(err)
}
return resp
}
// encodePayload returns a new io.Reader wrapping the encoded contents
// of the payload, suitable for passing directly to a new request.
func (s *TestServer) encodePayload(payload interface{}) io.Reader {
var encoded bytes.Buffer
enc := json.NewEncoder(&encoded)
if err := enc.Encode(payload); err != nil {
s.t.Fatalf("err: %s", err)
}
return &encoded
}
// JoinLAN is used to join nodes within the same datacenter.
func (s *TestServer) JoinLAN(addr string) {
resp := s.get("/v1/agent/join/" + addr)
resp.Body.Close()
}
// JoinWAN is used to join remote datacenters together.
func (s *TestServer) JoinWAN(addr string) {
resp := s.get("/v1/agent/join/" + addr + "?wan=1")
resp.Body.Close()
}
// SetKV sets an individual key in the K/V store.
func (s *TestServer) SetKV(key string, val []byte) {
resp := s.put("/v1/kv/"+key, bytes.NewBuffer(val))
resp.Body.Close()
}
// GetKV retrieves a single key and returns its value
func (s *TestServer) GetKV(key string) []byte {
resp := s.get("/v1/kv/" + key)
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
s.t.Fatalf("err: %s", err)
}
var result []*TestKVResponse
if err := json.Unmarshal(raw, &result); err != nil {
s.t.Fatalf("err: %s", err)
}
if len(result) < 1 {
s.t.Fatalf("key does not exist: %s", key)
}
v, err := base64.StdEncoding.DecodeString(result[0].Value)
if err != nil {
s.t.Fatalf("err: %s", err)
}
return v
}
// PopulateKV fills the Consul KV with data from a generic map.
func (s *TestServer) PopulateKV(data map[string][]byte) {
for k, v := range data {
s.SetKV(k, v)
}
}
// ListKV returns a list of keys present in the KV store. This will list all
// keys under the given prefix recursively and return them as a slice.
func (s *TestServer) ListKV(prefix string) []string {
resp := s.get("/v1/kv/" + prefix + "?keys")
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
s.t.Fatalf("err: %s", err)
}
var result []string
if err := json.Unmarshal(raw, &result); err != nil {
s.t.Fatalf("err: %s", err)
}
return result
}
// AddService adds a new service to the Consul instance. It also
// automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical".
func (s *TestServer) AddService(name, status string, tags []string) {
svc := &TestService{
Name: name,
Tags: tags,
}
payload := s.encodePayload(svc)
s.put("/v1/agent/service/register", payload)
chkName := "service:" + name
chk := &TestCheck{
Name: chkName,
ServiceID: name,
TTL: "10m",
}
payload = s.encodePayload(chk)
s.put("/v1/agent/check/register", payload)
switch status {
case structs.HealthPassing:
s.put("/v1/agent/check/pass/"+chkName, nil)
case structs.HealthWarning:
s.put("/v1/agent/check/warn/"+chkName, nil)
case structs.HealthCritical:
s.put("/v1/agent/check/fail/"+chkName, nil)
default:
s.t.Fatalf("Unrecognized status: %s", status)
}
}
// AddCheck adds a check to the Consul instance. If the serviceID is
// left empty (""), then the check will be associated with the node.
// The check status may be "passing", "warning", or "critical".
func (s *TestServer) AddCheck(name, serviceID, status string) {
chk := &TestCheck{
ID: name,
Name: name,
TTL: "10m",
}
if serviceID != "" {
chk.ServiceID = serviceID
}
payload := s.encodePayload(chk)
s.put("/v1/agent/check/register", payload)
switch status {
case structs.HealthPassing:
s.put("/v1/agent/check/pass/"+name, nil)
case structs.HealthWarning:
s.put("/v1/agent/check/warn/"+name, nil)
case structs.HealthCritical:
s.put("/v1/agent/check/fail/"+name, nil)
default:
s.t.Fatalf("Unrecognized status: %s", status)
}
}

View File

@ -0,0 +1,237 @@
package testutil
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"testing"
"github.com/hashicorp/consul/consul/structs"
"github.com/pkg/errors"
)
// JoinLAN is used to join local datacenters together.
func (s *TestServer) JoinLAN(t *testing.T, addr string) {
resp := s.get(t, "/v1/agent/join/"+addr)
defer resp.Body.Close()
}
// JoinWAN is used to join remote datacenters together.
func (s *TestServer) JoinWAN(t *testing.T, addr string) {
resp := s.get(t, "/v1/agent/join/"+addr+"?wan=1")
resp.Body.Close()
}
// SetKV sets an individual key in the K/V store.
func (s *TestServer) SetKV(t *testing.T, key string, val []byte) {
resp := s.put(t, "/v1/kv/"+key, bytes.NewBuffer(val))
resp.Body.Close()
}
// SetKVString sets an individual key in the K/V store, but accepts a string
// instead of []byte.
func (s *TestServer) SetKVString(t *testing.T, key string, val string) {
resp := s.put(t, "/v1/kv/"+key, bytes.NewBufferString(val))
resp.Body.Close()
}
// GetKV retrieves a single key and returns its value
func (s *TestServer) GetKV(t *testing.T, key string) []byte {
resp := s.get(t, "/v1/kv/"+key)
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("failed to read body: %s", err)
}
var result []*TestKVResponse
if err := json.Unmarshal(raw, &result); err != nil {
t.Fatalf("failed to unmarshal: %s", err)
}
if len(result) < 1 {
t.Fatalf("key does not exist: %s", key)
}
v, err := base64.StdEncoding.DecodeString(result[0].Value)
if err != nil {
t.Fatalf("failed to base64 decode: %s", err)
}
return v
}
// GetKVString retrieves a value from the store, but returns as a string instead
// of []byte.
func (s *TestServer) GetKVString(t *testing.T, key string) string {
return string(s.GetKV(t, key))
}
// PopulateKV fills the Consul KV with data from a generic map.
func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) {
for k, v := range data {
s.SetKV(t, k, v)
}
}
// ListKV returns a list of keys present in the KV store. This will list all
// keys under the given prefix recursively and return them as a slice.
func (s *TestServer) ListKV(t *testing.T, prefix string) []string {
resp := s.get(t, "/v1/kv/"+prefix+"?keys")
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("failed to read body: %s", err)
}
var result []string
if err := json.Unmarshal(raw, &result); err != nil {
t.Fatalf("failed to unmarshal: %s", err)
}
return result
}
// AddService adds a new service to the Consul instance. It also
// automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical".
func (s *TestServer) AddService(t *testing.T, name, status string, tags []string) {
svc := &TestService{
Name: name,
Tags: tags,
}
payload, err := s.encodePayload(svc)
if err != nil {
t.Fatal(err)
}
s.put(t, "/v1/agent/service/register", payload)
chkName := "service:" + name
chk := &TestCheck{
Name: chkName,
ServiceID: name,
TTL: "10m",
}
payload, err = s.encodePayload(chk)
if err != nil {
t.Fatal(err)
}
s.put(t, "/v1/agent/check/register", payload)
switch status {
case structs.HealthPassing:
s.put(t, "/v1/agent/check/pass/"+chkName, nil)
case structs.HealthWarning:
s.put(t, "/v1/agent/check/warn/"+chkName, nil)
case structs.HealthCritical:
s.put(t, "/v1/agent/check/fail/"+chkName, nil)
default:
t.Fatalf("Unrecognized status: %s", status)
}
}
// AddCheck adds a check to the Consul instance. If the serviceID is
// left empty (""), then the check will be associated with the node.
// The check status may be "passing", "warning", or "critical".
func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) {
chk := &TestCheck{
ID: name,
Name: name,
TTL: "10m",
}
if serviceID != "" {
chk.ServiceID = serviceID
}
payload, err := s.encodePayload(chk)
if err != nil {
t.Fatal(err)
}
s.put(t, "/v1/agent/check/register", payload)
switch status {
case structs.HealthPassing:
s.put(t, "/v1/agent/check/pass/"+name, nil)
case structs.HealthWarning:
s.put(t, "/v1/agent/check/warn/"+name, nil)
case structs.HealthCritical:
s.put(t, "/v1/agent/check/fail/"+name, nil)
default:
t.Fatalf("Unrecognized status: %s", status)
}
}
// put performs a new HTTP PUT request.
func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Response {
req, err := http.NewRequest("PUT", s.url(path), body)
if err != nil {
t.Fatalf("failed to create PUT request: %s", err)
}
resp, err := s.HttpClient.Do(req)
if err != nil {
t.Fatalf("failed to make PUT request: %s", err)
}
if err := s.requireOK(resp); err != nil {
defer resp.Body.Close()
t.Fatalf("not OK PUT: %s", err)
}
return resp
}
// get performs a new HTTP GET request.
func (s *TestServer) get(t *testing.T, path string) *http.Response {
resp, err := s.HttpClient.Get(s.url(path))
if err != nil {
t.Fatalf("failed to create GET request: %s", err)
}
if err := s.requireOK(resp); err != nil {
defer resp.Body.Close()
t.Fatalf("not OK GET: %s", err)
}
return resp
}
// encodePayload returns a new io.Reader wrapping the encoded contents
// of the payload, suitable for passing directly to a new request.
func (s *TestServer) encodePayload(payload interface{}) (io.Reader, error) {
var encoded bytes.Buffer
enc := json.NewEncoder(&encoded)
if err := enc.Encode(payload); err != nil {
return nil, errors.Wrap(err, "failed to encode payload")
}
return &encoded, nil
}
// url is a helper function which takes a relative URL and
// makes it into a proper URL against the local Consul server.
func (s *TestServer) url(path string) string {
if s == nil {
log.Fatal("s is nil")
}
if s.Config == nil {
log.Fatal("s.Config is nil")
}
if s.Config.Ports == nil {
log.Fatal("s.Config.Ports is nil")
}
if s.Config.Ports.HTTP == 0 {
log.Fatal("s.Config.Ports.HTTP is 0")
}
if path == "" {
log.Fatal("path is empty")
}
return fmt.Sprintf("http://127.0.0.1:%d%s", s.Config.Ports.HTTP, path)
}
// requireOK checks the HTTP response code and ensures it is acceptable.
func (s *TestServer) requireOK(resp *http.Response) error {
if resp.StatusCode != 200 {
return fmt.Errorf("Bad status code: %d", resp.StatusCode)
}
return nil
}
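As a quick illustration of the new helper signatures above (which now take the `*testing.T` directly and fail the test themselves), here is a minimal sketch; `startTestServer` is a hypothetical stand-in for however the test obtains a running `*TestServer`:

```go
package testutil_test

import (
	"testing"

	"github.com/hashicorp/consul/testutil"
)

func TestKVAndServiceHelpers(t *testing.T) {
	// startTestServer is a hypothetical helper; how the server is started
	// is outside the scope of this diff.
	srv := startTestServer(t)
	defer srv.Stop()

	// Each helper takes t directly and calls t.Fatalf on failure.
	srv.SetKVString(t, "config/feature", "enabled")
	if got := srv.GetKVString(t, "config/feature"); got != "enabled" {
		t.Fatalf("unexpected value: %q", got)
	}

	// Register a service with an initial "passing" health check.
	srv.AddService(t, "web", "passing", []string{"primary"})
}
```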

View File

@ -0,0 +1,96 @@
package testutil
import (
"testing"
)
type WrappedServer struct {
s *TestServer
t *testing.T
}
// Wrap wraps the test server in a `testing.T` for convenience.
//
// For example, the following code snippets are equivalent.
//
// server.JoinLAN(t, "1.2.3.4")
// server.Wrap(t).JoinLAN("1.2.3.4")
//
// This is useful when you are calling multiple functions and want to save the
// wrapped value as another variable to reduce the repetition of "t".
func (s *TestServer) Wrap(t *testing.T) *WrappedServer {
return &WrappedServer{
s: s,
t: t,
}
}
// See Also
//
// TestServer.JoinLAN()
func (w *WrappedServer) JoinLAN(addr string) {
w.s.JoinLAN(w.t, addr)
}
// See Also
//
// TestServer.JoinWAN()
func (w *WrappedServer) JoinWAN(addr string) {
w.s.JoinWAN(w.t, addr)
}
// See Also
//
// TestServer.SetKV()
func (w *WrappedServer) SetKV(key string, val []byte) {
w.s.SetKV(w.t, key, val)
}
// See Also
//
// TestServer.SetKVString()
func (w *WrappedServer) SetKVString(key string, val string) {
w.s.SetKVString(w.t, key, val)
}
// See Also
//
// TestServer.GetKV()
func (w *WrappedServer) GetKV(key string) []byte {
return w.s.GetKV(w.t, key)
}
// See Also
//
// TestServer.GetKVString()
func (w *WrappedServer) GetKVString(key string) string {
return w.s.GetKVString(w.t, key)
}
// See Also
//
// TestServer.PopulateKV()
func (w *WrappedServer) PopulateKV(data map[string][]byte) {
w.s.PopulateKV(w.t, data)
}
// See Also
//
// TestServer.ListKV()
func (w *WrappedServer) ListKV(prefix string) []string {
return w.s.ListKV(w.t, prefix)
}
// See Also
//
// TestServer.AddService()
func (w *WrappedServer) AddService(name, status string, tags []string) {
w.s.AddService(w.t, name, status, tags)
}
// See Also
//
// TestServer.AddCheck()
func (w *WrappedServer) AddCheck(name, serviceID, status string) {
w.s.AddCheck(w.t, name, serviceID, status)
}
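And the same flow through the `Wrap` convenience wrapper documented above, so `t` only has to be passed once (again a sketch; `startTestServer` is the same hypothetical helper as in the previous example):

```go
func TestWrappedHelpers(t *testing.T) {
	srv := startTestServer(t) // hypothetical helper, as in the previous sketch
	defer srv.Stop()

	// Wrap once, then drop the explicit t on every subsequent call.
	w := srv.Wrap(t)
	w.SetKVString("foo", "bar")
	if got := w.GetKVString("foo"); got != "bar" {
		t.Fatalf("unexpected value: %q", got)
	}
}
```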

View File

@ -6,17 +6,17 @@ import (
"time" "time"
"github.com/hashicorp/consul/consul/structs" "github.com/hashicorp/consul/consul/structs"
"github.com/pkg/errors"
) )
type testFn func() (bool, error) type testFn func() (bool, error)
type errorFn func(error)
const ( const (
baseWait = 1 * time.Millisecond baseWait = 1 * time.Millisecond
maxWait = 100 * time.Millisecond maxWait = 100 * time.Millisecond
) )
func WaitForResult(try testFn, fail errorFn) { func WaitForResult(try testFn) error {
var err error var err error
wait := baseWait wait := baseWait
for retries := 100; retries > 0; retries-- { for retries := 100; retries > 0; retries-- {
@ -24,7 +24,7 @@ func WaitForResult(try testFn, fail errorFn) {
success, err = try() success, err = try()
if success { if success {
time.Sleep(25 * time.Millisecond) time.Sleep(25 * time.Millisecond)
return return nil
} }
time.Sleep(wait) time.Sleep(wait)
@ -33,14 +33,18 @@ func WaitForResult(try testFn, fail errorFn) {
wait = maxWait wait = maxWait
} }
} }
fail(err) if err != nil {
return errors.Wrap(err, "timed out with error")
} else {
return fmt.Errorf("timed out")
}
} }
type rpcFn func(string, interface{}, interface{}) error type rpcFn func(string, interface{}, interface{}) error
func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes { func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes {
var out structs.IndexedNodes var out structs.IndexedNodes
WaitForResult(func() (bool, error) { if err := WaitForResult(func() (bool, error) {
// Ensure we have a leader and a node registration. // Ensure we have a leader and a node registration.
args := &structs.DCSpecificRequest{ args := &structs.DCSpecificRequest{
Datacenter: dc, Datacenter: dc,
@ -55,8 +59,8 @@ func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes {
return false, fmt.Errorf("Consul index is 0") return false, fmt.Errorf("Consul index is 0")
} }
return true, nil return true, nil
}, func(err error) { }); err != nil {
t.Fatalf("failed to find leader: %v", err) t.Fatalf("failed to find leader: %v", err)
}) }
return out return out
} }
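For callers of `WaitForResult`, the migration from the old callback form to the new error-returning form looks roughly like this (a sketch; `pollCondition` is a hypothetical placeholder for whatever the test is waiting on):

```go
// Old form:
//   testutil.WaitForResult(try, func(err error) { t.Fatalf("err: %v", err) })
//
// New form: the caller decides what to do with the timeout error.
if err := testutil.WaitForResult(func() (bool, error) {
	if !pollCondition() { // hypothetical condition check
		return false, fmt.Errorf("condition not met yet")
	}
	return true, nil
}); err != nil {
	t.Fatalf("timed out: %v", err)
}
```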

9
vendor/github.com/hashicorp/consul/types/area.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
package types
// AreaID is a strongly-typed string used to uniquely represent a network area,
// which is a relationship between Consul servers.
type AreaID string
// This represents the existing WAN area that's built in to Consul. Consul
// Enterprise generalizes areas, which are represented with UUIDs.
const AreaWAN AreaID = "wan"

363
vendor/github.com/hashicorp/go-uuid/LICENSE generated vendored Normal file
View File

@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

8
vendor/github.com/hashicorp/go-uuid/README.md generated vendored Normal file
View File

@ -0,0 +1,8 @@
# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
Generates UUID-format strings using high quality, purely random bytes. It can also parse UUID-format strings into their component bytes.
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).

65
vendor/github.com/hashicorp/go-uuid/uuid.go generated vendored Normal file
View File

@ -0,0 +1,65 @@
package uuid
import (
"crypto/rand"
"encoding/hex"
"fmt"
)
// GenerateRandomBytes is used to generate random bytes of given size.
func GenerateRandomBytes(size int) ([]byte, error) {
buf := make([]byte, size)
if _, err := rand.Read(buf); err != nil {
return nil, fmt.Errorf("failed to read random bytes: %v", err)
}
return buf, nil
}
// GenerateUUID is used to generate a random UUID
func GenerateUUID() (string, error) {
buf, err := GenerateRandomBytes(16)
if err != nil {
return "", err
}
return FormatUUID(buf)
}
func FormatUUID(buf []byte) (string, error) {
if len(buf) != 16 {
return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16]), nil
}
func ParseUUID(uuid string) ([]byte, error) {
if len(uuid) != 36 {
return nil, fmt.Errorf("uuid string is wrong length")
}
hyph := []byte("-")
if uuid[8] != hyph[0] ||
uuid[13] != hyph[0] ||
uuid[18] != hyph[0] ||
uuid[23] != hyph[0] {
return nil, fmt.Errorf("uuid is improperly formatted")
}
hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
ret, err := hex.DecodeString(hexStr)
if err != nil {
return nil, err
}
if len(ret) != 16 {
return nil, fmt.Errorf("decoded hex is the wrong length")
}
return ret, nil
}
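The vendored `go-uuid` API above is small; a short round-trip example (generate, parse back to raw bytes, re-format) would look like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-uuid"
)

func main() {
	// Generate a random, UUID-formatted string.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}

	// Parse it back into its 16 raw bytes, then re-format those bytes.
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}
	again, err := uuid.FormatUUID(raw)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(id == again) // prints "true"
}
```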

View File

@ -1,11 +0,0 @@
language: go
go:
- 1.0
- 1.1
- 1.2
- 1.3
- 1.4
script:
- go test

View File

@ -37,7 +37,7 @@ func init() {
} }
ops := make([]string, 0, len(constraintOperators)) ops := make([]string, 0, len(constraintOperators))
for k, _ := range constraintOperators { for k := range constraintOperators {
ops = append(ops, regexp.QuoteMeta(k)) ops = append(ops, regexp.QuoteMeta(k))
} }
@ -142,15 +142,37 @@ func constraintLessThanEqual(v, c *Version) bool {
} }
func constraintPessimistic(v, c *Version) bool { func constraintPessimistic(v, c *Version) bool {
// If the version being checked is naturally less than the constraint, then there
// is no way for the version to be valid against the constraint
if v.LessThan(c) { if v.LessThan(c) {
return false return false
} }
// We'll use this more than once, so grab the length now so it's a little cleaner
// to write the later checks
cs := len(c.segments)
// If the version being checked has less specificity than the constraint, then there
// is no way for the version to be valid against the constraint
if cs > len(v.segments) {
return false
}
// Check the segments in the constraint against those in the version. If the version
// being checked, at any point, does not have the same values in each index of the
// constraint's segments, then it cannot be valid against the constraint.
for i := 0; i < c.si-1; i++ { for i := 0; i < c.si-1; i++ {
if v.segments[i] != c.segments[i] { if v.segments[i] != c.segments[i] {
return false return false
} }
} }
// Check the last part of the segment in the constraint. If the version segment at
// this index is less than the constraint's segment at this index, then it cannot
// be valid against the constraint
if c.segments[cs-1] > v.segments[cs-1] {
return false
}
// If nothing has rejected the version by now, it's valid
return true return true
} }
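The pessimistic (`~>`) operator handled by `constraintPessimistic` above pins every segment except the last one specified. A small sketch of the expected behavior against this version of the library:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2.3" should allow the final specified segment to grow,
	// while everything before it must match exactly.
	c, err := version.NewConstraint("~> 1.2.3")
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range []string{"1.2.3", "1.2.9", "1.3.0"} {
		v, err := version.NewVersion(s)
		if err != nil {
			log.Fatal(err)
		}
		// Expected: 1.2.3 true, 1.2.9 true, 1.3.0 false
		fmt.Printf("%s satisfies ~> 1.2.3: %v\n", s, c.Check(v))
	}
}
```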

View File

@ -14,8 +14,8 @@ var versionRegexp *regexp.Regexp
// The raw regular expression string used for testing the validity // The raw regular expression string used for testing the validity
// of a version. // of a version.
const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+){0,2})` + const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
`?` `?`
@ -23,7 +23,7 @@ const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+){0,2})` +
type Version struct { type Version struct {
metadata string metadata string
pre string pre string
segments []int segments []int64
si int si int
} }
@ -38,20 +38,23 @@ func NewVersion(v string) (*Version, error) {
if matches == nil { if matches == nil {
return nil, fmt.Errorf("Malformed version: %s", v) return nil, fmt.Errorf("Malformed version: %s", v)
} }
segmentsStr := strings.Split(matches[1], ".") segmentsStr := strings.Split(matches[1], ".")
segments := make([]int, len(segmentsStr), 3) segments := make([]int64, len(segmentsStr))
si := 0 si := 0
for i, str := range segmentsStr { for i, str := range segmentsStr {
val, err := strconv.ParseInt(str, 10, 32) val, err := strconv.ParseInt(str, 10, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"Error parsing version: %s", err) "Error parsing version: %s", err)
} }
segments[i] = int(val) segments[i] = int64(val)
si += 1 si++
} }
// Even though we could support more than three segments, if we
// got less than three, pad it with 0s. This is to cover the basic
// default use case of semver, which is MAJOR.MINOR.PATCH at the minimum
for i := len(segments); i < 3; i++ { for i := len(segments); i < 3; i++ {
segments = append(segments, 0) segments = append(segments, 0)
} }
@ -86,8 +89,8 @@ func (v *Version) Compare(other *Version) int {
return 0 return 0
} }
segmentsSelf := v.Segments() segmentsSelf := v.Segments64()
segmentsOther := other.Segments() segmentsOther := other.Segments64()
// If the segments are the same, we must compare on prerelease info // If the segments are the same, we must compare on prerelease info
if reflect.DeepEqual(segmentsSelf, segmentsOther) { if reflect.DeepEqual(segmentsSelf, segmentsOther) {
@ -106,21 +109,56 @@ func (v *Version) Compare(other *Version) int {
return comparePrereleases(preSelf, preOther) return comparePrereleases(preSelf, preOther)
} }
// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
lenSelf := len(segmentsSelf)
lenOther := len(segmentsOther)
hS := lenSelf
if lenSelf < lenOther {
hS = lenOther
}
// Compare the segments // Compare the segments
for i := 0; i < len(segmentsSelf); i++ { // Because a constraint could have more/less specificity than the version it's
// checking, we need to account for a lopsided or jagged comparison
for i := 0; i < hS; i++ {
if i > lenSelf-1 {
// This means Self had the lower specificity
// Check to see if the remaining segments in Other are all zeros
if !allZero(segmentsOther[i:]) {
// if not, it means that Other has to be greater than Self
return -1
}
break
} else if i > lenOther-1 {
// this means Other had the lower specificity
// Check to see if the remaining segments in Self are all zeros -
if !allZero(segmentsSelf[i:]) {
//if not, it means that Self has to be greater than Other
return 1
}
break
}
lhs := segmentsSelf[i] lhs := segmentsSelf[i]
rhs := segmentsOther[i] rhs := segmentsOther[i]
if lhs == rhs { if lhs == rhs {
continue continue
} else if lhs < rhs { } else if lhs < rhs {
return -1 return -1
} else { }
// Otherwise, rhs was > lhs, so they're not equal
return 1 return 1
} }
}
panic("should not be reached") // if we got this far, they're equal
return 0
}
func allZero(segs []int64) bool {
for _, s := range segs {
if s != 0 {
return false
}
}
return true
} }
func comparePart(preSelf string, preOther string) int { func comparePart(preSelf string, preOther string) int {
@ -128,24 +166,38 @@ func comparePart(preSelf string, preOther string) int {
return 0 return 0
} }
selfNumeric := true
_, err := strconv.ParseInt(preSelf, 10, 64)
if err != nil {
selfNumeric = false
}
otherNumeric := true
_, err = strconv.ParseInt(preOther, 10, 64)
if err != nil {
otherNumeric = false
}
// if a part is empty, we use the other to decide // if a part is empty, we use the other to decide
if preSelf == "" { if preSelf == "" {
_, notIsNumeric := strconv.ParseInt(preOther, 10, 64) if otherNumeric {
if notIsNumeric == nil {
return -1 return -1
} }
return 1 return 1
} }
if preOther == "" { if preOther == "" {
_, notIsNumeric := strconv.ParseInt(preSelf, 10, 64) if selfNumeric {
if notIsNumeric == nil {
return 1 return 1
} }
return -1 return -1
} }
if preSelf > preOther { if selfNumeric && !otherNumeric {
return -1
} else if !selfNumeric && otherNumeric {
return 1
} else if preSelf > preOther {
return 1 return 1
} }
@ -226,12 +278,25 @@ func (v *Version) Prerelease() string {
return v.pre return v.pre
} }
// Segments returns the numeric segments of the version as a slice. // Segments returns the numeric segments of the version as a slice of ints.
// //
// This excludes any metadata or pre-release information. For example, // This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of // for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3. // 1, 2, 3.
func (v *Version) Segments() []int { func (v *Version) Segments() []int {
segmentSlice := make([]int, len(v.segments))
for i, v := range v.segments {
segmentSlice[i] = int(v)
}
return segmentSlice
}
// Segments64 returns the numeric segments of the version as a slice of int64s.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3.
func (v *Version) Segments64() []int64 {
return v.segments return v.segments
} }
@ -239,7 +304,13 @@ func (v *Version) Segments() []int {
// and metadata information. // and metadata information.
func (v *Version) String() string { func (v *Version) String() string {
var buf bytes.Buffer var buf bytes.Buffer
fmt.Fprintf(&buf, "%d.%d.%d", v.segments[0], v.segments[1], v.segments[2]) fmtParts := make([]string, len(v.segments))
for i, s := range v.segments {
// We can ignore err here since we've pre-parsed the values in segments
str := strconv.FormatInt(s, 10)
fmtParts[i] = str
}
fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
if v.pre != "" { if v.pre != "" {
fmt.Fprintf(&buf, "-%s", v.pre) fmt.Fprintf(&buf, "-%s", v.pre)
} }
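With segments now stored as `int64` and comparison tolerating different segment counts, versions of differing specificity compare as sketched below (the expected outputs follow the logic in the diff rather than a verified run):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
)

func mustVersion(s string) *version.Version {
	v, err := version.NewVersion(s)
	if err != nil {
		log.Fatal(err)
	}
	return v
}

func main() {
	a := mustVersion("1.2.3")
	b := mustVersion("1.2.3.0")
	c := mustVersion("1.2.3.5")

	fmt.Println(a.Compare(b))   // 0: the extra trailing segment is all zeros
	fmt.Println(a.Compare(c))   // -1: the shorter version is lower here
	fmt.Println(c.Segments64()) // [1 2 3 5]
}
```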

50
vendor/vendor.json vendored
View File

@ -25,10 +25,10 @@
"revisionTime": "2016-08-22T16:14:30Z" "revisionTime": "2016-08-22T16:14:30Z"
}, },
{ {
"checksumSHA1": "L9njXCkN30+qHaLayiiA2Q9jDlY=", "checksumSHA1": "Rjy2uYZkQ8Kjht6ZFU0qzm2I/kI=",
"path": "github.com/Microsoft/go-winio", "path": "github.com/Microsoft/go-winio",
"revision": "fff283ad5116362ca252298cfc9b95828956d85d", "revision": "d311c76e775b5092c023569caacdbb4e569c3243",
"revisionTime": "2017-02-01T00:43:30Z" "revisionTime": "2017-05-08T21:01:43Z"
}, },
{ {
"checksumSHA1": "XeG94RjA9o/0wo9Fuw6NSRGYnjk=", "checksumSHA1": "XeG94RjA9o/0wo9Fuw6NSRGYnjk=",
@ -635,38 +635,38 @@
{ {
"checksumSHA1": "jfELEMRhiTcppZmRH+ZwtkVS5Uw=", "checksumSHA1": "jfELEMRhiTcppZmRH+ZwtkVS5Uw=",
"path": "github.com/hashicorp/consul/acl", "path": "github.com/hashicorp/consul/acl",
"revision": "21f2d5ad0c02af6c4b32d8fd04f7c81e9b002d41", "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-02-15T04:07:11Z" "revisionTime": "2017-04-17T18:01:43Z"
}, },
{ {
"checksumSHA1": "ygEjA1d52B1RDmZu8+1WTwkrYDQ=", "checksumSHA1": "k8spDLTgdEFy15C1AdBJLAW+Zng=",
"path": "github.com/hashicorp/consul/api", "path": "github.com/hashicorp/consul/api",
"revision": "21f2d5ad0c02af6c4b32d8fd04f7c81e9b002d41", "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-02-15T04:07:11Z" "revisionTime": "2017-04-17T18:01:43Z"
}, },
{ {
"checksumSHA1": "nomqbPd9j3XelMMcv7+vTEPsdr4=", "checksumSHA1": "Z1N3jX/5B7GbLNfNp5GTxrsJItc=",
"path": "github.com/hashicorp/consul/consul/structs", "path": "github.com/hashicorp/consul/consul/structs",
"revision": "21f2d5ad0c02af6c4b32d8fd04f7c81e9b002d41", "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-02-15T04:07:11Z" "revisionTime": "2017-04-17T18:01:43Z"
}, },
{ {
"checksumSHA1": "HDRi8BjyCm/zCYGA8l/40GMuWN8=", "checksumSHA1": "XTA8JEhsuJGTUTchjM++oEG7B14=",
"path": "github.com/hashicorp/consul/lib", "path": "github.com/hashicorp/consul/lib",
"revision": "21f2d5ad0c02af6c4b32d8fd04f7c81e9b002d41", "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-02-15T04:07:11Z" "revisionTime": "2017-04-17T18:01:43Z"
}, },
{ {
"checksumSHA1": "+uk5NnYcshPhdCs8hDCHYeT0xWQ=", "checksumSHA1": "yG7c7ZInBE36kw8IPvYb0smctRw=",
"path": "github.com/hashicorp/consul/testutil", "path": "github.com/hashicorp/consul/testutil",
"revision": "21f2d5ad0c02af6c4b32d8fd04f7c81e9b002d41", "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-02-15T04:07:11Z" "revisionTime": "2017-04-17T18:01:43Z"
}, },
{ {
"checksumSHA1": "ZPDLNuKJGZJFV9HlJ/V0O4/c/Ko=", "checksumSHA1": "bYK/7DsyTM3YDjvc0RRUH4I+jic=",
"path": "github.com/hashicorp/consul/types", "path": "github.com/hashicorp/consul/types",
"revision": "21f2d5ad0c02af6c4b32d8fd04f7c81e9b002d41", "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-02-15T04:07:11Z" "revisionTime": "2017-04-17T18:01:43Z"
}, },
{ {
"path": "github.com/hashicorp/errwrap", "path": "github.com/hashicorp/errwrap",
@ -743,8 +743,16 @@
"revision": "42a2b573b664dbf281bd48c3cc12c086b17a39ba" "revision": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
}, },
{ {
"checksumSHA1": "mAkPa/RLuIwN53GbwIEMATexams=",
"path": "github.com/hashicorp/go-uuid",
"revision": "64130c7a86d732268a38cb04cfbaf0cc987fda98",
"revisionTime": "2016-07-17T02:21:40Z"
},
{
"checksumSHA1": "tUGxc7rfX0cmhOOUDhMuAZ9rWsA=",
"path": "github.com/hashicorp/go-version", "path": "github.com/hashicorp/go-version",
"revision": "2e7f5ea8e27bb3fdf9baa0881d16757ac4637332" "revision": "03c5bf6be031b6dd45afec16b1cf94fc8938bc77",
"revisionTime": "2017-02-02T08:07:59Z"
}, },
{ {
"checksumSHA1": "d9PxF1XQGLMJZRct2R8qVM/eYlE=", "checksumSHA1": "d9PxF1XQGLMJZRct2R8qVM/eYlE=",

View File

@ -44,7 +44,8 @@ configuration, Nomad will automatically connect and configure with Consul.
## `consul` Parameters ## `consul` Parameters
- `address` `(string: "127.0.0.1:8500")` - Specifies the address to the local - `address` `(string: "127.0.0.1:8500")` - Specifies the address to the local
Consul agent, given in the format `host:port`. Consul agent, given in the format `host:port`. Supports Unix sockets with the
format: `unix:///tmp/consul/consul.sock`
- `auth` `(string: "")` - Specifies the HTTP Basic Authentication information to - `auth` `(string: "")` - Specifies the HTTP Basic Authentication information to
use for access to the Consul Agent, given in the format `username:password`. use for access to the Consul Agent, given in the format `username:password`.

View File

@ -54,6 +54,10 @@ the [Agent's Gossip and RPC Encryption](/docs/agent/encryption.html).
a Nomad client makes the client use TLS for making RPC requests to the Nomad a Nomad client makes the client use TLS for making RPC requests to the Nomad
servers. servers.
- `verify_https_client` `(bool: false)` - Specifies whether agents should require
client certificates for all incoming HTTPS requests. The client certificates
must be signed by the same CA as Nomad.
- `verify_server_hostname` `(bool: false)` - Specifies if outgoing TLS - `verify_server_hostname` `(bool: false)` - Specifies if outgoing TLS
connections should verify the server's hostname. connections should verify the server's hostname.

View File

@ -69,9 +69,19 @@ export NOMAD_CACERT=/path/to/ca.pem
Run any command except `agent` with `-h` to see all environment variables and Run any command except `agent` with `-h` to see all environment variables and
flags. For example: `nomad status -h` flags. For example: `nomad status -h`
Since HTTPS currently does not validate client certificates you do not need to By default HTTPS does not validate client certificates, so you do not need to
give the command line tool access to any private keys. give the command line tool access to any private keys.
### Network Isolation with TLS
If you want to isolate Nomad agents on a network with TLS you need to enable
both [`verify_https_client`][tls] and [`verify_server_hostname`][tls]. This
will cause agents to require client certificates for all incoming HTTPS
connections as well as verify proper names on all other certificates.
Consul will not attempt to health check agents with `verify_https_client` set
as it is unable to use client certificates.
## Encryption Examples ## Encryption Examples
### TLS Configuration using `cfssl` ### TLS Configuration using `cfssl`

View File

@ -149,6 +149,24 @@ ed3665f5 8bf94335 example cache run running
24cfd201 8bf94335 example cache run running 24cfd201 8bf94335 example cache run running
``` ```
You will note that in the above examples, the **Allocations** output contains
columns labeled **Desired Status** and **Client Status**.
Desired Status represents the goal of the scheduler for the allocation, with
the following valid statuses:
- *run*: The allocation should run
- *stop*: The allocation should stop
Client Status represents the emergent state of the allocation and includes
the following:
- *pending*: The allocation is pending and will be running
- *running*: The allocation is currently running
- *complete*: The allocation was running and completed successfully
- *failed*: The allocation was running and completed with a non-zero exit code
- *lost*: The node that was running the allocation has failed or has been partitioned
Using `-stats` to see detailed resource usage information on the node: Using `-stats` to see detailed resource usage information on the node:
``` ```

View File

@ -81,7 +81,7 @@ will restrict the tasks to 64-bit operating systems.
```hcl ```hcl
group "example" { group "example" {
constraint { constraint {
attribute = "${attr.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }

View File

@ -49,7 +49,9 @@ README][ct].
## `template` Parameters ## `template` Parameters
- `change_mode` `(string: "restart")` - Specifies the behavior Nomad should take - `change_mode` `(string: "restart")` - Specifies the behavior Nomad should take
if the rendered template changes. The possible values are: if the rendered template changes. Nomad will always write the new contents of
the template to the specified destination. The possible values below describe
Nomad's action after writing the template to disk.
- `"noop"` - take no action (continue running the task) - `"noop"` - take no action (continue running the task)
- `"restart"` - restart the task - `"restart"` - restart the task

View File

@ -70,7 +70,7 @@ Nomad has many [drivers](/docs/drivers/index.html), and most support passing
arguments to their tasks via the `args` parameter. This option also optionally arguments to their tasks via the `args` parameter. This option also optionally
accepts [Nomad interpolation](/docs/runtime/interpolation.html). For example, if accepts [Nomad interpolation](/docs/runtime/interpolation.html). For example, if
you wanted Nomad to dynamically allocate a high port to bind the service on you wanted Nomad to dynamically allocate a high port to bind the service on
intead of relying on a static port for the previous job: instead of relying on a static port for the previous job:
```hcl ```hcl
job "docs" { job "docs" {

View File

@ -60,6 +60,14 @@ environment variables.
<td>`NOMAD_JOB_NAME`</td> <td>`NOMAD_JOB_NAME`</td>
<td>The job's name</td> <td>The job's name</td>
</tr> </tr>
<tr>
<td>`NOMAD_DC`</td>
<td>The datacenter in which the allocation is running</td>
</tr>
<tr>
<td>`NOMAD_REGION`</td>
<td>The region in which the allocation is running</td>
</tr>
<tr> <tr>
<td>`NOMAD_IP_<label>`</td> <td>`NOMAD_IP_<label>`</td>
<td>The IP of the port with the given label</td> <td>The IP of the port with the given label</td>

View File

@ -94,7 +94,7 @@ driver.
<tr> <tr>
<td><tt>${attr.&lt;property&gt;}</tt></td> <td><tt>${attr.&lt;property&gt;}</tt></td>
<td>Property given by <tt>property</tt> on the client</td> <td>Property given by <tt>property</tt> on the client</td>
<td><tt>${attr.arch} => amd64</tt></td> <td><tt>${attr.cpu.arch} => amd64</tt></td>
</tr> </tr>
<tr> <tr>
<td><tt>${meta.&lt;key&gt;}</tt></td> <td><tt>${meta.&lt;key&gt;}</tt></td>
@ -111,7 +111,7 @@ Below is a table documenting common node properties:
<th>Description</th> <th>Description</th>
</tr> </tr>
<tr> <tr>
<td><tt>${attr.arch}</tt></td> <td><tt>${attr.cpu.arch}</tt></td>
<td>CPU architecture of the client (e.g. <tt>amd64</tt>, <tt>386</tt>)</td> <td>CPU architecture of the client (e.g. <tt>amd64</tt>, <tt>386</tt>)</td>
</tr> </tr>
<tr> <tr>
@ -166,7 +166,7 @@ Here are some examples of using node attributes and properties in a job file:
job "docs" { job "docs" {
# This will constrain this job to only run on 64-bit clients. # This will constrain this job to only run on 64-bit clients.
constraint { constraint {
attribute = "${attr.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }

View File

@ -25,8 +25,24 @@ images to load, in 0.5.5 it has been changed to a single string. No
functionality was changed. Even if more than one item was specified prior to functionality was changed. Even if more than one item was specified prior to
0.5.5 only the first item was used. 0.5.5 only the first item was used.
If you have jobs that use the `load` option first upgrade all nodes to 0.5.5, To do a zero-downtime deploy with jobs that use the `load` option:
then resubmit those jobs with `load` as a single string.
* Upgrade servers to version 0.5.5 or later.
* Deploy new client nodes on the same version as the servers.
* Resubmit jobs with the `load` option fixed and a constraint to only run on
version 0.5.5 or later:
```hcl
constraint {
attribute = "${attr.nomad.version}"
operator = "version"
value = ">= 0.5.5"
}
```
* Drain and shutdown old client nodes.
### Validation changes ### Validation changes

View File

@ -34,7 +34,7 @@ Nomad combines a lightweight resource manager and a sophisticated scheduler
into a single system. By default, Nomad is distributed, highly available, into a single system. By default, Nomad is distributed, highly available,
and operationally simple. and operationally simple.
Kubernetes documentation states they can support clusters greater than 1,000 nodes Kubernetes documentation states they can support clusters greater than 5,000 nodes
and they support a multi-AZ/multi-region configuration. Nomad has been tested and they support a multi-AZ/multi-region configuration. Nomad has been tested
on clusters up to 5,000 nodes, but is expected to work on much larger clusters as on clusters up to 5,000 nodes, but is expected to work on much larger clusters as
well. Nomad also supports multi-datacenter and multi-region configurations. well. Nomad also supports multi-datacenter and multi-region configurations.

View File

@ -7,7 +7,7 @@
<ul class="nav sidebar-nav"> <ul class="nav sidebar-nav">
<li><a href="/intro/index.html">Intro</a></li> <li><a href="/intro/index.html">Intro</a></li>
<li><a href="/guides/index.html">Docs</a></li> <li><a href="/guides/index.html">Guides</a></li>
<li><a href="/docs/index.html">Docs</a></li> <li><a href="/docs/index.html">Docs</a></li>
<li><a href="/community.html">Community</a></li> <li><a href="/community.html">Community</a></li>
<li><a href="/security.html">Security</a></li> <li><a href="/security.html">Security</a></li>
@ -23,7 +23,7 @@
</a> </a>
</li> </li>
<li> <li>
<a href="https://github.com/mitchellh/vagrant"> <a href="https://github.com/hashicorp/nomad">
<%= inline_svg "github.svg" %> GitHub <%= inline_svg "github.svg" %> GitHub
</a> </a>
</li> </li>