commit 7df9d2570f
Merge branch 'master' into yishan/revised-nomadproject-structure

CHANGELOG.md (24 changed lines)
@@ -1,24 +1,42 @@

## 0.9.2 (Unreleased)

__BACKWARDS INCOMPATIBILITIES:__

 * client: The format of service IDs in Consul has changed. If you rely upon
   Nomad's service IDs (*not* service names; those are stable), you will need
   to update your code. [[GH-5536](https://github.com/hashicorp/nomad/pull/5536)]
 * client: The format of check IDs in Consul has changed. If you rely upon
   Nomad's check IDs you will need to update your code (see the sketch after
   these notes). [[GH-5536](https://github.com/hashicorp/nomad/pull/5536)]
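For reference, the new ID formats come from the `makeTaskServiceID` and `makeCheckID` changes further down in this diff. A minimal Go sketch of how the new IDs are assembled; the alloc ID, task name, service name, and check hash below are illustrative placeholders, not values produced by a real cluster:

```go
package main

import "fmt"

// These prefixes mirror the constants in the Consul service client shown later
// in this diff.
const (
    nomadServicePrefix = "_nomad"
    nomadTaskPrefix    = nomadServicePrefix + "-task-"
    nomadCheckPrefix   = nomadServicePrefix + "-check-"
)

func main() {
    allocID := "b4e61df9-b095-d64e-f241-23860da1375f" // example value
    task, service := "redis", "http"                  // example values

    // New service ID: prefix + alloc ID + task + service name (previously a bare hash),
    // e.g. _nomad-task-b4e61df9-b095-d64e-f241-23860da1375f-redis-http
    serviceID := fmt.Sprintf("%s%s-%s-%s", nomadTaskPrefix, allocID, task, service)

    // New check ID: the existing check hash now carries a scoping prefix,
    // e.g. _nomad-check-434ae42f9a57c5705344974ac38de2aee0ee089d
    checkID := fmt.Sprintf("%s%s", nomadCheckPrefix, "434ae42f9a57c5705344974ac38de2aee0ee089d")

    fmt.Println(serviceID)
    fmt.Println(checkID)
}
```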
FEATURES:

 * core: Add `nomad alloc restart` command to restart allocs and tasks [[GH-5502](https://github.com/hashicorp/nomad/pull/5502)]
 * core: Add `nomad alloc stop` command to reschedule allocs [[GH-5512](https://github.com/hashicorp/nomad/pull/5512)]
 * core: Add `nomad alloc signal` command to signal allocs and tasks [[GH-5515](https://github.com/hashicorp/nomad/pull/5515)]
 * vault: Add initial support for Vault namespaces [[GH-5520](https://github.com/hashicorp/nomad/pull/5520)]

IMPROVEMENTS:

 * core: Add node name to output of `nomad node status` command in verbose mode [[GH-5224](https://github.com/hashicorp/nomad/pull/5224)]
 * core: Add `-verbose` flag to `nomad status` wrapper command [[GH-5516](https://github.com/hashicorp/nomad/pull/5516)]
 * core: Reduce the size of the raft transaction for plans by only sending fields updated by the plan applier [[GH-5602](https://github.com/hashicorp/nomad/pull/5602)]
 * client: Reduce unnecessary lost nodes on server failure [[GH-5654](https://github.com/hashicorp/nomad/issues/5654)]
 * api: Add preemption related fields to API results that return an allocation list [[GH-5580](https://github.com/hashicorp/nomad/pull/5580)]
 * api: Add additional config options to the scheduler configuration endpoint to disable preemption per scheduler type (see the sketch after this list) [[GH-5628](https://github.com/hashicorp/nomad/issues/5628)]
 * client: Allow use of maintenance mode and externally registered checks against Nomad-registered Consul services [[GH-4537](https://github.com/hashicorp/nomad/issues/4537)]
 * client: Canary promotion no longer causes services registered in Consul to become unhealthy [[GH-4566](https://github.com/hashicorp/nomad/issues/4566)]
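The preemption toggles referenced above can be exercised through the Go `api` package whose tests appear later in this diff. A minimal sketch, assuming an agent reachable at the default address; the particular true/false values are only an example:

```go
package main

import (
    "log"

    "github.com/hashicorp/nomad/api"
)

func main() {
    // Assumes a Nomad agent at the default address (http://127.0.0.1:4646).
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // Per-scheduler preemption toggles added by this change.
    conf := &api.SchedulerConfiguration{
        PreemptionConfig: api.PreemptionConfig{
            SystemSchedulerEnabled:  true,
            BatchSchedulerEnabled:   false,
            ServiceSchedulerEnabled: false,
        },
    }

    // Writes the scheduler configuration via /v1/operator/scheduler/configuration.
    if _, _, err := client.Operator().SchedulerSetConfiguration(conf, nil); err != nil {
        log.Fatal(err)
    }
}
```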
BUG FIXES:

 * core: Fixed accounting of allocated resources in metrics [[GH-5637](https://github.com/hashicorp/nomad/issues/5637)]
 * core: Fixed disaster recovery with Raft protocol version 3 peers.json [[GH-5629](https://github.com/hashicorp/nomad/issues/5629)], [[GH-5651](https://github.com/hashicorp/nomad/issues/5651)]
 * core: Change configuration parsing to use the HCL library's decode, improving JSON support [[GH-1290](https://github.com/hashicorp/nomad/issues/1290)]
 * cli: Fix output and exit status for system jobs with constraints [[GH-2381](https://github.com/hashicorp/nomad/issues/2381)] and [[GH-5169](https://github.com/hashicorp/nomad/issues/5169)]
 * client: Fix network fingerprinting to honor manual configuration [[GH-2619](https://github.com/hashicorp/nomad/issues/2619)]
 * client: Fix network port mapping related environment variables when running with Nomad 0.8 servers [[GH-5587](https://github.com/hashicorp/nomad/issues/5587)]
 * client: Fix issue with terminal-state deployments being modified when an allocation subsequently fails [[GH-5645](https://github.com/hashicorp/nomad/issues/5645)]
 * metrics: Fixed stale metrics [[GH-5540](https://github.com/hashicorp/nomad/issues/5540)]
 * vault: Fix renewal time to be 1/2 lease duration with jitter [[GH-5479](https://github.com/hashicorp/nomad/issues/5479)]

## 0.9.1 (April 29, 2019)
@@ -1,5 +1,17 @@
package api

const (
    ConstraintDistinctProperty  = "distinct_property"
    ConstraintDistinctHosts     = "distinct_hosts"
    ConstraintRegex             = "regexp"
    ConstraintVersion           = "version"
    ConstraintSetContains       = "set_contains"
    ConstraintSetContainsAll    = "set_contains_all"
    ConstraintSetContainsAny    = "set_contains_any"
    ConstraintAttributeIsSet    = "is_set"
    ConstraintAttributeIsNotSet = "is_not_set"
)

// Constraint is used to serialize a job placement constraint.
type Constraint struct {
    LTarget string
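These constants move the constraint operands into the `api` package (the HCL parser changes near the end of this diff switch over to them). A minimal sketch of using them with the package's existing `NewConstraint` helper; the attribute names are only examples:

```go
package main

import (
    "fmt"

    "github.com/hashicorp/nomad/api"
)

func main() {
    // NewConstraint(lTarget, operand, rTarget) builds a job placement constraint.
    kernel := api.NewConstraint("${attr.kernel.name}", api.ConstraintRegex, "linux|darwin")

    // is_set takes no right-hand side, so an empty RTarget is passed.
    rack := api.NewConstraint("${meta.rack}", api.ConstraintAttributeIsSet, "")

    fmt.Println(kernel.Operand, rack.Operand)
}
```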
@@ -139,7 +139,9 @@ type SchedulerSetConfigurationResponse struct {

// PreemptionConfig specifies whether preemption is enabled based on scheduler type
type PreemptionConfig struct {
    SystemSchedulerEnabled  bool
    BatchSchedulerEnabled   bool
    ServiceSchedulerEnabled bool
}

// SchedulerGetConfiguration is used to query the current Scheduler configuration.
@@ -59,3 +59,13 @@ func (s *State) Copy() *State {
        TaskStates: taskStates,
    }
}

// ClientTerminalStatus returns if the client status is terminal and will no longer transition
func (a *State) ClientTerminalStatus() bool {
    switch a.ClientStatus {
    case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed, structs.AllocClientStatusLost:
        return true
    default:
        return false
    }
}
@@ -14,11 +14,11 @@ import (
    "sync"
    "time"

    metrics "github.com/armon/go-metrics"
    "github.com/armon/go-metrics"
    consulapi "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/lib"
    hclog "github.com/hashicorp/go-hclog"
    multierror "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/nomad/client/allocdir"
    "github.com/hashicorp/nomad/client/allocrunner"
    "github.com/hashicorp/nomad/client/allocrunner/interfaces"

@@ -315,6 +315,9 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic
    // Initialize the server manager
    c.servers = servers.New(c.logger, c.shutdownCh, c)

    // Start server manager rebalancing go routine
    go c.servers.Start()

    // Initialize the client
    if err := c.init(); err != nil {
        return nil, fmt.Errorf("failed to initialize client: %v", err)

@@ -1345,7 +1348,6 @@ func (c *Client) registerAndHeartbeat() {
        case <-c.shutdownCh:
            return
        }

        if err := c.updateNodeStatus(); err != nil {
            // The servers have changed such that this node has not been
            // registered before

@@ -2342,13 +2344,6 @@ func (c *Client) consulDiscovery() {
func (c *Client) consulDiscoveryImpl() error {
    consulLogger := c.logger.Named("consul")

    // Acquire heartbeat lock to prevent heartbeat from running
    // concurrently with discovery. Concurrent execution is safe, however
    // discovery is usually triggered when heartbeating has failed so
    // there's no point in allowing it.
    c.heartbeatLock.Lock()
    defer c.heartbeatLock.Unlock()

    dcs, err := c.consulCatalog.Datacenters()
    if err != nil {
        return fmt.Errorf("client.consul: unable to query Consul datacenters: %v", err)

@@ -2709,11 +2704,11 @@ func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.Comparab
    }

    // Sum the allocated resources
    allocs := c.allAllocs()
    var allocated structs.ComparableResources
    allocatedDeviceMbits := make(map[string]int)
    for _, alloc := range allocs {
        if alloc.TerminalStatus() {
    for _, ar := range c.getAllocRunners() {
        alloc := ar.Alloc()
        if alloc.ServerTerminalStatus() || ar.AllocState().ClientTerminalStatus() {
            continue
        }

@@ -2760,17 +2755,6 @@ func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.Comparab
    return &allocated
}

// allAllocs returns all the allocations managed by the client
func (c *Client) allAllocs() map[string]*structs.Allocation {
    ars := c.getAllocRunners()
    allocs := make(map[string]*structs.Allocation, len(ars))
    for _, ar := range ars {
        a := ar.Alloc()
        allocs[a.ID] = a
    }
    return allocs
}

// GetTaskEventHandler returns an event handler for the given allocID and task name
func (c *Client) GetTaskEventHandler(allocID, taskName string) drivermanager.EventHandler {
    c.allocLock.RLock()
@@ -9,7 +9,7 @@ import (
    "testing"
    "time"

    memdb "github.com/hashicorp/go-memdb"
    "github.com/hashicorp/go-memdb"
    "github.com/hashicorp/nomad/client/config"
    consulApi "github.com/hashicorp/nomad/client/consul"
    "github.com/hashicorp/nomad/client/fingerprint"

@@ -26,7 +26,7 @@ import (
    "github.com/hashicorp/nomad/testutil"
    "github.com/stretchr/testify/assert"

    hclog "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/go-hclog"
    cstate "github.com/hashicorp/nomad/client/state"
    ctestutil "github.com/hashicorp/nomad/client/testutil"
    "github.com/stretchr/testify/require"

@@ -893,7 +893,7 @@ func TestClient_BlockedAllocations(t *testing.T) {
        t.Fatalf("err: %v", err)
    }

    // Enusre that the chained allocation is being tracked as blocked
    // Ensure that the chained allocation is being tracked as blocked
    testutil.WaitForResult(func() (bool, error) {
        ar := c1.getAllocRunners()[alloc2.ID]
        if ar == nil {
@@ -1414,6 +1414,116 @@ func TestClient_computeAllocatedDeviceStats(t *testing.T) {
    assert.EqualValues(t, expected, result)
}

func TestClient_getAllocatedResources(t *testing.T) {
    t.Parallel()
    require := require.New(t)
    client, cleanup := TestClient(t, nil)
    defer cleanup()

    allocStops := mock.BatchAlloc()
    allocStops.Job.TaskGroups[0].Count = 1
    allocStops.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    allocStops.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
        "run_for":   "1ms",
        "exit_code": "0",
    }
    allocStops.Job.TaskGroups[0].RestartPolicy.Attempts = 0
    allocStops.AllocatedResources.Shared.DiskMB = 64
    allocStops.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 64}
    allocStops.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 64}
    require.Nil(client.addAlloc(allocStops, ""))

    allocFails := mock.BatchAlloc()
    allocFails.Job.TaskGroups[0].Count = 1
    allocFails.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    allocFails.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
        "run_for":   "1ms",
        "exit_code": "1",
    }
    allocFails.Job.TaskGroups[0].RestartPolicy.Attempts = 0
    allocFails.AllocatedResources.Shared.DiskMB = 128
    allocFails.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 128}
    allocFails.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 128}
    require.Nil(client.addAlloc(allocFails, ""))

    allocRuns := mock.Alloc()
    allocRuns.Job.TaskGroups[0].Count = 1
    allocRuns.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    allocRuns.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
        "run_for": "3s",
    }
    allocRuns.AllocatedResources.Shared.DiskMB = 256
    allocRuns.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 256}
    allocRuns.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 256}
    require.Nil(client.addAlloc(allocRuns, ""))

    allocPends := mock.Alloc()
    allocPends.Job.TaskGroups[0].Count = 1
    allocPends.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    allocPends.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
        "run_for":         "5s",
        "start_block_for": "10s",
    }
    allocPends.AllocatedResources.Shared.DiskMB = 512
    allocPends.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 512}
    allocPends.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 512}
    require.Nil(client.addAlloc(allocPends, ""))

    // wait for allocStops to stop running and for allocRuns to be pending/running
    testutil.WaitForResult(func() (bool, error) {
        as, err := client.GetAllocState(allocPends.ID)
        if err != nil {
            return false, err
        } else if as.ClientStatus != structs.AllocClientStatusPending {
            return false, fmt.Errorf("allocPends not yet pending: %#v", as)
        }

        as, err = client.GetAllocState(allocRuns.ID)
        if as.ClientStatus != structs.AllocClientStatusRunning {
            return false, fmt.Errorf("allocRuns not yet running: %#v", as)
        } else if err != nil {
            return false, err
        }

        as, err = client.GetAllocState(allocStops.ID)
        if err != nil {
            return false, err
        } else if as.ClientStatus != structs.AllocClientStatusComplete {
            return false, fmt.Errorf("allocStops not yet complete: %#v", as)
        }

        as, err = client.GetAllocState(allocFails.ID)
        if err != nil {
            return false, err
        } else if as.ClientStatus != structs.AllocClientStatusFailed {
            return false, fmt.Errorf("allocFails not yet failed: %#v", as)
        }

        return true, nil
    }, func(err error) {
        require.NoError(err)
    })

    result := client.getAllocatedResources(client.config.Node)

    expected := structs.ComparableResources{
        Flattened: structs.AllocatedTaskResources{
            Cpu: structs.AllocatedCpuResources{
                CpuShares: 768,
            },
            Memory: structs.AllocatedMemoryResources{
                MemoryMB: 768,
            },
            Networks: nil,
        },
        Shared: structs.AllocatedSharedResources{
            DiskMB: 768,
        },
    }

    assert.EqualValues(t, expected, *result)
}

func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) {
    t.Parallel()
    client, cleanup := TestClient(t, nil)
@@ -201,12 +201,16 @@ func (m *Manager) SetServers(servers Servers) bool {
    m.Lock()
    defer m.Unlock()

    // Sort both the existing and incoming servers
    servers.Sort()
    m.servers.Sort()

    // Determine if they are equal
    equal := servers.Equal(m.servers)
    equal := m.serversAreEqual(servers)

    // If server list is equal don't change the list and return immediately
    // This prevents unnecessary shuffling of a failed server that was moved to the
    // bottom of the list
    if equal {
        m.logger.Debug("Not replacing server list, current server list is identical to servers discovered in Consul")
        return !equal
    }

    // Randomize the incoming servers
    servers.shuffle()

@@ -215,6 +219,23 @@ func (m *Manager) SetServers(servers Servers) bool {
    return !equal
}

// Method to check if the arg list of servers is equal to the one we already have
func (m *Manager) serversAreEqual(servers Servers) bool {
    // We use a copy of the server list here because determining
    // equality requires a sort step which modifies the order of the server list
    var copy Servers
    copy = make([]*Server, 0, len(m.servers))
    for _, s := range m.servers {
        copy = append(copy, s.Copy())
    }

    // Sort both the existing and incoming servers
    copy.Sort()
    servers.Sort()

    return copy.Equal(servers)
}

// FindServer returns a server to send an RPC too. If there are no servers, nil
// is returned.
func (m *Manager) FindServer() *Server {
@@ -66,6 +66,19 @@ func TestServers_SetServers(t *testing.T) {
    require.True(m.SetServers([]*servers.Server{s1}))
    require.Equal(1, m.NumServers())
    require.Len(m.GetServers(), 1)

    // Test that the list of servers does not get shuffled
    // as a side effect when incoming list is equal
    require.True(m.SetServers([]*servers.Server{s1, s2}))
    before := m.GetServers()
    require.False(m.SetServers([]*servers.Server{s1, s2}))
    after := m.GetServers()
    require.Equal(before, after)

    // Send a shuffled list, verify original order doesn't change
    require.False(m.SetServers([]*servers.Server{s2, s1}))
    afterShuffledInput := m.GetServers()
    require.Equal(after, afterShuffledInput)
}

func TestServers_FindServer(t *testing.T) {
@@ -5,6 +5,7 @@ import (
    "fmt"
    "net"
    "net/url"
    "reflect"
    "strconv"
    "strings"
    "sync"

@@ -29,6 +30,10 @@ const (
    // for tasks.
    nomadTaskPrefix = nomadServicePrefix + "-task-"

    // nomadCheckPrefix is the prefix that scopes Nomad registered checks for
    // services.
    nomadCheckPrefix = nomadServicePrefix + "-check-"

    // defaultRetryInterval is how quickly to retry syncing services and
    // checks to Consul when an error occurs. Will backoff up to a max.
    defaultRetryInterval = time.Second

@@ -83,6 +88,15 @@ type AgentAPI interface {
    UpdateTTL(id, output, status string) error
}

func agentServiceUpdateRequired(reg *api.AgentServiceRegistration, svc *api.AgentService) bool {
    return !(reg.Kind == svc.Kind &&
        reg.ID == svc.ID &&
        reg.Port == svc.Port &&
        reg.Address == svc.Address &&
        reg.Name == svc.Service &&
        reflect.DeepEqual(reg.Tags, svc.Tags))
}

// operations are submitted to the main loop via commit() for synchronizing
// with Consul.
type operations struct {
@@ -466,16 +480,26 @@ func (c *ServiceClient) sync() error {
        metrics.IncrCounter([]string{"client", "consul", "service_deregistrations"}, 1)
    }

    // Add Nomad services missing from Consul
    // Add Nomad services missing from Consul, or where the service has been updated.
    for id, locals := range c.services {
        if _, ok := consulServices[id]; !ok {
            if err = c.client.ServiceRegister(locals); err != nil {
                metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
                return err
        existingSvc, ok := consulServices[id]

        if ok {
            // There is an existing registration of this service in Consul, so here
            // we validate to see if the service has been invalidated to see if it
            // should be updated.
            if !agentServiceUpdateRequired(locals, existingSvc) {
                // No Need to update services that have not changed
                continue
            }
            sreg++
            metrics.IncrCounter([]string{"client", "consul", "service_registrations"}, 1)
        }

        if err = c.client.ServiceRegister(locals); err != nil {
            metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
            return err
        }
        sreg++
        metrics.IncrCounter([]string{"client", "consul", "service_registrations"}, 1)
    }

    // Remove Nomad checks in Consul but unknown locally

@@ -489,7 +513,7 @@ func (c *ServiceClient) sync() error {
        // Nomad managed checks if this is not a client agent.
        // This is to prevent server agents from removing checks
        // registered by client agents
        if !isNomadService(check.ServiceID) || !c.isClientAgent {
        if !isNomadService(check.ServiceID) || !c.isClientAgent || !isNomadCheck(check.CheckID) {
            // Service not managed by Nomad, skip
            continue
        }
@@ -809,10 +833,10 @@ func (c *ServiceClient) UpdateTask(old, newTask *TaskServices) error {
        newIDs[makeTaskServiceID(newTask.AllocID, newTask.Name, s, newTask.Canary)] = s
    }

    // Loop over existing Service IDs to see if they have been removed or
    // updated.
    // Loop over existing Service IDs to see if they have been removed
    for existingID, existingSvc := range existingIDs {
        newSvc, ok := newIDs[existingID]

        if !ok {
            // Existing service entry removed
            ops.deregServices = append(ops.deregServices, existingID)

@@ -828,8 +852,12 @@ func (c *ServiceClient) UpdateTask(old, newTask *TaskServices) error {
            continue
        }

        // Service exists and hasn't changed, don't re-add it later
        delete(newIDs, existingID)
        oldHash := existingSvc.Hash(old.AllocID, old.Name, old.Canary)
        newHash := newSvc.Hash(newTask.AllocID, newTask.Name, newTask.Canary)
        if oldHash == newHash {
            // Service exists and hasn't changed, don't re-add it later
            delete(newIDs, existingID)
        }

        // Service still exists so add it to the task's registration
        sreg := &ServiceRegistration{

@@ -848,7 +876,8 @@ func (c *ServiceClient) UpdateTask(old, newTask *TaskServices) error {
        for _, check := range newSvc.Checks {
            checkID := makeCheckID(existingID, check)
            if _, exists := existingChecks[checkID]; exists {
                // Check exists, so don't remove it
                // Check is still required. Remove it from the map so it doesn't get
                // deleted later.
                delete(existingChecks, checkID)
                sreg.checkIDs[checkID] = struct{}{}
            }

@@ -861,7 +890,6 @@ func (c *ServiceClient) UpdateTask(old, newTask *TaskServices) error {
        for _, checkID := range newCheckIDs {
            sreg.checkIDs[checkID] = struct{}{}
        }

        // Update all watched checks as CheckRestart fields aren't part of ID
@@ -1082,14 +1110,16 @@ func makeAgentServiceID(role string, service *structs.Service) string {
// Consul. All structs.Service fields are included in the ID's hash except
// Checks. This allows updates to merely compare IDs.
//
// Example Service ID: _nomad-task-TNM333JKJPM5AK4FAS3VXQLXFDWOF4VH
// Example Service ID: _nomad-task-b4e61df9-b095-d64e-f241-23860da1375f-redis-http
func makeTaskServiceID(allocID, taskName string, service *structs.Service, canary bool) string {
    return nomadTaskPrefix + service.Hash(allocID, taskName, canary)
    return fmt.Sprintf("%s%s-%s-%s", nomadTaskPrefix, allocID, taskName, service.Name)
}

// makeCheckID creates a unique ID for a check.
//
// Example Check ID: _nomad-check-434ae42f9a57c5705344974ac38de2aee0ee089d
func makeCheckID(serviceID string, check *structs.ServiceCheck) string {
    return check.Hash(serviceID)
    return fmt.Sprintf("%s%s", nomadCheckPrefix, check.Hash(serviceID))
}

// createCheckReg creates a Check that can be registered with Consul.

@@ -1154,6 +1184,12 @@ func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host
    return &chkReg, nil
}

// isNomadCheck returns true if the ID matches the pattern of a Nomad managed
// check.
func isNomadCheck(id string) bool {
    return strings.HasPrefix(id, nomadCheckPrefix)
}

// isNomadService returns true if the ID matches the pattern of a Nomad managed
// service (new or old formats). Agent services return false as independent
// client and server agents may be running on the same machine. #2827
@@ -128,97 +128,38 @@ func setupFake(t *testing.T) *testFakeCtx {

func TestConsul_ChangeTags(t *testing.T) {
    ctx := setupFake(t)
    require := require.New(t)

    if err := ctx.ServiceClient.RegisterTask(ctx.Task); err != nil {
        t.Fatalf("unexpected error registering task: %v", err)
    }
    require.NoError(ctx.ServiceClient.RegisterTask(ctx.Task))
    require.NoError(ctx.syncOnce())
    require.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")

    if err := ctx.syncOnce(); err != nil {
        t.Fatalf("unexpected error syncing task: %v", err)
    }

    if n := len(ctx.FakeConsul.services); n != 1 {
        t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
    }

    // Query the allocs registrations and then again when we update. The IDs
    // should change
    // Validate the alloc registration
    reg1, err := ctx.ServiceClient.AllocRegistrations(ctx.Task.AllocID)
    if err != nil {
        t.Fatalf("Looking up alloc registration failed: %v", err)
    }
    if reg1 == nil {
        t.Fatalf("Nil alloc registrations: %v", err)
    }
    if num := reg1.NumServices(); num != 1 {
        t.Fatalf("Wrong number of services: got %d; want 1", num)
    }
    if num := reg1.NumChecks(); num != 0 {
        t.Fatalf("Wrong number of checks: got %d; want 0", num)
    }

    origKey := ""
    for k, v := range ctx.FakeConsul.services {
        origKey = k
        if v.Name != ctx.Task.Services[0].Name {
            t.Errorf("expected Name=%q != %q", ctx.Task.Services[0].Name, v.Name)
        }
        if !reflect.DeepEqual(v.Tags, ctx.Task.Services[0].Tags) {
            t.Errorf("expected Tags=%v != %v", ctx.Task.Services[0].Tags, v.Tags)
        }
    require.NoError(err)
    require.NotNil(reg1, "Unexpected nil alloc registration")
    require.Equal(1, reg1.NumServices())
    require.Equal(0, reg1.NumChecks())

    for _, v := range ctx.FakeConsul.services {
        require.Equal(v.Name, ctx.Task.Services[0].Name)
        require.Equal(v.Tags, ctx.Task.Services[0].Tags)
    }

    // Update the task definition
    origTask := ctx.Task.Copy()
    ctx.Task.Services[0].Tags[0] = "newtag"
    if err := ctx.ServiceClient.UpdateTask(origTask, ctx.Task); err != nil {
        t.Fatalf("unexpected error registering task: %v", err)
    }
    if err := ctx.syncOnce(); err != nil {
        t.Fatalf("unexpected error syncing task: %v", err)
    }

    if n := len(ctx.FakeConsul.services); n != 1 {
        t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
    }
    // Register and sync the update
    require.NoError(ctx.ServiceClient.UpdateTask(origTask, ctx.Task))
    require.NoError(ctx.syncOnce())
    require.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")

    for k, v := range ctx.FakeConsul.services {
        if k == origKey {
            t.Errorf("expected key to change but found %q", k)
        }
        if v.Name != ctx.Task.Services[0].Name {
            t.Errorf("expected Name=%q != %q", ctx.Task.Services[0].Name, v.Name)
        }
        if !reflect.DeepEqual(v.Tags, ctx.Task.Services[0].Tags) {
            t.Errorf("expected Tags=%v != %v", ctx.Task.Services[0].Tags, v.Tags)
        }
    }

    // Check again and ensure the IDs changed
    reg2, err := ctx.ServiceClient.AllocRegistrations(ctx.Task.AllocID)
    if err != nil {
        t.Fatalf("Looking up alloc registration failed: %v", err)
    }
    if reg2 == nil {
        t.Fatalf("Nil alloc registrations: %v", err)
    }
    if num := reg2.NumServices(); num != 1 {
        t.Fatalf("Wrong number of services: got %d; want 1", num)
    }
    if num := reg2.NumChecks(); num != 0 {
        t.Fatalf("Wrong number of checks: got %d; want 0", num)
    }

    for task, treg := range reg1.Tasks {
        otherTaskReg, ok := reg2.Tasks[task]
        if !ok {
            t.Fatalf("Task %q not in second reg", task)
        }

        for sID := range treg.Services {
            if _, ok := otherTaskReg.Services[sID]; ok {
                t.Fatalf("service ID didn't change")
            }
        }
    // Validate the metadata changed
    for _, v := range ctx.FakeConsul.services {
        require.Equal(v.Name, ctx.Task.Services[0].Name)
        require.Equal(v.Tags, ctx.Task.Services[0].Tags)
        require.Equal("newtag", v.Tags[0])
    }
}
@@ -227,6 +168,8 @@ func TestConsul_ChangeTags(t *testing.T) {
// slightly different code path than changing tags.
func TestConsul_ChangePorts(t *testing.T) {
    ctx := setupFake(t)
    require := require.New(t)

    ctx.Task.Services[0].Checks = []*structs.ServiceCheck{
        {
            Name: "c1",

@@ -252,35 +195,17 @@ func TestConsul_ChangePorts(t *testing.T) {
        },
    }

    if err := ctx.ServiceClient.RegisterTask(ctx.Task); err != nil {
        t.Fatalf("unexpected error registering task: %v", err)
    require.NoError(ctx.ServiceClient.RegisterTask(ctx.Task))
    require.NoError(ctx.syncOnce())
    require.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")

    for _, v := range ctx.FakeConsul.services {
        require.Equal(ctx.Task.Services[0].Name, v.Name)
        require.Equal(ctx.Task.Services[0].Tags, v.Tags)
        require.Equal(xPort, v.Port)
    }

    if err := ctx.syncOnce(); err != nil {
        t.Fatalf("unexpected error syncing task: %v", err)
    }

    if n := len(ctx.FakeConsul.services); n != 1 {
        t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
    }

    origServiceKey := ""
    for k, v := range ctx.FakeConsul.services {
        origServiceKey = k
        if v.Name != ctx.Task.Services[0].Name {
            t.Errorf("expected Name=%q != %q", ctx.Task.Services[0].Name, v.Name)
        }
        if !reflect.DeepEqual(v.Tags, ctx.Task.Services[0].Tags) {
            t.Errorf("expected Tags=%v != %v", ctx.Task.Services[0].Tags, v.Tags)
        }
        if v.Port != xPort {
            t.Errorf("expected Port x=%v but found: %v", xPort, v.Port)
        }
    }

    if n := len(ctx.FakeConsul.checks); n != 3 {
        t.Fatalf("expected 3 checks but found %d:\n%#v", n, ctx.FakeConsul.checks)
    }
    require.Equal(3, len(ctx.FakeConsul.checks))

    origTCPKey := ""
    origScriptKey := ""
@@ -289,29 +214,28 @@ func TestConsul_ChangePorts(t *testing.T) {
        switch v.Name {
        case "c1":
            origTCPKey = k
            if expected := fmt.Sprintf(":%d", xPort); v.TCP != expected {
                t.Errorf("expected Port x=%v but found: %v", expected, v.TCP)
            }
            require.Equal(fmt.Sprintf(":%d", xPort), v.TCP)
        case "c2":
            origScriptKey = k
            select {
            case <-ctx.MockExec.execs:
                if n := len(ctx.MockExec.execs); n > 0 {
                    t.Errorf("expected 1 exec but found: %d", n+1)
                }
                // Here we validate there is nothing left on the channel
                require.Equal(0, len(ctx.MockExec.execs))
            case <-time.After(3 * time.Second):
                t.Errorf("script not called in time")
                t.Fatalf("script not called in time")
            }
        case "c3":
            origHTTPKey = k
            if expected := fmt.Sprintf("http://:%d/", yPort); v.HTTP != expected {
                t.Errorf("expected Port y=%v but found: %v", expected, v.HTTP)
            }
            require.Equal(fmt.Sprintf("http://:%d/", yPort), v.HTTP)
        default:
            t.Fatalf("unexpected check: %q", v.Name)
        }
    }

    require.NotEmpty(origTCPKey)
    require.NotEmpty(origScriptKey)
    require.NotEmpty(origHTTPKey)

    // Now update the PortLabel on the Service and Check c3
    origTask := ctx.Task.Copy()
    ctx.Task.Services[0].PortLabel = "y"
@@ -339,64 +263,31 @@ func TestConsul_ChangePorts(t *testing.T) {
            // Removed PortLabel; should default to service's (y)
        },
    }

    if err := ctx.ServiceClient.UpdateTask(origTask, ctx.Task); err != nil {
        t.Fatalf("unexpected error registering task: %v", err)
    }
    if err := ctx.syncOnce(); err != nil {
        t.Fatalf("unexpected error syncing task: %v", err)

    require.NoError(ctx.ServiceClient.UpdateTask(origTask, ctx.Task))
    require.NoError(ctx.syncOnce())
    require.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")

    for _, v := range ctx.FakeConsul.services {
        require.Equal(ctx.Task.Services[0].Name, v.Name)
        require.Equal(ctx.Task.Services[0].Tags, v.Tags)
        require.Equal(yPort, v.Port)
    }

    if n := len(ctx.FakeConsul.services); n != 1 {
        t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
    }

    for k, v := range ctx.FakeConsul.services {
        if k == origServiceKey {
            t.Errorf("expected key change; still: %q", k)
        }
        if v.Name != ctx.Task.Services[0].Name {
            t.Errorf("expected Name=%q != %q", ctx.Task.Services[0].Name, v.Name)
        }
        if !reflect.DeepEqual(v.Tags, ctx.Task.Services[0].Tags) {
            t.Errorf("expected Tags=%v != %v", ctx.Task.Services[0].Tags, v.Tags)
        }
        if v.Port != yPort {
            t.Errorf("expected Port y=%v but found: %v", yPort, v.Port)
        }
    }

    if n := len(ctx.FakeConsul.checks); n != 3 {
        t.Fatalf("expected 3 check but found %d:\n%#v", n, ctx.FakeConsul.checks)
    }
    require.Equal(3, len(ctx.FakeConsul.checks))

    for k, v := range ctx.FakeConsul.checks {
        switch v.Name {
        case "c1":
            if k == origTCPKey {
                t.Errorf("expected key change for %s from %q", v.Name, origTCPKey)
            }
            if expected := fmt.Sprintf(":%d", xPort); v.TCP != expected {
                t.Errorf("expected Port x=%v but found: %v", expected, v.TCP)
            }
            // C1 is not changed
            require.Equal(origTCPKey, k)
            require.Equal(fmt.Sprintf(":%d", xPort), v.TCP)
        case "c2":
            if k == origScriptKey {
                t.Errorf("expected key change for %s from %q", v.Name, origScriptKey)
            }
            select {
            case <-ctx.MockExec.execs:
                if n := len(ctx.MockExec.execs); n > 0 {
                    t.Errorf("expected 1 exec but found: %d", n+1)
                }
            case <-time.After(3 * time.Second):
                t.Errorf("script not called in time")
            }
            // C2 is not changed and should not have been re-registered
            require.Equal(origScriptKey, k)
        case "c3":
            if k == origHTTPKey {
                t.Errorf("expected %s key to change from %q", v.Name, k)
            }
            if expected := fmt.Sprintf("http://:%d/", yPort); v.HTTP != expected {
                t.Errorf("expected Port y=%v but found: %v", expected, v.HTTP)
            }
            require.NotEqual(origHTTPKey, k)
            require.Equal(fmt.Sprintf("http://:%d/", yPort), v.HTTP)
        default:
            t.Errorf("Unknown check: %q", k)
        }
@@ -250,7 +250,10 @@ func (s *HTTPServer) schedulerUpdateConfig(resp http.ResponseWriter, req *http.R
    }

    args.Config = structs.SchedulerConfiguration{
        PreemptionConfig: structs.PreemptionConfig{SystemSchedulerEnabled: conf.PreemptionConfig.SystemSchedulerEnabled},
        PreemptionConfig: structs.PreemptionConfig{
            SystemSchedulerEnabled:  conf.PreemptionConfig.SystemSchedulerEnabled,
            BatchSchedulerEnabled:   conf.PreemptionConfig.BatchSchedulerEnabled,
            ServiceSchedulerEnabled: conf.PreemptionConfig.ServiceSchedulerEnabled},
    }

    // Check for cas value
@@ -272,6 +272,8 @@ func TestOperator_SchedulerGetConfiguration(t *testing.T) {
        out, ok := obj.(structs.SchedulerConfigurationResponse)
        require.True(ok)
        require.True(out.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
        require.True(out.SchedulerConfig.PreemptionConfig.BatchSchedulerEnabled)
        require.True(out.SchedulerConfig.PreemptionConfig.ServiceSchedulerEnabled)
    })
}

@@ -280,7 +282,8 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) {
    httpTest(t, nil, func(s *TestAgent) {
        require := require.New(t)
        body := bytes.NewBuffer([]byte(`{"PreemptionConfig": {
            "SystemSchedulerEnabled": true
            "SystemSchedulerEnabled": true,
            "ServiceSchedulerEnabled": true
        }}`))
        req, _ := http.NewRequest("PUT", "/v1/operator/scheduler/configuration", body)
        resp := httptest.NewRecorder()

@@ -301,6 +304,7 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) {
        err = s.RPC("Operator.SchedulerGetConfiguration", &args, &reply)
        require.Nil(err)
        require.True(reply.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
        require.True(reply.SchedulerConfig.PreemptionConfig.ServiceSchedulerEnabled)
    })
}

@@ -309,7 +313,8 @@ func TestOperator_SchedulerCASConfiguration(t *testing.T) {
    httpTest(t, nil, func(s *TestAgent) {
        require := require.New(t)
        body := bytes.NewBuffer([]byte(`{"PreemptionConfig": {
            "SystemSchedulerEnabled": true
            "SystemSchedulerEnabled": true,
            "BatchSchedulerEnabled":true
        }}`))
        req, _ := http.NewRequest("PUT", "/v1/operator/scheduler/configuration", body)
        resp := httptest.NewRecorder()

@@ -331,11 +336,13 @@ func TestOperator_SchedulerCASConfiguration(t *testing.T) {
            t.Fatalf("err: %v", err)
        }
        require.True(reply.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
        require.True(reply.SchedulerConfig.PreemptionConfig.BatchSchedulerEnabled)

        // Create a CAS request, bad index
        {
            buf := bytes.NewBuffer([]byte(`{"PreemptionConfig": {
                "SystemSchedulerEnabled": false
                "SystemSchedulerEnabled": false,
                "BatchSchedulerEnabled":true
            }}`))
            req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/operator/scheduler/configuration?cas=%d", reply.QueryMeta.Index-1), buf)
            resp := httptest.NewRecorder()

@@ -351,7 +358,8 @@ func TestOperator_SchedulerCASConfiguration(t *testing.T) {
        // Create a CAS request, good index
        {
            buf := bytes.NewBuffer([]byte(`{"PreemptionConfig": {
                "SystemSchedulerEnabled": false
                "SystemSchedulerEnabled": false,
                "BatchSchedulerEnabled":false
            }}`))
            req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/operator/scheduler/configuration?cas=%d", reply.QueryMeta.Index), buf)
            resp := httptest.NewRecorder()

@@ -369,5 +377,6 @@ func TestOperator_SchedulerCASConfiguration(t *testing.T) {
            t.Fatalf("err: %v", err)
        }
        require.False(reply.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
        require.False(reply.SchedulerConfig.PreemptionConfig.BatchSchedulerEnabled)
    })
}
@@ -7,6 +7,9 @@ data_dir = "/tmp/client1"
# Give the agent a unique name. Defaults to hostname
name = "client1"

# Enable debugging
enable_debug = true

# Enable the client
client {
  enabled = true

@@ -7,6 +7,9 @@ data_dir = "/tmp/client2"
# Give the agent a unique name. Defaults to hostname
name = "client2"

# Enable debugging
enable_debug = true

# Enable the client
client {
  enabled = true

@@ -7,6 +7,9 @@ data_dir = "/tmp/client3"
# Give the agent a unique name. Defaults to hostname
name = "client3"

# Enable debugging
enable_debug = true

# Enable the client
client {
  enabled = true
@@ -15,8 +15,16 @@ $ envchain nomadaws TF_VAR_nomad_sha=<nomad_sha> terraform apply

After this step, you should have a nomad client address to point the end to end tests in the `e2e` folder to.

Teardown
========

## SSH

Terraform will output node IPs that may be accessed via ssh:

```
ssh -i keys/nomad-e2e-*.pem ubuntu@${EC2_IP_ADDR}
```

## Teardown

The terraform state file stores all the info, so the nomad_sha doesn't need to be valid during teardown.

```
@@ -60,6 +60,7 @@ resource "aws_instance" "server" {
      "sudo cp /tmp/server.hcl /etc/nomad.d/nomad.hcl",
      "sudo chmod 0755 /usr/local/bin/nomad",
      "sudo chown root:root /usr/local/bin/nomad",
      "sudo systemctl enable nomad.service",
      "sudo systemctl start nomad.service"
    ]

@@ -112,6 +113,7 @@ resource "aws_instance" "client" {
      "sudo cp /tmp/client.hcl /etc/nomad.d/nomad.hcl",
      "sudo chmod 0755 /usr/local/bin/nomad",
      "sudo chown root:root /usr/local/bin/nomad",
      "sudo systemctl enable nomad.service",
      "sudo systemctl start nomad.service"
    ]
@@ -1,11 +1,19 @@
enable_debug = true
log_level = "DEBUG"
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"

# Enable the client
client {
  enabled = true
  options {
    # Allow jobs to run as root
    "user.blacklist" = ""

    # Allow rawexec jobs
    "driver.raw_exec.enable" = "1"

    # Allow privileged docker jobs
    "docker.privileged.enabled" = "true"
  }
}
@@ -49,7 +49,7 @@ locals {

# Generates keys to use for provisioning and access
module "keys" {
  name   = "nomad-e2e-${local.random_name}"
  name   = "${local.random_name}"
  path   = "${path.root}/keys"
  source = "mitchellh/dynamic-keys/aws"
}
@@ -88,5 +88,10 @@ Then you can run e2e tests with:
```
go test -v ./e2e
```

ssh into nodes with:
```
ssh -i keys/${local.random_name}.pem ubuntu@${aws_instance.client.0.public_ip}
```
EOM
}

e2e/terraform/shared/scripts/README.md (new file, 1 line)
@@ -0,0 +1 @@
Scripts used by Packer to create base images
@@ -25,10 +25,11 @@ sed -i "s/RETRY_JOIN/$RETRY_JOIN/g" $CONFIGDIR/consul_client.json
sudo cp $CONFIGDIR/consul_client.json $CONSULCONFIGDIR/consul.json
sudo cp $CONFIGDIR/consul_$CLOUD.service /etc/systemd/system/consul.service

sudo systemctl start consul.service
sudo systemctl enable consul.service
sudo systemctl start consul.service
sleep 10

2export NOMAD_ADDR=http://$IP_ADDRESS:4646
export NOMAD_ADDR=http://$IP_ADDRESS:4646

# Add hostname to /etc/hosts
echo "127.0.0.1 $(hostname)" | sudo tee --append /etc/hosts

@@ -53,5 +54,3 @@ echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre" | sudo tee --appe

# Update PATH
echo "export PATH=$PATH:/usr/local/bin/spark/bin:/usr/local/$HADOOP_VERSION/bin" | sudo tee --append /home/$HOME_DIR/.bashrc
@@ -28,7 +28,8 @@ sed -i "s/RETRY_JOIN/$RETRY_JOIN/g" $CONFIGDIR/consul.json
sudo cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
sudo cp $CONFIGDIR/consul_$CLOUD.service /etc/systemd/system/consul.service

sudo systemctl start consul.service
sudo systemctl enable consul.service
sudo systemctl start consul.service
sleep 10
export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
export CONSUL_RPC_ADDR=$IP_ADDRESS:8400

@@ -38,7 +39,8 @@ sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/vault.hcl
sudo cp $CONFIGDIR/vault.hcl $VAULTCONFIGDIR
sudo cp $CONFIGDIR/vault.service /etc/systemd/system/vault.service

sudo systemctl start vault.service
sudo systemctl enable vault.service
sudo systemctl start vault.service

export NOMAD_ADDR=http://$IP_ADDRESS:4646
@@ -9,16 +9,19 @@ cd /ops

CONFIGDIR=/ops/shared/config

CONSULVERSION=1.4.0
CONSULVERSION=1.4.4
CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
CONSULCONFIGDIR=/etc/consul.d
CONSULDIR=/opt/consul

VAULTVERSION=0.11.4
VAULTVERSION=1.1.1
VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip
VAULTCONFIGDIR=/etc/vault.d
VAULTDIR=/opt/vault

# Will be overwritten by sha specified
NOMADVERSION=0.9.1
NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
NOMADCONFIGDIR=/etc/nomad.d
NOMADDIR=/opt/nomad
@@ -38,42 +41,37 @@ sudo pip install numpy

sudo ufw disable || echo "ufw not installed"

# Consul
echo "Install Consul"
curl -L $CONSULDOWNLOAD > consul.zip

## Install
sudo unzip consul.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/consul
sudo chown root:root /usr/local/bin/consul

## Configure
echo "Configure Consul"
sudo mkdir -p $CONSULCONFIGDIR
sudo chmod 755 $CONSULCONFIGDIR
sudo mkdir -p $CONSULDIR
sudo chmod 755 $CONSULDIR

# Vault
echo "Install Vault"
curl -L $VAULTDOWNLOAD > vault.zip

## Install
sudo unzip vault.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/vault
sudo chown root:root /usr/local/bin/vault

## Configure
echo "Configure Vault"
sudo mkdir -p $VAULTCONFIGDIR
sudo chmod 755 $VAULTCONFIGDIR
sudo mkdir -p $VAULTDIR
sudo chmod 755 $VAULTDIR

## Install
echo "Install Nomad"
curl -L $NOMADDOWNLOAD > nomad.zip
sudo unzip nomad.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/nomad
sudo chown root:root /usr/local/bin/nomad

## Configure
echo "Configure Nomad"
sudo mkdir -p $NOMADCONFIGDIR
sudo chmod 755 $NOMADCONFIGDIR
sudo mkdir -p $NOMADDIR
@@ -24,7 +24,7 @@ func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) {
    require.True(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)

    // Change a config setting
    newConf := &api.SchedulerConfiguration{PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false}}
    newConf := &api.SchedulerConfiguration{PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false, BatchSchedulerEnabled: false}}
    resp, wm, err := operator.SchedulerSetConfiguration(newConf, nil)
    require.Nil(err)
    require.NotZero(wm.LastIndex)

@@ -33,6 +33,7 @@ func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) {
    config, _, err = operator.SchedulerGetConfiguration(nil)
    require.Nil(err)
    require.False(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
    require.False(config.SchedulerConfig.PreemptionConfig.BatchSchedulerEnabled)
}

func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) {

@@ -53,7 +54,7 @@ func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) {
    // Pass an invalid ModifyIndex
    {
        newConf := &api.SchedulerConfiguration{
            PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false},
            PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false, BatchSchedulerEnabled: false},
            ModifyIndex:      config.SchedulerConfig.ModifyIndex - 1,
        }
        resp, wm, err := operator.SchedulerCASConfiguration(newConf, nil)

@@ -65,7 +66,7 @@ func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) {
    // Pass a valid ModifyIndex
    {
        newConf := &api.SchedulerConfiguration{
            PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false},
            PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false, BatchSchedulerEnabled: false},
            ModifyIndex:      config.SchedulerConfig.ModifyIndex,
        }
        resp, wm, err := operator.SchedulerCASConfiguration(newConf, nil)
@@ -16,7 +16,6 @@ import (
    "github.com/hashicorp/hcl/hcl/ast"
    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/helper"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/mitchellh/mapstructure"
)

@@ -559,26 +558,26 @@ func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error {

    // If "version" is provided, set the operand
    // to "version" and the value to the "RTarget"
    if constraint, ok := m[structs.ConstraintVersion]; ok {
        m["Operand"] = structs.ConstraintVersion
    if constraint, ok := m[api.ConstraintVersion]; ok {
        m["Operand"] = api.ConstraintVersion
        m["RTarget"] = constraint
    }

    // If "regexp" is provided, set the operand
    // to "regexp" and the value to the "RTarget"
    if constraint, ok := m[structs.ConstraintRegex]; ok {
        m["Operand"] = structs.ConstraintRegex
    if constraint, ok := m[api.ConstraintRegex]; ok {
        m["Operand"] = api.ConstraintRegex
        m["RTarget"] = constraint
    }

    // If "set_contains" is provided, set the operand
    // to "set_contains" and the value to the "RTarget"
    if constraint, ok := m[structs.ConstraintSetContains]; ok {
        m["Operand"] = structs.ConstraintSetContains
    if constraint, ok := m[api.ConstraintSetContains]; ok {
        m["Operand"] = api.ConstraintSetContains
        m["RTarget"] = constraint
    }

    if value, ok := m[structs.ConstraintDistinctHosts]; ok {
    if value, ok := m[api.ConstraintDistinctHosts]; ok {
        enabled, err := parseBool(value)
        if err != nil {
            return fmt.Errorf("distinct_hosts should be set to true or false; %v", err)

@@ -589,11 +588,11 @@ func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error {
        continue
    }

    m["Operand"] = structs.ConstraintDistinctHosts
    m["Operand"] = api.ConstraintDistinctHosts
    }

    if property, ok := m[structs.ConstraintDistinctProperty]; ok {
        m["Operand"] = structs.ConstraintDistinctProperty
    if property, ok := m[api.ConstraintDistinctProperty]; ok {
        m["Operand"] = api.ConstraintDistinctProperty
        m["LTarget"] = property
    }

@@ -641,35 +640,35 @@ func parseAffinities(result *[]*api.Affinity, list *ast.ObjectList) error {

    // If "version" is provided, set the operand
    // to "version" and the value to the "RTarget"
    if affinity, ok := m[structs.ConstraintVersion]; ok {
        m["Operand"] = structs.ConstraintVersion
    if affinity, ok := m[api.ConstraintVersion]; ok {
        m["Operand"] = api.ConstraintVersion
        m["RTarget"] = affinity
    }

    // If "regexp" is provided, set the operand
    // to "regexp" and the value to the "RTarget"
    if affinity, ok := m[structs.ConstraintRegex]; ok {
        m["Operand"] = structs.ConstraintRegex
    if affinity, ok := m[api.ConstraintRegex]; ok {
        m["Operand"] = api.ConstraintRegex
        m["RTarget"] = affinity
    }

    // If "set_contains_any" is provided, set the operand
    // to "set_contains_any" and the value to the "RTarget"
    if affinity, ok := m[structs.ConstraintSetContainsAny]; ok {
        m["Operand"] = structs.ConstraintSetContainsAny
    if affinity, ok := m[api.ConstraintSetContainsAny]; ok {
        m["Operand"] = api.ConstraintSetContainsAny
        m["RTarget"] = affinity
    }

    // If "set_contains_all" is provided, set the operand
    // to "set_contains_all" and the value to the "RTarget"
    if affinity, ok := m[structs.ConstraintSetContainsAll]; ok {
        m["Operand"] = structs.ConstraintSetContainsAll
    if affinity, ok := m[api.ConstraintSetContainsAll]; ok {
        m["Operand"] = api.ConstraintSetContainsAll
        m["RTarget"] = affinity
    }

    // set_contains is a synonym of set_contains_all
    if affinity, ok := m[structs.ConstraintSetContains]; ok {
        m["Operand"] = structs.ConstraintSetContains
    if affinity, ok := m[api.ConstraintSetContains]; ok {
        m["Operand"] = api.ConstraintSetContains
        m["RTarget"] = affinity
    }

@@ -1691,7 +1690,7 @@ func parsePeriodic(result **api.PeriodicConfig, list *ast.ObjectList) error {

    // If "cron" is provided, set the type to "cron" and store the spec.
    if cron, ok := m["cron"]; ok {
        m["SpecType"] = structs.PeriodicSpecCron
        m["SpecType"] = api.PeriodicSpecCron
        m["Spec"] = cron
    }
|
@ -2971,11 +2971,12 @@ func TestFSM_SchedulerConfig(t *testing.T) {
|
|||
|
||||
require := require.New(t)
|
||||
|
||||
// Set the autopilot config using a request.
|
||||
// Set the scheduler config using a request.
|
||||
req := structs.SchedulerSetConfigRequest{
|
||||
Config: structs.SchedulerConfiguration{
|
||||
PreemptionConfig: structs.PreemptionConfig{
|
||||
SystemSchedulerEnabled: true,
|
||||
BatchSchedulerEnabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -2992,10 +2993,11 @@ func TestFSM_SchedulerConfig(t *testing.T) {
|
|||
require.Nil(err)
|
||||
|
||||
require.Equal(config.PreemptionConfig.SystemSchedulerEnabled, req.Config.PreemptionConfig.SystemSchedulerEnabled)
|
||||
require.Equal(config.PreemptionConfig.BatchSchedulerEnabled, req.Config.PreemptionConfig.BatchSchedulerEnabled)
|
||||
|
||||
// Now use CAS and provide an old index
|
||||
req.CAS = true
|
||||
req.Config.PreemptionConfig = structs.PreemptionConfig{SystemSchedulerEnabled: false}
|
||||
req.Config.PreemptionConfig = structs.PreemptionConfig{SystemSchedulerEnabled: false, BatchSchedulerEnabled: false}
|
||||
req.Config.ModifyIndex = config.ModifyIndex - 1
|
||||
buf, err = structs.Encode(structs.SchedulerConfigRequestType, req)
|
||||
require.Nil(err)
|
||||
|
@ -3009,4 +3011,5 @@ func TestFSM_SchedulerConfig(t *testing.T) {
|
|||
require.Nil(err)
|
||||
// Verify that preemption is still enabled
|
||||
require.True(config.PreemptionConfig.SystemSchedulerEnabled)
|
||||
require.True(config.PreemptionConfig.BatchSchedulerEnabled)
|
||||
}
|
||||
|
|
|
@@ -47,7 +47,9 @@ var minSchedulerConfigVersion = version.Must(version.NewVersion("0.9.0"))

// Default configuration for scheduler with preemption enabled for system jobs
var defaultSchedulerConfig = &structs.SchedulerConfiguration{
    PreemptionConfig: structs.PreemptionConfig{
        SystemSchedulerEnabled:  true,
        BatchSchedulerEnabled:   true,
        ServiceSchedulerEnabled: true,
    },
}
@@ -1,5 +1,5 @@
#!/bin/bash
set -e

FILES="$(ls *[!_test].go | tr '\n' ' ')"
FILES="$(ls ./*.go | grep -v -e _test.go -e .generated.go | tr '\n' ' ')"
codecgen -d 100 -o structs.generated.go ${FILES}
@@ -153,6 +153,12 @@ type SchedulerSetConfigurationResponse struct {
type PreemptionConfig struct {
    // SystemSchedulerEnabled specifies if preemption is enabled for system jobs
    SystemSchedulerEnabled bool

    // BatchSchedulerEnabled specifies if preemption is enabled for batch jobs
    BatchSchedulerEnabled bool

    // ServiceSchedulerEnabled specifies if preemption is enabled for service jobs
    ServiceSchedulerEnabled bool
}

// SchedulerSetConfigRequest is used by the Operator endpoint to update the

@@ -1282,7 +1282,6 @@ type EmitNodeEventsRequest struct {
// EmitNodeEventsResponse is a response to the client about the status of
// the node event source update.
type EmitNodeEventsResponse struct {
    Index uint64
    WriteMeta
}
@@ -3,7 +3,7 @@
set -e

# Match entry in vendor.json
GIT_TAG="v1.1.2"
GIT_TAG="0053ebfd9d0ee06ccefbfe17072021e1d4acebee"
echo "Installing codec/codecgen@${GIT_TAG} ..."

# Either fetch in existing git repo or use go get to clone
@@ -1,5 +1,6 @@
import { alias } from '@ember/object/computed';
import { assert } from '@ember/debug';
import { htmlSafe } from '@ember/template';
import Evented from '@ember/object/evented';
import EmberObject, { computed } from '@ember/object';
import { assign } from '@ember/polyfills';

@@ -7,6 +8,7 @@ import queryString from 'query-string';
import { task } from 'ember-concurrency';
import StreamLogger from 'nomad-ui/utils/classes/stream-logger';
import PollLogger from 'nomad-ui/utils/classes/poll-logger';
import Anser from 'anser';

const MAX_OUTPUT_LENGTH = 50000;

@@ -37,7 +39,9 @@ const Log = EmberObject.extend(Evented, {
  // The top or bottom of the log, depending on whether
  // the logPointer is pointed at head or tail
  output: computed('logPointer', 'head', 'tail', function() {
    return this.logPointer === 'head' ? this.head : this.tail;
    let logs = this.logPointer === 'head' ? this.head : this.tail;
    let colouredLogs = Anser.ansiToHtml(logs);
    return htmlSafe(colouredLogs);
  }),

  init() {
@@ -25,6 +25,7 @@
    "'app/styles/**/*.*'": ["prettier --write", "git add"]
  },
  "devDependencies": {
    "anser": "^1.4.8",
    "@babel/plugin-proposal-object-rest-spread": "^7.4.3",
    "@ember/jquery": "^0.6.0",
    "@ember/optional-features": "^0.7.0",
@@ -100,9 +100,9 @@ module('Unit | Util | Log', function(hooks) {
    });

    await settled();
    assert.ok(log.get('output').endsWith(truncationMessage), 'Truncation message is shown');
    assert.ok(log.get('output').toString().endsWith(truncationMessage), 'Truncation message is shown');
    assert.equal(
      log.get('output').length,
      log.get('output').toString().length,
      50000 + truncationMessage.length,
      'Output is truncated the appropriate amount'
    );
@@ -1129,6 +1129,11 @@ amdefine@>=0.0.4:
  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
  integrity sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=

anser@^1.4.8:
  version "1.4.8"
  resolved "https://registry.yarnpkg.com/anser/-/anser-1.4.8.tgz#19a3bfc5f0e31c49efaea38f58fd0d136597f2a3"
  integrity sha512-tVHucTCKIt9VRrpQKzPtOlwm/3AmyQ7J+QE29ixFnvuE2hm83utEVrN7jJapYkHV6hI0HOHkEX9TOMCzHtwvuA==

ansi-escapes@^1.1.0:
  version "1.4.0"
  resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-1.4.0.tgz#d3a8a83b319aa67793662b13e761c7911422306e"
@@ -29,7 +29,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter allocations on based on
  an index prefix. This is specified as a querystring parameter.
  an index prefix. This is specified as a query string parameter.

### Sample Request

@@ -29,7 +29,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter deployments based on
  an index prefix. This is specified as a querystring parameter.
  an index prefix. This is specified as a query string parameter.

### Sample Request

@@ -29,7 +29,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter evaluations on based on
  an index prefix. This is specified as a querystring parameter.
  an index prefix. This is specified as a query string parameter.

### Sample Request

@@ -29,7 +29,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")` - Specifies a string to filter jobs on based on
  an index prefix. This is specified as a querystring parameter.
  an index prefix. This is specified as a query string parameter.

### Sample Request

@@ -26,7 +26,7 @@ The table below shows this endpoint's support for
- `format` `(string: "")` - Specifies the metrics format to be other than the
  JSON default. Currently, only `prometheus` is supported as an alternative
  format. This is specified as a querystring parameter.
  format. This is specified as a query string parameter.

### Sample Request

@@ -32,7 +32,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter namespaces on based on
  an index prefix. This is specified as a querystring parameter.
  an index prefix. This is specified as a query string parameter.

### Sample Request

@@ -29,7 +29,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter nodes on based on an
  index prefix. This is specified as a querystring parameter.
  index prefix. This is specified as a query string parameter.

### Sample Request
@@ -930,7 +930,7 @@ $ curl \
- `Message` - The specific message for the event, detailing what occurred.

- `Subsystem` - The subsystem where the node event took place. Subsysystems
- `Subsystem` - The subsystem where the node event took place. Subsystems
  include:

  - `Drain` - The Nomad server draining subsystem.
@@ -39,7 +39,7 @@ The table below shows this endpoint's support for
### Parameters

- `stale` - Specifies if the cluster should respond without an active leader.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

### Sample Request
@@ -363,7 +363,9 @@ $ curl \
    "CreateIndex": 5,
    "ModifyIndex": 5,
    "PreemptionConfig": {
      "SystemSchedulerEnabled": true
      "SystemSchedulerEnabled": true,
      "BatchSchedulerEnabled": true,
      "ServiceSchedulerEnabled": true,
    }
  }
}

@@ -379,6 +381,10 @@ $ curl \
- `PreemptionConfig` `(PreemptionConfig)` - Options to enable preemption for various schedulers.
- `SystemSchedulerEnabled` `(bool: true)` - Specifies whether preemption for system jobs is enabled. Note that
  this defaults to true.
- `BatchSchedulerEnabled` `(bool: true)` (Enterprise Only) - Specifies whether preemption for batch jobs is enabled. Note that
  this defaults to true.
- `ServiceSchedulerEnabled` `(bool: true)` (Enterprise Only) - Specifies whether preemption for service jobs is enabled. Note that
  this defaults to true.
- `CreateIndex` - The Raft index at which the config was created.
- `ModifyIndex` - The Raft index at which the config was modified.
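A minimal sketch of reading this configuration over HTTP. The `/v1/operator/scheduler/configuration` path and the local agent address are assumptions of this sketch and are not shown in the hunk above.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumes a local Nomad agent on its default HTTP address; the path is
	// the operator scheduler configuration endpoint documented on this page.
	resp, err := http.Get("http://127.0.0.1:4646/v1/operator/scheduler/configuration")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body contains the PreemptionConfig block from the sample response,
	// including the new BatchSchedulerEnabled and ServiceSchedulerEnabled flags.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```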
@@ -409,7 +415,9 @@ The table below shows this endpoint's support for
```json
{
  "PreemptionConfig": {
    "EnablePreemption": false
    "SystemSchedulerEnabled": false,
    "BatchSchedulerEnabled": false,
    "ServiceSchedulerEnabled": true,
  }
}
```

@@ -417,3 +425,7 @@ The table below shows this endpoint's support for
- `PreemptionConfig` `(PreemptionConfig)` - Options to enable preemption for various schedulers.
- `SystemSchedulerEnabled` `(bool: true)` - Specifies whether preemption for system jobs is enabled. Note that
  if this is set to true, then system jobs can preempt any other jobs.
- `BatchSchedulerEnabled` `(bool: true)` (Enterprise Only) - Specifies whether preemption for batch jobs is enabled. Note that
  if this is set to true, then batch jobs can preempt any other jobs.
- `ServiceSchedulerEnabled` `(bool: true)` (Enterprise Only) - Specifies whether preemption for service jobs is enabled. Note that
  if this is set to true, then service jobs can preempt any other jobs.
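A matching write-side sketch, assuming the same endpoint path and that the update accepts a `PUT` of a payload like the sample above; the agent address is likewise an assumption.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Disable preemption for system and batch jobs while leaving it enabled
	// for service jobs, following the sample payload above.
	payload := []byte(`{
  "PreemptionConfig": {
    "SystemSchedulerEnabled": false,
    "BatchSchedulerEnabled": false,
    "ServiceSchedulerEnabled": true
  }
}`)

	// Assumed path and local agent address, as in the read sketch above.
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:4646/v1/operator/scheduler/configuration",
		bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```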
@@ -32,7 +32,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter quota specifications on
  based on an index prefix. This is specified as a querystring parameter.
  based on an index prefix. This is specified as a query string parameter.

### Sample Request

@@ -228,7 +228,7 @@ The table below shows this endpoint's support for
### Parameters

- `prefix` `(string: "")`- Specifies a string to filter quota specifications on
  based on an index prefix. This is specified as a querystring parameter.
  based on an index prefix. This is specified as a query string parameter.

### Sample Request
@@ -32,7 +32,7 @@ job related results will not be returned. If the token is only valid for
### Parameters

- `Prefix` `(string: <required>)` - Specifies the identifer against which
- `Prefix` `(string: <required>)` - Specifies the identifier against which
  matches will be found. For example, if the given prefix were "a", potential
  matches might be "abcd", or "aabb".
- `Context` `(string: <required>)` - Defines the scope in which a search for a
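A hedged sketch of calling the search API with these two parameters; the `/v1/search` path, the `jobs` context value, and the agent address are assumptions not shown in this hunk.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Prefix and Context are the two required parameters described above.
	payload := []byte(`{"Prefix": "a", "Context": "jobs"}`)

	resp, err := http.Post("http://127.0.0.1:4646/v1/search",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response groups matches by context, e.g. job IDs starting with "a".
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```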
@@ -23,19 +23,19 @@ This page lists all known jobs in a paginated, searchable, and sortable table.
### Parameters

- `namespace` `(string: "")` - Specifies the namespace all jobs should be a member
  of. This is specified as a querystring parameter. Namespaces are an enterprise feature.
  of. This is specified as a query string parameter. Namespaces are an enterprise feature.

- `sort` `(string: "")` - Specifies the property the list of jobs should be sorted by.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

- `search` `(string: "")` - Specifies a regular expression uses to filter the list of
  visible jobs. This is specified as a querystring parameter.
  visible jobs. This is specified as a query string parameter.

- `page` `(int: 0)` - Specifies the page in the jobs list that should be visible. This
  is specified as a querystring parameter.
  is specified as a query string parameter.

## Job Detail

@@ -74,13 +74,13 @@ based on the type of job.
### Parameters

- `sort` `(string: "")` - Specifies the property the list of task groups should be
  sorted by. This is specified as a querystring parameter.
  sorted by. This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

- `page` `(int: 0)` - Specifies the page in the task groups list that should be visible. This
  is specified as a querystring parameter.
  is specified as a query string parameter.

### Job Definition

@@ -133,16 +133,16 @@ allocations.
### Parameters

- `sort` `(string: "")` - Specifies the property the list of allocations should be sorted by.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

- `search` `(string: "")` - Specifies a regular expression uses to filter the list of
  visible allocations. This is specified as a querystring parameter.
  visible allocations. This is specified as a query string parameter.

- `page` `(int: 0)` - Specifies the page in the allocations list that should be visible. This
  is specified as a querystring parameter.
  is specified as a query string parameter.

## Allocation Detail

@@ -160,10 +160,10 @@ description of the event.
### Parameters

- `sort` `(string: "")` - Specifies the property the list of tasks should be sorted by.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

## Task Detail

@@ -199,16 +199,16 @@ table.
### Parameters

- `sort` `(string: "")` - Specifies the property the list of client nodes should be sorted by.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

- `search` `(string: "")` - Specifies a regular expression uses to filter the list of
  visible client nodes. This is specified as a querystring parameter.
  visible client nodes. This is specified as a query string parameter.

- `page` `(int: 0)` - Specifies the page in the client nodes list that should be visible. This
  is specified as a querystring parameter.
  is specified as a query string parameter.

## Node Detail

@@ -223,16 +223,16 @@ address, port, datacenter, allocations, and attributes.
### Parameters

- `sort` `(string: "")` - Specifies the property the list of allocations should be sorted by.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

- `search` `(string: "")` - Specifies a regular expression uses to filter the list of
  visible allocations. This is specified as a querystring parameter.
  visible allocations. This is specified as a query string parameter.

- `page` `(int: 0)` - Specifies the page in the allocations list that should be visible. This
  is specified as a querystring parameter.
  is specified as a query string parameter.

## Servers List

@@ -248,13 +248,13 @@ the leader.
### Parameters

- `sort` `(string: "")` - Specifies the property the list of server agents should be sorted by.
  This is specified as a querystring parameter.
  This is specified as a query string parameter.

- `desc` `(boolean: false)` - Specifies whether or not the sort direction is descending
  or ascending. This is specified as a querystring parameter.
  or ascending. This is specified as a query string parameter.

- `page` `(int: 0)` - Specifies the page in the server agents list that should be visible. This
  is specified as a querystring parameter.
  is specified as a query string parameter.

## Server Detail
@@ -33,7 +33,7 @@ PluginInfoResponse{
#### `ConfigSchema() (*hclspec.Spec, error)`

The `ConfigSchema` function allows a plugin to tell Nomad the schema for it's
The `ConfigSchema` function allows a plugin to tell Nomad the schema for its
configuration. This configuration is given in a [plugin block][pluginblock] of
the client configuration. The schema is defined with the [hclspec][hclspec]
package.
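A sketch of what a `ConfigSchema` implementation might look like, assuming the `hclspec` helper constructors (`NewObject`, `NewAttr`, `NewDefault`, `NewLiteral`) and import path; the `HelloDriverPlugin` type and its `greeting` option are made up for illustration.

```go
package hello

import "github.com/hashicorp/nomad/plugins/shared/hclspec"

// HelloDriverPlugin is a hypothetical driver used only for this sketch.
type HelloDriverPlugin struct{}

// configSpec describes a single optional "greeting" string option with a
// default value, as it would appear in the plugin block of the client
// configuration.
var configSpec = hclspec.NewObject(map[string]*hclspec.Spec{
	"greeting": hclspec.NewDefault(
		hclspec.NewAttr("greeting", "string", false),
		hclspec.NewLiteral(`"hello"`),
	),
})

// ConfigSchema hands the spec back to Nomad.
func (d *HelloDriverPlugin) ConfigSchema() (*hclspec.Spec, error) {
	return configSpec, nil
}
```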
@@ -13,7 +13,7 @@ functionality of some components within Nomad. The design of the plugin system
is inspired by the lessons learned from plugin systems implemented in other
HashiCorp products such as Terraform and Vault.

The following components are currently plugable within Nomad:
The following components are currently pluggable within Nomad:

- [Task Drivers](/docs/internals/plugins/task-drivers.html)
- [Devices](/docs/internals/plugins/devices.html)
@@ -16,7 +16,7 @@ driver source][lxcdriver].
Authoring a task driver (shortened to driver in this documentation) in Nomad
consists of implementing the [DriverPlugin][driverplugin] interface and adding
a main package to launch the plugin. A driver plugin is long lived and it's
a main package to launch the plugin. A driver plugin is long lived and its
lifetime is not bound to the Nomad client. This means that the Nomad client can
be restarted without the restarting the driver. Nomad will ensure that one
instance of the driver is running, meaning if the driver crashes or otherwise
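A sketch of the launcher `main` package described here, modeled on the LXC example driver referenced above; `plugins.Serve`, the factory shape, and the `hello` package names are assumptions of this sketch rather than a definitive implementation.

```go
package main

import (
	log "github.com/hashicorp/go-hclog"

	"github.com/example/nomad-driver-hello/hello" // placeholder driver package
	"github.com/hashicorp/nomad/plugins"
)

func main() {
	// Serve the plugin; the process keeps running independently of the
	// Nomad client, which matches the lifetime described above.
	plugins.Serve(factory)
}

// factory returns a new instance of the hypothetical hello driver plugin.
func factory(l log.Logger) interface{} {
	return hello.NewHelloDriverPlugin(l)
}
```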
@@ -92,7 +92,7 @@ artifact {
}
```

To download from private repo, sshkey need to be set. The key must be
To download from private repo, sshkey needs to be set. The key must be
base64-encoded string. Run `base64 -w0 <file>`

```hcl
@@ -32,7 +32,7 @@ To invoke a parameterized job, [`nomad job
dispatch`][dispatch command] or the equivalent HTTP APIs are
used. When dispatching against a parameterized job, an opaque payload and
metadata may be injected into the job. These inputs to the parameterized job act
like arguments to a function. The job consumes them to change it's behavior,
like arguments to a function. The job consumes them to change its behavior,
without exposing the implementation details to the caller.

To that end, tasks within the job can add a
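A hedged example of dispatching such a job with the Go `api` package, assuming the `Jobs().Dispatch` client method; the `batch-processor` job name, meta key, and payload are placeholders.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Connect to a local agent using the api package's defaults.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Meta and payload are the "function arguments" described above: opaque
	// inputs the dispatched instance consumes to change its behavior.
	meta := map[string]string{"input_bucket": "incoming"}
	payload := []byte(`{"record_id": 42}`)

	resp, _, err := client.Jobs().Dispatch("batch-processor", meta, payload, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("dispatched job:", resp.DispatchedJobID)
}
```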
@@ -60,18 +60,18 @@ defaults by job type:

```hcl
restart {
  attempts = 15
  attempts = 3
  delay    = "15s"
  interval = "168h"
  interval = "24h"
  mode     = "fail"
}
```

- The default non-batch restart policy is:
- The default service and system job restart policy is:

```hcl
restart {
  interval = "1m"
  interval = "30m"
  attempts = 2
  delay    = "15s"
  mode     = "fail"
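For comparison, a sketch of expressing the batch defaults above when building a job with the Go `api` package; the pointer-field shape of `api.RestartPolicy` is an assumption of this sketch, and the pointer helpers are defined locally.

```go
package sketch

import (
	"time"

	"github.com/hashicorp/nomad/api"
)

// Local pointer helpers so the sketch does not depend on Nomad's internal
// helper package.
func intToPtr(i int) *int                           { return &i }
func stringToPtr(s string) *string                  { return &s }
func durationToPtr(d time.Duration) *time.Duration  { return &d }

// batchRestartPolicy mirrors the batch-job defaults shown above.
func batchRestartPolicy() *api.RestartPolicy {
	return &api.RestartPolicy{
		Attempts: intToPtr(3),
		Delay:    durationToPtr(15 * time.Second),
		Interval: durationToPtr(24 * time.Hour),
		Mode:     stringToPtr("fail"),
	}
}
```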
@@ -513,7 +513,7 @@ directly since Nomad isn't managing any port assignments.
### IPv6 Docker containers

The [Docker](/docs/drivers/docker.html#advertise_ipv6_address) driver supports the
`advertise_ipv6_address` parameter in it's configuration.
`advertise_ipv6_address` parameter in its configuration.

Services will automatically advertise the IPv6 address when `advertise_ipv6_address`
is used.
@@ -120,7 +120,7 @@ job "redis" {
}
```

Note that we used the `affinity` stanza and specified `dc2` as the
value for the [attribute][attributes] `${node.datacenter}`. We used the value `100` for the [weight][weight] which will cause the Nomad schedular to rank nodes in datacenter `dc2` with a higher score. Keep in mind that weights can range from -100 to 100, inclusive. Negative weights serve as anti-affinities which cause Nomad to avoid placing allocations on nodes that match the criteria.
value for the [attribute][attributes] `${node.datacenter}`. We used the value `100` for the [weight][weight] which will cause the Nomad scheduler to rank nodes in datacenter `dc2` with a higher score. Keep in mind that weights can range from -100 to 100, inclusive. Negative weights serve as anti-affinities which cause Nomad to avoid placing allocations on nodes that match the criteria.

### Step 3: Register the Job `redis.nomad`
@@ -406,7 +406,7 @@ authentication backends, it could provide a workflow where a user or orchestrati
using an pre-existing identity service (LDAP, Okta, Amazon IAM, etc.) in order to obtain a short-lived
Nomad token.

~> HashiCorp Vault is a standalone product with it's own set of deployment and
~> HashiCorp Vault is a standalone product with its own set of deployment and
configuration best practices. Please review [Vault's
documentation](https://www.vaultproject.io/docs/index.html) before deploying it
in production.