// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package client

import (
	"errors"
	"fmt"
	"net"
	"net/rpc"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	metrics "github.com/armon/go-metrics"
	consulapi "github.com/hashicorp/consul/api"
	hclog "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	arstate "github.com/hashicorp/nomad/client/allocrunner/state"
	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/getter"
	"github.com/hashicorp/nomad/client/allocwatcher"
	"github.com/hashicorp/nomad/client/config"
	consulApi "github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/devicemanager"
	"github.com/hashicorp/nomad/client/dynamicplugins"
	"github.com/hashicorp/nomad/client/fingerprint"
	cinterfaces "github.com/hashicorp/nomad/client/interfaces"
	"github.com/hashicorp/nomad/client/lib/cgutil"
	"github.com/hashicorp/nomad/client/pluginmanager"
	"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
	"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
	"github.com/hashicorp/nomad/client/servers"
	"github.com/hashicorp/nomad/client/serviceregistration"
	"github.com/hashicorp/nomad/client/serviceregistration/checks/checkstore"
	"github.com/hashicorp/nomad/client/serviceregistration/nsd"
	"github.com/hashicorp/nomad/client/serviceregistration/wrapper"
	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/client/stats"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/vaultclient"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/envoy"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/pool"
	hstats "github.com/hashicorp/nomad/helper/stats"
	"github.com/hashicorp/nomad/helper/tlsutil"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/plugins/csi"
	"github.com/hashicorp/nomad/plugins/device"
	vaultapi "github.com/hashicorp/vault/api"
	"github.com/shirou/gopsutil/v3/host"
	"golang.org/x/exp/maps"
)

const (
	// clientRPCCache controls how long we keep an idle connection
	// open to a server
	clientRPCCache = 5 * time.Minute

	// clientMaxStreams controls how many idle streams we keep
	// open to a server
	clientMaxStreams = 2

	// datacenterQueryLimit searches through up to this many adjacent
	// datacenters looking for the Nomad server service.
	datacenterQueryLimit = 9

	// registerRetryIntv is the minimum interval on which we retry
	// registration. We pick a value between this and 2x this.
	registerRetryIntv = 15 * time.Second

	// getAllocRetryIntv is the minimum interval on which we retry
	// fetching allocations. We pick a value between this and 2x this.
	getAllocRetryIntv = 30 * time.Second

	// devModeRetryIntv is the retry interval used for development
	devModeRetryIntv = time.Second

	// noServerRetryIntv is the retry interval used when the client has
	// not yet connected to a server
	noServerRetryIntv = time.Second

	// stateSnapshotIntv is how often the client snapshots state
	stateSnapshotIntv = 60 * time.Second

	// initialHeartbeatStagger is used to stagger the interval between
	// starting and the initial heartbeat. After the initial heartbeat,
	// we switch to using the TTL specified by the servers.
	initialHeartbeatStagger = 10 * time.Second

	// nodeUpdateRetryIntv is how often the client checks for updates to the
	// node attributes or meta map.
	nodeUpdateRetryIntv = 5 * time.Second

	// allocSyncIntv is the batching period of allocation updates before they
	// are synced with the server.
	allocSyncIntv = 200 * time.Millisecond

	// allocSyncRetryIntv is the interval on which we retry updating
	// the status of the allocation
	allocSyncRetryIntv = 5 * time.Second

	// defaultConnectLogLevel is the log level set in the node meta by default
	// to be used by Consul Connect sidecar tasks.
	defaultConnectLogLevel = "info"

	// defaultConnectProxyConcurrency is the default number of worker threads the
	// connect sidecar should be configured to use.
	//
	// https://www.envoyproxy.io/docs/envoy/latest/operations/cli#cmdoption-concurrency
	defaultConnectProxyConcurrency = "1"
)
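
// Retries against registerRetryIntv and getAllocRetryIntv are jittered into
// the [intv, 2*intv) range. A minimal sketch of that computation, assuming a
// stagger helper along the lines of helper.RandomStagger (which returns a
// random duration in [0, intv)):
//
//	wait := registerRetryIntv + helper.RandomStagger(registerRetryIntv)
//	// wait falls uniformly in [15s, 30s)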

var (
	// grace period to allow for batch fingerprint processing
	batchFirstFingerprintsProcessingGrace = batchFirstFingerprintsTimeout + 5*time.Second
)

// ClientStatsReporter exposes all the APIs related to resource usage of a Nomad
// Client
type ClientStatsReporter interface {
	// GetAllocStats returns the AllocStatsReporter for the passed allocation.
	// If it does not exist an error is reported.
	GetAllocStats(allocID string) (interfaces.AllocStatsReporter, error)

	// LatestHostStats returns the latest resource usage stats for the host
	LatestHostStats() *stats.HostStats
}

// Client is used to implement the client interaction with Nomad. Clients
// are expected to register as a schedulable node to the servers, and to
// run allocations as determined by the servers.
type Client struct {
	start time.Time

	// stateDB is used to efficiently store client state.
	stateDB state.StateDB

	// config must only be accessed with the lock held. To update the config,
	// use the Client.UpdateConfig() helper. If you need more fine-grained
	// control, use the following pattern:
	//
	//	c.configLock.Lock()
	//	newConfig := c.config.Copy()
	//	// <mutate newConfig>
	//	c.config = newConfig
	//	c.configLock.Unlock()
	configLock  sync.Mutex
	config      *config.Config
	metaDynamic map[string]*string // dynamic node metadata

	// metaStatic are the Node's static metadata set via the agent configuration
	// and defaults during client initialization. Since this map is never updated
	// at runtime it may be accessed outside of locks.
	metaStatic map[string]string

	logger    hclog.InterceptLogger
	rpcLogger hclog.Logger

	connPool *pool.ConnPool

	// tlsWrap is used to wrap outbound connections using TLS. It should be
	// accessed using the lock.
	tlsWrap     tlsutil.RegionWrapper
	tlsWrapLock sync.RWMutex

	// servers is the list of nomad servers
	servers *servers.Manager

	// heartbeat related times for tracking how often to heartbeat
	heartbeatTTL    time.Duration
	haveHeartbeated bool
	heartbeatLock   sync.Mutex
	heartbeatStop   *heartbeatStop

	// triggerDiscoveryCh triggers Consul discovery; see triggerDiscovery
	triggerDiscoveryCh chan struct{}

	// triggerNodeUpdate triggers the client to mark the Node as changed and
	// update it.
	triggerNodeUpdate chan struct{}

	// triggerEmitNodeEvent sends an event and triggers the client to update the
	// server for the node event
	triggerEmitNodeEvent chan *structs.NodeEvent

	// rpcRetryCh is closed when an event occurs, such as server discovery or
	// a successful RPC, that makes a retry worthwhile. Access should only
	// occur via the getter method.
	rpcRetryCh   chan struct{}
	rpcRetryLock sync.Mutex

	// allocs maps alloc IDs to their AllocRunner. This map includes all
	// AllocRunners - running and GC'd - until the server GCs them.
	allocs    map[string]interfaces.AllocRunner
	allocLock sync.RWMutex

	// allocrunnerFactory is the function called to create new allocrunners
	allocrunnerFactory config.AllocRunnerFactory

	// invalidAllocs is a map that tracks allocations that failed because
	// the client couldn't initialize alloc or task runners for them. This
	// can happen due to driver errors.
	invalidAllocs     map[string]struct{}
	invalidAllocsLock sync.Mutex

	// pendingUpdates stores allocations that need to be synced to the server.
	pendingUpdates *pendingClientUpdates

	// consulService is the Consul handler implementation for managing services
	// and checks.
	consulService serviceregistration.Handler

	// nomadService is the Nomad handler implementation for managing service
	// registrations.
	nomadService serviceregistration.Handler

	// checkStore is used to store group and task checks and their current
	// pass/fail status.
	checkStore checkstore.Shim

	// serviceRegWrapper wraps the consulService and nomadService
	// implementations so that the alloc and task runner service hooks can call
	// this without needing to identify which backend provider should be used.
	serviceRegWrapper *wrapper.HandlerWrapper

	// consulProxies is Nomad's custom Consul client for looking up supported
	// envoy versions
	consulProxies consulApi.SupportedProxiesAPI

	// consulCatalog is the subset of Consul's Catalog API Nomad uses.
	consulCatalog consul.CatalogAPI

	// hostStatsCollector collects host resource usage stats
	hostStatsCollector *stats.HostStatsCollector

	// shutdown is true when the Client has been shutdown. Must hold
	// shutdownLock to access.
	shutdown bool

	// shutdownCh is closed to signal the Client is shutting down.
	shutdownCh chan struct{}

	shutdownLock sync.Mutex

	// shutdownGroup are goroutines that exit when shutdownCh is closed.
	// Shutdown() blocks on Wait() after closing shutdownCh.
	shutdownGroup group

	// tokensClient is Nomad Client's custom Consul client for requesting Consul
	// Service Identity tokens through Nomad Server.
	tokensClient consulApi.ServiceIdentityAPI

	// vaultClient is used to interact with Vault for token and secret renewals
	vaultClient vaultclient.VaultClient

	// garbageCollector is used to garbage collect terminal allocations present
	// on the node automatically
	garbageCollector *AllocGarbageCollector

	// clientACLResolver holds the ACL resolution state
	clientACLResolver

	// rpcServer is used to serve RPCs by the local agent.
	rpcServer     *rpc.Server
	endpoints     rpcEndpoints
	streamingRpcs *structs.StreamingRpcRegistry

	// fingerprintManager is the FingerprintManager registered by the client
	fingerprintManager *FingerprintManager

	// pluginManagers is the set of PluginManagers registered by the client
	pluginManagers *pluginmanager.PluginGroup

	// csimanager is responsible for managing csi plugins.
	csimanager csimanager.Manager

	// devicemanager is responsible for managing device plugins.
	devicemanager devicemanager.Manager

	// drivermanager is responsible for managing driver plugins
	drivermanager drivermanager.Manager

	// baseLabels are used when emitting tagged metrics. All client metrics will
	// have these tags, and optionally more.
	baseLabels []metrics.Label

	// batchNodeUpdates is used to batch initial updates to the node
	batchNodeUpdates *batchNodeUpdates

	// fpInitialized chan is closed when the first batch of fingerprints are
	// applied to the node
	fpInitialized chan struct{}

	// registeredCh is closed when Node.Register has successfully run once.
	registeredCh   chan struct{}
	registeredOnce sync.Once

	// serversContactedCh is closed when GetClientAllocs and runAllocs have
	// successfully run once.
	serversContactedCh   chan struct{}
	serversContactedOnce sync.Once

	// dynamicRegistry provides access to plugins that are dynamically registered
	// with a nomad client. Currently only used for CSI.
	dynamicRegistry dynamicplugins.Registry

	// cpusetManager configures cpusets on supported platforms
	cpusetManager cgutil.CpusetManager

	// EnterpriseClient is used to set and check enterprise features for clients
	EnterpriseClient *EnterpriseClient

	// getter is an interface for retrieving artifacts.
	getter cinterfaces.ArtifactGetter
}

var (
	// noServersErr is returned by the RPC method when the client has no
	// configured servers. This is used to trigger Consul discovery if
	// enabled.
	noServersErr = errors.New("no servers")
)

// NewClient is used to create a new client from the given configuration.
// `rpcs` is a map of RPC names to RPC structs that, if non-nil, will be
// registered via https://golang.org/pkg/net/rpc/#Server.RegisterName in place
// of the client's normal RPC handlers. This allows server tests to override
// the behavior of the client.
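//
// A usage sketch for tests (the endpoint name and mock struct below are
// illustrative, not the concrete types used by Nomad's test suite):
//
//	rpcs := map[string]interface{}{
//		"ClientStats": &mockClientStatsEndpoint{},
//	}
//	c, err := NewClient(cfg, consulCatalog, consulProxies, consulService, rpcs)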
func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxies consulApi.SupportedProxiesAPI, consulService serviceregistration.Handler, rpcs map[string]interface{}) (*Client, error) {
	// Create the tls wrapper
	var tlsWrap tlsutil.RegionWrapper
	if cfg.TLSConfig.EnableRPC {
		tw, err := tlsutil.NewTLSConfiguration(cfg.TLSConfig, true, true)
		if err != nil {
			return nil, err
		}
		tlsWrap, err = tw.OutgoingTLSWrapper()
		if err != nil {
			return nil, err
		}
	}

	if cfg.StateDBFactory == nil {
		cfg.StateDBFactory = state.GetStateDBFactory(cfg.DevMode)
	}

	// Create the logger
	logger := cfg.Logger.ResetNamedIntercept("client")

	// Create the client
	c := &Client{
		config:               cfg,
		consulCatalog:        consulCatalog,
		consulProxies:        consulProxies,
		consulService:        consulService,
		start:                time.Now(),
		connPool:             pool.NewPool(logger, clientRPCCache, clientMaxStreams, tlsWrap),
		tlsWrap:              tlsWrap,
		streamingRpcs:        structs.NewStreamingRpcRegistry(),
		logger:               logger,
		rpcLogger:            logger.Named("rpc"),
		allocs:               make(map[string]interfaces.AllocRunner),
		pendingUpdates:       newPendingClientUpdates(),
		shutdownCh:           make(chan struct{}),
		triggerDiscoveryCh:   make(chan struct{}),
		triggerNodeUpdate:    make(chan struct{}, 8),
		triggerEmitNodeEvent: make(chan *structs.NodeEvent, 8),
		fpInitialized:        make(chan struct{}),
		invalidAllocs:        make(map[string]struct{}),
		serversContactedCh:   make(chan struct{}),
		serversContactedOnce: sync.Once{},
		registeredCh:         make(chan struct{}),
		registeredOnce:       sync.Once{},
		cpusetManager:        cgutil.CreateCPUSetManager(cfg.CgroupParent, cfg.ReservableCores, logger),
		getter:               getter.New(cfg.Artifact, logger),
		EnterpriseClient:     newEnterpriseClient(logger),
		allocrunnerFactory:   cfg.AllocRunnerFactory,
	}

	// We can't have this set in the default Config because of import cycles,
	// so fall back to the standard allocrunner implementation here.
	if c.allocrunnerFactory == nil {
		c.allocrunnerFactory = allocrunner.NewAllocRunner
	}

	c.batchNodeUpdates = newBatchNodeUpdates(
		c.updateNodeFromDriver,
		c.updateNodeFromDevices,
		c.updateNodeFromCSI,
	)

	// Initialize the server manager
	c.servers = servers.New(c.logger, c.shutdownCh, c)

	// Start the server manager rebalancing goroutine
	go c.servers.Start()

	// initialize the client
	if err := c.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize client: %v", err)
	}

	// initialize the dynamic registry (needs to happen after init)
	c.dynamicRegistry =
		dynamicplugins.NewRegistry(c.stateDB, map[string]dynamicplugins.PluginDispenser{
			dynamicplugins.PluginTypeCSIController: func(info *dynamicplugins.PluginInfo) (interface{}, error) {
				return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "controller")), nil
			},
			dynamicplugins.PluginTypeCSINode: func(info *dynamicplugins.PluginInfo) (interface{}, error) {
				return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "client")), nil
			},
		})

	// Setup the clients RPC server
	c.setupClientRpc(rpcs)

	// Initialize the ACL state
	c.clientACLResolver.init()

	// Setup the node
	if err := c.setupNode(); err != nil {
		return nil, fmt.Errorf("node setup failed: %v", err)
	}

	c.fingerprintManager = NewFingerprintManager(
		cfg.PluginSingletonLoader, c.GetConfig, cfg.Node,
		c.shutdownCh, c.updateNodeFromFingerprint, c.logger)

	c.pluginManagers = pluginmanager.New(c.logger)

	// Fingerprint the node and scan for drivers
	if err := c.fingerprintManager.Run(); err != nil {
		return nil, fmt.Errorf("fingerprinting failed: %v", err)
	}

	// Build the allow/denylists of drivers.
	// COMPAT(1.0): uses inclusive language; white/blacklist remain for
	// backward compatibility only.
	allowlistDrivers := cfg.ReadStringListToMap("driver.allowlist", "driver.whitelist")
	blocklistDrivers := cfg.ReadStringListToMap("driver.denylist", "driver.blacklist")

	// Setup the csi manager
	csiConfig := &csimanager.Config{
		Logger:                c.logger,
		DynamicRegistry:       c.dynamicRegistry,
		UpdateNodeCSIInfoFunc: c.batchNodeUpdates.updateNodeFromCSI,
		TriggerNodeEvent:      c.triggerNodeEvent,
	}
	csiManager := csimanager.New(csiConfig)
	c.csimanager = csiManager
	c.pluginManagers.RegisterAndRun(csiManager.PluginManager())

	// Setup the driver manager
	driverConfig := &drivermanager.Config{
		Logger:              c.logger,
		Loader:              cfg.PluginSingletonLoader,
		PluginConfig:        cfg.NomadPluginConfig(),
		Updater:             c.batchNodeUpdates.updateNodeFromDriver,
		EventHandlerFactory: c.GetTaskEventHandler,
		State:               c.stateDB,
		AllowedDrivers:      allowlistDrivers,
		BlockedDrivers:      blocklistDrivers,
	}
	drvManager := drivermanager.New(driverConfig)
	c.drivermanager = drvManager
	c.pluginManagers.RegisterAndRun(drvManager)

	// Setup the device manager
	devConfig := &devicemanager.Config{
		Logger:        c.logger,
		Loader:        cfg.PluginSingletonLoader,
		PluginConfig:  cfg.NomadPluginConfig(),
		Updater:       c.batchNodeUpdates.updateNodeFromDevices,
		StatsInterval: cfg.StatsCollectionInterval,
		State:         c.stateDB,
	}
	devManager := devicemanager.New(devConfig)
	c.devicemanager = devManager
	c.pluginManagers.RegisterAndRun(devManager)

	// Set up the service registration wrapper using the Consul and Nomad
	// implementations. The Nomad implementation is only ever used on the
	// client, so we do that here rather than within the agent.
	c.setupNomadServiceRegistrationHandler()
	c.serviceRegWrapper = wrapper.NewHandlerWrapper(c.logger, c.consulService, c.nomadService)

	// Batching of initial fingerprints is done to reduce the number of node
	// updates sent to the server on startup.
	go c.batchFirstFingerprints()

	// Create heartbeatStop. We do this after the first attempt to connect to
	// the server so that the connection grace period covers the full time.
	c.heartbeatStop = newHeartbeatStop(c.getAllocRunner, batchFirstFingerprintsTimeout, logger, c.shutdownCh)

	// Watch for disconnection, and heartbeatStopAllocs configured to have a
	// maximum lifetime when out of touch with the server
	go c.heartbeatStop.watch()

	// Add the stats collector
	statsCollector := stats.NewHostStatsCollector(c.logger, c.GetConfig().AllocDir, c.devicemanager.AllStats)
	c.hostStatsCollector = statsCollector

	// Add the garbage collector
	gcConfig := &GCConfig{
		MaxAllocs:           cfg.GCMaxAllocs,
		DiskUsageThreshold:  cfg.GCDiskUsageThreshold,
		InodeUsageThreshold: cfg.GCInodeUsageThreshold,
		Interval:            cfg.GCInterval,
		ParallelDestroys:    cfg.GCParallelDestroys,
		ReservedDiskMB:      cfg.Node.Reserved.DiskMB,
	}
	c.garbageCollector = NewAllocGarbageCollector(c.logger, statsCollector, c, gcConfig)
	go c.garbageCollector.Run()

	// Set the preconfigured list of static servers
	if len(cfg.Servers) > 0 {
		if _, err := c.setServersImpl(cfg.Servers, true); err != nil {
			logger.Warn("none of the configured servers are valid", "error", err)
		}
	}

	// Setup Consul discovery if enabled
	if cfg.ConsulConfig.ClientAutoJoin != nil && *cfg.ConsulConfig.ClientAutoJoin {
		c.shutdownGroup.Go(c.consulDiscovery)
		if c.servers.NumServers() == 0 {
			// No configured servers; trigger discovery manually
			c.triggerDiscoveryCh <- struct{}{}
		}
	}

	if err := c.setupConsulTokenClient(); err != nil {
		return nil, fmt.Errorf("failed to setup consul tokens client: %w", err)
	}

	// Setup the vault client for token and secret renewals
	if err := c.setupVaultClient(); err != nil {
		return nil, fmt.Errorf("failed to setup vault client: %v", err)
	}

	// Wait until drivers are healthy before restoring or registering with servers
	select {
	case <-c.fpInitialized:
	case <-time.After(batchFirstFingerprintsProcessingGrace):
		logger.Warn("batch fingerprint operation timed out; proceeding to register with fingerprinted plugins so far")
	}

	// Register and then start heartbeating to the servers.
	c.shutdownGroup.Go(c.registerAndHeartbeat)

	// Restore the state
	if err := c.restoreState(); err != nil {
		logger.Error("failed to restore state", "error", err)
		logger.Error("Nomad is unable to start due to corrupt state. "+
			"The safest way to proceed is to manually stop running task processes "+
			"and remove Nomad's state and alloc directories before "+
			"restarting. Lost allocations will be rescheduled.",
			"state_dir", cfg.StateDir, "alloc_dir", cfg.AllocDir)
		logger.Error("Corrupt state is often caused by a bug. Please " +
			"report as much information as possible to " +
			"https://github.com/hashicorp/nomad/issues")
		return nil, fmt.Errorf("failed to restore state")
	}

	// Begin periodic snapshotting of state.
	c.shutdownGroup.Go(c.periodicSnapshot)

	// Begin syncing allocations to the server
	c.shutdownGroup.Go(c.allocSync)

	// Start the client! Don't use the shutdownGroup as run handles
	// shutdowns manually to prevent updates from being applied during
	// shutdown.
	go c.run()

	// Start collecting stats
	c.shutdownGroup.Go(c.emitStats)

	c.logger.Info("started client", "node_id", c.NodeID())
	return c, nil
}

// Ready returns a chan that is closed when the client is fully initialized
func (c *Client) Ready() <-chan struct{} {
	return c.serversContactedCh
}
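
// Callers typically gate work on initialization with a select over Ready()
// (a usage sketch; the timeout is illustrative):
//
//	select {
//	case <-client.Ready():
//		// client is fully initialized
//	case <-time.After(30 * time.Second):
//		// give up waiting for initialization
//	}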

// init is used to initialize the client and perform any setup
// needed before we begin starting its various components.
func (c *Client) init() error {
	// Ensure the state dir exists if we have one
	conf := c.GetConfig()
	if conf.StateDir != "" {
		if err := os.MkdirAll(conf.StateDir, 0700); err != nil {
			return fmt.Errorf("failed creating state dir: %s", err)
		}
	} else {
		// Otherwise make a temp directory to use.
		p, err := os.MkdirTemp("", "NomadClient")
		if err != nil {
			return fmt.Errorf("failed creating temporary directory for the StateDir: %v", err)
		}

		p, err = filepath.EvalSymlinks(p)
		if err != nil {
			return fmt.Errorf("failed to find temporary directory for the StateDir: %v", err)
		}

		conf = c.UpdateConfig(func(c *config.Config) {
			c.StateDir = p
		})
	}
	c.logger.Info("using state directory", "state_dir", conf.StateDir)

	// Open the state database
	db, err := conf.StateDBFactory(c.logger, conf.StateDir)
	if err != nil {
		return fmt.Errorf("failed to open state database: %v", err)
	}

	// Upgrade the state database
	if err := db.Upgrade(); err != nil {
		// Upgrade only returns an error on critical persistence
		// failures in which an operator should intervene before the
		// node is accessible. Upgrade drops and logs corrupt state it
		// encounters, so failing to start the agent should be extremely
		// rare.
		return fmt.Errorf("failed to upgrade state database: %v", err)
	}
	c.stateDB = db

	// Ensure the alloc dir exists if we have one
	if conf.AllocDir != "" {
		if err := os.MkdirAll(conf.AllocDir, 0711); err != nil {
			return fmt.Errorf("failed creating alloc dir: %s", err)
		}
	} else {
		// Otherwise make a temp directory to use.
		p, err := os.MkdirTemp("", "NomadClient")
		if err != nil {
			return fmt.Errorf("failed creating temporary directory for the AllocDir: %v", err)
		}

		p, err = filepath.EvalSymlinks(p)
		if err != nil {
			return fmt.Errorf("failed to find temporary directory for the AllocDir: %v", err)
		}

		// Change the permissions to have the execute bit
		if err := os.Chmod(p, 0711); err != nil {
			return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err)
		}

		conf = c.UpdateConfig(func(c *config.Config) {
			c.AllocDir = p
		})
	}

	c.logger.Info("using alloc directory", "alloc_dir", conf.AllocDir)

	reserved := "<none>"
	if conf.Node != nil && conf.Node.ReservedResources != nil {
		// Node should always be non-nil due to initialization in the
		// agent package, but don't risk a panic just for a log line.
		reserved = conf.Node.ReservedResources.Networks.ReservedHostPorts
	}
	c.logger.Info("using dynamic ports",
		"min", conf.MinDynamicPort,
		"max", conf.MaxDynamicPort,
		"reserved", reserved,
	)

	// Start up the cpuset manager
	c.cpusetManager.Init()

	// Setup the nsd check store
	c.checkStore = checkstore.NewStore(c.logger, c.stateDB)

	return nil
}

// reloadTLSConnections allows a client to reload its TLS configuration on the fly
func (c *Client) reloadTLSConnections(newConfig *nconfig.TLSConfig) error {
	var tlsWrap tlsutil.RegionWrapper
	if newConfig != nil && newConfig.EnableRPC {
		tw, err := tlsutil.NewTLSConfiguration(newConfig, true, true)
		if err != nil {
			return err
		}

		twWrap, err := tw.OutgoingTLSWrapper()
		if err != nil {
			return err
		}
		tlsWrap = twWrap
	}

	// Store the new tls wrapper.
	c.tlsWrapLock.Lock()
	c.tlsWrap = tlsWrap
	c.tlsWrapLock.Unlock()

	// Keep the client configuration up to date as we use configuration values to
	// decide on what type of connections to accept
	c.UpdateConfig(func(c *config.Config) {
		c.TLSConfig = newConfig
	})

	c.connPool.ReloadTLS(tlsWrap)

	return nil
}

// Reload allows a client to reload parts of its configuration on the fly
func (c *Client) Reload(newConfig *config.Config) error {
	existing := c.GetConfig()
	shouldReloadTLS, err := tlsutil.ShouldReloadRPCConnections(existing.TLSConfig, newConfig.TLSConfig)
	if err != nil {
		c.logger.Error("error parsing TLS configuration", "error", err)
		return err
	}

	if shouldReloadTLS {
		if err := c.reloadTLSConnections(newConfig.TLSConfig); err != nil {
			return err
		}
	}

	c.fingerprintManager.Reload()

	return nil
}

// Leave is used to prepare the client to leave the cluster
func (c *Client) Leave() error {
	if c.GetConfig().DevMode {
		return nil
	}

	// In normal mode optionally drain the node
	return c.DrainSelf()
}

// GetConfig returns the config of the client. Do *not* mutate without first
// calling Copy().
func (c *Client) GetConfig() *config.Config {
	c.configLock.Lock()
	defer c.configLock.Unlock()
	return c.config
}
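
// The returned pointer must be treated as read-only; to derive a private,
// mutable copy, call Copy() first (a sketch; the field value is illustrative):
//
//	conf := c.GetConfig().Copy()
//	conf.Region = "dev" // safe: mutating a private copy only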

// UpdateConfig allows mutating the configuration. The updated configuration is
// returned.
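//
// The mutation is applied to a fresh copy of the active config, so readers
// holding the previous *config.Config never observe changes underneath them.
// A minimal usage sketch (the directory value is illustrative only):
//
//	conf := c.UpdateConfig(func(conf *config.Config) {
//		conf.StateDir = "/tmp/nomad-client-state"
//	})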
func (c *Client) UpdateConfig(cb func(*config.Config)) *config.Config {
	c.configLock.Lock()
	defer c.configLock.Unlock()

	// Create a copy of the active config
	newConfig := c.config.Copy()

	// Pass the copy to the supplied callback for mutation
	cb(newConfig)

	// Set the new config struct
	c.config = newConfig

	return newConfig
}

// UpdateNode allows mutating just the Node portion of the client
// configuration. The updated Node is returned.
//
// This is similar to UpdateConfig but avoids deep copying the entire Config
// struct when only the Node is updated.
func (c *Client) UpdateNode(cb func(*structs.Node)) *structs.Node {
	c.configLock.Lock()
	defer c.configLock.Unlock()

	// Create a new copy of Node for updating
	newNode := c.config.Node.Copy()

	// newNode is now a fresh unshared copy, mutate away!
	cb(newNode)

	// Shallow copy the config before mutating the Node pointer, which might
	// have concurrent readers
	newConfig := *c.config
	newConfig.Node = newNode
	c.config = &newConfig

	return newNode
}
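
// A usage sketch for UpdateNode (the attribute key is illustrative only):
//
//	node := c.UpdateNode(func(n *structs.Node) {
//		n.Attributes["example.attr"] = "value"
//	})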

// Datacenter returns the datacenter for the given client
func (c *Client) Datacenter() string {
	return c.GetConfig().Node.Datacenter
}

// Region returns the region for the given client
func (c *Client) Region() string {
	return c.GetConfig().Region
}

// NodeID returns the node ID for the given client
func (c *Client) NodeID() string {
	return c.GetConfig().Node.ID
}

// secretNodeID returns the secret node ID for the given client
func (c *Client) secretNodeID() string {
	return c.GetConfig().Node.SecretID
}

// Shutdown is used to tear down the client
func (c *Client) Shutdown() error {
	c.shutdownLock.Lock()
	defer c.shutdownLock.Unlock()

	if c.shutdown {
		c.logger.Info("already shutdown")
		return nil
	}
	c.logger.Info("shutting down")

	// Stop renewing tokens and secrets
	if c.vaultClient != nil {
		c.vaultClient.Stop()
	}

	// Stop the garbage collector
	c.garbageCollector.Stop()

	arGroup := group{}
	if c.GetConfig().DevMode {
		// In DevMode destroy all the running allocations.
		for _, ar := range c.getAllocRunners() {
			ar.Destroy()
			arGroup.AddCh(ar.DestroyCh())
		}
	} else {
		// In normal mode call shutdown
		for _, ar := range c.getAllocRunners() {
			ar.Shutdown()
			arGroup.AddCh(ar.ShutdownCh())
		}
	}
	arGroup.Wait()

	// Assert the implementation, so we can trigger the shutdown call. This is
	// the only place this occurs, so it's OK to store the interface rather
	// than the implementation.
	if h, ok := c.nomadService.(*nsd.ServiceRegistrationHandler); ok {
		h.Shutdown()
	}

	// Shutdown the plugin managers
	c.pluginManagers.Shutdown()

	c.shutdown = true
	close(c.shutdownCh)

	// Must close connection pool to unblock alloc watcher
	c.connPool.Shutdown()

	// Wait for goroutines to stop
	c.shutdownGroup.Wait()

	// One final state save
	c.saveState()
	return c.stateDB.Close()
}

// Stats is used to return statistics for debugging and insight
// into various sub-systems
func (c *Client) Stats() map[string]map[string]string {
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()
	stats := map[string]map[string]string{
		"client": {
			"node_id":         c.NodeID(),
			"known_servers":   strings.Join(c.GetServers(), ","),
			"num_allocations": strconv.Itoa(c.NumAllocs()),
			"last_heartbeat":  fmt.Sprintf("%v", time.Since(c.lastHeartbeat())),
			"heartbeat_ttl":   fmt.Sprintf("%v", c.heartbeatTTL),
		},
		"runtime": hstats.RuntimeStats(),
	}
	return stats
}

// GetAlloc returns an allocation or an error.
func (c *Client) GetAlloc(allocID string) (*structs.Allocation, error) {
	ar, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}

	return ar.Alloc(), nil
}

// SignalAllocation sends a signal to the tasks within an allocation.
// If the provided task is empty, then every task in the allocation will be
// signalled. If a task is provided, then only an exactly matching task will
// be signalled.
func (c *Client) SignalAllocation(allocID, task, signal string) error {
	ar, err := c.getAllocRunner(allocID)
	if err != nil {
		return err
	}

	return ar.Signal(task, signal)
}
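
// A usage sketch for SignalAllocation (the signal name is illustrative):
//
//	// signal every task in the allocation
//	if err := c.SignalAllocation(allocID, "", "SIGHUP"); err != nil {
//		// handle a missing alloc or signalling failure
//	}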

// CollectAllocation garbage collects a single allocation on a node. Returns
// true if the alloc was found and garbage collected; otherwise false.
func (c *Client) CollectAllocation(allocID string) bool {
	return c.garbageCollector.Collect(allocID)
}

// CollectAllAllocs garbage collects all allocations on a node in the terminal
// state
func (c *Client) CollectAllAllocs() {
	c.garbageCollector.CollectAll()
}

// RestartAllocation restarts tasks within an allocation: either a single
// named task or, when allTasks is set, every task in the allocation. The two
// options are mutually exclusive.
func (c *Client) RestartAllocation(allocID, taskName string, allTasks bool) error {
	if allTasks && taskName != "" {
		return fmt.Errorf("task name cannot be set when restarting all tasks")
	}

	ar, err := c.getAllocRunner(allocID)
	if err != nil {
		return err
	}

	if taskName != "" {
event := structs.NewTaskEvent(structs.TaskRestartSignal).
|
|
|
|
SetRestartReason("User requested task to restart")
|
2019-04-01 12:56:02 +00:00
|
|
|
return ar.RestartTask(taskName, event)
|
|
|
|
}
|
|
|
|
|
2022-08-24 21:43:07 +00:00
|
|
|
if allTasks {
|
|
|
|
event := structs.NewTaskEvent(structs.TaskRestartSignal).
|
|
|
|
SetRestartReason("User requested all tasks to restart")
|
|
|
|
return ar.RestartAll(event)
|
|
|
|
}
|
|
|
|
|
|
|
|
event := structs.NewTaskEvent(structs.TaskRestartSignal).
|
|
|
|
SetRestartReason("User requested running tasks to restart")
|
|
|
|
return ar.RestartRunning(event)
|
2019-04-01 12:56:02 +00:00
|
|
|
}
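// A minimal usage sketch (hypothetical caller and variable names; the alloc
// ID must belong to an allocation known to this client):
//
//	// Restart every task in the allocation, including tasks that have
//	// already exited.
//	err := client.RestartAllocation(allocID, "", true)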
|
|
|
|
|
2015-08-20 23:41:29 +00:00
|
|
|
// Node returns the locally registered node
|
|
|
|
func (c *Client) Node() *structs.Node {
|
2022-08-18 23:32:04 +00:00
|
|
|
return c.GetConfig().Node
|
2015-08-20 23:41:29 +00:00
|
|
|
}
|
|
|
|
|
2019-10-01 20:06:24 +00:00
|
|
|
// getAllocRunner returns an AllocRunner or an UnknownAllocation error if the
|
|
|
|
// client has no runner for the given alloc ID.
|
2023-05-12 17:29:44 +00:00
|
|
|
func (c *Client) getAllocRunner(allocID string) (interfaces.AllocRunner, error) {
|
2018-12-12 19:45:45 +00:00
|
|
|
c.allocLock.RLock()
|
|
|
|
defer c.allocLock.RUnlock()
|
|
|
|
|
|
|
|
ar, ok := c.allocs[allocID]
|
|
|
|
if !ok {
|
|
|
|
return nil, structs.NewErrUnknownAllocation(allocID)
|
|
|
|
}
|
|
|
|
|
|
|
|
return ar, nil
|
|
|
|
}
|
|
|
|
|
2016-05-09 19:24:03 +00:00
|
|
|
// StatsReporter exposes the various APIs related resource usage of a Nomad
|
|
|
|
// client
|
2016-05-09 15:55:19 +00:00
|
|
|
func (c *Client) StatsReporter() ClientStatsReporter {
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2018-10-04 22:45:46 +00:00
|
|
|
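// GetAllocStats returns the AllocStatsReporter for the passed allocation.
// An unknown allocation error is returned if the alloc has no runner.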
func (c *Client) GetAllocStats(allocID string) (interfaces.AllocStatsReporter, error) {
|
2018-12-12 19:45:45 +00:00
|
|
|
ar, err := c.getAllocRunner(allocID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-06-12 03:15:50 +00:00
|
|
|
}
|
2016-06-12 16:32:38 +00:00
|
|
|
return ar.StatsReporter(), nil
|
2016-05-09 15:55:19 +00:00
|
|
|
}
|
|
|
|
|
2021-08-30 09:08:12 +00:00
|
|
|
// LatestHostStats returns all the stats related to a Nomad client.
|
2016-06-12 03:15:50 +00:00
|
|
|
func (c *Client) LatestHostStats() *stats.HostStats {
|
2016-12-12 06:58:28 +00:00
|
|
|
return c.hostStatsCollector.Stats()
|
2016-04-29 18:06:19 +00:00
|
|
|
}
|
|
|
|
|
2018-11-15 15:13:14 +00:00
|
|
|
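// LatestDeviceResourceStats returns the most recent stats for the given
// allocated devices, filtered from the latest host device stats.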
func (c *Client) LatestDeviceResourceStats(devices []*structs.AllocatedDeviceResource) []*device.DeviceGroupStats {
|
|
|
|
return c.computeAllocatedDeviceGroupStats(devices, c.LatestHostStats().DeviceStats)
|
|
|
|
}
|
|
|
|
|
|
|
|
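// computeAllocatedDeviceGroupStats filters the host device group stats down
// to the device instances present in the given allocated device set.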
func (c *Client) computeAllocatedDeviceGroupStats(devices []*structs.AllocatedDeviceResource, hostDeviceGroupStats []*device.DeviceGroupStats) []*device.DeviceGroupStats {
|
|
|
|
// basic optimization for the usual case
|
|
|
|
if len(devices) == 0 || len(hostDeviceGroupStats) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build an index of allocated devices
|
|
|
|
adIdx := map[structs.DeviceIdTuple][]string{}
|
|
|
|
|
|
|
|
total := 0
|
|
|
|
for _, ds := range devices {
|
|
|
|
adIdx[*ds.ID()] = ds.DeviceIDs
|
|
|
|
total += len(ds.DeviceIDs)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Collect allocated device stats from host stats
|
|
|
|
result := make([]*device.DeviceGroupStats, 0, len(adIdx))
|
|
|
|
|
|
|
|
for _, dg := range hostDeviceGroupStats {
|
|
|
|
k := structs.DeviceIdTuple{
|
|
|
|
Vendor: dg.Vendor,
|
|
|
|
Type: dg.Type,
|
|
|
|
Name: dg.Name,
|
|
|
|
}
|
|
|
|
|
|
|
|
allocatedDeviceIDs, ok := adIdx[k]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
rdgStats := &device.DeviceGroupStats{
|
|
|
|
Vendor: dg.Vendor,
|
|
|
|
Type: dg.Type,
|
|
|
|
Name: dg.Name,
|
|
|
|
InstanceStats: map[string]*device.DeviceStats{},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, adID := range allocatedDeviceIDs {
|
|
|
|
deviceStats, ok := dg.InstanceStats[adID]
|
|
|
|
if !ok || deviceStats == nil {
|
|
|
|
c.logger.Warn("device not found in stats", "device_id", adID, "device_group_id", k)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
rdgStats.InstanceStats[adID] = deviceStats
|
|
|
|
}
|
|
|
|
result = append(result, rdgStats)
|
|
|
|
}
|
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2017-10-07 00:54:09 +00:00
|
|
|
// ValidateMigrateToken verifies that a token is for a specific client and
|
2017-10-07 01:54:55 +00:00
|
|
|
// allocation, and has been created by a trusted party that has privileged
|
2017-10-07 00:54:09 +00:00
|
|
|
// knowledge of the client's secret identifier
|
2017-10-03 17:53:32 +00:00
|
|
|
func (c *Client) ValidateMigrateToken(allocID, migrateToken string) bool {
|
2022-08-18 23:32:04 +00:00
|
|
|
conf := c.GetConfig()
|
|
|
|
if !conf.ACLEnabled {
|
2017-10-03 17:53:32 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2022-08-18 23:32:04 +00:00
|
|
|
return structs.CompareMigrateToken(allocID, conf.Node.SecretID, migrateToken)
|
2017-10-03 17:53:32 +00:00
|
|
|
}
|
|
|
|
|
2016-01-14 21:45:48 +00:00
|
|
|
// GetAllocFS returns the AllocFS interface for the alloc dir of an allocation
|
2016-01-14 21:35:42 +00:00
|
|
|
func (c *Client) GetAllocFS(allocID string) (allocdir.AllocDirFS, error) {
|
2018-12-12 19:45:45 +00:00
|
|
|
ar, err := c.getAllocRunner(allocID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-01-12 23:25:51 +00:00
|
|
|
}
|
2018-08-29 22:05:03 +00:00
|
|
|
return ar.GetAllocDir(), nil
|
2016-01-13 05:28:07 +00:00
|
|
|
}
|
|
|
|
|
2018-09-27 00:08:43 +00:00
|
|
|
// GetAllocState returns a copy of an allocation's state on this client. It
|
|
|
|
// returns either an AllocState or an unknown allocation error.
|
|
|
|
func (c *Client) GetAllocState(allocID string) (*arstate.State, error) {
|
2018-12-12 19:45:45 +00:00
|
|
|
ar, err := c.getAllocRunner(allocID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-03-31 22:57:10 +00:00
|
|
|
}
|
2018-09-20 00:34:18 +00:00
|
|
|
|
2018-09-27 00:08:43 +00:00
|
|
|
return ar.AllocState(), nil
|
2017-03-31 22:57:10 +00:00
|
|
|
}
|
|
|
|
|
2016-09-22 00:06:52 +00:00
|
|
|
// GetServers returns the list of nomad servers this client is aware of.
|
|
|
|
func (c *Client) GetServers() []string {
|
2018-01-09 23:26:53 +00:00
|
|
|
endpoints := c.servers.GetServers()
|
2016-09-22 00:06:52 +00:00
|
|
|
res := make([]string, len(endpoints))
|
|
|
|
for i := range endpoints {
|
2018-01-09 23:26:53 +00:00
|
|
|
res[i] = endpoints[i].String()
|
2016-09-22 00:06:52 +00:00
|
|
|
}
|
2018-01-09 23:26:53 +00:00
|
|
|
sort.Strings(res)
|
2016-09-22 00:06:52 +00:00
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetServers sets a new list of nomad servers to connect to. As long as one
|
|
|
|
// server is resolvable, no error is returned.
|
2018-05-11 19:52:05 +00:00
|
|
|
func (c *Client) SetServers(in []string) (int, error) {
|
2018-02-16 00:04:53 +00:00
|
|
|
return c.setServersImpl(in, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
// setServersImpl sets a new list of nomad servers to connect to. If force is
|
2018-03-11 18:55:30 +00:00
|
|
|
// set, we add the server to the internal serverlist even if the server could not
|
2018-02-16 00:04:53 +00:00
|
|
|
// be pinged. An error is returned if no endpoints were valid when non-forcing.
|
|
|
|
//
|
|
|
|
// Force should be used when setting the servers from the initial configuration
|
|
|
|
// since the server may be starting up in parallel and initial pings may fail.
|
2018-05-11 19:52:05 +00:00
|
|
|
func (c *Client) setServersImpl(in []string, force bool) (int, error) {
|
2018-01-26 01:56:47 +00:00
|
|
|
var mu sync.Mutex
|
|
|
|
var wg sync.WaitGroup
|
2016-09-22 00:06:52 +00:00
|
|
|
var merr multierror.Error
|
|
|
|
|
2018-01-26 01:56:47 +00:00
|
|
|
endpoints := make([]*servers.Server, 0, len(in))
|
|
|
|
wg.Add(len(in))
|
2018-01-10 18:41:56 +00:00
|
|
|
|
2018-01-26 01:56:47 +00:00
|
|
|
for _, s := range in {
|
|
|
|
go func(srv string) {
|
|
|
|
defer wg.Done()
|
|
|
|
addr, err := resolveServer(srv)
|
|
|
|
if err != nil {
|
2018-09-06 00:34:17 +00:00
|
|
|
mu.Lock()
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Debug("ignoring server due to resolution error", "error", err, "server", srv)
|
2018-01-26 01:56:47 +00:00
|
|
|
merr.Errors = append(merr.Errors, err)
|
2018-09-06 00:34:17 +00:00
|
|
|
mu.Unlock()
|
2018-01-26 01:56:47 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try to ping to check if it is a real server
|
|
|
|
if err := c.Ping(addr); err != nil {
|
2018-09-06 00:34:17 +00:00
|
|
|
mu.Lock()
|
2018-01-26 01:56:47 +00:00
|
|
|
merr.Errors = append(merr.Errors, fmt.Errorf("Server at address %s failed ping: %v", addr, err))
|
2018-09-06 00:34:17 +00:00
|
|
|
mu.Unlock()
|
2018-02-16 00:04:53 +00:00
|
|
|
|
|
|
|
// If we are forcing the setting of the servers, inject it into
|
|
|
|
// the serverlist even if we can't ping immediately.
|
|
|
|
if !force {
|
|
|
|
return
|
|
|
|
}
|
2018-01-26 01:56:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mu.Lock()
|
|
|
|
endpoints = append(endpoints, &servers.Server{Addr: addr})
|
|
|
|
mu.Unlock()
|
|
|
|
}(s)
|
2016-09-22 00:06:52 +00:00
|
|
|
}
|
|
|
|
|
2018-01-26 01:56:47 +00:00
|
|
|
wg.Wait()
|
|
|
|
|
2016-09-22 00:06:52 +00:00
|
|
|
// Only return errors if no servers are valid
|
|
|
|
if len(endpoints) == 0 {
|
|
|
|
if len(merr.Errors) > 0 {
|
2018-05-11 19:52:05 +00:00
|
|
|
return 0, merr.ErrorOrNil()
|
2016-09-22 00:06:52 +00:00
|
|
|
}
|
2018-05-11 19:52:05 +00:00
|
|
|
return 0, noServersErr
|
2016-09-22 00:06:52 +00:00
|
|
|
}
|
|
|
|
|
2018-01-09 23:26:53 +00:00
|
|
|
c.servers.SetServers(endpoints)
|
2018-05-11 19:52:05 +00:00
|
|
|
return len(endpoints), nil
|
2016-05-23 18:09:31 +00:00
|
|
|
}
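// A usage sketch (assumption: invoked during client startup with the server
// list from the agent configuration; force=true keeps servers that cannot be
// pinged yet because they may still be starting up):
//
//	nValid, err := c.setServersImpl(conf.Servers, true)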
|
|
|
|
|
2015-08-23 21:12:26 +00:00
|
|
|
// restoreState is used to restore our state from the data dir
|
2019-01-08 23:39:04 +00:00
|
|
|
// If there are errors restoring a specific allocation it is marked
|
|
|
|
// as failed whenever possible.
|
2015-08-23 21:12:26 +00:00
|
|
|
func (c *Client) restoreState() error {
|
2022-08-18 23:32:04 +00:00
|
|
|
conf := c.GetConfig()
|
|
|
|
if conf.DevMode {
|
2015-08-23 21:12:26 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
//XXX REMOVED! make a note in backward compat / upgrading doc
|
2017-05-02 20:31:56 +00:00
|
|
|
// COMPAT: Remove in 0.7.0
|
2018-03-11 19:06:05 +00:00
|
|
|
// 0.6.0 transitioned from individual state files to a single bolt-db.
|
2017-05-02 20:31:56 +00:00
|
|
|
// The upgrade path is to:
|
|
|
|
// Check if old state exists
|
|
|
|
// If so, restore from that and delete old state
|
|
|
|
// Restore using state database
|
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
// Restore allocations
|
2018-08-08 00:46:37 +00:00
|
|
|
allocs, allocErrs, err := c.stateDB.GetAllAllocations()
|
2018-07-13 00:56:52 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2015-08-30 01:16:49 +00:00
|
|
|
}
|
|
|
|
|
2018-08-08 00:46:37 +00:00
|
|
|
for allocID, err := range allocErrs {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error restoring alloc", "error", err, "alloc_id", allocID)
|
|
|
|
//TODO Cleanup
|
2018-08-08 00:46:37 +00:00
|
|
|
// Try to clean up alloc dir
|
|
|
|
// Remove boltdb entries?
|
|
|
|
// Send to server with clientstatus=failed
|
|
|
|
}
|
|
|
|
|
2015-08-30 01:16:49 +00:00
|
|
|
// Load each alloc back
|
2018-07-13 00:56:52 +00:00
|
|
|
for _, alloc := range allocs {
|
2017-08-10 17:56:51 +00:00
|
|
|
|
2019-08-27 21:19:44 +00:00
|
|
|
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
|
2019-08-28 15:44:48 +00:00
|
|
|
// See hasLocalState for details. Skipping suspicious allocs
|
2019-08-27 21:19:44 +00:00
|
|
|
// now. If allocs should be run, they will be started when the client
|
|
|
|
// gets allocs from servers.
|
2019-08-28 15:44:48 +00:00
|
|
|
if !c.hasLocalState(alloc) {
|
2022-01-10 16:59:46 +00:00
|
|
|
c.logger.Warn("found an alloc without any local state, skipping restore", "alloc_id", alloc.ID)
|
2019-08-27 21:19:44 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-10-18 00:14:44 +00:00
|
|
|
//XXX On Restore we give up on watching previous allocs because
|
|
|
|
// we need the local AllocRunners initialized first. We could
|
|
|
|
// add a second loop to initialize just the alloc watcher.
|
|
|
|
prevAllocWatcher := allocwatcher.NoopPrevAlloc{}
|
2018-12-11 14:46:58 +00:00
|
|
|
prevAllocMigrator := allocwatcher.NoopPrevAlloc{}
|
2018-10-18 00:14:44 +00:00
|
|
|
|
2023-05-12 17:29:44 +00:00
|
|
|
arConf := &config.AllocRunnerConfig{
|
2018-11-28 03:42:22 +00:00
|
|
|
Alloc: alloc,
|
|
|
|
Logger: c.logger,
|
2022-08-18 23:32:04 +00:00
|
|
|
ClientConfig: conf,
|
2018-11-28 03:42:22 +00:00
|
|
|
StateDB: c.stateDB,
|
|
|
|
StateUpdater: c,
|
|
|
|
DeviceStatsReporter: c,
|
|
|
|
Consul: c.consulService,
|
2019-12-06 20:46:46 +00:00
|
|
|
ConsulSI: c.tokensClient,
|
2021-01-07 19:20:48 +00:00
|
|
|
ConsulProxies: c.consulProxies,
|
2018-11-28 03:42:22 +00:00
|
|
|
Vault: c.vaultClient,
|
|
|
|
PrevAllocWatcher: prevAllocWatcher,
|
|
|
|
PrevAllocMigrator: prevAllocMigrator,
|
2019-10-22 13:20:26 +00:00
|
|
|
DynamicRegistry: c.dynamicRegistry,
|
2020-01-08 12:47:07 +00:00
|
|
|
CSIManager: c.csimanager,
|
2021-04-08 05:04:47 +00:00
|
|
|
CpusetManager: c.cpusetManager,
|
2018-11-28 03:42:22 +00:00
|
|
|
DeviceManager: c.devicemanager,
|
|
|
|
DriverManager: c.drivermanager,
|
2019-05-22 11:47:35 +00:00
|
|
|
ServersContactedCh: c.serversContactedCh,
|
2022-03-21 09:29:57 +00:00
|
|
|
ServiceRegWrapper: c.serviceRegWrapper,
|
2022-06-07 14:18:19 +00:00
|
|
|
CheckStore: c.checkStore,
|
2020-02-11 13:30:34 +00:00
|
|
|
RPCClient: c,
|
2022-05-03 22:38:32 +00:00
|
|
|
Getter: c.getter,
|
2018-07-13 00:56:52 +00:00
|
|
|
}
|
2017-05-02 20:31:56 +00:00
|
|
|
|
2023-05-12 17:29:44 +00:00
|
|
|
ar, err := c.allocrunnerFactory(arConf)
|
2018-07-13 00:56:52 +00:00
|
|
|
if err != nil {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error running alloc", "error", err, "alloc_id", alloc.ID)
|
2019-01-08 23:39:04 +00:00
|
|
|
c.handleInvalidAllocs(alloc, err)
|
2018-07-13 00:56:52 +00:00
|
|
|
continue
|
2015-08-30 01:16:49 +00:00
|
|
|
}
|
2017-05-02 20:31:56 +00:00
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
// Restore state
|
|
|
|
if err := ar.Restore(); err != nil {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error restoring alloc", "error", err, "alloc_id", alloc.ID)
|
2019-01-09 16:16:33 +00:00
|
|
|
// Override the status of the alloc to failed
|
|
|
|
ar.SetClientStatus(structs.AllocClientStatusFailed)
|
2019-01-08 23:39:04 +00:00
|
|
|
// Destroy the alloc runner since this is a failed restore
|
|
|
|
ar.Destroy()
|
2018-07-13 00:56:52 +00:00
|
|
|
continue
|
2017-05-02 20:31:56 +00:00
|
|
|
}
|
2018-07-13 00:56:52 +00:00
|
|
|
|
2023-05-11 13:05:24 +00:00
|
|
|
allocState, err := c.stateDB.GetAcknowledgedState(alloc.ID)
|
|
|
|
if err != nil {
|
|
|
|
c.logger.Error("error restoring last acknowledged alloc state, will update again",
|
|
|
|
"error", err, "alloc_id", alloc.ID)
|
|
|
|
} else {
|
|
|
|
ar.AcknowledgeState(allocState)
|
|
|
|
}
|
|
|
|
|
2020-04-13 20:08:24 +00:00
|
|
|
// Maybe mark the alloc for halt on missing server heartbeats
|
|
|
|
if c.heartbeatStop.shouldStop(alloc) {
|
|
|
|
err = c.heartbeatStop.stopAlloc(alloc.ID)
|
|
|
|
if err != nil {
|
|
|
|
c.logger.Error("error stopping alloc", "error", err, "alloc_id", alloc.ID)
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
//XXX is this locking necessary?
|
|
|
|
c.allocLock.Lock()
|
|
|
|
c.allocs[alloc.ID] = ar
|
|
|
|
c.allocLock.Unlock()
|
2020-04-13 20:08:24 +00:00
|
|
|
|
|
|
|
c.heartbeatStop.allocHook(alloc)
|
2017-05-02 20:31:56 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
// All allocs restored successfully, run them!
|
2018-07-19 00:04:36 +00:00
|
|
|
c.allocLock.Lock()
|
2018-07-13 00:56:52 +00:00
|
|
|
for _, ar := range c.allocs {
|
2018-11-14 18:29:07 +00:00
|
|
|
go ar.Run()
|
2018-07-13 00:56:52 +00:00
|
|
|
}
|
2018-07-19 00:04:36 +00:00
|
|
|
c.allocLock.Unlock()
|
2018-07-13 00:56:52 +00:00
|
|
|
return nil
|
2015-08-23 21:12:26 +00:00
|
|
|
}
|
|
|
|
|
2019-08-28 15:44:48 +00:00
|
|
|
// hasLocalState returns true if we have any other associated state
|
|
|
|
// with the alloc beyond the task itself
|
|
|
|
//
|
|
|
|
// Useful for detecting if a potentially completed alloc got resurrected
|
|
|
|
// after AR was destroyed. In such cases, re-running the alloc leads to
|
|
|
|
// unexpected reruns and may lead to process and task exhaustion on the node.
|
2019-08-27 21:19:44 +00:00
|
|
|
//
|
|
|
|
// The heuristic used here is that an alloc is suspect if we see no
|
|
|
|
// associated task or status info for it.
|
|
|
|
//
|
2019-08-28 15:44:48 +00:00
|
|
|
// Also, an alloc without any client state will not be restored correctly; there will
|
|
|
|
// be no task processes to reattach to, etc. In such cases, the client should
|
|
|
|
// wait until it gets allocs from the server to launch them.
|
|
|
|
//
|
2019-08-27 21:19:44 +00:00
|
|
|
// See:
|
2022-08-16 14:06:30 +00:00
|
|
|
// - https://github.com/hashicorp/nomad/pull/6207
|
|
|
|
// - https://github.com/hashicorp/nomad/issues/5984
|
2019-08-27 21:19:44 +00:00
|
|
|
//
|
|
|
|
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
|
2019-08-28 15:44:48 +00:00
|
|
|
func (c *Client) hasLocalState(alloc *structs.Allocation) bool {
|
2019-08-27 21:19:44 +00:00
|
|
|
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
|
|
|
if tg == nil {
|
|
|
|
// corrupt alloc?!
|
2019-08-28 15:44:48 +00:00
|
|
|
return false
|
2019-08-27 21:19:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, task := range tg.Tasks {
|
|
|
|
ls, tr, _ := c.stateDB.GetTaskRunnerState(alloc.ID, task.Name)
|
|
|
|
if ls != nil || tr != nil {
|
2019-08-28 15:44:48 +00:00
|
|
|
return true
|
2019-08-27 21:19:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-28 15:44:48 +00:00
|
|
|
return false
|
2019-08-27 21:19:44 +00:00
|
|
|
}
|
|
|
|
|
2019-01-08 23:39:04 +00:00
|
|
|
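// handleInvalidAllocs records an alloc that could not be restored or run so
// the client remembers it is invalid, and queues a failed client status
// update so the server can handle it.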
func (c *Client) handleInvalidAllocs(alloc *structs.Allocation, err error) {
|
2019-05-22 13:37:49 +00:00
|
|
|
c.invalidAllocsLock.Lock()
|
2019-01-08 23:39:04 +00:00
|
|
|
c.invalidAllocs[alloc.ID] = struct{}{}
|
2019-05-22 13:37:49 +00:00
|
|
|
c.invalidAllocsLock.Unlock()
|
|
|
|
|
2019-01-08 23:39:04 +00:00
|
|
|
// Mark alloc as failed so server can handle this
|
|
|
|
failed := makeFailedAlloc(alloc, err)
|
2023-05-31 19:34:16 +00:00
|
|
|
c.pendingUpdates.add(failed)
|
2019-01-08 23:39:04 +00:00
|
|
|
}
|
|
|
|
|
2017-05-09 17:50:24 +00:00
|
|
|
// saveState is used to snapshot our state into the data dir.
|
|
|
|
func (c *Client) saveState() error {
|
2017-05-01 23:16:53 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
var l sync.Mutex
|
|
|
|
var mErr multierror.Error
|
|
|
|
runners := c.getAllocRunners()
|
|
|
|
wg.Add(len(runners))
|
|
|
|
|
2017-05-02 20:31:56 +00:00
|
|
|
for id, ar := range runners {
|
2023-05-12 17:29:44 +00:00
|
|
|
go func(id string, ar interfaces.AllocRunner) {
|
2019-08-25 15:03:49 +00:00
|
|
|
err := ar.PersistState()
|
2017-05-01 23:16:53 +00:00
|
|
|
if err != nil {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error saving alloc state", "error", err, "alloc_id", id)
|
2017-05-01 23:16:53 +00:00
|
|
|
l.Lock()
|
2021-01-14 20:46:35 +00:00
|
|
|
_ = multierror.Append(&mErr, err)
|
2017-05-01 23:16:53 +00:00
|
|
|
l.Unlock()
|
2017-05-01 22:06:18 +00:00
|
|
|
}
|
2017-05-01 23:16:53 +00:00
|
|
|
wg.Done()
|
2017-05-02 20:31:56 +00:00
|
|
|
}(id, ar)
|
2015-08-30 01:16:49 +00:00
|
|
|
}
|
2017-05-01 22:06:18 +00:00
|
|
|
|
2017-05-09 17:50:24 +00:00
|
|
|
wg.Wait()
|
|
|
|
return mErr.ErrorOrNil()
|
2015-08-23 21:12:26 +00:00
|
|
|
}
|
|
|
|
|
2016-02-20 03:51:55 +00:00
|
|
|
// getAllocRunners returns a snapshot of the current set of alloc runners.
|
2023-05-12 17:29:44 +00:00
|
|
|
func (c *Client) getAllocRunners() map[string]interfaces.AllocRunner {
|
2016-02-20 03:51:55 +00:00
|
|
|
c.allocLock.RLock()
|
|
|
|
defer c.allocLock.RUnlock()
|
2023-05-12 17:29:44 +00:00
|
|
|
runners := make(map[string]interfaces.AllocRunner, len(c.allocs))
|
2016-02-20 03:51:55 +00:00
|
|
|
for id, ar := range c.allocs {
|
|
|
|
runners[id] = ar
|
|
|
|
}
|
|
|
|
return runners
|
|
|
|
}
|
|
|
|
|
2017-10-19 00:06:46 +00:00
|
|
|
// NumAllocs returns the number of un-GC'd allocs this client has. Used to
|
2017-05-11 00:39:45 +00:00
|
|
|
// fulfill the AllocCounter interface for the GC.
|
|
|
|
func (c *Client) NumAllocs() int {
|
2017-10-19 00:06:46 +00:00
|
|
|
n := 0
|
2017-05-11 00:39:45 +00:00
|
|
|
c.allocLock.RLock()
|
2017-10-19 00:06:46 +00:00
|
|
|
for _, a := range c.allocs {
|
|
|
|
if !a.IsDestroyed() {
|
|
|
|
n++
|
|
|
|
}
|
|
|
|
}
|
2017-05-31 21:05:47 +00:00
|
|
|
c.allocLock.RUnlock()
|
2017-05-11 00:39:45 +00:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2022-08-18 23:32:04 +00:00
|
|
|
// ensureNodeID restores, or generates if necessary, a unique node ID and
|
|
|
|
// SecretID. The node ID is, if available, a persistent unique ID. The secret
|
|
|
|
// ID is a high-entropy random UUID.
|
|
|
|
func ensureNodeID(conf *config.Config) (id, secret string, err error) {
|
2016-08-11 18:43:45 +00:00
|
|
|
var hostID string
|
|
|
|
hostInfo, err := host.Info()
|
2022-08-18 23:32:04 +00:00
|
|
|
if !conf.NoHostUUID && err == nil {
|
2017-04-10 18:44:51 +00:00
|
|
|
if hashed, ok := helper.HashUUID(hostInfo.HostID); ok {
|
|
|
|
hostID = hashed
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if hostID == "" {
|
2016-08-11 18:43:45 +00:00
|
|
|
// Generate a random hostID if no constant ID is available on
|
|
|
|
// this platform.
|
2017-09-29 16:58:48 +00:00
|
|
|
hostID = uuid.Generate()
|
2016-08-11 18:43:45 +00:00
|
|
|
}
|
|
|
|
|
2015-09-22 17:31:47 +00:00
|
|
|
// Do not persist in dev mode
|
2022-08-18 23:32:04 +00:00
|
|
|
if conf.DevMode {
|
2017-09-29 16:58:48 +00:00
|
|
|
return hostID, uuid.Generate(), nil
|
2015-09-22 17:31:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt to read existing ID
|
2022-08-18 23:32:04 +00:00
|
|
|
idPath := filepath.Join(conf.StateDir, "client-id")
|
2023-03-08 19:25:10 +00:00
|
|
|
idBuf, err := os.ReadFile(idPath)
|
2015-09-22 17:31:47 +00:00
|
|
|
if err != nil && !os.IsNotExist(err) {
|
2016-08-16 06:11:57 +00:00
|
|
|
return "", "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt to read existing secret ID
|
2022-08-18 23:32:04 +00:00
|
|
|
secretPath := filepath.Join(conf.StateDir, "secret-id")
|
2023-03-08 19:25:10 +00:00
|
|
|
secretBuf, err := os.ReadFile(secretPath)
|
2016-08-16 06:11:57 +00:00
|
|
|
if err != nil && !os.IsNotExist(err) {
|
|
|
|
return "", "", err
|
2015-09-22 17:31:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Use existing ID if any
|
2016-08-19 02:01:24 +00:00
|
|
|
if len(idBuf) != 0 {
|
2017-02-07 00:20:17 +00:00
|
|
|
id = strings.ToLower(string(idBuf))
|
2016-08-19 02:01:24 +00:00
|
|
|
} else {
|
2016-08-11 18:43:45 +00:00
|
|
|
id = hostID
|
2016-08-19 02:01:24 +00:00
|
|
|
|
|
|
|
// Persist the ID
|
2023-03-08 19:25:10 +00:00
|
|
|
if err := os.WriteFile(idPath, []byte(id), 0700); err != nil {
|
2016-08-19 02:01:24 +00:00
|
|
|
return "", "", err
|
|
|
|
}
|
2015-09-22 17:31:47 +00:00
|
|
|
}
|
|
|
|
|
2016-08-19 02:01:24 +00:00
|
|
|
if len(secretBuf) != 0 {
|
|
|
|
secret = string(secretBuf)
|
|
|
|
} else {
|
|
|
|
// Generate a new secret ID
|
2017-09-29 16:58:48 +00:00
|
|
|
secret = uuid.Generate()
|
2015-09-22 17:31:47 +00:00
|
|
|
|
2016-08-19 02:01:24 +00:00
|
|
|
// Persist the secret ID
|
2023-03-08 19:25:10 +00:00
|
|
|
if err := os.WriteFile(secretPath, []byte(secret), 0700); err != nil {
|
2016-08-19 02:01:24 +00:00
|
|
|
return "", "", err
|
|
|
|
}
|
2016-08-16 06:11:57 +00:00
|
|
|
}
|
2016-08-19 02:01:24 +00:00
|
|
|
|
2016-08-16 06:11:57 +00:00
|
|
|
return id, secret, nil
|
2015-09-22 17:31:47 +00:00
|
|
|
}
|
|
|
|
|
2015-08-20 23:41:29 +00:00
|
|
|
// setupNode is used to setup the initial node
|
|
|
|
func (c *Client) setupNode() error {
|
2022-08-18 23:32:04 +00:00
|
|
|
c.configLock.Lock()
|
|
|
|
defer c.configLock.Unlock()
|
|
|
|
|
|
|
|
newConfig := c.config.Copy()
|
|
|
|
node := newConfig.Node
|
2015-08-20 23:41:29 +00:00
|
|
|
if node == nil {
|
|
|
|
node = &structs.Node{}
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig.Node = node
|
2015-08-20 23:41:29 +00:00
|
|
|
}
|
2022-08-18 23:32:04 +00:00
|
|
|
|
2016-08-11 18:43:45 +00:00
|
|
|
// Generate an ID and secret for the node
|
2022-08-18 23:32:04 +00:00
|
|
|
id, secretID, err := ensureNodeID(newConfig)
|
2016-01-14 20:57:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("node ID setup failed: %v", err)
|
|
|
|
}
|
2016-08-16 06:11:57 +00:00
|
|
|
|
|
|
|
node.ID = id
|
|
|
|
node.SecretID = secretID
|
2015-08-20 23:41:29 +00:00
|
|
|
if node.Attributes == nil {
|
|
|
|
node.Attributes = make(map[string]string)
|
|
|
|
}
|
|
|
|
if node.Links == nil {
|
|
|
|
node.Links = make(map[string]string)
|
|
|
|
}
|
2018-01-25 16:30:15 +00:00
|
|
|
if node.Drivers == nil {
|
|
|
|
node.Drivers = make(map[string]*structs.DriverInfo)
|
|
|
|
}
|
2019-10-22 13:20:26 +00:00
|
|
|
if node.CSIControllerPlugins == nil {
|
|
|
|
node.CSIControllerPlugins = make(map[string]*structs.CSIInfo)
|
|
|
|
}
|
|
|
|
if node.CSINodePlugins == nil {
|
|
|
|
node.CSINodePlugins = make(map[string]*structs.CSIInfo)
|
|
|
|
}
|
2015-08-20 23:41:29 +00:00
|
|
|
if node.Meta == nil {
|
|
|
|
node.Meta = make(map[string]string)
|
|
|
|
}
|
2018-09-30 00:23:41 +00:00
|
|
|
if node.NodeResources == nil {
|
|
|
|
node.NodeResources = &structs.NodeResources{}
|
2022-08-18 23:32:04 +00:00
|
|
|
node.NodeResources.MinDynamicPort = newConfig.MinDynamicPort
|
|
|
|
node.NodeResources.MaxDynamicPort = newConfig.MaxDynamicPort
|
2018-09-30 00:23:41 +00:00
|
|
|
}
|
2018-10-04 21:33:09 +00:00
|
|
|
if node.ReservedResources == nil {
|
|
|
|
node.ReservedResources = &structs.NodeReservedResources{}
|
|
|
|
}
|
2015-08-21 00:49:04 +00:00
|
|
|
if node.Resources == nil {
|
|
|
|
node.Resources = &structs.Resources{}
|
|
|
|
}
|
2016-03-14 02:05:41 +00:00
|
|
|
if node.Reserved == nil {
|
|
|
|
node.Reserved = &structs.Resources{}
|
|
|
|
}
|
2015-08-21 00:49:04 +00:00
|
|
|
if node.Datacenter == "" {
|
|
|
|
node.Datacenter = "dc1"
|
|
|
|
}
|
|
|
|
if node.Name == "" {
|
|
|
|
node.Name, _ = os.Hostname()
|
|
|
|
}
|
2022-08-18 23:32:04 +00:00
|
|
|
node.CgroupParent = newConfig.CgroupParent
|
2019-07-25 14:45:41 +00:00
|
|
|
if node.HostVolumes == nil {
|
2022-08-18 23:32:04 +00:00
|
|
|
if l := len(newConfig.HostVolumes); l != 0 {
|
2019-07-25 14:45:41 +00:00
|
|
|
node.HostVolumes = make(map[string]*structs.ClientHostVolumeConfig, l)
|
2022-08-18 23:32:04 +00:00
|
|
|
for k, v := range newConfig.HostVolumes {
|
2019-09-13 21:28:10 +00:00
|
|
|
if _, err := os.Stat(v.Path); err != nil {
|
|
|
|
return fmt.Errorf("failed to validate volume %s, err: %v", v.Name, err)
|
|
|
|
}
|
2019-07-25 14:45:41 +00:00
|
|
|
node.HostVolumes[k] = v.Copy()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-11-05 13:02:46 +00:00
|
|
|
if node.HostNetworks == nil {
|
2022-08-18 23:32:04 +00:00
|
|
|
if l := len(newConfig.HostNetworks); l != 0 {
|
2021-11-05 13:02:46 +00:00
|
|
|
node.HostNetworks = make(map[string]*structs.ClientHostNetworkConfig, l)
|
2022-08-18 23:32:04 +00:00
|
|
|
for k, v := range newConfig.HostNetworks {
|
2021-11-05 13:02:46 +00:00
|
|
|
node.HostNetworks[k] = v.Copy()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-07-25 14:45:41 +00:00
|
|
|
|
2015-08-21 00:49:04 +00:00
|
|
|
if node.Name == "" {
|
|
|
|
node.Name = node.ID
|
|
|
|
}
|
|
|
|
node.Status = structs.NodeStatusInit
|
2019-08-15 15:22:37 +00:00
|
|
|
|
2023-02-07 22:42:25 +00:00
|
|
|
// Setup default static meta
|
2020-12-11 15:57:13 +00:00
|
|
|
if _, ok := node.Meta[envoy.SidecarMetaParam]; !ok {
|
|
|
|
node.Meta[envoy.SidecarMetaParam] = envoy.ImageFormat
|
2019-08-15 15:22:37 +00:00
|
|
|
}
|
2020-12-11 15:57:13 +00:00
|
|
|
if _, ok := node.Meta[envoy.GatewayMetaParam]; !ok {
|
|
|
|
node.Meta[envoy.GatewayMetaParam] = envoy.ImageFormat
|
2020-07-28 20:12:08 +00:00
|
|
|
}
|
2019-08-15 15:22:37 +00:00
|
|
|
if _, ok := node.Meta["connect.log_level"]; !ok {
|
|
|
|
node.Meta["connect.log_level"] = defaultConnectLogLevel
|
|
|
|
}
|
2020-12-01 19:01:32 +00:00
|
|
|
if _, ok := node.Meta["connect.proxy_concurrency"]; !ok {
|
|
|
|
node.Meta["connect.proxy_concurrency"] = defaultConnectProxyConcurrency
|
|
|
|
}
|
2019-08-15 15:22:37 +00:00
|
|
|
|
2023-02-07 22:42:25 +00:00
|
|
|
// Since node.Meta will get dynamic metadata merged in, save static metadata
|
|
|
|
// here.
|
|
|
|
c.metaStatic = maps.Clone(node.Meta)
|
|
|
|
|
|
|
|
// Merge dynamic node metadata
|
|
|
|
c.metaDynamic, err = c.stateDB.GetNodeMeta()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error reading dynamic node metadata: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.metaDynamic == nil {
|
|
|
|
c.metaDynamic = map[string]*string{}
|
|
|
|
}
|
|
|
|
|
|
|
|
for dk, dv := range c.metaDynamic {
|
|
|
|
if dv == nil {
|
|
|
|
_, ok := node.Meta[dk]
|
|
|
|
if ok {
|
|
|
|
// Unset static node metadata
|
|
|
|
delete(node.Meta, dk)
|
|
|
|
} else {
|
|
|
|
// Forget dynamic node metadata tombstone as there's no
|
|
|
|
// static value to erase.
|
|
|
|
delete(c.metaDynamic, dk)
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
node.Meta[dk] = *dv
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write back dynamic node metadata as tombstones may have been removed
|
|
|
|
// above
|
|
|
|
if err := c.stateDB.PutNodeMeta(c.metaDynamic); err != nil {
|
|
|
|
return fmt.Errorf("error syncing dynamic node metadata: %w", err)
|
|
|
|
}
|
|
|
|
|
2022-08-18 23:32:04 +00:00
|
|
|
c.config = newConfig
|
2015-08-20 23:41:29 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-01-24 14:09:53 +00:00
|
|
|
// updateNodeFromFingerprint updates the node with the result of
|
|
|
|
// fingerprinting, using the diff that was created
|
2018-12-01 16:10:39 +00:00
|
|
|
func (c *Client) updateNodeFromFingerprint(response *fingerprint.FingerprintResponse) *structs.Node {
|
2018-01-24 14:09:53 +00:00
|
|
|
c.configLock.Lock()
|
|
|
|
defer c.configLock.Unlock()
|
2018-02-14 19:35:15 +00:00
|
|
|
|
2018-02-26 22:02:15 +00:00
|
|
|
nodeHasChanged := false
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig := c.config.Copy()
|
2018-02-14 19:35:15 +00:00
|
|
|
|
2018-02-23 20:01:57 +00:00
|
|
|
for name, newVal := range response.Attributes {
|
2022-08-18 23:32:04 +00:00
|
|
|
oldVal := newConfig.Node.Attributes[name]
|
2018-02-23 22:52:06 +00:00
|
|
|
if oldVal == newVal {
|
2018-02-14 19:35:15 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeHasChanged = true
|
2018-02-23 20:01:57 +00:00
|
|
|
if newVal == "" {
|
2022-08-18 23:32:04 +00:00
|
|
|
delete(newConfig.Node.Attributes, name)
|
2018-01-26 11:51:09 +00:00
|
|
|
} else {
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig.Node.Attributes[name] = newVal
|
2018-01-26 11:51:09 +00:00
|
|
|
}
|
2018-01-24 14:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// update node links and resources from the diff created from
|
|
|
|
// fingerprinting
|
2018-02-23 20:01:57 +00:00
|
|
|
for name, newVal := range response.Links {
|
2022-08-18 23:32:04 +00:00
|
|
|
oldVal := newConfig.Node.Links[name]
|
2018-02-23 22:52:06 +00:00
|
|
|
if oldVal == newVal {
|
2018-02-14 19:35:15 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeHasChanged = true
|
2018-02-23 20:01:57 +00:00
|
|
|
if newVal == "" {
|
2022-08-18 23:32:04 +00:00
|
|
|
delete(newConfig.Node.Links, name)
|
2018-01-26 11:51:09 +00:00
|
|
|
} else {
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig.Node.Links[name] = newVal
|
2018-01-26 11:51:09 +00:00
|
|
|
}
|
2018-01-24 14:09:53 +00:00
|
|
|
}
|
|
|
|
|
2018-09-30 00:23:41 +00:00
|
|
|
// COMPAT(0.10): Remove in 0.10
|
2019-04-11 17:17:26 +00:00
|
|
|
// update the response networks with the config
|
|
|
|
// if we still have node changes, merge them
|
|
|
|
if response.Resources != nil {
|
|
|
|
response.Resources.Networks = updateNetworks(
|
|
|
|
response.Resources.Networks,
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig)
|
2022-10-10 14:28:46 +00:00
|
|
|
if !newConfig.Node.Resources.Equal(response.Resources) {
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig.Node.Resources.Merge(response.Resources)
|
2019-04-11 17:17:26 +00:00
|
|
|
nodeHasChanged = true
|
|
|
|
}
|
2018-01-30 17:57:37 +00:00
|
|
|
}
|
2018-02-14 19:35:15 +00:00
|
|
|
|
2019-04-11 17:17:26 +00:00
|
|
|
// update the response networks with the config
|
|
|
|
// if we still have node changes, merge them
|
|
|
|
if response.NodeResources != nil {
|
|
|
|
response.NodeResources.Networks = updateNetworks(
|
|
|
|
response.NodeResources.Networks,
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig)
|
2022-10-10 14:28:46 +00:00
|
|
|
if !newConfig.Node.NodeResources.Equal(response.NodeResources) {
|
2022-08-18 23:32:04 +00:00
|
|
|
newConfig.Node.NodeResources.Merge(response.NodeResources)
|
2019-04-11 17:17:26 +00:00
|
|
|
nodeHasChanged = true
|
|
|
|
}
|
2021-09-10 08:52:47 +00:00
|
|
|
|
2022-08-18 23:32:04 +00:00
|
|
|
response.NodeResources.MinDynamicPort = newConfig.MinDynamicPort
|
|
|
|
response.NodeResources.MaxDynamicPort = newConfig.MaxDynamicPort
|
|
|
|
if newConfig.Node.NodeResources.MinDynamicPort != response.NodeResources.MinDynamicPort ||
|
|
|
|
newConfig.Node.NodeResources.MaxDynamicPort != response.NodeResources.MaxDynamicPort {
|
2021-09-10 08:52:47 +00:00
|
|
|
nodeHasChanged = true
|
|
|
|
}
|
|
|
|
|
2018-09-30 00:23:41 +00:00
|
|
|
}
|
|
|
|
|
2018-03-09 17:28:01 +00:00
|
|
|
if nodeHasChanged {
|
2022-08-18 23:32:04 +00:00
|
|
|
c.config = newConfig
|
|
|
|
c.updateNode()
|
2018-03-09 17:28:01 +00:00
|
|
|
}
|
2018-03-07 18:34:38 +00:00
|
|
|
|
2022-08-18 23:32:04 +00:00
|
|
|
return newConfig.Node
|
2018-03-09 17:28:01 +00:00
|
|
|
}
|
|
|
|
|
2020-05-15 15:09:01 +00:00
|
|
|
// updateNetworks filters and overrides network speed of host networks based
|
|
|
|
// on configured settings
|
|
|
|
func updateNetworks(up structs.Networks, c *config.Config) structs.Networks {
|
|
|
|
if up == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.NetworkInterface != "" {
|
|
|
|
// For host networks, if a network device is configured, filter up to contain details for only
|
2019-05-02 21:14:35 +00:00
|
|
|
// that device
|
|
|
|
upd := []*structs.NetworkResource{}
|
2019-04-11 17:17:26 +00:00
|
|
|
for _, n := range up {
|
2020-05-15 15:09:01 +00:00
|
|
|
switch n.Mode {
|
|
|
|
case "host":
|
|
|
|
if c.NetworkInterface == n.Device {
|
|
|
|
upd = append(upd, n)
|
|
|
|
}
|
|
|
|
default:
|
2019-05-02 21:14:35 +00:00
|
|
|
upd = append(upd, n)
|
2020-05-15 15:09:01 +00:00
|
|
|
|
2019-04-11 17:17:26 +00:00
|
|
|
}
|
2019-04-11 14:25:19 +00:00
|
|
|
}
|
2020-05-15 15:09:01 +00:00
|
|
|
up = upd
|
2019-04-11 17:17:26 +00:00
|
|
|
}
|
2019-05-02 21:14:35 +00:00
|
|
|
|
2020-05-15 15:09:01 +00:00
|
|
|
// if set, apply the config NetworkSpeed to networks in host mode
|
2019-04-11 17:17:26 +00:00
|
|
|
if c.NetworkSpeed != 0 {
|
2020-05-15 15:09:01 +00:00
|
|
|
for _, n := range up {
|
|
|
|
if n.Mode == "host" {
|
|
|
|
n.MBits = c.NetworkSpeed
|
|
|
|
}
|
2019-03-29 14:25:36 +00:00
|
|
|
}
|
2018-02-14 19:35:15 +00:00
|
|
|
}
|
2020-05-15 15:09:01 +00:00
|
|
|
return up
|
2018-02-14 19:35:15 +00:00
|
|
|
}
|
|
|
|
|
2015-08-24 00:40:14 +00:00
|
|
|
// retryIntv calculates a retry interval value given the base
|
|
|
|
func (c *Client) retryIntv(base time.Duration) time.Duration {
|
2022-08-18 23:32:04 +00:00
|
|
|
if c.GetConfig().DevMode {
|
2015-08-24 00:40:14 +00:00
|
|
|
return devModeRetryIntv
|
|
|
|
}
|
2022-04-09 11:22:44 +00:00
|
|
|
return base + helper.RandomStagger(base)
|
2015-08-24 00:40:14 +00:00
|
|
|
}
|
|
|
|
|
2016-02-17 19:32:17 +00:00
|
|
|
// registerAndHeartbeat is a long lived goroutine used to register the client
|
2018-03-11 18:12:19 +00:00
|
|
|
// and then start heartbeating to the server.
|
2016-02-17 19:32:17 +00:00
|
|
|
func (c *Client) registerAndHeartbeat() {
|
|
|
|
// Register the node
|
2016-02-03 20:07:09 +00:00
|
|
|
c.retryRegisterNode()
|
|
|
|
|
2016-04-01 18:29:44 +00:00
|
|
|
// Start watching changes for node changes
|
2018-02-14 19:35:15 +00:00
|
|
|
go c.watchNodeUpdates()
|
2016-04-01 18:29:44 +00:00
|
|
|
|
2018-03-09 12:05:39 +00:00
|
|
|
// Start watching for emitting node events
|
2018-03-13 13:33:53 +00:00
|
|
|
go c.watchNodeEvents()
|
2018-03-09 12:05:39 +00:00
|
|
|
|
2015-09-21 00:02:12 +00:00
|
|
|
// Setup the heartbeat timer; for the initial registration
|
|
|
|
// we want to do this quickly. We want to do it extra quickly
|
|
|
|
// in development mode.
|
|
|
|
var heartbeat <-chan time.Time
|
2022-08-18 23:32:04 +00:00
|
|
|
if c.GetConfig().DevMode {
|
2015-09-21 00:02:12 +00:00
|
|
|
heartbeat = time.After(0)
|
|
|
|
} else {
|
2022-04-09 11:22:44 +00:00
|
|
|
heartbeat = time.After(helper.RandomStagger(initialHeartbeatStagger))
|
2015-09-21 00:02:12 +00:00
|
|
|
}
|
2015-08-23 01:16:05 +00:00
|
|
|
|
2016-02-17 19:32:17 +00:00
|
|
|
for {
|
|
|
|
select {
|
2018-04-04 01:05:28 +00:00
|
|
|
case <-c.rpcRetryWatcher():
|
2016-02-17 19:32:17 +00:00
|
|
|
case <-heartbeat:
|
|
|
|
case <-c.shutdownCh:
|
|
|
|
return
|
|
|
|
}
|
2016-09-24 00:02:48 +00:00
|
|
|
if err := c.updateNodeStatus(); err != nil {
|
|
|
|
// The servers have changed such that this node has not been
|
|
|
|
// registered before
|
|
|
|
if strings.Contains(err.Error(), "node not found") {
|
|
|
|
// Re-register the node
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Info("re-registering node")
|
2016-09-24 00:02:48 +00:00
|
|
|
c.retryRegisterNode()
|
2022-04-09 11:22:44 +00:00
|
|
|
heartbeat = time.After(helper.RandomStagger(initialHeartbeatStagger))
|
2016-09-24 00:02:48 +00:00
|
|
|
} else {
|
2018-04-05 18:22:47 +00:00
|
|
|
intv := c.getHeartbeatRetryIntv(err)
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error heartbeating. retrying", "error", err, "period", intv)
|
2016-09-24 00:02:48 +00:00
|
|
|
heartbeat = time.After(intv)
|
|
|
|
|
2018-04-05 17:58:13 +00:00
|
|
|
// If heartbeating fails, trigger Consul discovery
|
2016-09-24 00:02:48 +00:00
|
|
|
c.triggerDiscovery()
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
c.heartbeatLock.Lock()
|
|
|
|
heartbeat = time.After(c.heartbeatTTL)
|
|
|
|
c.heartbeatLock.Unlock()
|
|
|
|
}
|
2016-02-17 19:32:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-13 20:08:24 +00:00
|
|
|
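// lastHeartbeat returns the time of the most recent successful heartbeat.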
func (c *Client) lastHeartbeat() time.Time {
|
|
|
|
return c.heartbeatStop.getLastOk()
|
|
|
|
}
|
|
|
|
|
2018-04-05 17:58:13 +00:00
|
|
|
// getHeartbeatRetryIntv is used to retrieve the time to wait before attempting
|
|
|
|
// another heartbeat.
|
2018-04-05 18:22:47 +00:00
|
|
|
func (c *Client) getHeartbeatRetryIntv(err error) time.Duration {
|
2022-08-18 23:32:04 +00:00
|
|
|
if c.GetConfig().DevMode {
|
2018-04-05 17:58:13 +00:00
|
|
|
return devModeRetryIntv
|
|
|
|
}
|
|
|
|
|
|
|
|
// Collect the useful heartbeat info
|
|
|
|
c.heartbeatLock.Lock()
|
|
|
|
haveHeartbeated := c.haveHeartbeated
|
2020-04-13 20:08:24 +00:00
|
|
|
last := c.lastHeartbeat()
|
2018-04-05 17:58:13 +00:00
|
|
|
ttl := c.heartbeatTTL
|
|
|
|
c.heartbeatLock.Unlock()
|
|
|
|
|
2018-04-05 18:22:47 +00:00
|
|
|
// If we haven't even successfully heartbeated once or there is no leader,
|
|
|
|
// treat it as a registration. In the case that there is a leadership loss,
|
|
|
|
// we will have our heartbeat timer reset to a much larger threshold, so
|
|
|
|
// do not put unnecessary pressure on the new leader.
|
|
|
|
if !haveHeartbeated || err == structs.ErrNoLeader {
|
2018-04-05 17:58:13 +00:00
|
|
|
return c.retryIntv(registerRetryIntv)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determine how much time we have left to heartbeat
|
2020-12-09 19:05:18 +00:00
|
|
|
left := time.Until(last.Add(ttl))
|
2018-04-05 17:58:13 +00:00
|
|
|
|
|
|
|
// Logic for retrying is:
|
|
|
|
// * Do not retry faster than once a second
|
|
|
|
// * Do not retry less than once every 30 seconds
|
2018-04-05 18:22:47 +00:00
|
|
|
// * If we have missed the heartbeat by more than 30 seconds, start to use
|
|
|
|
// the absolute time since we do not want to retry indefinitely
|
|
|
|
switch {
|
|
|
|
case left < -30*time.Second:
|
2018-04-05 20:48:33 +00:00
|
|
|
// Make left the absolute value so we delay and jitter properly.
|
2018-04-05 18:22:47 +00:00
|
|
|
left *= -1
|
|
|
|
case left < 0:
|
2022-04-09 11:22:44 +00:00
|
|
|
return time.Second + helper.RandomStagger(time.Second)
|
2018-04-05 18:22:47 +00:00
|
|
|
default:
|
2018-04-05 17:58:13 +00:00
|
|
|
}
|
|
|
|
|
2022-04-09 11:22:44 +00:00
|
|
|
stagger := helper.RandomStagger(left)
|
2018-04-05 17:58:13 +00:00
|
|
|
switch {
|
|
|
|
case stagger < time.Second:
|
2022-04-09 11:22:44 +00:00
|
|
|
return time.Second + helper.RandomStagger(time.Second)
|
2018-04-05 17:58:13 +00:00
|
|
|
case stagger > 30*time.Second:
|
2022-04-09 11:22:44 +00:00
|
|
|
return 25*time.Second + helper.RandomStagger(5*time.Second)
|
2018-04-05 17:58:13 +00:00
|
|
|
default:
|
|
|
|
return stagger
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-17 19:32:17 +00:00
|
|
|
// periodicSnapshot is a long lived goroutine used to periodically snapshot the
|
|
|
|
// state of the client
|
|
|
|
func (c *Client) periodicSnapshot() {
|
2015-08-31 00:19:20 +00:00
|
|
|
// Create a snapshot timer
|
|
|
|
snapshot := time.After(stateSnapshotIntv)
|
|
|
|
|
2015-08-24 00:40:14 +00:00
|
|
|
for {
|
|
|
|
select {
|
2015-08-31 00:19:20 +00:00
|
|
|
case <-snapshot:
|
|
|
|
snapshot = time.After(stateSnapshotIntv)
|
2017-05-09 17:50:24 +00:00
|
|
|
if err := c.saveState(); err != nil {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error saving state", "error", err)
|
2015-08-31 00:19:20 +00:00
|
|
|
}
|
|
|
|
|
2016-02-17 19:32:17 +00:00
|
|
|
case <-c.shutdownCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-14 18:29:07 +00:00
|
|
|
// run is a long lived goroutine used to run the client. Shutdown() stops it first.
|
2016-02-17 19:32:17 +00:00
|
|
|
func (c *Client) run() {
|
2023-06-22 15:06:49 +00:00
|
|
|
|
2016-02-17 19:32:17 +00:00
|
|
|
// Watch for changes in allocations
|
2016-02-19 04:43:48 +00:00
|
|
|
allocUpdates := make(chan *allocUpdates, 8)
|
2016-02-17 19:32:17 +00:00
|
|
|
go c.watchAllocations(allocUpdates)
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
2016-02-01 21:57:35 +00:00
|
|
|
case update := <-allocUpdates:
|
2018-11-14 18:29:07 +00:00
|
|
|
// Don't apply updates while shutting down.
|
|
|
|
c.shutdownLock.Lock()
|
|
|
|
if c.shutdown {
|
|
|
|
c.shutdownLock.Unlock()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Apply updates inside lock to prevent a concurrent
|
|
|
|
// shutdown.
|
2016-02-01 21:57:35 +00:00
|
|
|
c.runAllocs(update)
|
2018-11-14 18:29:07 +00:00
|
|
|
c.shutdownLock.Unlock()
|
2015-08-24 00:40:14 +00:00
|
|
|
|
|
|
|
case <-c.shutdownCh:
|
|
|
|
return
|
|
|
|
}
|
2015-08-21 00:49:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-09 12:05:39 +00:00
|
|
|
// submitNodeEvents is used to submit client-side node events. Examples of
|
2018-03-09 17:43:20 +00:00
|
|
|
// these kinds of events include when a driver moves from healthy to unhealthy
|
2018-03-09 12:05:39 +00:00
|
|
|
// (and vice versa)
|
2018-03-12 01:00:13 +00:00
|
|
|
func (c *Client) submitNodeEvents(events []*structs.NodeEvent) error {
|
2018-03-13 13:33:53 +00:00
|
|
|
nodeID := c.NodeID()
|
2018-03-09 12:05:39 +00:00
|
|
|
nodeEvents := map[string][]*structs.NodeEvent{
|
2018-03-12 01:00:13 +00:00
|
|
|
nodeID: events,
|
2018-03-09 12:05:39 +00:00
|
|
|
}
|
2018-03-12 01:00:13 +00:00
|
|
|
req := structs.EmitNodeEventsRequest{
|
2023-06-22 15:06:49 +00:00
|
|
|
NodeEvents: nodeEvents,
|
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Region: c.Region(),
|
|
|
|
AuthToken: c.secretNodeID(),
|
|
|
|
},
|
2018-03-09 12:05:39 +00:00
|
|
|
}
|
2018-03-12 01:00:13 +00:00
|
|
|
var resp structs.EmitNodeEventsResponse
|
|
|
|
if err := c.RPC("Node.EmitEvents", &req, &resp); err != nil {
|
2018-03-14 01:04:55 +00:00
|
|
|
return fmt.Errorf("Emitting node events failed: %v", err)
|
2018-03-09 12:05:39 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-03-13 13:33:53 +00:00
|
|
|
// watchNodeEvents is a handler which receives node events and, on an interval,
|
|
|
|
// submits them in batch format to the server
|
|
|
|
func (c *Client) watchNodeEvents() {
|
|
|
|
// batchEvents stores events that have yet to be published
|
|
|
|
var batchEvents []*structs.NodeEvent
|
2018-03-09 12:05:39 +00:00
|
|
|
|
2019-04-19 13:12:50 +00:00
|
|
|
timer := stoppedTimer()
|
2018-03-09 12:05:39 +00:00
|
|
|
defer timer.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case event := <-c.triggerEmitNodeEvent:
|
2018-03-13 13:33:53 +00:00
|
|
|
if l := len(batchEvents); l <= structs.MaxRetainedNodeEvents {
|
|
|
|
batchEvents = append(batchEvents, event)
|
|
|
|
} else {
|
|
|
|
// Drop the oldest event
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Warn("dropping node event", "node_event", batchEvents[0])
|
2018-03-13 13:33:53 +00:00
|
|
|
batchEvents = append(batchEvents[1:], event)
|
2018-03-09 12:05:39 +00:00
|
|
|
}
|
2018-03-13 13:33:53 +00:00
|
|
|
timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
|
|
|
|
case <-timer.C:
|
|
|
|
if err := c.submitNodeEvents(batchEvents); err != nil {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error submitting node events", "error", err)
|
2018-03-13 13:33:53 +00:00
|
|
|
timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
|
2018-03-14 16:48:59 +00:00
|
|
|
} else {
|
2018-03-14 20:54:25 +00:00
|
|
|
// Reset the events since we successfully sent them.
|
2018-03-20 17:25:07 +00:00
|
|
|
batchEvents = []*structs.NodeEvent{}
|
2018-03-12 01:00:13 +00:00
|
|
|
}
|
2018-03-09 12:05:39 +00:00
|
|
|
case <-c.shutdownCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-12 01:00:13 +00:00
|
|
|
// triggerNodeEvent triggers the emission of a node event
|
2018-03-09 12:05:39 +00:00
|
|
|
func (c *Client) triggerNodeEvent(nodeEvent *structs.NodeEvent) {
|
|
|
|
select {
|
|
|
|
case c.triggerEmitNodeEvent <- nodeEvent:
|
|
|
|
// emit node event goroutine was released to execute
|
|
|
|
default:
|
|
|
|
// emit node event goroutine was already running
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-03 20:07:09 +00:00
|
|
|
// retryRegisterNode is used to register the node or update the registration and
|
|
|
|
// retry in case of failure.
|
|
|
|
func (c *Client) retryRegisterNode() {
|
2023-06-22 15:06:49 +00:00
|
|
|
|
|
|
|
authToken := c.getRegistrationToken()
|
|
|
|
|
2016-02-03 20:07:09 +00:00
|
|
|
for {
|
2023-06-22 15:06:49 +00:00
|
|
|
err := c.registerNode(authToken)
|
2016-09-22 00:06:52 +00:00
|
|
|
if err == nil {
|
2016-09-24 00:02:48 +00:00
|
|
|
// Registered!
|
2016-09-22 00:06:52 +00:00
|
|
|
return
|
|
|
|
}
|
2016-09-24 00:02:48 +00:00
|
|
|
|
2021-08-10 21:06:18 +00:00
|
|
|
retryIntv := registerRetryIntv
|
2023-03-16 19:38:33 +00:00
|
|
|
if err == noServersErr || structs.IsErrNoRegionPath(err) {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Debug("registration waiting on servers")
|
2016-09-24 00:02:48 +00:00
|
|
|
c.triggerDiscovery()
|
2021-08-10 21:06:18 +00:00
|
|
|
retryIntv = noServerRetryIntv
|
2023-06-22 15:06:49 +00:00
|
|
|
} else if structs.IsErrPermissionDenied(err) {
|
|
|
|
// any previous cluster state we have here is invalid (ex. client
|
|
|
|
// has been assigned to a new region), so clear the token and local
|
|
|
|
// state for next pass.
|
|
|
|
authToken = ""
|
|
|
|
c.stateDB.PutNodeRegistration(&cstructs.NodeRegistration{HasRegistered: false})
|
|
|
|
c.logger.Error("error registering", "error", err)
|
2016-08-16 06:11:57 +00:00
|
|
|
} else {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Error("error registering", "error", err)
|
2016-02-03 20:07:09 +00:00
|
|
|
}
|
|
|
|
select {
|
2018-04-04 01:05:28 +00:00
|
|
|
case <-c.rpcRetryWatcher():
|
2021-08-10 21:06:18 +00:00
|
|
|
case <-time.After(c.retryIntv(retryIntv)):
|
2016-02-03 20:07:09 +00:00
|
|
|
case <-c.shutdownCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-06-22 15:06:49 +00:00
|
|
|
// getRegistrationToken gets the node secret to use for the Node.Register call.
|
|
|
|
// Registration is trust-on-first-use so we can't send the auth token with the
|
|
|
|
// initial request, but we want to add the auth token after that so that we can
|
|
|
|
// capture metrics.
|
|
|
|
func (c *Client) getRegistrationToken() string {
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-c.registeredCh:
|
|
|
|
return c.secretNodeID()
|
|
|
|
default:
|
|
|
|
// If we haven't yet closed the registeredCh we're either starting for
|
|
|
|
// the 1st time or we've just restarted. Check the local state to see if
|
|
|
|
// we've written a successful registration previously so that we don't
|
|
|
|
// block allocrunner operations on disconnected clients.
|
|
|
|
registration, err := c.stateDB.GetNodeRegistration()
|
|
|
|
if err != nil {
|
|
|
|
c.logger.Error("could not determine previous node registration", "error", err)
|
|
|
|
}
|
|
|
|
if registration != nil && registration.HasRegistered {
|
|
|
|
c.registeredOnce.Do(func() { close(c.registeredCh) })
|
|
|
|
return c.secretNodeID()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2015-08-21 00:49:04 +00:00
|
|
|
// registerNode is used to register the node or update the registration
|
2023-06-22 15:06:49 +00:00
|
|
|
func (c *Client) registerNode(authToken string) error {
|
2015-08-21 00:49:04 +00:00
|
|
|
req := structs.NodeRegisterRequest{
|
2023-06-22 15:06:49 +00:00
|
|
|
Node: c.Node(),
|
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Region: c.Region(),
|
|
|
|
AuthToken: authToken,
|
|
|
|
},
|
2015-08-21 00:49:04 +00:00
|
|
|
}
|
2023-06-22 15:06:49 +00:00
|
|
|
|
2015-08-21 00:49:04 +00:00
|
|
|
var resp structs.NodeUpdateResponse
|
2023-06-22 15:06:49 +00:00
|
|
|
if err := c.UnauthenticatedRPC("Node.Register", &req, &resp); err != nil {
|
2015-08-21 00:49:04 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-02-19 07:02:28 +00:00
|
|
|
|
2023-06-22 15:06:49 +00:00
|
|
|
// Signal that we've registered once so that RPCs sent from the client can
|
|
|
|
// send authenticated requests. Persist this information in the state so
|
|
|
|
// that we don't block restoring running allocs when restarting while
|
|
|
|
// disconnected.
|
|
|
|
c.registeredOnce.Do(func() {
|
|
|
|
err := c.stateDB.PutNodeRegistration(&cstructs.NodeRegistration{
|
|
|
|
HasRegistered: true,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
c.logger.Error("could not write node registration", "error", err)
|
|
|
|
}
|
|
|
|
close(c.registeredCh)
|
|
|
|
})
|
|
|
|
|
2023-03-16 19:38:33 +00:00
|
|
|
err := c.handleNodeUpdateResponse(resp)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-02-19 07:02:28 +00:00
|
|
|
// Update the node status to ready after we register.
|
2022-08-18 23:32:04 +00:00
|
|
|
c.UpdateConfig(func(c *config.Config) {
|
|
|
|
c.Node.Status = structs.NodeStatusReady
|
|
|
|
})
|
2016-02-19 07:02:28 +00:00
|
|
|
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Info("node registration complete")
|
2015-08-21 00:49:04 +00:00
|
|
|
if len(resp.EvalIDs) != 0 {
|
2018-08-29 22:05:03 +00:00
|
|
|
c.logger.Debug("evaluations triggered by node registration", "num_evals", len(resp.EvalIDs))
|
2015-08-21 00:49:04 +00:00
|
|
|
}
|
2016-02-10 06:43:16 +00:00
|
|
|
|
|
|
|
c.heartbeatLock.Lock()
|
|
|
|
defer c.heartbeatLock.Unlock()
|
2020-04-28 20:13:09 +00:00
|
|
|
c.heartbeatStop.setLastOk(time.Now())
|
2015-08-23 01:16:05 +00:00
|
|
|
c.heartbeatTTL = resp.HeartbeatTTL
|
2023-03-16 19:38:33 +00:00
|
|
|
|
2015-08-23 01:16:05 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|

// updateNodeStatus is used to heartbeat and update the status of the node
func (c *Client) updateNodeStatus() error {
	start := time.Now()
	req := structs.NodeUpdateStatusRequest{
		NodeID: c.NodeID(),
		Status: structs.NodeStatusReady,
		WriteRequest: structs.WriteRequest{
			Region:    c.Region(),
			AuthToken: c.secretNodeID(),
		},
	}
	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.UpdateStatus", &req, &resp); err != nil {
		c.triggerDiscovery()
		return fmt.Errorf("failed to update status: %v", err)
	}
	end := time.Now()

	if len(resp.EvalIDs) != 0 {
		c.logger.Debug("evaluations triggered by node update", "num_evals", len(resp.EvalIDs))
	}

	// Update the last heartbeat and the new TTL, capturing the old values
	c.heartbeatLock.Lock()
	last := c.lastHeartbeat()
	oldTTL := c.heartbeatTTL
	haveHeartbeated := c.haveHeartbeated
	c.heartbeatStop.setLastOk(time.Now())
	c.heartbeatTTL = resp.HeartbeatTTL
	c.haveHeartbeated = true
	c.heartbeatLock.Unlock()
	c.logger.Trace("next heartbeat", "period", resp.HeartbeatTTL)

	if resp.Index != 0 {
		c.logger.Debug("state updated", "node_status", req.Status)

		// We have potentially missed our TTL; log how delayed we were.
		if haveHeartbeated {
			c.logger.Warn("missed heartbeat",
				"req_latency", end.Sub(start), "heartbeat_ttl", oldTTL, "since_last_heartbeat", time.Since(last))
		}
	}

	// Check the heartbeat response for information about the server-side
	// scheduling state of this node. If there are errors on the server side,
	// this will come back as an empty string.
	c.UpdateConfig(func(c *config.Config) {
		if resp.SchedulingEligibility != "" {
			c.Node.SchedulingEligibility = resp.SchedulingEligibility
		}
	})

	err := c.handleNodeUpdateResponse(resp)
	if err != nil {
		return fmt.Errorf("heartbeat response returned no valid servers")
	}

	// If there's no Leader in the response we may be talking to a partitioned
	// server. Redo discovery to ensure our server list is up to date.
	if resp.LeaderRPCAddr == "" {
		c.triggerDiscovery()
	}

	c.EnterpriseClient.SetFeatures(resp.Features)
	return nil
}

func (c *Client) handleNodeUpdateResponse(resp structs.NodeUpdateResponse) error {
	// Update the number of nodes in the cluster so we can adjust our server
	// rebalance rate.
	c.servers.SetNumNodes(resp.NumNodes)

	// Convert []*NodeServerInfo to []*servers.Server
	nomadServers := make([]*servers.Server, 0, len(resp.Servers))
	for _, s := range resp.Servers {
		addr, err := resolveServer(s.RPCAdvertiseAddr)
		if err != nil {
			c.logger.Warn("ignoring invalid server", "error", err, "server", s.RPCAdvertiseAddr)
			continue
		}
		e := &servers.Server{Addr: addr}
		nomadServers = append(nomadServers, e)
	}
	if len(nomadServers) == 0 {
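		// Returning the sentinel noServersErr rather than a wrapped
		// error matters: callers such as watchAllocations compare
		// against it to treat "no servers yet" as a quiet condition
		// instead of an RPC failure worth logging.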
		return noServersErr
	}
	c.servers.SetServers(nomadServers)
	return nil
}

// AllocStateUpdated asynchronously updates the server with the current state
// of an allocation and its tasks.
func (c *Client) AllocStateUpdated(alloc *structs.Allocation) {
	if alloc.Terminated() {
		// Terminated, mark for GC if we're still tracking this alloc
		// runner. If it's not being tracked that means the server has
		// already GC'd it (see removeAlloc).
		ar, err := c.getAllocRunner(alloc.ID)
		if err == nil {
			c.garbageCollector.MarkForCollection(alloc.ID, ar)

			// Trigger a GC in case we're over thresholds and just
			// waiting for eligible allocs.
			c.garbageCollector.Trigger()
		}
	}

	// Strip all the information that can be reconstructed at the server. Only
	// send the fields that are updatable by the client.
	stripped := new(structs.Allocation)
	stripped.ID = alloc.ID
	stripped.NodeID = c.NodeID()
	stripped.TaskStates = alloc.TaskStates
	stripped.ClientStatus = alloc.ClientStatus
	stripped.ClientDescription = alloc.ClientDescription
	stripped.DeploymentStatus = alloc.DeploymentStatus
	stripped.NetworkStatus = alloc.NetworkStatus

	c.pendingUpdates.add(stripped)
}

// PutAllocation stores an allocation or returns an error if it could not be stored.
func (c *Client) PutAllocation(alloc *structs.Allocation) error {
	return c.stateDB.PutAllocation(alloc)
}

// allocSync is a long lived function that batches allocation updates to the
// server.
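//
// A rough sketch of one cycle (the batching policy itself lives in
// pendingUpdates.nextBatch):
//
//	tick -> nextBatch -> Node.UpdateAlloc
//	  on failure: restore the batch and back off via retryIntv
//	  on success: acknowledge state per alloc and reset the ticker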
func (c *Client) allocSync() {
	syncTicker := time.NewTicker(allocSyncIntv)
	updateTicks := 0

	for {
		select {
		case <-c.shutdownCh:
			syncTicker.Stop()
			return

		case <-syncTicker.C:
			updateTicks++
			toSync := c.pendingUpdates.nextBatch(c, updateTicks)

			if len(toSync) == 0 {
				syncTicker.Reset(allocSyncIntv)
				continue
			}

			// Send to server.
			args := structs.AllocUpdateRequest{
				Alloc: toSync,
				WriteRequest: structs.WriteRequest{
					Region:    c.Region(),
					AuthToken: c.secretNodeID(),
				},
			}

			var resp structs.GenericResponse
			err := c.RPC("Node.UpdateAlloc", &args, &resp)
			if err != nil {
				// Error updating allocations, do *not* clear
				// updates and retry after backoff
				c.logger.Error("error updating allocations", "error", err)

				// refill the updates queue with updates that we failed to make
				c.pendingUpdates.restore(toSync)
				syncTicker.Reset(c.retryIntv(allocSyncRetryIntv))
				continue
			}

			// Record that we've successfully synced these updates so that it's
			// written to disk
			c.allocLock.RLock()
			for _, update := range toSync {
				if ar, ok := c.allocs[update.ID]; ok {
					ar.AcknowledgeState(&arstate.State{
						ClientStatus:      update.ClientStatus,
						ClientDescription: update.ClientDescription,
						DeploymentStatus:  update.DeploymentStatus,
						TaskStates:        update.TaskStates,
						NetworkStatus:     update.NetworkStatus,
					})
				}
			}
			c.allocLock.RUnlock()

			// Successfully updated allocs. Reset ticker to give loop time to
			// receive new alloc updates. Otherwise if the RPC took the ticker
			// interval we may call it in a tight loop reading empty updates.
			updateTicks = 0
			syncTicker.Reset(allocSyncIntv)
		}
	}
}

// allocUpdates holds the results of receiving updated allocations from the
// servers.
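//
// As a hypothetical illustration: if the server reports modify indexes
// {"a1": 10, "a2": 7} and the client already tracks a2 at
// AllocModifyIndex 7, then a1 lands in pulled (fetched in full) and a2
// in filtered (see watchAllocations below).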
type allocUpdates struct {
	// pulled is the set of allocations that were downloaded from the servers.
	pulled map[string]*structs.Allocation

	// filtered is the set of allocations that were not pulled because their
	// AllocModifyIndex didn't change.
	filtered map[string]struct{}

	// migrateTokens is a map of tokens necessary for when clients pull data
	// from authorized volumes
	migrateTokens map[string]string
}

// watchAllocations is used to scan for updates to allocations
func (c *Client) watchAllocations(updates chan *allocUpdates) {
	// The request and response for getting the map of allocations that should
	// be running on the Node to their AllocModifyIndex which is incremented
	// when the allocation is updated by the servers.
	req := structs.NodeSpecificRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		QueryOptions: structs.QueryOptions{
			Region: c.Region(),

			// Make a consistent read query when the client starts
			// to avoid acting on stale data that predates this
			// client state before a client restart.
			//
			// After the first request, only require monotonically
			// increasing state.
			AllowStale: false,
			AuthToken:  c.secretNodeID(),
		},
	}
	var resp structs.NodeClientAllocsResponse

	// The request and response for pulling down the set of allocations that are
	// new, or updated server side.
	allocsReq := structs.AllocsGetRequest{
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
			AuthToken:  c.secretNodeID(),
		},
	}
	var allocsResp structs.AllocsGetResponse

OUTER:
	for {
		// Get the allocation modify index map, blocking for updates. We will
		// use this to determine exactly what allocations need to be downloaded
		// in full.
		resp = structs.NodeClientAllocsResponse{}
		err := c.RPC("Node.GetClientAllocs", &req, &resp)
		if err != nil {
			// Shutdown often causes EOF errors, so check for shutdown first
			select {
			case <-c.shutdownCh:
				return
			default:
			}

			// COMPAT: Remove in 0.6. This is to allow the case in which the
			// servers are not fully upgraded before the clients register. This
			// can cause the SecretID to be lost
			if strings.Contains(err.Error(), "node secret ID does not match") {
				c.logger.Debug("secret mismatch; re-registering node", "error", err)
				c.retryRegisterNode()
			} else if err != noServersErr {
				c.logger.Error("error querying node allocations", "error", err)
			}
			retry := c.retryIntv(getAllocRetryIntv)
			select {
			case <-c.rpcRetryWatcher():
				continue
			case <-time.After(retry):
				continue
			case <-c.shutdownCh:
				return
			}
		}

		// Check for shutdown
		select {
		case <-c.shutdownCh:
			return
		default:
		}

		// Filter all allocations whose AllocModifyIndex was not incremented.
		// These are the allocations that have either not been updated, or whose
		// updates are a result of the client sending an update for the alloc.
		// This lets us reduce the network traffic to the server as we don't
		// need to pull all the allocations.
		var pull []string
		filtered := make(map[string]struct{})
		var pullIndex uint64
		for allocID, modifyIndex := range resp.Allocs {
			// Pull the allocation if we don't have an alloc runner for the
			// allocation or if the alloc runner requires an updated allocation.
			//XXX Part of Client alloc index tracking exp
			c.allocLock.RLock()
			currentAR, ok := c.allocs[allocID]
			c.allocLock.RUnlock()

			// Ignore alloc updates for allocs that are invalid because of initialization errors
			c.invalidAllocsLock.Lock()
			_, isInvalid := c.invalidAllocs[allocID]
			c.invalidAllocsLock.Unlock()

			if (!ok || modifyIndex > currentAR.Alloc().AllocModifyIndex) && !isInvalid {
				// Only pull allocs that are required. Filtered
				// allocs might be at a higher index, so ignore
				// it.
				if modifyIndex > pullIndex {
					pullIndex = modifyIndex
				}
				pull = append(pull, allocID)
			} else {
				filtered[allocID] = struct{}{}
			}
		}

		// Pull the allocations that passed filtering.
		allocsResp.Allocs = nil
		var pulledAllocs map[string]*structs.Allocation
		if len(pull) != 0 {
			// Pull the allocations that need to be updated.
			allocsReq.AllocIDs = pull
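			// Setting MinQueryIndex to pullIndex-1 makes this a
			// blocking query: a stale follower won't answer until it
			// has replicated at least up to the index we just observed,
			// while a caught-up server replies immediately.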
			allocsReq.MinQueryIndex = pullIndex - 1
			allocsResp = structs.AllocsGetResponse{}
			if err := c.RPC("Alloc.GetAllocs", &allocsReq, &allocsResp); err != nil {
				c.logger.Error("error querying updated allocations", "error", err)
				retry := c.retryIntv(getAllocRetryIntv)
				select {
				case <-c.rpcRetryWatcher():
					continue
				case <-time.After(retry):
					continue
				case <-c.shutdownCh:
					return
				}
			}

			// Ensure that we received all the allocations we wanted
			pulledAllocs = make(map[string]*structs.Allocation, len(allocsResp.Allocs))
			for _, alloc := range allocsResp.Allocs {

				// handle an old Server
				alloc.Canonicalize()

				pulledAllocs[alloc.ID] = alloc
			}

			for _, desiredID := range pull {
				if _, ok := pulledAllocs[desiredID]; !ok {
					// We didn't get everything we wanted. Do not update the
					// MinQueryIndex, sleep and then retry.
					wait := c.retryIntv(2 * time.Second)
					select {
					case <-time.After(wait):
						// Wait for the server we contact to receive the
						// allocations
						continue OUTER
					case <-c.shutdownCh:
						return
					}
				}
			}

			// Check for shutdown
			select {
			case <-c.shutdownCh:
				return
			default:
			}
		}

		c.logger.Debug("updated allocations", "index", resp.Index,
			"total", len(resp.Allocs), "pulled", len(allocsResp.Allocs), "filtered", len(filtered))

		// After the first request, only require monotonically increasing state.
		req.AllowStale = true
		if resp.Index > req.MinQueryIndex {
			req.MinQueryIndex = resp.Index
		}

		// Push the updates.
		update := &allocUpdates{
			filtered:      filtered,
			pulled:        pulledAllocs,
			migrateTokens: resp.MigrateTokens,
		}

		select {
		case updates <- update:
		case <-c.shutdownCh:
			return
		}
	}
}

// updateNode signals the client to send the updated
// Node to the server.
func (c *Client) updateNode() {
	select {
	case c.triggerNodeUpdate <- struct{}{}:
		// Node update goroutine was released to execute
	default:
		// Node update goroutine was already running
	}
}

// watchNodeUpdates blocks until it is edge triggered. Once triggered,
// it will update the client node copy and re-register the node.
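//
// Triggers that arrive while the timer is already armed are coalesced via
// hasChanged, so a burst of node changes results in a single
// re-registration once nodeUpdateRetryIntv elapses.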
func (c *Client) watchNodeUpdates() {
	var hasChanged bool

	timer := stoppedTimer()
	defer timer.Stop()

	for {
		select {
		case <-timer.C:
			c.logger.Debug("state changed, updating node and re-registering")
			c.retryRegisterNode()
			hasChanged = false
		case <-c.triggerNodeUpdate:
			if hasChanged {
				continue
			}
			hasChanged = true
			timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
		case <-c.shutdownCh:
			return
		}
	}
}

// runAllocs is invoked when we get an updated set of allocations
func (c *Client) runAllocs(update *allocUpdates) {
	// Get the existing allocs
	c.allocLock.RLock()
	existing := make(map[string]uint64, len(c.allocs))
	for id, ar := range c.allocs {
		existing[id] = ar.Alloc().AllocModifyIndex
	}
	c.allocLock.RUnlock()

	// Diff the existing and updated allocations
	diff := diffAllocs(existing, update)
	c.logger.Debug("allocation updates", "added", len(diff.added), "removed", len(diff.removed),
		"updated", len(diff.updated), "ignored", len(diff.ignore))

	errs := 0
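
	// Removals and updates are applied before additions; among other
	// things this gives the garbage collector a chance to reclaim disk
	// from removed allocs before MakeRoomFor tries to fit the added ones.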
	// Remove the old allocations
	for _, remove := range diff.removed {
		c.removeAlloc(remove)
	}

	// Update the existing allocations
	for _, update := range diff.updated {
		c.updateAlloc(update)
	}

	// Make room for new allocations before running
	if err := c.garbageCollector.MakeRoomFor(diff.added); err != nil {
		c.logger.Error("error making room for new allocations", "error", err)
		errs++
	}

	// Start the new allocations
	for _, add := range diff.added {
		migrateToken := update.migrateTokens[add.ID]
		if err := c.addAlloc(add, migrateToken); err != nil {
			c.logger.Error("error adding alloc", "error", err, "alloc_id", add.ID)
			errs++
			// We mark the alloc as failed and send an update to the server.
			// We track the fact that creating an allocrunner failed so that
			// we don't send updates again.
			if add.ClientStatus != structs.AllocClientStatusFailed {
				c.handleInvalidAllocs(add, err)
			}
		}
	}

	// Mark servers as having been contacted so blocked tasks that failed
	// to restore can now restart.
	c.serversContactedOnce.Do(func() {
		close(c.serversContactedCh)
	})

	// Trigger the GC once more now that new allocs are started that could
	// have caused thresholds to be exceeded
	c.garbageCollector.Trigger()
	c.logger.Debug("allocation updates applied", "added", len(diff.added), "removed", len(diff.removed),
		"updated", len(diff.updated), "ignored", len(diff.ignore), "errors", errs)
}

// makeFailedAlloc creates a stripped down version of the allocation passed in
// with its status set to failed and other fields needed for the server to be
// able to examine deployment and task states
func makeFailedAlloc(add *structs.Allocation, err error) *structs.Allocation {
	stripped := new(structs.Allocation)
	stripped.ID = add.ID
	stripped.NodeID = add.NodeID
	stripped.ClientStatus = structs.AllocClientStatusFailed
	stripped.ClientDescription = fmt.Sprintf("Unable to add allocation due to error: %v", err)

	// Copy task states if they exist in the original allocation
	if add.TaskStates != nil {
		stripped.TaskStates = add.TaskStates
	} else {
		stripped.TaskStates = make(map[string]*structs.TaskState)
	}

	failTime := time.Now()
	if add.DeploymentStatus.HasHealth() {
		// Never change deployment health once it has been set
		stripped.DeploymentStatus = add.DeploymentStatus.Copy()
	} else {
		stripped.DeploymentStatus = &structs.AllocDeploymentStatus{
			Healthy:   pointer.Of(false),
			Timestamp: failTime,
		}
	}

	taskGroup := add.Job.LookupTaskGroup(add.TaskGroup)
	if taskGroup == nil {
		return stripped
	}
	for _, task := range taskGroup.Tasks {
		ts, ok := stripped.TaskStates[task.Name]
		if !ok {
			ts = &structs.TaskState{}
			stripped.TaskStates[task.Name] = ts
		}
		if ts.FinishedAt.IsZero() {
			ts.FinishedAt = failTime
		}
	}
	return stripped
}

// removeAlloc is invoked when we should remove an allocation because it has
// been removed by the server.
func (c *Client) removeAlloc(allocID string) {
	c.allocLock.Lock()
	defer c.allocLock.Unlock()

	ar, ok := c.allocs[allocID]
	if !ok {
		c.invalidAllocsLock.Lock()
		if _, ok := c.invalidAllocs[allocID]; ok {
			// Removing from invalid allocs map if present
			delete(c.invalidAllocs, allocID)
		} else {
			// Alloc is unknown, log a warning.
			c.logger.Warn("cannot remove nonexistent alloc", "alloc_id", allocID, "error", "alloc not found")
		}
		c.invalidAllocsLock.Unlock()
		return
	}

	// Stop tracking alloc runner as it's been GC'd by the server
	delete(c.allocs, allocID)

	// Ensure the GC has a reference and then collect. Collecting through the GC
	// applies rate limiting
	c.garbageCollector.MarkForCollection(allocID, ar)

	// GC immediately since the server has GC'd it
	go c.garbageCollector.Collect(allocID)
}

// updateAlloc is invoked when we should update an allocation
func (c *Client) updateAlloc(update *structs.Allocation) {
	ar, err := c.getAllocRunner(update.ID)
	if err != nil {
		c.logger.Warn("cannot update nonexistent alloc", "alloc_id", update.ID)
		return
	}

	// Reconnect unknown allocations if they were updated and are not terminal.
	reconnect := update.ClientStatus == structs.AllocClientStatusUnknown &&
		update.AllocModifyIndex > ar.Alloc().AllocModifyIndex &&
		!update.ServerTerminalStatus()
	if reconnect {
		err = ar.Reconnect(update)
		if err != nil {
			c.logger.Error("error reconnecting alloc", "alloc_id", update.ID, "alloc_modify_index", update.AllocModifyIndex, "error", err)
		}
		return
	}

	// Update local copy of alloc
	if err := c.stateDB.PutAllocation(update); err != nil {
		c.logger.Error("error persisting updated alloc locally", "error", err, "alloc_id", update.ID)
	}

	// Update alloc runner
	ar.Update(update)
}

// addAlloc is invoked when we should add an allocation
func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error {
	c.allocLock.Lock()
	defer c.allocLock.Unlock()

	// Check if we already have an alloc runner
	if _, ok := c.allocs[alloc.ID]; ok {
		c.logger.Debug("dropping duplicate add allocation request", "alloc_id", alloc.ID)
		return nil
	}

	// Initialize local copy of alloc before creating the alloc runner so
	// we can't end up with an alloc runner that does not have an alloc.
	if err := c.stateDB.PutAllocation(alloc); err != nil {
		return err
	}

	// Collect any preempted allocations to pass into the previous alloc watcher
	var preemptedAllocs map[string]allocwatcher.AllocRunnerMeta
	if len(alloc.PreemptedAllocations) > 0 {
		preemptedAllocs = make(map[string]allocwatcher.AllocRunnerMeta)
		for _, palloc := range alloc.PreemptedAllocations {
			preemptedAllocs[palloc] = c.allocs[palloc]
		}
	}

	// Since only the Client has access to other AllocRunners and the RPC
	// client, create the previous allocation watcher here.
	watcherConfig := allocwatcher.Config{
		Alloc:            alloc,
		PreviousRunner:   c.allocs[alloc.PreviousAllocation],
		PreemptedRunners: preemptedAllocs,
		RPC:              c,
		Config:           c.GetConfig(),
		MigrateToken:     migrateToken,
		Logger:           c.logger,
	}
	prevAllocWatcher, prevAllocMigrator := allocwatcher.NewAllocWatcher(watcherConfig)

	arConf := &config.AllocRunnerConfig{
		Alloc:               alloc,
		Logger:              c.logger,
		ClientConfig:        c.GetConfig(),
		StateDB:             c.stateDB,
		Consul:              c.consulService,
		ConsulProxies:       c.consulProxies,
		ConsulSI:            c.tokensClient,
		Vault:               c.vaultClient,
		StateUpdater:        c,
		DeviceStatsReporter: c,
		PrevAllocWatcher:    prevAllocWatcher,
		PrevAllocMigrator:   prevAllocMigrator,
		DynamicRegistry:     c.dynamicRegistry,
		CSIManager:          c.csimanager,
		CpusetManager:       c.cpusetManager,
		DeviceManager:       c.devicemanager,
		DriverManager:       c.drivermanager,
		ServiceRegWrapper:   c.serviceRegWrapper,
		CheckStore:          c.checkStore,
		RPCClient:           c,
		Getter:              c.getter,
	}

	ar, err := c.allocrunnerFactory(arConf)
	if err != nil {
		return err
	}

	// Store the alloc runner.
	c.allocs[alloc.ID] = ar

	// Maybe mark the alloc for halt on missing server heartbeats
	c.heartbeatStop.allocHook(alloc)

	go ar.Run()
	return nil
}

// setupConsulTokenClient configures a tokenClient for managing consul service
// identity tokens.
func (c *Client) setupConsulTokenClient() error {
	tc := consulApi.NewIdentitiesClient(c.logger, c.deriveSIToken)
	c.tokensClient = tc
	return nil
}

// setupVaultClient creates an object to periodically renew tokens and secrets
// with vault.
func (c *Client) setupVaultClient() error {
	var err error
	c.vaultClient, err = vaultclient.NewVaultClient(c.GetConfig().VaultConfig, c.logger, c.deriveToken)
	if err != nil {
		return err
	}

	if c.vaultClient == nil {
		c.logger.Error("failed to create vault client")
		return fmt.Errorf("failed to create vault client")
	}

	// Start renewing tokens and secrets
	c.vaultClient.Start()

	return nil
}

// setupNomadServiceRegistrationHandler sets up the registration handler to use
// for native service discovery.
func (c *Client) setupNomadServiceRegistrationHandler() {
	cfg := nsd.ServiceRegistrationHandlerCfg{
		Datacenter: c.Datacenter(),
		Enabled:    c.GetConfig().NomadServiceDiscovery,
		NodeID:     c.NodeID(),
		NodeSecret: c.secretNodeID(),
		Region:     c.Region(),
		RPCFn:      c.RPC,
		CheckWatcher: serviceregistration.NewCheckWatcher(
			c.logger, nsd.NewStatusGetter(c.checkStore),
		),
	}
	c.nomadService = nsd.NewServiceRegistrationHandler(c.logger, &cfg)
}

// deriveToken takes in an allocation and a set of tasks and derives vault
// tokens for each of the tasks, unwraps all of them using the supplied vault
// client and returns a map of unwrapped tokens, indexed by the task name.
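//
// The server replies with Vault response-wrapping tokens rather than the
// secrets themselves; each one is unwrapped here directly against Vault,
// so the task's real token is only ever visible to this client (contrast
// with deriveSIToken below).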
func (c *Client) deriveToken(alloc *structs.Allocation, taskNames []string, vclient *vaultapi.Client) (map[string]string, error) {
	vlogger := c.logger.Named("vault")

	verifiedTasks, err := verifiedTasks(vlogger, alloc, taskNames)
	if err != nil {
		return nil, err
	}

	// DeriveVaultToken of nomad server can take in a set of tasks and
	// creates tokens for all the tasks.
	req := &structs.DeriveVaultTokenRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		AllocID:  alloc.ID,
		Tasks:    verifiedTasks,
		QueryOptions: structs.QueryOptions{
			Region:        c.Region(),
			AllowStale:    false,
			MinQueryIndex: alloc.CreateIndex,
			AuthToken:     c.secretNodeID(),
		},
	}

	// Derive the tokens
	// namespace is handled via nomad/vault
	var resp structs.DeriveVaultTokenResponse
	if err := c.RPC("Node.DeriveVaultToken", &req, &resp); err != nil {
		vlogger.Error("error making derive token RPC", "error", err)
		return nil, fmt.Errorf("DeriveVaultToken RPC failed: %v", err)
	}
	if resp.Error != nil {
		vlogger.Error("error deriving vault tokens", "error", resp.Error)
		return nil, structs.NewWrappedServerError(resp.Error)
	}
	if resp.Tasks == nil {
		vlogger.Error("error deriving vault token", "error", "invalid response")
		return nil, fmt.Errorf("failed to derive vault tokens: invalid response")
	}

	unwrappedTokens := make(map[string]string)

	// Retrieve the wrapped tokens from the response and unwrap them
	for _, taskName := range verifiedTasks {
		// Get the wrapped token
		wrappedToken, ok := resp.Tasks[taskName]
		if !ok {
			vlogger.Error("wrapped token missing for task", "task_name", taskName)
			return nil, fmt.Errorf("wrapped token missing for task %q", taskName)
		}

		// Unwrap the vault token
		unwrapResp, err := vclient.Logical().Unwrap(wrappedToken)
		if err != nil {
			if structs.VaultUnrecoverableError.MatchString(err.Error()) {
				return nil, err
			}

			// The error is recoverable
			return nil, structs.NewRecoverableError(
				fmt.Errorf("failed to unwrap the token for task %q: %v", taskName, err), true)
		}

		// Validate the response
		var validationErr error
		if unwrapResp == nil {
			validationErr = fmt.Errorf("Vault returned nil secret when unwrapping")
		} else if unwrapResp.Auth == nil {
			validationErr = fmt.Errorf("Vault returned unwrap secret with nil Auth. Secret warnings: %v", unwrapResp.Warnings)
		} else if unwrapResp.Auth.ClientToken == "" {
			validationErr = fmt.Errorf("Vault returned unwrap secret with empty Auth.ClientToken. Secret warnings: %v", unwrapResp.Warnings)
		}
		if validationErr != nil {
			vlogger.Warn("error unwrapping token", "error", validationErr)
			return nil, structs.NewRecoverableError(validationErr, true)
		}

		// Append the unwrapped token to the return value
		unwrappedTokens[taskName] = unwrapResp.Auth.ClientToken
	}

	return unwrappedTokens, nil
}

// deriveSIToken takes an allocation and a set of tasks and derives Consul
// Service Identity tokens for each of the tasks by requesting them from the
// Nomad Server.
func (c *Client) deriveSIToken(alloc *structs.Allocation, taskNames []string) (map[string]string, error) {
	tasks, err := verifiedTasks(c.logger, alloc, taskNames)
	if err != nil {
		return nil, err
	}

	req := &structs.DeriveSITokenRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region:    c.Region(),
			AuthToken: c.secretNodeID(),
		},
	}

	// Nicely ask Nomad Server for the tokens.
	var resp structs.DeriveSITokenResponse
	if err := c.RPC("Node.DeriveSIToken", &req, &resp); err != nil {
		c.logger.Error("error making derive token RPC", "error", err)
		return nil, fmt.Errorf("DeriveSIToken RPC failed: %v", err)
	}
	if err := resp.Error; err != nil {
		c.logger.Error("error deriving SI tokens", "error", err)
		return nil, structs.NewWrappedServerError(err)
	}
	if len(resp.Tokens) == 0 {
		c.logger.Error("error deriving SI tokens", "error", "invalid_response")
		return nil, fmt.Errorf("failed to derive SI tokens: invalid response")
	}

	// NOTE: Unlike with the Vault integration, Nomad Server replies with the
	// actual Consul SI token (.SecretID), because otherwise each Nomad
	// Client would need to be blessed with 'acl:write' permissions to read the
	// secret value given the .AccessorID, which does not fit well in the Consul
	// security model.
	//
	// https://www.consul.io/api/acl/tokens.html#read-a-token
	// https://www.consul.io/docs/internals/security.html

	m := maps.Clone(resp.Tokens)
	return m, nil
}

// verifiedTasks asserts each task in taskNames actually exists in the given alloc,
// otherwise an error is returned.
func verifiedTasks(logger hclog.Logger, alloc *structs.Allocation, taskNames []string) ([]string, error) {
	if alloc == nil {
		return nil, fmt.Errorf("nil allocation")
	}

	if len(taskNames) == 0 {
		return nil, fmt.Errorf("missing task names")
	}

	group := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if group == nil {
		return nil, fmt.Errorf("group name in allocation is not present in job")
	}

	verifiedTasks := make([]string, 0, len(taskNames))

	// confirm the requested task names actually exist in the allocation
	for _, taskName := range taskNames {
		if !taskIsPresent(taskName, group.Tasks) {
			logger.Error("task not found in the allocation", "task_name", taskName)
			return nil, fmt.Errorf("task %q not found in allocation", taskName)
		}
		verifiedTasks = append(verifiedTasks, taskName)
	}

	return verifiedTasks, nil
}

func taskIsPresent(taskName string, tasks []*structs.Task) bool {
	for _, task := range tasks {
		if task.Name == taskName {
			return true
		}
	}
	return false
}

// triggerDiscovery causes a Consul discovery to begin (if one hasn't already)
func (c *Client) triggerDiscovery() {
	config := c.GetConfig()
	if config.ConsulConfig.ClientAutoJoin != nil && *config.ConsulConfig.ClientAutoJoin {
		select {
		case c.triggerDiscoveryCh <- struct{}{}:
			// Discovery goroutine was released to execute
		default:
			// Discovery goroutine was already running
		}
	}
}

// consulDiscovery waits for the signal to attempt server discovery via Consul.
// It's intended to be started in a goroutine. See triggerDiscovery() for
// causing consul discovery from other code locations.
func (c *Client) consulDiscovery() {
	for {
		select {
		case <-c.triggerDiscoveryCh:
			if err := c.consulDiscoveryImpl(); err != nil {
				c.logger.Error("error discovering nomad servers", "error", err)
			}
		case <-c.shutdownCh:
			return
		}
	}
}

func (c *Client) consulDiscoveryImpl() error {
	consulLogger := c.logger.Named("consul")

	dcs, err := c.consulCatalog.Datacenters()
	if err != nil {
		return fmt.Errorf("client.consul: unable to query Consul datacenters: %v", err)
	}
	if len(dcs) > 2 {
		// Query the local DC first, then shuffle the
		// remaining DCs. Future heartbeats will cause Nomad
		// Clients to fixate on their local datacenter so
		// it's okay to talk with remote DCs. If no
		// Nomad servers are available within
		// datacenterQueryLimit, the next heartbeat will pick
		// a new set of servers so it's okay.
		shuffleStrings(dcs[1:])
		dcs = dcs[0:min(len(dcs), datacenterQueryLimit)]
	}

	serviceName := c.GetConfig().ConsulConfig.ServerServiceName
	var mErr multierror.Error
	var nomadServers servers.Servers
	consulLogger.Debug("bootstrap contacting Consul DCs", "consul_dcs", dcs)
DISCOLOOP:
	for _, dc := range dcs {
		consulOpts := &consulapi.QueryOptions{
			AllowStale: true,
			Datacenter: dc,
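			// "_agent" asks Consul to sort results by estimated
			// round-trip time from the local agent, so nearby
			// servers are tried first.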
			Near:       "_agent",
			WaitTime:   consul.DefaultQueryWaitDuration,
		}
		consulServices, _, err := c.consulCatalog.Service(serviceName, consul.ServiceTagRPC, consulOpts)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unable to query service %+q from Consul datacenter %+q: %v", serviceName, dc, err))
			continue
		}

		for _, s := range consulServices {
			port := strconv.Itoa(s.ServicePort)
			addrstr := s.ServiceAddress
			if addrstr == "" {
				addrstr = s.Address
			}
			addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(addrstr, port))
			if err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}

			srv := &servers.Server{Addr: addr}
			nomadServers = append(nomadServers, srv)
		}

		if len(nomadServers) > 0 {
			break DISCOLOOP
		}
	}
	if len(nomadServers) == 0 {
		if len(mErr.Errors) > 0 {
			return mErr.ErrorOrNil()
		}
		return fmt.Errorf("no Nomad Servers advertising service %q in Consul datacenters: %+q", serviceName, dcs)
	}

	consulLogger.Info("discovered following servers", "servers", nomadServers)

	// Fire the retry trigger if we have updated the set of servers.
	if c.servers.SetServers(nomadServers) {
		// Start rebalancing
		c.servers.RebalanceServers()

		// Notify waiting rpc calls. If a goroutine just failed an RPC call and
		// isn't receiving on this chan yet they'll still retry eventually.
		// This is a shortcircuit for the longer retry intervals.
		c.fireRpcRetryWatcher()
	}

	return nil
}

// emitStats collects host resource usage stats periodically
func (c *Client) emitStats() {
	// Determine the node class to emit
	var emittedNodeClass string
	if emittedNodeClass = c.Node().NodeClass; emittedNodeClass == "" {
		emittedNodeClass = "none"
	}

	// Assign labels directly before emitting stats so the expected
	// information is ready
	c.baseLabels = []metrics.Label{
		{Name: "node_id", Value: c.NodeID()},
		{Name: "datacenter", Value: c.Datacenter()},
		{Name: "node_class", Value: emittedNodeClass},
		{Name: "node_pool", Value: c.Node().NodePool},
	}

	// Start collecting host stats right away and then keep collecting every
	// collection interval
	next := time.NewTimer(0)
	defer next.Stop()
	for {
		config := c.GetConfig()
		select {
		case <-next.C:
			err := c.hostStatsCollector.Collect()
			next.Reset(config.StatsCollectionInterval)
			if err != nil {
				c.logger.Warn("error fetching host resource usage stats", "error", err)
			} else if config.PublishNodeMetrics {
				// Publish node metrics if the operator has opted in
				c.emitHostStats()
			}

			c.emitClientMetrics()
		case <-c.shutdownCh:
			return
		}
	}
}
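
// exampleResettableInterval is an illustrative sketch, not called by the
// client, of the timer pattern emitStats uses: a Timer that fires
// immediately and is reset after every collection, rather than a Ticker,
// so that a changed StatsCollectionInterval read from c.GetConfig() takes
// effect on the very next iteration. The parameter names are assumptions.
func exampleResettableInterval(interval func() time.Duration, stop <-chan struct{}, work func()) {
	next := time.NewTimer(0) // fire immediately on the first pass
	defer next.Stop()
	for {
		select {
		case <-next.C:
			work()
			next.Reset(interval()) // re-read the (possibly updated) interval
		case <-stop:
			return
		}
	}
}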

// setGaugeForMemoryStats proxies metrics for memory-specific statistics
func (c *Client) setGaugeForMemoryStats(nodeID string, hStats *stats.HostStats, baseLabels []metrics.Label) {
	metrics.SetGaugeWithLabels([]string{"client", "host", "memory", "total"}, float32(hStats.Memory.Total), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "host", "memory", "available"}, float32(hStats.Memory.Available), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "host", "memory", "used"}, float32(hStats.Memory.Used), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "host", "memory", "free"}, float32(hStats.Memory.Free), baseLabels)
}

// setGaugeForCPUStats proxies metrics for CPU-specific statistics
func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats, baseLabels []metrics.Label) {
	labels := make([]metrics.Label, len(baseLabels))
	copy(labels, baseLabels)

	for _, cpu := range hStats.CPU {
		labels := append(labels, metrics.Label{
			Name:  "cpu",
			Value: cpu.CPU,
		})

		// Keep "total" around to remain compatible with older consumers of
		// the metrics
		metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "total"}, float32(cpu.TotalPercent), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "total_percent"}, float32(cpu.TotalPercent), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "total_ticks"}, float32(cpu.TotalTicks), labels)
		metrics.IncrCounterWithLabels([]string{"client", "host", "cpu", "total_ticks_count"}, float32(cpu.TotalTicks), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "user"}, float32(cpu.User), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "idle"}, float32(cpu.Idle), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "system"}, float32(cpu.System), labels)
	}
}
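
// appendLabelCopy is an illustrative sketch, not part of the client API,
// of why setGaugeForCPUStats copies baseLabels before appending: append
// can write into a shared backing array, so appending per-CPU labels
// directly to baseLabels could let one iteration's label bleed into
// slices held elsewhere. Copying first gives each append a private array.
func appendLabelCopy(base []metrics.Label, name, value string) []metrics.Label {
	out := make([]metrics.Label, len(base), len(base)+1)
	copy(out, base)
	return append(out, metrics.Label{Name: name, Value: value})
}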

// setGaugeForDiskStats proxies metrics for disk-specific statistics
func (c *Client) setGaugeForDiskStats(nodeID string, hStats *stats.HostStats, baseLabels []metrics.Label) {
	labels := make([]metrics.Label, len(baseLabels))
	copy(labels, baseLabels)

	for _, disk := range hStats.DiskStats {
		labels := append(labels, metrics.Label{
			Name:  "disk",
			Value: disk.Device,
		})

		metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "size"}, float32(disk.Size), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "used"}, float32(disk.Used), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "available"}, float32(disk.Available), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "used_percent"}, float32(disk.UsedPercent), labels)
		metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "inodes_percent"}, float32(disk.InodesUsedPercent), labels)
	}
}

// setGaugeForAllocationStats proxies metrics for allocation-specific statistics
func (c *Client) setGaugeForAllocationStats(nodeID string, baseLabels []metrics.Label) {
	node := c.GetConfig().Node
	total := node.NodeResources
	res := node.ReservedResources
	allocated := c.getAllocatedResources(node)

	// Emit allocated
	metrics.SetGaugeWithLabels([]string{"client", "allocated", "memory"}, float32(allocated.Flattened.Memory.MemoryMB), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "allocated", "max_memory"}, float32(allocated.Flattened.Memory.MemoryMaxMB), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "allocated", "disk"}, float32(allocated.Shared.DiskMB), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "allocated", "cpu"}, float32(allocated.Flattened.Cpu.CpuShares), baseLabels)

	for _, n := range allocated.Flattened.Networks {
		labels := append(baseLabels, metrics.Label{ //nolint:gocritic
			Name:  "device",
			Value: n.Device,
		})
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "network"}, float32(n.MBits), labels)
	}

	// Emit unallocated
	unallocatedMem := total.Memory.MemoryMB - res.Memory.MemoryMB - allocated.Flattened.Memory.MemoryMB
	unallocatedDisk := total.Disk.DiskMB - res.Disk.DiskMB - allocated.Shared.DiskMB
	unallocatedCpu := total.Cpu.CpuShares - res.Cpu.CpuShares - allocated.Flattened.Cpu.CpuShares

	metrics.SetGaugeWithLabels([]string{"client", "unallocated", "memory"}, float32(unallocatedMem), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "unallocated", "disk"}, float32(unallocatedDisk), baseLabels)
	metrics.SetGaugeWithLabels([]string{"client", "unallocated", "cpu"}, float32(unallocatedCpu), baseLabels)

	totalComparable := total.Comparable()
	for _, n := range totalComparable.Flattened.Networks {
		// Determine the used resources
		var usedMbits int
		totalIdx := allocated.Flattened.Networks.NetIndex(n)
		if totalIdx != -1 {
			usedMbits = allocated.Flattened.Networks[totalIdx].MBits
		}

		unallocatedMbits := n.MBits - usedMbits
		labels := append(baseLabels, metrics.Label{ //nolint:gocritic
			Name:  "device",
			Value: n.Device,
		})
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "network"}, float32(unallocatedMbits), labels)
	}
}

// setGaugeForUptime proxies the host uptime metric
func (c *Client) setGaugeForUptime(hStats *stats.HostStats, baseLabels []metrics.Label) {
	metrics.SetGaugeWithLabels([]string{"client", "uptime"}, float32(hStats.Uptime), baseLabels)
}

// emitHostStats pushes host resource usage stats to remote metrics collection sinks
func (c *Client) emitHostStats() {
	nodeID := c.NodeID()
	hStats := c.hostStatsCollector.Stats()
	labels := c.labels()

	c.setGaugeForMemoryStats(nodeID, hStats, labels)
	c.setGaugeForUptime(hStats, labels)
	c.setGaugeForCPUStats(nodeID, hStats, labels)
	c.setGaugeForDiskStats(nodeID, hStats, labels)
}

// emitClientMetrics emits lower volume client metrics
func (c *Client) emitClientMetrics() {
	nodeID := c.NodeID()
	labels := c.labels()

	c.setGaugeForAllocationStats(nodeID, labels)

	// Emit allocation metrics
	blocked, migrating, pending, running, terminal := 0, 0, 0, 0, 0
	for _, ar := range c.getAllocRunners() {
		switch ar.AllocState().ClientStatus {
		case structs.AllocClientStatusPending:
			switch {
			case ar.IsWaiting():
				blocked++
			case ar.IsMigrating():
				migrating++
			default:
				pending++
			}
		case structs.AllocClientStatusRunning:
			running++
		case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed:
			terminal++
		}
	}

	metrics.SetGaugeWithLabels([]string{"client", "allocations", "migrating"}, float32(migrating), labels)
	metrics.SetGaugeWithLabels([]string{"client", "allocations", "blocked"}, float32(blocked), labels)
	metrics.SetGaugeWithLabels([]string{"client", "allocations", "pending"}, float32(pending), labels)
	metrics.SetGaugeWithLabels([]string{"client", "allocations", "running"}, float32(running), labels)
	metrics.SetGaugeWithLabels([]string{"client", "allocations", "terminal"}, float32(terminal), labels)
}

// labels takes the base labels and appends the node state
func (c *Client) labels() []metrics.Label {
	node := c.Node()

	return append(c.baseLabels,
		metrics.Label{Name: "node_status", Value: node.Status},
		metrics.Label{Name: "node_scheduling_eligibility", Value: node.SchedulingEligibility},
	)
}

func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.ComparableResources {
	// Unfortunately the allocs only have an IP, so we need to match them to
	// the device
	cidrToDevice := make(map[*net.IPNet]string, len(selfNode.NodeResources.Networks))
	for _, n := range selfNode.NodeResources.Networks {
		_, ipnet, err := net.ParseCIDR(n.CIDR)
		if err != nil {
			continue
		}
		cidrToDevice[ipnet] = n.Device
	}

	// Sum the allocated resources
	var allocated structs.ComparableResources
	allocatedDeviceMbits := make(map[string]int)
	for _, ar := range c.getAllocRunners() {
		alloc := ar.Alloc()
		if alloc.ServerTerminalStatus() || ar.AllocState().ClientTerminalStatus() {
			continue
		}

		// Add the resources
		// COMPAT(0.11): Just use the allocated resources
		allocated.Add(alloc.ComparableResources())

		// Add the used network
		if alloc.AllocatedResources != nil {
			for _, tr := range alloc.AllocatedResources.Tasks {
				for _, allocatedNetwork := range tr.Networks {
					for cidr, dev := range cidrToDevice {
						ip := net.ParseIP(allocatedNetwork.IP)
						if cidr.Contains(ip) {
							allocatedDeviceMbits[dev] += allocatedNetwork.MBits
							break
						}
					}
				}
			}
		} else if alloc.Resources != nil {
			for _, allocatedNetwork := range alloc.Resources.Networks {
				for cidr, dev := range cidrToDevice {
					ip := net.ParseIP(allocatedNetwork.IP)
					if cidr.Contains(ip) {
						allocatedDeviceMbits[dev] += allocatedNetwork.MBits
						break
					}
				}
			}
		}
	}

	// Clear the networks
	allocated.Flattened.Networks = nil
	for dev, speed := range allocatedDeviceMbits {
		net := &structs.NetworkResource{
			Device: dev,
			MBits:  speed,
		}
		allocated.Flattened.Networks = append(allocated.Flattened.Networks, net)
	}

	return &allocated
}
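
// deviceForIP is an illustrative sketch, not called by the client, of the
// CIDR match getAllocatedResources performs above: given the node's
// CIDR-to-device table, report which device an allocation's IP belongs to.
func deviceForIP(cidrToDevice map[*net.IPNet]string, ipStr string) (string, bool) {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		return "", false
	}
	for cidr, dev := range cidrToDevice {
		if cidr.Contains(ip) {
			return dev, true
		}
	}
	return "", false
}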

// GetTaskEventHandler returns an event handler for the given allocID and task name
func (c *Client) GetTaskEventHandler(allocID, taskName string) drivermanager.EventHandler {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()
	if ar, ok := c.allocs[allocID]; ok {
		return ar.GetTaskEventHandler(taskName)
	}
	return nil
}

// group wraps a func() in a goroutine and provides a way to block until it
// exits. Inspired by https://godoc.org/golang.org/x/sync/errgroup
type group struct {
	wg sync.WaitGroup
}

// Go starts f in a goroutine and must be called before Wait.
func (g *group) Go(f func()) {
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()
		f()
	}()
}

// AddCh adds a goroutine that blocks until ch is closed or receives a value.
// Like Go, it must be called before Wait.
func (g *group) AddCh(ch <-chan struct{}) {
	g.Go(func() {
		<-ch
	})
}

// Wait for all goroutines to exit. Must be called after all calls to Go
// complete.
func (g *group) Wait() {
	g.wg.Wait()
}
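
// exampleGroupUsage is an illustrative sketch, not called by the client,
// showing the intended lifecycle of group: register goroutines with Go
// and AddCh, then block on Wait once everything has been registered.
func exampleGroupUsage() {
	var g group
	done := make(chan struct{})

	g.Go(func() {
		// long-running work would go here
	})
	g.AddCh(done) // Wait also blocks until done is closed

	close(done)
	g.Wait() // returns once both goroutines have exited
}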

// pendingClientUpdates are the set of allocation updates that the client is
// waiting to send
type pendingClientUpdates struct {
	updates map[string]*structs.Allocation
	lock    sync.Mutex
}

func newPendingClientUpdates() *pendingClientUpdates {
	return &pendingClientUpdates{
		updates: make(map[string]*structs.Allocation, 64),
	}
}

// add overwrites a pending update. The updates we get from the allocrunner
// are lightweight copies of its *structs.Allocation (i.e. just the client
// state), serialized with an internal lock. So the latest update is always
// the authoritative one, and the server only cares about that one.
func (p *pendingClientUpdates) add(alloc *structs.Allocation) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.updates[alloc.ID] = alloc
}

// restore refills the pending updates map, but only if a newer update hasn't
// come in
func (p *pendingClientUpdates) restore(toRestore []*structs.Allocation) {
	p.lock.Lock()
	defer p.lock.Unlock()

	for _, alloc := range toRestore {
		if _, ok := p.updates[alloc.ID]; !ok {
			p.updates[alloc.ID] = alloc
		}
	}
}

// nextBatch returns a list of client allocation updates we need to make in
// this tick of the allocSync. It returns nil if there are no updates to make
// yet. The caller is responsible for calling restore() if it can't
// successfully send the updates.
func (p *pendingClientUpdates) nextBatch(c *Client, updateTicks int) []*structs.Allocation {
	p.lock.Lock()
	defer p.lock.Unlock()

	// Fast path if there are no pending updates
	if len(p.updates) == 0 {
		return nil
	}

	// Ensure we never send an update before we've had at least one sync from
	// the server
	select {
	case <-c.serversContactedCh:
	default:
		return nil
	}

	toSync, urgent := p.filterAcknowledgedUpdatesLocked(c)

	// Only update every 5th tick if there are no priority updates
	if updateTicks%5 != 0 && !urgent {
		return nil
	}

	// Clear here so that allocrunners can queue up the next set of updates
	// while we're waiting to hear from the server
	maps.Clear(p.updates)

	return toSync
}

// filterAcknowledgedUpdatesLocked returns a list of client alloc updates with
// the already-acknowledged updates removed, plus a boolean noting whether any
// of the remaining updates are urgent. Note: this method requires that p.lock
// is held.
func (p *pendingClientUpdates) filterAcknowledgedUpdatesLocked(c *Client) ([]*structs.Allocation, bool) {
	var urgent bool
	sync := make([]*structs.Allocation, 0, len(p.updates))
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	for allocID, update := range p.updates {
		if ar, ok := c.allocs[allocID]; ok {
			switch ar.GetUpdatePriority(update) {
			case cstructs.AllocUpdatePriorityUrgent:
				sync = append(sync, update)
				urgent = true
			case cstructs.AllocUpdatePriorityTypical:
				sync = append(sync, update)
			case cstructs.AllocUpdatePriorityNone:
				// update is dropped
			}
		} else {
			// no allocrunner (typically a failed placement), so we need to
			// send the update
			sync = append(sync, update)
		}
	}
	return sync, urgent
}
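
// examplePendingUpdatesCycle is an illustrative sketch, not called by the
// client, of how a sync loop such as allocSync is expected to drive
// pendingClientUpdates: drain a batch with nextBatch, attempt to send it,
// and restore the batch on failure so no update is lost while any newer
// update that raced in still wins. The send parameter is an assumption
// standing in for the real update RPC.
func examplePendingUpdatesCycle(c *Client, send func([]*structs.Allocation) error) {
	pending := newPendingClientUpdates()
	updateTicks := 0

	for range time.Tick(time.Second) { // stand-in for the sync interval
		updateTicks++
		toSync := pending.nextBatch(c, updateTicks)
		if toSync == nil {
			continue
		}
		if err := send(toSync); err != nil {
			// Put the batch back; updates added while we were sending
			// still take precedence over the restored ones.
			pending.restore(toSync)
		}
	}
}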