package client

import (
	"archive/tar"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/armon/go-metrics"
	"github.com/boltdb/bolt"
	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/go-multierror"
	nomadapi "github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/driver"
	"github.com/hashicorp/nomad/client/fingerprint"
	"github.com/hashicorp/nomad/client/stats"
	"github.com/hashicorp/nomad/client/vaultclient"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/tlsutil"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/structs"
	vaultapi "github.com/hashicorp/vault/api"
	"github.com/mitchellh/hashstructure"
	"github.com/shirou/gopsutil/host"
)

const (
	// clientRPCCache controls how long we keep an idle connection
	// open to a server
	clientRPCCache = 5 * time.Minute

	// clientMaxStreams controls how many idle streams we keep
	// open to a server
	clientMaxStreams = 2

	// datacenterQueryLimit searches through up to this many adjacent
	// datacenters looking for the Nomad server service.
	datacenterQueryLimit = 9

	// registerRetryIntv is minimum interval on which we retry
	// registration. We pick a value between this and 2x this.
	registerRetryIntv = 15 * time.Second

	// getAllocRetryIntv is minimum interval on which we retry
	// to fetch allocations. We pick a value between this and 2x this.
	getAllocRetryIntv = 30 * time.Second

	// devModeRetryIntv is the retry interval used for development
	devModeRetryIntv = time.Second

	// stateSnapshotIntv is how often the client snapshots state
	stateSnapshotIntv = 60 * time.Second

	// initialHeartbeatStagger is used to stagger the interval between
	// starting and the initial heartbeat. After the initial heartbeat,
	// we switch to using the TTL specified by the servers.
	initialHeartbeatStagger = 10 * time.Second

	// nodeUpdateRetryIntv is how often the client checks for updates to the
	// node attributes or meta map.
	nodeUpdateRetryIntv = 5 * time.Second

	// allocSyncIntv is the batching period of allocation updates before they
	// are synced with the server.
	allocSyncIntv = 200 * time.Millisecond

	// allocSyncRetryIntv is the interval on which we retry updating
	// the status of the allocation
	allocSyncRetryIntv = 5 * time.Second
)

// ClientStatsReporter exposes all the APIs related to resource usage of a Nomad
// Client
type ClientStatsReporter interface {
	// GetAllocStats returns the AllocStatsReporter for the passed allocation.
	// If it does not exist an error is reported.
	GetAllocStats(allocID string) (AllocStatsReporter, error)

	// LatestHostStats returns the latest resource usage stats for the host
	LatestHostStats() *stats.HostStats
}

// Client is used to implement the client interaction with Nomad. Clients
// are expected to register as a schedulable node to the servers, and to
// run allocations as determined by the servers.
type Client struct {
	config *config.Config
	start  time.Time

	// stateDB is used to efficiently store client state.
	stateDB *bolt.DB

	// configCopy is a copy that should be passed to alloc-runners.
	configCopy *config.Config
	configLock sync.RWMutex

	logger *log.Logger

	connPool *nomad.ConnPool

	// servers is the (optionally prioritized) list of nomad servers
	servers *serverlist

	// heartbeat related times for tracking how often to heartbeat
	lastHeartbeat time.Time
	heartbeatTTL  time.Duration
	heartbeatLock sync.Mutex

	// triggerDiscoveryCh triggers Consul discovery; see triggerDiscovery
	triggerDiscoveryCh chan struct{}

	// serversDiscoveredCh is ticked whenever Consul discovery completes
	// successfully
	serversDiscoveredCh chan struct{}

	// allocs is the current set of allocations
	allocs    map[string]*AllocRunner
	allocLock sync.RWMutex

	// blockedAllocations are allocations which are blocked because their
	// chained allocations haven't finished running
	blockedAllocations map[string]*structs.Allocation
	blockedAllocsLock  sync.RWMutex

	// migratingAllocs is the set of allocs whose data migration is in flight
	migratingAllocs     map[string]*migrateAllocCtrl
	migratingAllocsLock sync.RWMutex

	// allocUpdates stores allocations that need to be synced to the server.
	allocUpdates chan *structs.Allocation

	// consulService is Nomad's custom Consul client for managing services
	// and checks.
	consulService ConsulServiceAPI

	// consulCatalog is the subset of Consul's Catalog API Nomad uses.
	consulCatalog consul.CatalogAPI

	// hostStatsCollector collects host resource usage stats
	hostStatsCollector *stats.HostStatsCollector

	shutdown     bool
	shutdownCh   chan struct{}
	shutdownLock sync.Mutex

	// vaultClient is used to interact with Vault for token and secret renewals
	vaultClient vaultclient.VaultClient

	// garbageCollector is used to garbage collect terminal allocations present
	// in the node automatically
	garbageCollector *AllocGarbageCollector
}

// migrateAllocCtrl indicates whether migration is complete
type migrateAllocCtrl struct {
	alloc  *structs.Allocation
	ch     chan struct{}
	closed bool
	chLock sync.Mutex
}

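// newMigrateAllocCtrl returns a migrateAllocCtrl for the given allocation with
// its control channel initialized.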
func newMigrateAllocCtrl(alloc *structs.Allocation) *migrateAllocCtrl {
	return &migrateAllocCtrl{
		ch:    make(chan struct{}),
		alloc: alloc,
	}
}

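// closeCh closes the control channel if it has not already been closed.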
func (m *migrateAllocCtrl) closeCh() {
	m.chLock.Lock()
	defer m.chLock.Unlock()

	if m.closed {
		return
	}

	// If channel is not closed then close it
	m.closed = true
	close(m.ch)
}

var (
	// noServersErr is returned by the RPC method when the client has no
	// configured servers. This is used to trigger Consul discovery if
	// enabled.
	noServersErr = errors.New("no servers")
)

// NewClient is used to create a new client from the given configuration
func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulService ConsulServiceAPI, logger *log.Logger) (*Client, error) {
	// Create the tls wrapper
	var tlsWrap tlsutil.RegionWrapper
	if cfg.TLSConfig.EnableRPC {
		tw, err := cfg.TLSConfiguration().OutgoingTLSWrapper()
		if err != nil {
			return nil, err
		}
		tlsWrap = tw
	}

	// Create the client
	c := &Client{
		config:              cfg,
		consulCatalog:       consulCatalog,
		consulService:       consulService,
		start:               time.Now(),
		connPool:            nomad.NewPool(cfg.LogOutput, clientRPCCache, clientMaxStreams, tlsWrap),
		logger:              logger,
		allocs:              make(map[string]*AllocRunner),
		blockedAllocations:  make(map[string]*structs.Allocation),
		allocUpdates:        make(chan *structs.Allocation, 64),
		shutdownCh:          make(chan struct{}),
		migratingAllocs:     make(map[string]*migrateAllocCtrl),
		servers:             newServerList(),
		triggerDiscoveryCh:  make(chan struct{}),
		serversDiscoveredCh: make(chan struct{}),
	}

	// Initialize the client
	if err := c.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize client: %v", err)
	}

	// Add the stats collector
	statsCollector := stats.NewHostStatsCollector(logger, c.config.AllocDir)
	c.hostStatsCollector = statsCollector

	// Add the garbage collector
	gcConfig := &GCConfig{
		MaxAllocs:           cfg.GCMaxAllocs,
		DiskUsageThreshold:  cfg.GCDiskUsageThreshold,
		InodeUsageThreshold: cfg.GCInodeUsageThreshold,
		Interval:            cfg.GCInterval,
		ParallelDestroys:    cfg.GCParallelDestroys,
		ReservedDiskMB:      cfg.Node.Reserved.DiskMB,
	}
	c.garbageCollector = NewAllocGarbageCollector(logger, statsCollector, c, gcConfig)
	go c.garbageCollector.Run()

	// Setup the node
	if err := c.setupNode(); err != nil {
		return nil, fmt.Errorf("node setup failed: %v", err)
	}

	// Fingerprint the node
	if err := c.fingerprint(); err != nil {
		return nil, fmt.Errorf("fingerprinting failed: %v", err)
	}

	// Scan for drivers
	if err := c.setupDrivers(); err != nil {
		return nil, fmt.Errorf("driver setup failed: %v", err)
	}

	// Setup the reserved resources
	c.reservePorts()

	// Store the config copy before restoring state but after it has been
	// initialized.
	c.configLock.Lock()
	c.configCopy = c.config.Copy()
	c.configLock.Unlock()

	// Set the preconfigured list of static servers
	c.configLock.RLock()
	if len(c.configCopy.Servers) > 0 {
		if err := c.SetServers(c.configCopy.Servers); err != nil {
			logger.Printf("[WARN] client: None of the configured servers are valid: %v", err)
		}
	}
	c.configLock.RUnlock()

	// Setup Consul discovery if enabled
	if c.configCopy.ConsulConfig.ClientAutoJoin != nil && *c.configCopy.ConsulConfig.ClientAutoJoin {
		go c.consulDiscovery()
		if len(c.servers.all()) == 0 {
			// No configured servers; trigger discovery manually
			c.triggerDiscoveryCh <- struct{}{}
		}
	}

	// Setup the vault client for token and secret renewals
	if err := c.setupVaultClient(); err != nil {
		return nil, fmt.Errorf("failed to setup vault client: %v", err)
	}

	// Restore the state
	if err := c.restoreState(); err != nil {
		logger.Printf("[ERR] client: failed to restore state: %v", err)
		logger.Printf("[ERR] client: Nomad is unable to start due to corrupt state. "+
			"The safest way to proceed is to manually stop running task processes "+
			"and remove Nomad's state (%q) and alloc (%q) directories before "+
			"restarting. Lost allocations will be rescheduled.",
			c.config.StateDir, c.config.AllocDir)
		logger.Printf("[ERR] client: Corrupt state is often caused by a bug. Please " +
			"report as much information as possible to " +
			"https://github.com/hashicorp/nomad/issues")
		return nil, fmt.Errorf("failed to restore state")
	}

	// Register and then start heartbeating to the servers.
	go c.registerAndHeartbeat()

	// Begin periodic snapshotting of state.
	go c.periodicSnapshot()

	// Begin syncing allocations to the server
	go c.allocSync()

	// Start the client!
	go c.run()

	// Start collecting stats
	go c.emitStats()

	c.logger.Printf("[INFO] client: Node ID %q", c.Node().ID)
	return c, nil
}

// init is used to initialize the client and perform any setup
// needed before we begin starting its various components.
func (c *Client) init() error {
	// Ensure the state dir exists if we have one
	if c.config.StateDir != "" {
		if err := os.MkdirAll(c.config.StateDir, 0700); err != nil {
			return fmt.Errorf("failed creating state dir: %s", err)
		}

	} else {
		// Otherwise make a temp directory to use.
		p, err := ioutil.TempDir("", "NomadClient")
		if err != nil {
			return fmt.Errorf("failed creating temporary directory for the StateDir: %v", err)
		}

		p, err = filepath.EvalSymlinks(p)
		if err != nil {
			return fmt.Errorf("failed to find temporary directory for the StateDir: %v", err)
		}

		c.config.StateDir = p
	}
	c.logger.Printf("[INFO] client: using state directory %v", c.config.StateDir)

	// Create or open the state database
	db, err := bolt.Open(filepath.Join(c.config.StateDir, "state.db"), 0600, nil)
	if err != nil {
		return fmt.Errorf("failed to create state database: %v", err)
	}
	c.stateDB = db

	// Ensure the alloc dir exists if we have one
	if c.config.AllocDir != "" {
		if err := os.MkdirAll(c.config.AllocDir, 0711); err != nil {
			return fmt.Errorf("failed creating alloc dir: %s", err)
		}
	} else {
		// Otherwise make a temp directory to use.
		p, err := ioutil.TempDir("", "NomadClient")
		if err != nil {
			return fmt.Errorf("failed creating temporary directory for the AllocDir: %v", err)
		}

		p, err = filepath.EvalSymlinks(p)
		if err != nil {
			return fmt.Errorf("failed to find temporary directory for the AllocDir: %v", err)
		}

		// Change the permissions to have the execute bit
		if err := os.Chmod(p, 0711); err != nil {
			return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err)
		}

		c.config.AllocDir = p
	}

	c.logger.Printf("[INFO] client: using alloc directory %v", c.config.AllocDir)
	return nil
}

// Leave is used to prepare the client to leave the cluster
func (c *Client) Leave() error {
	// TODO
	return nil
}

// Datacenter returns the datacenter for the given client
func (c *Client) Datacenter() string {
	c.configLock.RLock()
	dc := c.configCopy.Node.Datacenter
	c.configLock.RUnlock()
	return dc
}

// Region returns the region for the given client
func (c *Client) Region() string {
	return c.config.Region
}

// RPCMajorVersion returns the structs.ApiMajorVersion supported by the
// client.
func (c *Client) RPCMajorVersion() int {
	return structs.ApiMajorVersion
}

// RPCMinorVersion returns the structs.ApiMinorVersion supported by the
// client.
func (c *Client) RPCMinorVersion() int {
	return structs.ApiMinorVersion
}

// Shutdown is used to tear down the client
func (c *Client) Shutdown() error {
	c.logger.Printf("[INFO] client: shutting down")
	c.shutdownLock.Lock()
	defer c.shutdownLock.Unlock()

	if c.shutdown {
		return nil
	}

	// Defer closing the database
	defer func() {
		if err := c.stateDB.Close(); err != nil {
			c.logger.Printf("[ERR] client: failed to close state database on shutdown: %v", err)
		}
	}()

	// Stop renewing tokens and secrets
	if c.vaultClient != nil {
		c.vaultClient.Stop()
	}

	// Stop Garbage collector
	c.garbageCollector.Stop()

	// Destroy all the running allocations.
	if c.config.DevMode {
		for _, ar := range c.getAllocRunners() {
			ar.Destroy()
			<-ar.WaitCh()
		}
	}

	c.shutdown = true
	close(c.shutdownCh)
	c.connPool.Shutdown()
	return c.saveState()
}

// RPC is used to forward an RPC call to a nomad server, or fail if no servers.
func (c *Client) RPC(method string, args interface{}, reply interface{}) error {
	// Invoke the RPCHandler if it exists
	if c.config.RPCHandler != nil {
		return c.config.RPCHandler.RPC(method, args, reply)
	}

	servers := c.servers.all()
	if len(servers) == 0 {
		return noServersErr
	}

	var mErr multierror.Error
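	// Try each known server in turn, marking failures so the server list can be
	// re-prioritized, and return as soon as one request succeeds.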
	for _, s := range servers {
		// Make the RPC request
		if err := c.connPool.RPC(c.Region(), s.addr, c.RPCMajorVersion(), method, args, reply); err != nil {
			errmsg := fmt.Errorf("RPC failed to server %s: %v", s.addr, err)
			mErr.Errors = append(mErr.Errors, errmsg)
			c.logger.Printf("[DEBUG] client: %v", errmsg)
			c.servers.failed(s)
			continue
		}
		c.servers.good(s)
		return nil
	}

	return mErr.ErrorOrNil()
}

// Stats is used to return statistics for debugging and insight
// for various sub-systems
func (c *Client) Stats() map[string]map[string]string {
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()
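	// The heartbeat lock guards the lastHeartbeat and heartbeatTTL values read
	// below.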
	stats := map[string]map[string]string{
		"client": map[string]string{
			"node_id":         c.Node().ID,
			"known_servers":   c.servers.all().String(),
			"num_allocations": strconv.Itoa(c.NumAllocs()),
			"last_heartbeat":  fmt.Sprintf("%v", time.Since(c.lastHeartbeat)),
			"heartbeat_ttl":   fmt.Sprintf("%v", c.heartbeatTTL),
		},
		"runtime": nomad.RuntimeStats(),
	}
	return stats
}

// CollectAllocation garbage collects a single allocation
func (c *Client) CollectAllocation(allocID string) error {
	return c.garbageCollector.Collect(allocID)
}

// CollectAllAllocs garbage collects all allocations on a node in the terminal
// state
func (c *Client) CollectAllAllocs() error {
	return c.garbageCollector.CollectAll()
}

// Node returns the locally registered node
func (c *Client) Node() *structs.Node {
	c.configLock.RLock()
	defer c.configLock.RUnlock()
	return c.config.Node
}

// StatsReporter exposes the various APIs related to resource usage of a Nomad
// client
func (c *Client) StatsReporter() ClientStatsReporter {
	return c
}

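// GetAllocStats returns the AllocStatsReporter for the passed allocation.
// If it does not exist an error is reported.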
func (c *Client) GetAllocStats(allocID string) (AllocStatsReporter, error) {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()
	ar, ok := c.allocs[allocID]
	if !ok {
		return nil, fmt.Errorf("unknown allocation ID %q", allocID)
	}
	return ar.StatsReporter(), nil
}

// LatestHostStats returns the latest host resource usage stats for the Nomad client
func (c *Client) LatestHostStats() *stats.HostStats {
	return c.hostStatsCollector.Stats()
}

// GetAllocFS returns the AllocFS interface for the alloc dir of an allocation
func (c *Client) GetAllocFS(allocID string) (allocdir.AllocDirFS, error) {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	ar, ok := c.allocs[allocID]
	if !ok {
		return nil, fmt.Errorf("unknown allocation ID %q", allocID)
	}
	return ar.GetAllocDir(), nil
}

// GetClientAlloc returns the allocation from the client
func (c *Client) GetClientAlloc(allocID string) (*structs.Allocation, error) {
	all := c.allAllocs()
	alloc, ok := all[allocID]
	if !ok {
		return nil, fmt.Errorf("unknown allocation ID %q", allocID)
	}
	return alloc, nil
}

// GetServers returns the list of nomad servers this client is aware of.
func (c *Client) GetServers() []string {
	endpoints := c.servers.all()
	res := make([]string, len(endpoints))
	for i := range endpoints {
		res[i] = endpoints[i].addr.String()
	}
	return res
}

// SetServers sets a new list of nomad servers to connect to. As long as one
// server is resolvable no error is returned.
func (c *Client) SetServers(servers []string) error {
	endpoints := make([]*endpoint, 0, len(servers))
	var merr multierror.Error
	for _, s := range servers {
		addr, err := resolveServer(s)
		if err != nil {
			c.logger.Printf("[DEBUG] client: ignoring server %s due to resolution error: %v", s, err)
			merr.Errors = append(merr.Errors, err)
			continue
		}

		// Valid endpoint, append it without a priority as this API
		// doesn't support different priorities for different servers
		endpoints = append(endpoints, &endpoint{name: s, addr: addr})
	}

	// Only return errors if no servers are valid
	if len(endpoints) == 0 {
		if len(merr.Errors) > 0 {
			return merr.ErrorOrNil()
		}
		return noServersErr
	}

	c.servers.set(endpoints)
	return nil
}

// restoreState is used to restore our state from the data dir
func (c *Client) restoreState() error {
	if c.config.DevMode {
		return nil
	}

	// COMPAT: Remove in 0.7.0
	// 0.6.0 transitioned from individual state files to a single bolt-db.
	// The upgrade path is to:
	// Check if old state exists
	// If so, restore from that and delete old state
	// Restore using state database

	// Allocs holds the IDs of the allocations being restored
	var allocs []string

	// Upgrading tracks whether this is a pre 0.6.0 upgrade path
	var upgrading bool

	// Scan the directory
	allocDir := filepath.Join(c.config.StateDir, "alloc")
	list, err := ioutil.ReadDir(allocDir)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to list alloc state: %v", err)
	} else if err == nil && len(list) != 0 {
		upgrading = true
		for _, entry := range list {
			allocs = append(allocs, entry.Name())
		}
	} else {
		// Normal path
		err := c.stateDB.View(func(tx *bolt.Tx) error {
			allocs, err = getAllAllocationIDs(tx)
			if err != nil {
				return fmt.Errorf("failed to list allocations: %v", err)
			}
			return nil
		})
		if err != nil {
			return err
		}
	}

	// Load each alloc back
	var mErr multierror.Error
	for _, id := range allocs {
		alloc := &structs.Allocation{ID: id}

		c.configLock.RLock()
		ar := NewAllocRunner(c.logger, c.configCopy, c.stateDB, c.updateAllocStatus, alloc, c.vaultClient, c.consulService)
		c.configLock.RUnlock()

		c.allocLock.Lock()
		c.allocs[id] = ar
		c.allocLock.Unlock()

		if err := ar.RestoreState(); err != nil {
			c.logger.Printf("[ERR] client: failed to restore state for alloc %q: %v", id, err)
			mErr.Errors = append(mErr.Errors, err)
		} else {
			go ar.Run()

			if upgrading {
				if err := ar.SaveState(); err != nil {
					c.logger.Printf("[WARN] client: initial save state for alloc %q failed: %v", id, err)
				}
			}
		}
	}

	// Delete all the entries
	if upgrading {
		if err := os.RemoveAll(allocDir); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	return mErr.ErrorOrNil()
}

// saveState is used to snapshot our state into the data dir.
func (c *Client) saveState() error {
	if c.config.DevMode {
		return nil
	}

	var wg sync.WaitGroup
	var l sync.Mutex
	var mErr multierror.Error
	runners := c.getAllocRunners()
	wg.Add(len(runners))

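	// Save the state of each alloc runner concurrently, collecting any errors.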
	for id, ar := range runners {
		go func(id string, ar *AllocRunner) {
			err := ar.SaveState()
			if err != nil {
				c.logger.Printf("[ERR] client: failed to save state for alloc %q: %v", id, err)
				l.Lock()
				multierror.Append(&mErr, err)
				l.Unlock()
			}
			wg.Done()
		}(id, ar)
	}

	wg.Wait()
	return mErr.ErrorOrNil()
}

// getAllocRunners returns a snapshot of the current set of alloc runners.
func (c *Client) getAllocRunners() map[string]*AllocRunner {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()
	runners := make(map[string]*AllocRunner, len(c.allocs))
	for id, ar := range c.allocs {
		runners[id] = ar
	}
	return runners
}

// NumAllocs returns the number of allocs this client has. Used to
// fulfill the AllocCounter interface for the GC.
func (c *Client) NumAllocs() int {
	c.allocLock.RLock()
	n := len(c.allocs)
	c.allocLock.RUnlock()

	c.blockedAllocsLock.RLock()
	n += len(c.blockedAllocations)
	c.blockedAllocsLock.RUnlock()

	c.migratingAllocsLock.RLock()
	n += len(c.migratingAllocs)
	c.migratingAllocsLock.RUnlock()

	return n
}

// nodeID restores, or generates if necessary, a unique node ID and SecretID.
// The node ID is, if available, a persistent unique ID. The secret ID is a
// high-entropy random UUID.
func (c *Client) nodeID() (id, secret string, err error) {
	var hostID string
	hostInfo, err := host.Info()
	if !c.config.NoHostUUID && err == nil {
		if hashed, ok := helper.HashUUID(hostInfo.HostID); ok {
			hostID = hashed
		}
	}

	if hostID == "" {
		// Generate a random hostID if no constant ID is available on
		// this platform.
		hostID = structs.GenerateUUID()
	}

	// Do not persist in dev mode
	if c.config.DevMode {
		return hostID, structs.GenerateUUID(), nil
	}

	// Attempt to read existing ID
	idPath := filepath.Join(c.config.StateDir, "client-id")
	idBuf, err := ioutil.ReadFile(idPath)
	if err != nil && !os.IsNotExist(err) {
		return "", "", err
	}

	// Attempt to read existing secret ID
	secretPath := filepath.Join(c.config.StateDir, "secret-id")
	secretBuf, err := ioutil.ReadFile(secretPath)
	if err != nil && !os.IsNotExist(err) {
		return "", "", err
	}

	// Use existing ID if any
	if len(idBuf) != 0 {
		id = strings.ToLower(string(idBuf))
	} else {
		id = hostID

		// Persist the ID
		if err := ioutil.WriteFile(idPath, []byte(id), 0700); err != nil {
			return "", "", err
		}
	}

	if len(secretBuf) != 0 {
		secret = string(secretBuf)
	} else {
		// Generate new ID
		secret = structs.GenerateUUID()

		// Persist the ID
		if err := ioutil.WriteFile(secretPath, []byte(secret), 0700); err != nil {
			return "", "", err
		}
	}

	return id, secret, nil
}

// setupNode is used to setup the initial node
func (c *Client) setupNode() error {
	node := c.config.Node
	if node == nil {
		node = &structs.Node{}
		c.config.Node = node
	}
	// Generate an ID and secret for the node
	id, secretID, err := c.nodeID()
	if err != nil {
		return fmt.Errorf("node ID setup failed: %v", err)
	}

	node.ID = id
	node.SecretID = secretID
	if node.Attributes == nil {
		node.Attributes = make(map[string]string)
	}
	if node.Links == nil {
		node.Links = make(map[string]string)
	}
	if node.Meta == nil {
		node.Meta = make(map[string]string)
	}
	if node.Resources == nil {
		node.Resources = &structs.Resources{}
	}
	if node.Reserved == nil {
		node.Reserved = &structs.Resources{}
	}
	if node.Datacenter == "" {
		node.Datacenter = "dc1"
	}
	if node.Name == "" {
		node.Name, _ = os.Hostname()
	}
	if node.Name == "" {
		node.Name = node.ID
	}
	node.Status = structs.NodeStatusInit
	return nil
}

// reservePorts is used to reserve ports on the fingerprinted network devices.
func (c *Client) reservePorts() {
	c.configLock.RLock()
	defer c.configLock.RUnlock()
	global := c.config.GloballyReservedPorts
	if len(global) == 0 {
		return
	}

	node := c.config.Node
	networks := node.Resources.Networks
	reservedIndex := make(map[string]*structs.NetworkResource, len(networks))
	for _, resNet := range node.Reserved.Networks {
		reservedIndex[resNet.IP] = resNet
	}

	// Go through each network device and reserve ports on it.
	for _, net := range networks {
		res, ok := reservedIndex[net.IP]
		if !ok {
			res = net.Copy()
			res.MBits = 0
			reservedIndex[net.IP] = res
		}

		for _, portVal := range global {
			p := structs.Port{Value: portVal}
			res.ReservedPorts = append(res.ReservedPorts, p)
		}
	}

	// Clear the reserved networks.
	if node.Reserved == nil {
		node.Reserved = new(structs.Resources)
	} else {
		node.Reserved.Networks = nil
	}

	// Restore the reserved networks
	for _, net := range reservedIndex {
		node.Reserved.Networks = append(node.Reserved.Networks, net)
	}
}

// fingerprint is used to fingerprint the client and setup the node
func (c *Client) fingerprint() error {
	whitelist := c.config.ReadStringListToMap("fingerprint.whitelist")
	whitelistEnabled := len(whitelist) > 0
	blacklist := c.config.ReadStringListToMap("fingerprint.blacklist")

	c.logger.Printf("[DEBUG] client: built-in fingerprints: %v", fingerprint.BuiltinFingerprints())

	var applied []string
	var skipped []string
	for _, name := range fingerprint.BuiltinFingerprints() {
		// Skip modules that are not in the whitelist if it is enabled.
		if _, ok := whitelist[name]; whitelistEnabled && !ok {
			skipped = append(skipped, name)
			continue
		}
		// Skip modules that are in the blacklist
		if _, ok := blacklist[name]; ok {
			skipped = append(skipped, name)
			continue
		}
		f, err := fingerprint.NewFingerprint(name, c.logger)
		if err != nil {
			return err
		}

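		// Run the fingerprinter with the config lock held since it may update
		// the node.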
		c.configLock.Lock()
		applies, err := f.Fingerprint(c.config, c.config.Node)
		c.configLock.Unlock()
		if err != nil {
			return err
		}
		if applies {
			applied = append(applied, name)
		}
		p, period := f.Periodic()
		if p {
			// TODO: If more periodic fingerprinters are added, then
			// fingerprintPeriodic should be used to handle all the periodic
			// fingerprinters by using a priority queue.
			go c.fingerprintPeriodic(name, f, period)
		}
	}
	c.logger.Printf("[DEBUG] client: applied fingerprints %v", applied)
	if len(skipped) != 0 {
		c.logger.Printf("[DEBUG] client: fingerprint modules skipped due to white/blacklist: %v", skipped)
	}
	return nil
}

// fingerprintPeriodic runs a fingerprinter at the specified duration.
func (c *Client) fingerprintPeriodic(name string, f fingerprint.Fingerprint, d time.Duration) {
	c.logger.Printf("[DEBUG] client: fingerprinting %v every %v", name, d)
	for {
		select {
		case <-time.After(d):
			c.configLock.Lock()
			if _, err := f.Fingerprint(c.config, c.config.Node); err != nil {
				c.logger.Printf("[DEBUG] client: periodic fingerprinting for %v failed: %v", name, err)
			}
			c.configLock.Unlock()
		case <-c.shutdownCh:
			return
		}
	}
}

// setupDrivers is used to find the available drivers
func (c *Client) setupDrivers() error {
	// Build the white/blacklists of drivers.
	whitelist := c.config.ReadStringListToMap("driver.whitelist")
	whitelistEnabled := len(whitelist) > 0
	blacklist := c.config.ReadStringListToMap("driver.blacklist")

	var avail []string
	var skipped []string
	driverCtx := driver.NewDriverContext("", "", c.config, c.config.Node, c.logger, nil)
	for name := range driver.BuiltinDrivers {
		// Skip fingerprinting drivers that are not in the whitelist if it is
		// enabled.
		if _, ok := whitelist[name]; whitelistEnabled && !ok {
			skipped = append(skipped, name)
			continue
		}
		// Skip fingerprinting drivers that are in the blacklist
		if _, ok := blacklist[name]; ok {
			skipped = append(skipped, name)
			continue
		}

		d, err := driver.NewDriver(name, driverCtx)
		if err != nil {
			return err
		}
		c.configLock.Lock()
		applies, err := d.Fingerprint(c.config, c.config.Node)
		c.configLock.Unlock()
		if err != nil {
			return err
		}
		if applies {
			avail = append(avail, name)
		}

		p, period := d.Periodic()
		if p {
			go c.fingerprintPeriodic(name, d, period)
		}
	}

	c.logger.Printf("[DEBUG] client: available drivers %v", avail)

	if len(skipped) != 0 {
		c.logger.Printf("[DEBUG] client: drivers skipped due to white/blacklist: %v", skipped)
	}

	return nil
}

// retryIntv calculates a retry interval value given the base
func (c *Client) retryIntv(base time.Duration) time.Duration {
	if c.config.DevMode {
		return devModeRetryIntv
	}
	return base + lib.RandomStagger(base)
}

// registerAndHeartbeat is a long lived goroutine used to register the client
// and then start heartbeating to the server.
func (c *Client) registerAndHeartbeat() {
	// Register the node
	c.retryRegisterNode()

	// Start watching for node changes
	go c.watchNodeUpdates()

	// Setup the heartbeat timer, for the initial registration
	// we want to do this quickly. We want to do it extra quickly
	// in development mode.
	var heartbeat <-chan time.Time
	if c.config.DevMode {
		heartbeat = time.After(0)
	} else {
		heartbeat = time.After(lib.RandomStagger(initialHeartbeatStagger))
	}

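	// Heartbeat loop: wait for the next heartbeat, newly discovered servers, or
	// shutdown, then update the node status.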
	for {
		select {
		case <-c.serversDiscoveredCh:
		case <-heartbeat:
		case <-c.shutdownCh:
			return
		}

		if err := c.updateNodeStatus(); err != nil {
			// The servers have changed such that this node has not been
			// registered before
			if strings.Contains(err.Error(), "node not found") {
				// Re-register the node
				c.logger.Printf("[INFO] client: re-registering node")
				c.retryRegisterNode()
				heartbeat = time.After(lib.RandomStagger(initialHeartbeatStagger))
			} else {
				intv := c.retryIntv(registerRetryIntv)
				c.logger.Printf("[ERR] client: heartbeating failed. Retrying in %v: %v", intv, err)
				heartbeat = time.After(intv)

				// if heartbeating fails, trigger Consul discovery
				c.triggerDiscovery()
			}
		} else {
			c.heartbeatLock.Lock()
			heartbeat = time.After(c.heartbeatTTL)
			c.heartbeatLock.Unlock()
		}
	}
}

// periodicSnapshot is a long lived goroutine used to periodically snapshot the
// state of the client
func (c *Client) periodicSnapshot() {
	// Create a snapshot timer
	snapshot := time.After(stateSnapshotIntv)

	for {
		select {
		case <-snapshot:
			snapshot = time.After(stateSnapshotIntv)
			if err := c.saveState(); err != nil {
				c.logger.Printf("[ERR] client: failed to save state: %v", err)
			}

		case <-c.shutdownCh:
			return
		}
	}
}

// run is a long lived goroutine used to run the client
func (c *Client) run() {
	// Watch for changes in allocations
	allocUpdates := make(chan *allocUpdates, 8)
	go c.watchAllocations(allocUpdates)

	for {
		select {
		case update := <-allocUpdates:
			c.runAllocs(update)

		case <-c.shutdownCh:
			return
		}
	}
}

// hasNodeChanged calculates a hash for the node attributes and meta map.
// The new hash values are compared against the old (passed-in) hash values to
// determine if the node properties have changed. It returns the new hash values
// in case they are different from the old hash values.
func (c *Client) hasNodeChanged(oldAttrHash uint64, oldMetaHash uint64) (bool, uint64, uint64) {
	c.configLock.RLock()
	defer c.configLock.RUnlock()
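	// Calculate node attributes hash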
	newAttrHash, err := hashstructure.Hash(c.config.Node.Attributes, nil)
	if err != nil {
		c.logger.Printf("[DEBUG] client: unable to calculate node attributes hash: %v", err)
	}
	// Calculate node meta map hash
	newMetaHash, err := hashstructure.Hash(c.config.Node.Meta, nil)
	if err != nil {
		c.logger.Printf("[DEBUG] client: unable to calculate node meta hash: %v", err)
	}
	if newAttrHash != oldAttrHash || newMetaHash != oldMetaHash {
		return true, newAttrHash, newMetaHash
	}
	return false, oldAttrHash, oldMetaHash
}

// retryRegisterNode is used to register the node or update the registration and
// retry in case of failure.
func (c *Client) retryRegisterNode() {
	for {
		err := c.registerNode()
		if err == nil {
			// Registered!
			return
		}

		if err == noServersErr {
			c.logger.Print("[DEBUG] client: registration waiting on servers")
			c.triggerDiscovery()
		} else {
			c.logger.Printf("[ERR] client: registration failure: %v", err)
		}
		select {
		case <-c.serversDiscoveredCh:
		case <-time.After(c.retryIntv(registerRetryIntv)):
		case <-c.shutdownCh:
			return
		}
	}
}

// registerNode is used to register the node or update the registration
func (c *Client) registerNode() error {
	node := c.Node()
	req := structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}
	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.Register", &req, &resp); err != nil {
		return err
	}

	// Update the node status to ready after we register.
	c.configLock.Lock()
	node.Status = structs.NodeStatusReady
	c.configLock.Unlock()

	c.logger.Printf("[INFO] client: node registration complete")
	if len(resp.EvalIDs) != 0 {
		c.logger.Printf("[DEBUG] client: %d evaluations triggered by node registration", len(resp.EvalIDs))
	}

	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()
	c.lastHeartbeat = time.Now()
	c.heartbeatTTL = resp.HeartbeatTTL
	return nil
}

// updateNodeStatus is used to heartbeat and update the status of the node
func (c *Client) updateNodeStatus() error {
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()

	node := c.Node()
	req := structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusReady,
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}
	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.UpdateStatus", &req, &resp); err != nil {
		c.triggerDiscovery()
		return fmt.Errorf("failed to update status: %v", err)
	}
	if len(resp.EvalIDs) != 0 {
		c.logger.Printf("[DEBUG] client: %d evaluations triggered by node update", len(resp.EvalIDs))
	}
	if resp.Index != 0 {
		c.logger.Printf("[DEBUG] client: state updated to %s", req.Status)
	}

	// Update heartbeat time and ttl
	c.lastHeartbeat = time.Now()
	c.heartbeatTTL = resp.HeartbeatTTL

	// Convert []*NodeServerInfo to endpoints
	localdc := c.Datacenter()
	servers := make(endpoints, 0, len(resp.Servers))
	for _, s := range resp.Servers {
		addr, err := resolveServer(s.RPCAdvertiseAddr)
		if err != nil {
			continue
		}
		e := endpoint{name: s.RPCAdvertiseAddr, addr: addr}
		if s.Datacenter != localdc {
			// server is non-local; de-prioritize
			e.priority = 1
		}
		servers = append(servers, &e)
	}
	if len(servers) == 0 {
		return fmt.Errorf("server returned no valid servers")
	}
	c.servers.set(servers)

	// Begin polling Consul if there is no Nomad leader. We could be
	// heartbeating to a Nomad server that is on the minority side of a
	// partition of the Nomad server quorum; this agent can still reach the
	// existing majority of Nomad servers, but only if it queries Consul.
	if resp.LeaderRPCAddr == "" {
		c.triggerDiscovery()
	}

	return nil
}
|
2015-08-23 02:31:22 +00:00
|
|
|
|
2015-08-29 21:22:24 +00:00
|
|
|
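
// NOTE: every heartbeat response also carries the current set of Nomad
// servers and the leader's RPC address. The client replaces its server list
// from that response and falls back to Consul discovery whenever the response
// reports no leader.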

// updateAllocStatus is used to update the status of an allocation
func (c *Client) updateAllocStatus(alloc *structs.Allocation) {
	// If this alloc was blocking another alloc and transitioned to a
	// terminal state then start the blocked allocation
	if alloc.Terminated() {
		c.blockedAllocsLock.Lock()
		blockedAlloc, ok := c.blockedAllocations[alloc.ID]
		if ok {
			var prevAllocDir *allocdir.AllocDir
			if ar, ok := c.getAllocRunners()[alloc.ID]; ok {
				tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
				if tg != nil && tg.EphemeralDisk != nil && tg.EphemeralDisk.Sticky {
					prevAllocDir = ar.GetAllocDir()
				}
			}

			delete(c.blockedAllocations, blockedAlloc.PreviousAllocation)
			c.blockedAllocsLock.Unlock()

			c.logger.Printf("[DEBUG] client: unblocking alloc %q because alloc %q terminated", blockedAlloc.ID, alloc.ID)

			// Need to call addAlloc without holding the lock
			if err := c.addAlloc(blockedAlloc, prevAllocDir); err != nil {
				c.logger.Printf("[ERR] client: failed to add alloc which was previously blocked %q: %v",
					blockedAlloc.ID, err)
			}
		} else {
			c.blockedAllocsLock.Unlock()
		}

		// Mark the allocation for GC if it is in terminal state
		if ar, ok := c.getAllocRunners()[alloc.ID]; ok {
			if err := c.garbageCollector.MarkForCollection(ar); err != nil {
				c.logger.Printf("[DEBUG] client: couldn't add alloc %q for GC: %v", alloc.ID, err)
			}
		}
	}

	// Strip all the information that can be reconstructed at the server. Only
	// send the fields that are updatable by the client.
	stripped := new(structs.Allocation)
	stripped.ID = alloc.ID
	stripped.NodeID = c.Node().ID
	stripped.TaskStates = alloc.TaskStates
	stripped.ClientStatus = alloc.ClientStatus
	stripped.ClientDescription = alloc.ClientDescription

	select {
	case c.allocUpdates <- stripped:
	case <-c.shutdownCh:
	}
}
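
// NOTE: updateAllocStatus never talks to the server directly. The stripped
// allocation is handed to the allocUpdates channel, and allocSync below
// batches and ships the updates on its own timer.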

// allocSync is a long lived function that batches allocation updates to the
// server.
func (c *Client) allocSync() {
	staggered := false
	syncTicker := time.NewTicker(allocSyncIntv)
	updates := make(map[string]*structs.Allocation)
	for {
		select {
		case <-c.shutdownCh:
			syncTicker.Stop()
			return
		case alloc := <-c.allocUpdates:
			// Batch the allocation updates until the timer triggers.
			updates[alloc.ID] = alloc
		case <-syncTicker.C:
			// Fast path if there are no updates
			if len(updates) == 0 {
				continue
			}

			sync := make([]*structs.Allocation, 0, len(updates))
			for _, alloc := range updates {
				sync = append(sync, alloc)
			}

			// Send to server.
			args := structs.AllocUpdateRequest{
				Alloc:        sync,
				WriteRequest: structs.WriteRequest{Region: c.Region()},
			}

			var resp structs.GenericResponse
			if err := c.RPC("Node.UpdateAlloc", &args, &resp); err != nil {
				c.logger.Printf("[ERR] client: failed to update allocations: %v", err)
				syncTicker.Stop()
				syncTicker = time.NewTicker(c.retryIntv(allocSyncRetryIntv))
				staggered = true
			} else {
				updates = make(map[string]*structs.Allocation)
				if staggered {
					syncTicker.Stop()
					syncTicker = time.NewTicker(allocSyncIntv)
					staggered = false
				}
			}
		}
	}
}
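
// NOTE: on a failed Node.UpdateAlloc the pending batch is kept and the ticker
// is swapped for a jittered retry interval (allocSyncRetryIntv); once a push
// succeeds the regular allocSyncIntv ticker is restored.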

// allocUpdates holds the results of receiving updated allocations from the
// servers.
type allocUpdates struct {
	// pulled is the set of allocations that were downloaded from the servers.
	pulled map[string]*structs.Allocation

	// filtered is the set of allocations that were not pulled because their
	// AllocModifyIndex didn't change.
	filtered map[string]struct{}
}
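
// NOTE: watchAllocations below uses a two phase pull. Node.GetClientAllocs
// returns only a map of alloc ID to AllocModifyIndex; allocations whose index
// advanced are then fetched in full via Alloc.GetAllocs, and everything else
// is reported as filtered.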

// watchAllocations is used to scan for updates to allocations
func (c *Client) watchAllocations(updates chan *allocUpdates) {
	// The request and response for getting the map of allocations that should
	// be running on the Node to their AllocModifyIndex which is incremented
	// when the allocation is updated by the servers.
	n := c.Node()
	req := structs.NodeSpecificRequest{
		NodeID:   n.ID,
		SecretID: n.SecretID,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var resp structs.NodeClientAllocsResponse

	// The request and response for pulling down the set of allocations that are
	// new, or updated server side.
	allocsReq := structs.AllocsGetRequest{
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var allocsResp structs.AllocsGetResponse

OUTER:
	for {
		// Get the allocation modify index map, blocking for updates. We will
		// use this to determine exactly what allocations need to be downloaded
		// in full.
		resp = structs.NodeClientAllocsResponse{}
		err := c.RPC("Node.GetClientAllocs", &req, &resp)
		if err != nil {
			// Shutdown often causes EOF errors, so check for shutdown first
			select {
			case <-c.shutdownCh:
				return
			default:
			}

			// COMPAT: Remove in 0.6. This is to allow the case in which the
			// servers are not fully upgraded before the clients register. This
			// can cause the SecretID to be lost
			if strings.Contains(err.Error(), "node secret ID does not match") {
				c.logger.Printf("[DEBUG] client: re-registering node as there was a secret ID mismatch: %v", err)
				c.retryRegisterNode()
			} else if err != noServersErr {
				c.logger.Printf("[ERR] client: failed to query for node allocations: %v", err)
			}
			retry := c.retryIntv(getAllocRetryIntv)
			select {
			case <-c.serversDiscoveredCh:
				continue
			case <-time.After(retry):
				continue
			case <-c.shutdownCh:
				return
			}
		}

		// Check for shutdown
		select {
		case <-c.shutdownCh:
			return
		default:
		}

		// Filter all allocations whose AllocModifyIndex was not incremented.
		// These are the allocations who have either not been updated, or whose
		// updates are a result of the client sending an update for the alloc.
		// This lets us reduce the network traffic to the server as we don't
		// need to pull all the allocations.
		var pull []string
		filtered := make(map[string]struct{})
		runners := c.getAllocRunners()
		var pullIndex uint64
		for allocID, modifyIndex := range resp.Allocs {
			// Pull the allocation if we don't have an alloc runner for the
			// allocation or if the alloc runner requires an updated allocation.
			runner, ok := runners[allocID]

			if !ok || runner.shouldUpdate(modifyIndex) {
				// Only pull allocs that are required. Filtered
				// allocs might be at a higher index, so ignore
				// it.
				if modifyIndex > pullIndex {
					pullIndex = modifyIndex
				}
				pull = append(pull, allocID)
			} else {
				filtered[allocID] = struct{}{}
			}
		}

		// Pull the allocations that passed filtering.
		allocsResp.Allocs = nil
		var pulledAllocs map[string]*structs.Allocation
		if len(pull) != 0 {
			// Pull the allocations that need to be updated.
			allocsReq.AllocIDs = pull
			allocsReq.MinQueryIndex = pullIndex - 1
			allocsResp = structs.AllocsGetResponse{}
			if err := c.RPC("Alloc.GetAllocs", &allocsReq, &allocsResp); err != nil {
				c.logger.Printf("[ERR] client: failed to query updated allocations: %v", err)
				retry := c.retryIntv(getAllocRetryIntv)
				select {
				case <-c.serversDiscoveredCh:
					continue
				case <-time.After(retry):
					continue
				case <-c.shutdownCh:
					return
				}
			}

			// Ensure that we received all the allocations we wanted
			pulledAllocs = make(map[string]*structs.Allocation, len(allocsResp.Allocs))
			for _, alloc := range allocsResp.Allocs {
				pulledAllocs[alloc.ID] = alloc
			}

			for _, desiredID := range pull {
				if _, ok := pulledAllocs[desiredID]; !ok {
					// We didn't get everything we wanted. Do not update the
					// MinQueryIndex, sleep and then retry.
					wait := c.retryIntv(2 * time.Second)
					select {
					case <-time.After(wait):
						// Wait for the server we contact to receive the
						// allocations
						continue OUTER
					case <-c.shutdownCh:
						return
					}
				}
			}

			// Check for shutdown
			select {
			case <-c.shutdownCh:
				return
			default:
			}
		}

		c.logger.Printf("[DEBUG] client: updated allocations at index %d (total %d) (pulled %d) (filtered %d)",
			resp.Index, len(resp.Allocs), len(allocsResp.Allocs), len(filtered))

		// Update the query index.
		if resp.Index > req.MinQueryIndex {
			req.MinQueryIndex = resp.Index
		}

		// Push the updates.
		update := &allocUpdates{
			filtered: filtered,
			pulled:   pulledAllocs,
		}
		select {
		case updates <- update:
		case <-c.shutdownCh:
			return
		}
	}
}

// watchNodeUpdates periodically checks for changes to the node attributes or meta map
func (c *Client) watchNodeUpdates() {
	c.logger.Printf("[DEBUG] client: periodically checking for node changes at duration %v", nodeUpdateRetryIntv)

	// Initialize the hashes
	_, attrHash, metaHash := c.hasNodeChanged(0, 0)
	var changed bool
	for {
		select {
		case <-time.After(c.retryIntv(nodeUpdateRetryIntv)):
			changed, attrHash, metaHash = c.hasNodeChanged(attrHash, metaHash)
			if changed {
				c.logger.Printf("[DEBUG] client: state changed, updating node.")

				// Update the config copy.
				c.configLock.Lock()
				node := c.config.Node.Copy()
				c.configCopy.Node = node
				c.configLock.Unlock()

				c.retryRegisterNode()
			}
		case <-c.shutdownCh:
			return
		}
	}
}

// runAllocs is invoked when we get an updated set of allocations
func (c *Client) runAllocs(update *allocUpdates) {
	// Get the existing allocs
	c.allocLock.RLock()
	exist := make([]*structs.Allocation, 0, len(c.allocs))
	for _, ar := range c.allocs {
		exist = append(exist, ar.alloc)
	}
	c.allocLock.RUnlock()

	// Diff the existing and updated allocations
	diff := diffAllocs(exist, update)
	c.logger.Printf("[DEBUG] client: %#v", diff)

	// Remove the old allocations
	for _, remove := range diff.removed {
		if err := c.removeAlloc(remove); err != nil {
			c.logger.Printf("[ERR] client: failed to remove alloc '%s': %v", remove.ID, err)
		}
	}

	// Update the existing allocations
	for _, update := range diff.updated {
		if err := c.updateAlloc(update.exist, update.updated); err != nil {
			c.logger.Printf("[ERR] client: failed to update alloc %q: %v",
				update.exist.ID, err)
		}

		// See if the updated alloc is getting migrated
		c.migratingAllocsLock.RLock()
		ch, ok := c.migratingAllocs[update.updated.ID]
		c.migratingAllocsLock.RUnlock()
		if ok {
			// Stop the migration if the allocation doesn't need any
			// migration
			if !update.updated.ShouldMigrate() {
				ch.closeCh()
			}
		}
	}

	// Start the new allocations
	for _, add := range diff.added {
		// If the allocation is chained and the previous allocation hasn't
		// terminated yet, then add the alloc to the blocked queue.
		c.blockedAllocsLock.Lock()
		ar, ok := c.getAllocRunners()[add.PreviousAllocation]
		if ok && !ar.Alloc().Terminated() {
			// Check if the alloc is already present in the blocked allocations
			// map
			if _, ok := c.blockedAllocations[add.PreviousAllocation]; !ok {
				c.logger.Printf("[DEBUG] client: added alloc %q to blocked queue for previous alloc %q",
					add.ID, add.PreviousAllocation)
				c.blockedAllocations[add.PreviousAllocation] = add
			}
			c.blockedAllocsLock.Unlock()
			continue
		}
		c.blockedAllocsLock.Unlock()

		// This means the allocation has a previous allocation on another node
		// so we will block for the previous allocation to complete
		if add.PreviousAllocation != "" && !ok {
			// Ensure that we are not blocking for the remote allocation if we
			// have already blocked
			c.migratingAllocsLock.Lock()
			if _, ok := c.migratingAllocs[add.ID]; !ok {
				// Check that we don't have an alloc runner already. This
				// prevents a race between a finishing blockForRemoteAlloc and
				// another invocation of runAllocs
				if _, ok := c.getAllocRunners()[add.PreviousAllocation]; !ok {
					c.migratingAllocs[add.ID] = newMigrateAllocCtrl(add)
					go c.blockForRemoteAlloc(add)
				}
			}
			c.migratingAllocsLock.Unlock()
			continue
		}

		// Set the previous allocdir if the allocation had a terminal
		// previous allocation
		var prevAllocDir *allocdir.AllocDir
		tg := add.Job.LookupTaskGroup(add.TaskGroup)
		if tg != nil && tg.EphemeralDisk != nil && tg.EphemeralDisk.Sticky && ar != nil {
			prevAllocDir = ar.GetAllocDir()
		}

		if err := c.addAlloc(add, prevAllocDir); err != nil {
			c.logger.Printf("[ERR] client: failed to add alloc '%s': %v",
				add.ID, err)
		}
	}
}
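
// NOTE: a newly added alloc takes one of three paths above: it is queued in
// blockedAllocations when its previous alloc is still running locally, handed
// to blockForRemoteAlloc when the previous alloc lives on another node, or
// started immediately via addAlloc (reusing the previous alloc dir when the
// task group's ephemeral disk is sticky).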

// blockForRemoteAlloc blocks until the previous allocation of an allocation has
// been terminated and migrates the snapshot data
func (c *Client) blockForRemoteAlloc(alloc *structs.Allocation) {
	// Removing the allocation from the set of allocs which are currently
	// undergoing migration
	defer func() {
		c.migratingAllocsLock.Lock()
		delete(c.migratingAllocs, alloc.ID)
		c.migratingAllocsLock.Unlock()
	}()

	// prevAllocDir is the allocation directory of the previous allocation
	var prevAllocDir *allocdir.AllocDir

	// If the allocation is not sticky then we won't wait for the previous
	// allocation to be terminal
	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if tg == nil {
		c.logger.Printf("[ERR] client: task group %q not found in job %q", alloc.TaskGroup, alloc.Job.ID)
		goto ADDALLOC
	}

	// Wait for the remote previous alloc to be terminal if the alloc is sticky
	if tg.EphemeralDisk != nil && tg.EphemeralDisk.Sticky && tg.EphemeralDisk.Migrate {
		c.logger.Printf("[DEBUG] client: blocking alloc %q for previous allocation %q", alloc.ID, alloc.PreviousAllocation)
		// Block until the previous allocation migrates to terminal state
		stopCh := c.migratingAllocs[alloc.ID]
		prevAlloc, err := c.waitForAllocTerminal(alloc.PreviousAllocation, stopCh)
		if err != nil {
			c.logger.Printf("[ERR] client: error waiting for allocation %q: %v",
				alloc.PreviousAllocation, err)
		}

		// Migrate the data from the remote node
		prevAllocDir, err = c.migrateRemoteAllocDir(prevAlloc, alloc.ID)
		if err != nil {
			c.logger.Printf("[ERR] client: error migrating data from remote alloc %q: %v",
				alloc.PreviousAllocation, err)
		}
	}

ADDALLOC:
	// Add the allocation
	if err := c.addAlloc(alloc, prevAllocDir); err != nil {
		c.logger.Printf("[ERR] client: error adding alloc: %v", err)
	}
}
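
// NOTE: the migration path is: wait for the previous allocation to reach a
// terminal state (waitForAllocTerminal), stream its snapshot from the remote
// node (migrateRemoteAllocDir), and hand the resulting directory to addAlloc
// as the previous alloc dir.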

// waitForAllocTerminal waits for an allocation with the given alloc id to
// transition to terminal state and blocks the caller until then.
func (c *Client) waitForAllocTerminal(allocID string, stopCh *migrateAllocCtrl) (*structs.Allocation, error) {
	req := structs.AllocSpecificRequest{
		AllocID: allocID,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}

	for {
		resp := structs.SingleAllocResponse{}
		err := c.RPC("Alloc.GetAlloc", &req, &resp)
		if err != nil {
			c.logger.Printf("[ERR] client: failed to query allocation %q: %v", allocID, err)
			retry := c.retryIntv(getAllocRetryIntv)
			select {
			case <-time.After(retry):
				continue
			case <-stopCh.ch:
				return nil, fmt.Errorf("giving up waiting on alloc %q since migration is not needed", allocID)
			case <-c.shutdownCh:
				return nil, fmt.Errorf("aborting because client is shutting down")
			}
		}
		if resp.Alloc == nil {
			return nil, nil
		}
		if resp.Alloc.Terminated() {
			return resp.Alloc, nil
		}

		// Update the query index.
		if resp.Index > req.MinQueryIndex {
			req.MinQueryIndex = resp.Index
		}
	}
}

// migrateRemoteAllocDir migrates the allocation directory from a remote node to
// the current node
func (c *Client) migrateRemoteAllocDir(alloc *structs.Allocation, allocID string) (*allocdir.AllocDir, error) {
	if alloc == nil {
		return nil, nil
	}

	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if tg == nil {
		return nil, fmt.Errorf("Task Group %q not found in job %q", alloc.TaskGroup, alloc.Job.ID)
	}

	// Skip migration of data if the ephemeral disk is not sticky or
	// migration is turned off.
	if tg.EphemeralDisk == nil || !tg.EphemeralDisk.Sticky || !tg.EphemeralDisk.Migrate {
		return nil, nil
	}

	node, err := c.getNode(alloc.NodeID)

	// If the node is down then skip migrating the data
	if err != nil {
		return nil, fmt.Errorf("error retrieving node %v: %v", alloc.NodeID, err)
	}

	// Check if node is nil
	if node == nil {
		return nil, fmt.Errorf("node %q doesn't exist", alloc.NodeID)
	}

	// Skip migration if the remote node is down
	if node.Status == structs.NodeStatusDown {
		c.logger.Printf("[INFO] client: not migrating data from alloc %q since node %q is down", alloc.ID, alloc.NodeID)
		return nil, nil
	}

	// Create the previous alloc dir
	pathToAllocDir := filepath.Join(c.config.AllocDir, alloc.ID)
	if err := os.MkdirAll(pathToAllocDir, 0777); err != nil {
		c.logger.Printf("[ERR] client: error creating previous allocation dir: %v", err)
	}

	// Get the snapshot
	scheme := "http"
	if node.TLSEnabled {
		scheme = "https"
	}
	// Create an API client
	apiConfig := nomadapi.DefaultConfig()
	apiConfig.Address = fmt.Sprintf("%s://%s", scheme, node.HTTPAddr)
	apiConfig.TLSConfig = &nomadapi.TLSConfig{
		CACert:     c.config.TLSConfig.CAFile,
		ClientCert: c.config.TLSConfig.CertFile,
		ClientKey:  c.config.TLSConfig.KeyFile,
	}
	apiClient, err := nomadapi.NewClient(apiConfig)
	if err != nil {
		return nil, err
	}

	url := fmt.Sprintf("/v1/client/allocation/%v/snapshot", alloc.ID)
	resp, err := apiClient.Raw().Response(url, nil)
	if err != nil {
		os.RemoveAll(pathToAllocDir)
		c.logger.Printf("[ERR] client: error getting snapshot for alloc %q: %v", alloc.ID, err)
		return nil, fmt.Errorf("error getting snapshot for alloc %q: %v", alloc.ID, err)
	}

	if err := c.unarchiveAllocDir(resp, allocID, pathToAllocDir); err != nil {
		return nil, err
	}

	// If there were no errors then we create the allocdir
	prevAllocDir := allocdir.NewAllocDir(c.logger, pathToAllocDir)
	return prevAllocDir, nil
}
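
// NOTE: the snapshot is fetched over the remote node's HTTP API
// (/v1/client/allocation/<id>/snapshot) as a tar stream and unpacked locally
// by unarchiveAllocDir below.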

// unarchiveAllocDir reads the stream of a compressed allocation directory and
// writes it to disk.
func (c *Client) unarchiveAllocDir(resp io.ReadCloser, allocID string, pathToAllocDir string) error {
	tr := tar.NewReader(resp)
	defer resp.Close()

	buf := make([]byte, 1024)

	stopMigrating, ok := c.migratingAllocs[allocID]
	if !ok {
		os.RemoveAll(pathToAllocDir)
		return fmt.Errorf("Allocation %q is not marked for remote migration", allocID)
	}
	for {
		// See if the alloc still needs migration
		select {
		case <-stopMigrating.ch:
			os.RemoveAll(pathToAllocDir)
			c.logger.Printf("[INFO] client: stopping migration of allocdir for alloc: %v", allocID)
			return nil
		case <-c.shutdownCh:
			os.RemoveAll(pathToAllocDir)
			c.logger.Printf("[INFO] client: stopping migration of alloc %q since client is shutting down", allocID)
			return nil
		default:
		}

		// Get the next header
		hdr, err := tr.Next()

		// Snapshot has ended
		if err == io.EOF {
			return nil
		}
		// If there is an error then we avoid creating the alloc dir
		if err != nil {
			os.RemoveAll(pathToAllocDir)
			return fmt.Errorf("error creating alloc dir for alloc %q: %v", allocID, err)
		}

		// If the header is for a directory we create the directory
		if hdr.Typeflag == tar.TypeDir {
			os.MkdirAll(filepath.Join(pathToAllocDir, hdr.Name), os.FileMode(hdr.Mode))
			continue
		}
		// If the header is for a symlink we create the symlink
		if hdr.Typeflag == tar.TypeSymlink {
			if err = os.Symlink(hdr.Linkname, filepath.Join(pathToAllocDir, hdr.Name)); err != nil {
				c.logger.Printf("[ERR] client: error creating symlink: %v", err)
			}
			continue
		}
		// If the header is a file, we write to a file
		if hdr.Typeflag == tar.TypeReg {
			f, err := os.Create(filepath.Join(pathToAllocDir, hdr.Name))
			if err != nil {
				c.logger.Printf("[ERR] client: error creating file: %v", err)
				continue
			}

			// Set the permissions of the file to match the origin.
			if err := f.Chmod(os.FileMode(hdr.Mode)); err != nil {
				f.Close()
				c.logger.Printf("[ERR] client: error chmod-ing file %s: %v", f.Name(), err)
				return fmt.Errorf("error chmoding file %v", err)
			}
			if err := f.Chown(hdr.Uid, hdr.Gid); err != nil {
				f.Close()
				c.logger.Printf("[ERR] client: error chown-ing file %s: %v", f.Name(), err)
				return fmt.Errorf("error chowning file %v", err)
			}

			// Copy the file in small chunks so that we can check between
			// chunks whether the client is shutting down.
			for {
				if c.shutdown {
					f.Close()
					os.RemoveAll(pathToAllocDir)
					c.logger.Printf("[INFO] client: stopping migration of alloc %q because client is shutting down", allocID)
					return nil
				}

				n, err := tr.Read(buf)
				if err != nil {
					f.Close()
					if err != io.EOF {
						return fmt.Errorf("error reading snapshot: %v", err)
					}
					break
				}
				if _, err := f.Write(buf[:n]); err != nil {
					f.Close()
					os.RemoveAll(pathToAllocDir)
					return fmt.Errorf("error writing to file %q: %v", f.Name(), err)
				}
			}
		}
	}
}

// getNode gets the node from the server with the given Node ID
func (c *Client) getNode(nodeID string) (*structs.Node, error) {
	req := structs.NodeSpecificRequest{
		NodeID: nodeID,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}

	resp := structs.SingleNodeResponse{}
	for {
		err := c.RPC("Node.GetNode", &req, &resp)
		if err != nil {
			c.logger.Printf("[ERR] client: failed to query node info %q: %v", nodeID, err)
			retry := c.retryIntv(getAllocRetryIntv)
			select {
			case <-time.After(retry):
				continue
			case <-c.shutdownCh:
				return nil, fmt.Errorf("aborting because client is shutting down")
			}
		}
		break
	}

	return resp.Node, nil
}

// removeAlloc is invoked when we should remove an allocation
func (c *Client) removeAlloc(alloc *structs.Allocation) error {
	c.allocLock.Lock()
	ar, ok := c.allocs[alloc.ID]
	if !ok {
		c.allocLock.Unlock()
		c.logger.Printf("[WARN] client: missing context for alloc '%s'", alloc.ID)
		return nil
	}
	delete(c.allocs, alloc.ID)
	c.allocLock.Unlock()

	// Ensure the GC has a reference and then collect. Collecting through the GC
	// applies rate limiting
	c.garbageCollector.MarkForCollection(ar)
	go c.garbageCollector.Collect(alloc.ID)

	return nil
}

// updateAlloc is invoked when we should update an allocation
func (c *Client) updateAlloc(exist, update *structs.Allocation) error {
	c.allocLock.RLock()
	ar, ok := c.allocs[exist.ID]
	c.allocLock.RUnlock()
	if !ok {
		c.logger.Printf("[WARN] client: missing context for alloc '%s'", exist.ID)
		return nil
	}

	ar.Update(update)
	return nil
}

// addAlloc is invoked when we should add an allocation
func (c *Client) addAlloc(alloc *structs.Allocation, prevAllocDir *allocdir.AllocDir) error {
	// Check if we already have an alloc runner
	c.allocLock.Lock()
	if _, ok := c.allocs[alloc.ID]; ok {
		c.logger.Printf("[DEBUG] client: dropping duplicate add allocation request: %q", alloc.ID)
		c.allocLock.Unlock()
		return nil
	}

	c.configLock.RLock()
	ar := NewAllocRunner(c.logger, c.configCopy, c.stateDB, c.updateAllocStatus, alloc, c.vaultClient, c.consulService)
	ar.SetPreviousAllocDir(prevAllocDir)
	c.configLock.RUnlock()

	// Store the alloc runner.
	c.allocs[alloc.ID] = ar

	if err := ar.SaveState(); err != nil {
		c.logger.Printf("[WARN] client: initial save state for alloc %q failed: %v", alloc.ID, err)
	}

	// Must release allocLock as GC acquires it to count allocs
	c.allocLock.Unlock()

	// Make room for the allocation before running it
	if err := c.garbageCollector.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		c.logger.Printf("[ERR] client: error making room for allocation: %v", err)
	}

	go ar.Run()
	return nil
}
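
// NOTE: addAlloc persists the new alloc runner's state before releasing
// allocLock, then asks the garbage collector to make room (which may evict
// terminal allocs) before the runner is actually started.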

// setupVaultClient creates an object to periodically renew tokens and secrets
// with vault.
func (c *Client) setupVaultClient() error {
	var err error
	if c.vaultClient, err =
		vaultclient.NewVaultClient(c.config.VaultConfig, c.logger, c.deriveToken); err != nil {
		return err
	}

	if c.vaultClient == nil {
		c.logger.Printf("[ERR] client: failed to create vault client")
		return fmt.Errorf("failed to create vault client")
	}

	// Start renewing tokens and secrets
	c.vaultClient.Start()

	return nil
}

// deriveToken takes in an allocation and a set of tasks and derives vault
// tokens for each of the tasks, unwraps all of them using the supplied vault
// client and returns a map of unwrapped tokens, indexed by the task name.
func (c *Client) deriveToken(alloc *structs.Allocation, taskNames []string, vclient *vaultapi.Client) (map[string]string, error) {
	if alloc == nil {
		return nil, fmt.Errorf("nil allocation")
	}

	if len(taskNames) == 0 {
		return nil, fmt.Errorf("missing task names")
	}

	group := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if group == nil {
		return nil, fmt.Errorf("group name in allocation is not present in job")
	}

	verifiedTasks := []string{}
	found := false
	// Check if the given task names actually exist in the allocation
	for _, taskName := range taskNames {
		found = false
		for _, task := range group.Tasks {
			if task.Name == taskName {
				found = true
			}
		}
		if !found {
			c.logger.Printf("[ERR] task %q not found in the allocation", taskName)
			return nil, fmt.Errorf("task %q not found in the allocation", taskName)
		}
		verifiedTasks = append(verifiedTasks, taskName)
	}

	// The Node.DeriveVaultToken RPC on the server takes a set of tasks and
	// creates tokens for all of them.
	req := &structs.DeriveVaultTokenRequest{
		NodeID:   c.Node().ID,
		SecretID: c.Node().SecretID,
		AllocID:  alloc.ID,
		Tasks:    verifiedTasks,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: false,
		},
	}

	// Derive the tokens
	var resp structs.DeriveVaultTokenResponse
	if err := c.RPC("Node.DeriveVaultToken", &req, &resp); err != nil {
		c.logger.Printf("[ERR] client.vault: DeriveVaultToken RPC failed: %v", err)
		return nil, fmt.Errorf("DeriveVaultToken RPC failed: %v", err)
	}
	if resp.Error != nil {
		c.logger.Printf("[ERR] client.vault: failed to derive vault tokens: %v", resp.Error)
		return nil, resp.Error
	}
	if resp.Tasks == nil {
		c.logger.Printf("[ERR] client.vault: failed to derive vault token: invalid response")
		return nil, fmt.Errorf("failed to derive vault tokens: invalid response")
	}

	unwrappedTokens := make(map[string]string)

	// Retrieve the wrapped tokens from the response and unwrap them
	for _, taskName := range verifiedTasks {
		// Get the wrapped token
		wrappedToken, ok := resp.Tasks[taskName]
		if !ok {
			c.logger.Printf("[ERR] client.vault: wrapped token missing for task %q", taskName)
			return nil, fmt.Errorf("wrapped token missing for task %q", taskName)
		}

		// Unwrap the vault token
		unwrapResp, err := vclient.Logical().Unwrap(wrappedToken)
		if err != nil {
			return nil, fmt.Errorf("failed to unwrap the token for task %q: %v", taskName, err)
		}
		if unwrapResp == nil || unwrapResp.Auth == nil || unwrapResp.Auth.ClientToken == "" {
			return nil, fmt.Errorf("failed to unwrap the token for task %q", taskName)
		}

		// Append the unwrapped token to the return value
		unwrappedTokens[taskName] = unwrapResp.Auth.ClientToken
	}

	return unwrappedTokens, nil
}
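
// NOTE: the server returns response-wrapped Vault tokens; deriveToken unwraps
// each one directly against Vault using the supplied vault API client before
// handing the plain tokens back to the caller.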

// triggerDiscovery causes a Consul discovery to begin (if one hasn't already)
func (c *Client) triggerDiscovery() {
	select {
	case c.triggerDiscoveryCh <- struct{}{}:
		// Discovery goroutine was released to execute
	default:
		// Discovery goroutine was already running
	}
}

// consulDiscovery waits for the signal to attempt server discovery via Consul.
// It's intended to be started in a goroutine. See triggerDiscovery() for
// causing consul discovery from other code locations.
func (c *Client) consulDiscovery() {
	for {
		select {
		case <-c.triggerDiscoveryCh:
			if err := c.consulDiscoveryImpl(); err != nil {
				c.logger.Printf("[ERR] client.consul: error discovering nomad servers: %v", err)
			}
		case <-c.shutdownCh:
			return
		}
	}
}

func (c *Client) consulDiscoveryImpl() error {
	// Acquire heartbeat lock to prevent heartbeat from running
	// concurrently with discovery. Concurrent execution is safe, however
	// discovery is usually triggered when heartbeating has failed so
	// there's no point in allowing it.
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()

	dcs, err := c.consulCatalog.Datacenters()
	if err != nil {
		return fmt.Errorf("client.consul: unable to query Consul datacenters: %v", err)
	}
	if len(dcs) > 2 {
		// Query the local DC first, then shuffle the remaining DCs.
		// Future heartbeats will cause Nomad Clients to fixate on
		// their local datacenter so it's okay to talk with remote
		// DCs. If no Nomad servers are available within
		// datacenterQueryLimit, the next heartbeat will pick a new
		// set of servers so it's okay.
		shuffleStrings(dcs[1:])
		dcs = dcs[0:lib.MinInt(len(dcs), datacenterQueryLimit)]
	}

	// Query for servers in this client's region only
	region := c.Region()
	rpcargs := structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region: region,
		},
	}

	serviceName := c.configCopy.ConsulConfig.ServerServiceName
	var mErr multierror.Error
	var servers endpoints
	c.logger.Printf("[DEBUG] client.consul: bootstrap contacting following Consul DCs: %+q", dcs)
DISCOLOOP:
	for _, dc := range dcs {
		consulOpts := &consulapi.QueryOptions{
			AllowStale: true,
			Datacenter: dc,
			Near:       "_agent",
			WaitTime:   consul.DefaultQueryWaitDuration,
		}
		consulServices, _, err := c.consulCatalog.Service(serviceName, consul.ServiceTagRPC, consulOpts)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unable to query service %+q from Consul datacenter %+q: %v", serviceName, dc, err))
			continue
		}

		for _, s := range consulServices {
			port := strconv.Itoa(s.ServicePort)
			addrstr := s.ServiceAddress
			if addrstr == "" {
				addrstr = s.Address
			}
			addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(addrstr, port))
			if err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}
			var peers []string
			if err := c.connPool.RPC(region, addr, c.RPCMajorVersion(), "Status.Peers", rpcargs, &peers); err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}

			// Successfully received the Server peers list of the correct
			// region
			for _, p := range peers {
				addr, err := net.ResolveTCPAddr("tcp", p)
				if err != nil {
					mErr.Errors = append(mErr.Errors, err)
					// Skip peers whose address cannot be resolved.
					continue
				}
				servers = append(servers, &endpoint{name: p, addr: addr})
			}
			if len(servers) > 0 {
				break DISCOLOOP
			}
		}
	}
	if len(servers) == 0 {
		if len(mErr.Errors) > 0 {
			return mErr.ErrorOrNil()
		}
		return fmt.Errorf("no Nomad Servers advertising service %q in Consul datacenters: %+q", serviceName, dcs)
	}

	c.logger.Printf("[INFO] client.consul: discovered following Servers: %s", servers)
	c.servers.set(servers)

	// Notify waiting rpc calls. If a goroutine just failed an RPC call and
	// isn't receiving on this chan yet they'll still retry eventually.
	// This is a shortcircuit for the longer retry intervals.
	for {
		select {
		case c.serversDiscoveredCh <- struct{}{}:
		default:
			return nil
		}
	}
}
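
// NOTE: discovery queries the local Consul datacenter first, then up to
// datacenterQueryLimit shuffled remote datacenters, and only accepts a
// candidate server after a successful Status.Peers RPC in the client's own
// region.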

// emitStats collects host resource usage stats periodically
func (c *Client) emitStats() {
	// Start collecting host stats right away and then keep collecting every
	// collection interval
	next := time.NewTimer(0)
	defer next.Stop()
	for {
		select {
		case <-next.C:
			err := c.hostStatsCollector.Collect()
			next.Reset(c.config.StatsCollectionInterval)
			if err != nil {
				c.logger.Printf("[WARN] client: error fetching host resource usage stats: %v", err)
				continue
			}

			// Publish Node metrics if operator has opted in
			if c.config.PublishNodeMetrics {
				c.emitHostStats(c.hostStatsCollector.Stats())
			}

			c.emitClientMetrics()
		case <-c.shutdownCh:
			return
		}
	}
}

// emitHostStats pushes host resource usage stats to remote metrics collection sinks
func (c *Client) emitHostStats(hStats *stats.HostStats) {
	nodeID := c.Node().ID
	metrics.SetGauge([]string{"client", "host", "memory", nodeID, "total"}, float32(hStats.Memory.Total))
	metrics.SetGauge([]string{"client", "host", "memory", nodeID, "available"}, float32(hStats.Memory.Available))
	metrics.SetGauge([]string{"client", "host", "memory", nodeID, "used"}, float32(hStats.Memory.Used))
	metrics.SetGauge([]string{"client", "host", "memory", nodeID, "free"}, float32(hStats.Memory.Free))

	metrics.SetGauge([]string{"uptime"}, float32(hStats.Uptime))

	for _, cpu := range hStats.CPU {
		metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "total"}, float32(cpu.Total))
		metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "user"}, float32(cpu.User))
		metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "idle"}, float32(cpu.Idle))
		metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "system"}, float32(cpu.System))
	}

	for _, disk := range hStats.DiskStats {
		metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "size"}, float32(disk.Size))
		metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "used"}, float32(disk.Used))
		metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "available"}, float32(disk.Available))
		metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "used_percent"}, float32(disk.UsedPercent))
		metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "inodes_percent"}, float32(disk.InodesUsedPercent))
	}

	// Get all the resources for the node
	c.configLock.RLock()
	node := c.configCopy.Node
	c.configLock.RUnlock()
	total := node.Resources
	res := node.Reserved
	allocated := c.getAllocatedResources(node)

	// Emit allocated
	metrics.SetGauge([]string{"client", "allocated", "memory", nodeID}, float32(allocated.MemoryMB))
	metrics.SetGauge([]string{"client", "allocated", "disk", nodeID}, float32(allocated.DiskMB))
	metrics.SetGauge([]string{"client", "allocated", "cpu", nodeID}, float32(allocated.CPU))
	metrics.SetGauge([]string{"client", "allocated", "iops", nodeID}, float32(allocated.IOPS))

	for _, n := range allocated.Networks {
		metrics.SetGauge([]string{"client", "allocated", "network", n.Device, nodeID}, float32(n.MBits))
	}

	// Emit unallocated
	unallocatedMem := total.MemoryMB - res.MemoryMB - allocated.MemoryMB
	unallocatedDisk := total.DiskMB - res.DiskMB - allocated.DiskMB
	unallocatedCpu := total.CPU - res.CPU - allocated.CPU
	unallocatedIops := total.IOPS - res.IOPS - allocated.IOPS
	metrics.SetGauge([]string{"client", "unallocated", "memory", nodeID}, float32(unallocatedMem))
	metrics.SetGauge([]string{"client", "unallocated", "disk", nodeID}, float32(unallocatedDisk))
	metrics.SetGauge([]string{"client", "unallocated", "cpu", nodeID}, float32(unallocatedCpu))
	metrics.SetGauge([]string{"client", "unallocated", "iops", nodeID}, float32(unallocatedIops))

	for _, n := range allocated.Networks {
		totalIdx := total.NetIndex(n)
		if totalIdx == -1 {
			// The node's total resources don't list this device; skip it
			// rather than reporting a bogus negative value.
			continue
		}

		totalMbits := total.Networks[totalIdx].MBits
		unallocatedMbits := totalMbits - n.MBits
		metrics.SetGauge([]string{"client", "unallocated", "network", n.Device, nodeID}, float32(unallocatedMbits))
	}
}
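
// NOTE: host gauges are keyed as [client host <subsystem> <nodeID> ...] while
// the allocated/unallocated gauges are keyed as
// [client allocated|unallocated <resource> <nodeID>], with the network gauges
// additionally carrying the device name.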

// emitClientMetrics emits lower volume client metrics
func (c *Client) emitClientMetrics() {
	nodeID := c.Node().ID

	// Emit allocation metrics
	c.blockedAllocsLock.RLock()
	blocked := len(c.blockedAllocations)
	c.blockedAllocsLock.RUnlock()

	c.migratingAllocsLock.RLock()
	migrating := len(c.migratingAllocs)
	c.migratingAllocsLock.RUnlock()

	pending, running, terminal := 0, 0, 0
	for _, ar := range c.getAllocRunners() {
		switch ar.Alloc().ClientStatus {
		case structs.AllocClientStatusPending:
			pending++
		case structs.AllocClientStatusRunning:
			running++
		case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed:
			terminal++
		}
	}

	metrics.SetGauge([]string{"client", "allocations", "migrating", nodeID}, float32(migrating))
	metrics.SetGauge([]string{"client", "allocations", "blocked", nodeID}, float32(blocked))
	metrics.SetGauge([]string{"client", "allocations", "pending", nodeID}, float32(pending))
	metrics.SetGauge([]string{"client", "allocations", "running", nodeID}, float32(running))
	metrics.SetGauge([]string{"client", "allocations", "terminal", nodeID}, float32(terminal))
}

func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.Resources {
	// Unfortunately the allocs only have IP so we need to match them to the
	// device
	cidrToDevice := make(map[*net.IPNet]string, len(selfNode.Resources.Networks))
	for _, n := range selfNode.Resources.Networks {
		_, ipnet, err := net.ParseCIDR(n.CIDR)
		if err != nil {
			continue
		}
		cidrToDevice[ipnet] = n.Device
	}

	// Sum the allocated resources
	allocs := c.allAllocs()
	var allocated structs.Resources
	allocatedDeviceMbits := make(map[string]int)
	for _, alloc := range allocs {
		if !alloc.TerminalStatus() {
			allocated.Add(alloc.Resources)
			for _, allocatedNetwork := range alloc.Resources.Networks {
				for cidr, dev := range cidrToDevice {
					ip := net.ParseIP(allocatedNetwork.IP)
					if cidr.Contains(ip) {
						allocatedDeviceMbits[dev] += allocatedNetwork.MBits
						break
					}
				}
			}
		}
	}

	// Clear the networks
	allocated.Networks = nil
	for dev, speed := range allocatedDeviceMbits {
		net := &structs.NetworkResource{
			Device: dev,
			MBits:  speed,
		}
		allocated.Networks = append(allocated.Networks, net)
	}

	return &allocated
}

// allAllocs returns all the allocations managed by the client
func (c *Client) allAllocs() map[string]*structs.Allocation {
	allocs := make(map[string]*structs.Allocation, 16)
	for _, ar := range c.getAllocRunners() {
		a := ar.Alloc()
		allocs[a.ID] = a
	}
	c.blockedAllocsLock.RLock()
	for _, alloc := range c.blockedAllocations {
		allocs[alloc.ID] = alloc
	}
	c.blockedAllocsLock.RUnlock()

	c.migratingAllocsLock.RLock()
	for _, ctrl := range c.migratingAllocs {
		allocs[ctrl.alloc.ID] = ctrl.alloc
	}
	c.migratingAllocsLock.RUnlock()
	return allocs
}

// resolveServer takes a server's address as a string and returns its resolved
// net.Addr or an error.
func resolveServer(s string) (net.Addr, error) {
	const defaultClientPort = "4647" // default client RPC port
	host, port, err := net.SplitHostPort(s)
	if err != nil {
		if strings.Contains(err.Error(), "missing port") {
			host = s
			port = defaultClientPort
		} else {
			return nil, err
		}
	}
	return net.ResolveTCPAddr("tcp", net.JoinHostPort(host, port))
}
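
// NOTE: resolveServer appends the default RPC port (4647) when the address
// has no explicit port, e.g. "10.0.0.1" resolves as "10.0.0.1:4647" while
// "10.0.0.1:4747" is used as given.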