Merge branch 'master' into docker-caps

commit 4abd269a68
@@ -6,6 +6,9 @@ language: go
go:
  - 1.9.x

+addons:
+  chrome: stable
+
git:
  depth: 300

@@ -28,13 +31,11 @@ matrix:
  - os: osx
  fast_finish: true

-cache:
-  directories:
-    - ui/node_modules
-
before_install:
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]] && [[ -z "$SKIP_NOMAD_TESTS" ]]; then sudo -E bash ./scripts/travis-mac-priv.sh ; fi
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ -z "$SKIP_NOMAD_TESTS" ]]; then sudo -E bash ./scripts/travis-linux.sh ; fi
+  - if [[ "$RUN_UI_TESTS" ]]; then curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version 1.0.1 ; fi
+  - if [[ "$RUN_UI_TESTS" ]]; then export PATH="$HOME/.yarn/bin:$PATH" ; fi

install:
  - if [[ -z "$SKIP_NOMAD_TESTS" ]]; then make deps ; fi

@@ -5,6 +5,12 @@ __BACKWARDS INCOMPATIBILITIES:__
  that absolute URLs are not allowed, but it was not enforced. Absolute URLs
  in HTTP check paths will now fail to validate. [[GH-3685](https://github.com/hashicorp/nomad/issues/3685)]

+IMPROVEMENTS:
+* core: A set of features (Autopilot) has been added to allow for automatic operator-friendly management of Nomad servers. For more information about Autopilot, see the [Autopilot Guide](https://www.nomadproject.io/guides/cluster/autopilot.html). [[GH-3670](https://github.com/hashicorp/nomad/pull/3670)]
+* discovery: Allow `check_restart` to be specified in the `service` stanza.
+  [[GH-3718](https://github.com/hashicorp/nomad/issues/3718)]
+* driver/lxc: Add volumes config to LXC driver [GH-3687]
+
BUG FIXES:
* core: Fix search endpoint forwarding for multi-region clusters [[GH-3680](https://github.com/hashicorp/nomad/issues/3680)]
* core: Fix an issue in which batch jobs with queued placements and lost

@@ -665,7 +671,7 @@ BUG FIXES:
* client: Killing an allocation doesn't cause allocation stats to block
  [[GH-1454](https://github.com/hashicorp/nomad/issues/1454)]
* driver/docker: Disable swap on docker driver [[GH-1480](https://github.com/hashicorp/nomad/issues/1480)]
-* driver/docker: Fix improper gating on privileged mode [[GH-1506](https://github.com/hashicorp/nomad/issues/1506)]
+* driver/docker: Fix improper gating on priviledged mode [[GH-1506](https://github.com/hashicorp/nomad/issues/1506)]
* driver/docker: Default network type is "nat" on Windows [[GH-1521](https://github.com/hashicorp/nomad/issues/1521)]
* driver/docker: Cleanup created volume when destroying container [[GH-1519](https://github.com/hashicorp/nomad/issues/1519)]
* driver/rkt: Set host environment variables [[GH-1581](https://github.com/hashicorp/nomad/issues/1581)]

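The Autopilot improvement listed above is exposed through the Go operator client added further down in this diff (AutopilotGetConfiguration, AutopilotServerHealth). A minimal sketch of how an operator tool might call it, assuming a Nomad agent reachable at the default address and the github.com/hashicorp/nomad/api package from this change:

package main

import (
  "fmt"
  "log"

  "github.com/hashicorp/nomad/api"
)

func main() {
  client, err := api.NewClient(api.DefaultConfig())
  if err != nil {
    log.Fatal(err)
  }
  operator := client.Operator()

  // Read the current Autopilot configuration.
  conf, err := operator.AutopilotGetConfiguration(nil)
  if err != nil {
    log.Fatal(err)
  }
  fmt.Println("cleanup_dead_servers:", conf.CleanupDeadServers)

  // Ask Autopilot for its view of server health.
  health, err := operator.AutopilotServerHealth(nil)
  if err != nil {
    log.Fatal(err)
  }
  fmt.Println("cluster healthy:", health.Healthy, "failure tolerance:", health.FailureTolerance)
}
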
@@ -283,8 +283,8 @@ static-assets: ## Compile the static routes to serve alongside the API
.PHONY: test-ui
test-ui: ## Run Nomad UI test suite
	@echo "--> Installing JavaScript assets"
+	@cd ui && npm rebuild node-sass
	@cd ui && yarn install
-	@cd ui && npm install phantomjs-prebuilt
	@echo "--> Running ember tests"
	@cd ui && phantomjs --version
	@cd ui && npm test

@@ -258,7 +258,7 @@ func TestJobs_Canonicalize(t *testing.T) {
      },
      Services: []*Service{
        {
-         Name:      "global-redis-check",
+         Name:      "redis-cache",
          Tags:      []string{"global", "cache"},
          PortLabel: "db",
          Checks: []ServiceCheck{

@@ -368,7 +368,7 @@ func TestJobs_Canonicalize(t *testing.T) {
      },
      Services: []*Service{
        {
-         Name:        "global-redis-check",
+         Name:        "redis-cache",
          Tags:        []string{"global", "cache"},
          PortLabel:   "db",
          AddressMode: "auto",

@@ -76,8 +76,6 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
  }
  r.setWriteOptions(q)

- // TODO (alexdadgar) Currently we made address a query parameter. Once
- // IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
  r.params.Set("address", address)

  _, resp, err := requireOK(op.c.doRequest(r))

@@ -88,3 +86,23 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
  resp.Body.Close()
  return nil
}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+  r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+  if err != nil {
+    return err
+  }
+  r.setWriteOptions(q)
+
+  r.params.Set("id", id)
+
+  _, resp, err := requireOK(op.c.doRequest(r))
+  if err != nil {
+    return err
+  }
+
+  resp.Body.Close()
+  return nil
+}

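The new RaftRemovePeerByID method above mirrors the existing address-based removal but targets a server by its Raft ID. A minimal usage sketch, assuming an *api.Client named client and an illustrative (not real) server ID:

// Illustrative values only; client setup is assumed.
operator := client.Operator()
if err := operator.RaftRemovePeerByID("example-raft-id", nil); err != nil {
  log.Fatalf("failed to remove stale peer: %v", err)
}
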
@@ -0,0 +1,232 @@
package api

import (
  "bytes"
  "fmt"
  "io"
  "strconv"
  "strings"
  "time"
)

// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
// Autopilot helps manage operator tasks related to Nomad servers like removing
// failed servers from the Raft quorum.
type AutopilotConfiguration struct {
  // CleanupDeadServers controls whether to remove dead servers from the Raft
  // peer list when a new server joins
  CleanupDeadServers bool

  // LastContactThreshold is the limit on the amount of time a server can go
  // without leader contact before being considered unhealthy.
  LastContactThreshold *ReadableDuration

  // MaxTrailingLogs is the amount of entries in the Raft Log that a server can
  // be behind before being considered unhealthy.
  MaxTrailingLogs uint64

  // ServerStabilizationTime is the minimum amount of time a server must be
  // in a stable, healthy state before it can be added to the cluster. Only
  // applicable with Raft protocol version 3 or higher.
  ServerStabilizationTime *ReadableDuration

  // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
  // servers into zones for redundancy. If left blank, this feature will be disabled.
  RedundancyZoneTag string

  // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
  // strategy of waiting until enough newer-versioned servers have been added to the
  // cluster before promoting them to voters.
  DisableUpgradeMigration bool

  // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
  // performing upgrade migrations. If left blank, the Nomad version will be used.
  UpgradeVersionTag string

  // CreateIndex holds the index corresponding the creation of this configuration.
  // This is a read-only field.
  CreateIndex uint64

  // ModifyIndex will be set to the index of the last update when retrieving the
  // Autopilot configuration. Resubmitting a configuration with
  // AutopilotCASConfiguration will perform a check-and-set operation which ensures
  // there hasn't been a subsequent update since the configuration was retrieved.
  ModifyIndex uint64
}

// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
  // ID is the raft ID of the server.
  ID string

  // Name is the node name of the server.
  Name string

  // Address is the address of the server.
  Address string

  // The status of the SerfHealth check for the server.
  SerfStatus string

  // Version is the Nomad version of the server.
  Version string

  // Leader is whether this server is currently the leader.
  Leader bool

  // LastContact is the time since this node's last contact with the leader.
  LastContact *ReadableDuration

  // LastTerm is the highest leader term this server has a record of in its Raft log.
  LastTerm uint64

  // LastIndex is the last log index this server has a record of in its Raft log.
  LastIndex uint64

  // Healthy is whether or not the server is healthy according to the current
  // Autopilot config.
  Healthy bool

  // Voter is whether this is a voting server.
  Voter bool

  // StableSince is the last time this server's Healthy value changed.
  StableSince time.Time
}

// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
  // Healthy is true if all the servers in the cluster are healthy.
  Healthy bool

  // FailureTolerance is the number of healthy servers that could be lost without
  // an outage occurring.
  FailureTolerance int

  // Servers holds the health of each server.
  Servers []ServerHealth
}

// ReadableDuration is a duration type that is serialized to JSON in human readable format.
type ReadableDuration time.Duration

func NewReadableDuration(dur time.Duration) *ReadableDuration {
  d := ReadableDuration(dur)
  return &d
}

func (d *ReadableDuration) String() string {
  return d.Duration().String()
}

func (d *ReadableDuration) Duration() time.Duration {
  if d == nil {
    return time.Duration(0)
  }
  return time.Duration(*d)
}

func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
  return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
}

func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
  if d == nil {
    return fmt.Errorf("cannot unmarshal to nil pointer")
  }

  str := string(raw)
  if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
    return fmt.Errorf("must be enclosed with quotes: %s", str)
  }
  dur, err := time.ParseDuration(str[1 : len(str)-1])
  if err != nil {
    return err
  }
  *d = ReadableDuration(dur)
  return nil
}

// AutopilotGetConfiguration is used to query the current Autopilot configuration.
func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
  r, err := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
  if err != nil {
    return nil, err
  }
  r.setQueryOptions(q)
  _, resp, err := requireOK(op.c.doRequest(r))
  if err != nil {
    return nil, err
  }
  defer resp.Body.Close()

  var out AutopilotConfiguration
  if err := decodeBody(resp, &out); err != nil {
    return nil, err
  }

  return &out, nil
}

// AutopilotSetConfiguration is used to set the current Autopilot configuration.
func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
  r, err := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
  if err != nil {
    return err
  }
  r.setWriteOptions(q)
  r.obj = conf
  _, resp, err := requireOK(op.c.doRequest(r))
  if err != nil {
    return err
  }
  resp.Body.Close()
  return nil
}

// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
// Autopilot configuration. The ModifyIndex value will be respected. Returns
// true on success or false on failures.
func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
  r, err := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
  if err != nil {
    return false, err
  }
  r.setWriteOptions(q)
  r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
  r.obj = conf
  _, resp, err := requireOK(op.c.doRequest(r))
  if err != nil {
    return false, err
  }
  defer resp.Body.Close()

  var buf bytes.Buffer
  if _, err := io.Copy(&buf, resp.Body); err != nil {
    return false, fmt.Errorf("Failed to read response: %v", err)
  }
  res := strings.Contains(buf.String(), "true")

  return res, nil
}

// AutopilotServerHealth is used to query Autopilot's top-level view of the health
// of each Nomad server.
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
  r, err := op.c.newRequest("GET", "/v1/operator/autopilot/health")
  if err != nil {
    return nil, err
  }
  r.setQueryOptions(q)
  _, resp, err := requireOK(op.c.doRequest(r))
  if err != nil {
    return nil, err
  }
  defer resp.Body.Close()

  var out OperatorHealthReply
  if err := decodeBody(resp, &out); err != nil {
    return nil, err
  }
  return &out, nil
}

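ReadableDuration above is what lets Autopilot durations travel over JSON as human-readable strings such as "200ms". A small round-trip sketch, assuming imports of encoding/json, fmt, log, time and the api package:

// Sketch: round-tripping a ReadableDuration through JSON.
conf := &api.AutopilotConfiguration{
  CleanupDeadServers:   true,
  LastContactThreshold: api.NewReadableDuration(200 * time.Millisecond),
}

buf, _ := json.Marshal(conf)
fmt.Println(string(buf)) // LastContactThreshold is emitted as the string "200ms"

var decoded api.AutopilotConfiguration
if err := json.Unmarshal(buf, &decoded); err != nil {
  log.Fatal(err)
}
fmt.Println(decoded.LastContactThreshold.Duration()) // 200ms
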
@@ -0,0 +1,89 @@
package api

import (
  "testing"

  "fmt"

  "github.com/hashicorp/consul/testutil/retry"
  "github.com/hashicorp/nomad/testutil"
  "github.com/stretchr/testify/assert"
)

func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) {
  t.Parallel()
  assert := assert.New(t)
  c, s := makeClient(t, nil, nil)
  defer s.Stop()

  operator := c.Operator()
  config, err := operator.AutopilotGetConfiguration(nil)
  assert.Nil(err)
  assert.True(config.CleanupDeadServers)

  // Change a config setting
  newConf := &AutopilotConfiguration{CleanupDeadServers: false}
  err = operator.AutopilotSetConfiguration(newConf, nil)
  assert.Nil(err)

  config, err = operator.AutopilotGetConfiguration(nil)
  assert.Nil(err)
  assert.False(config.CleanupDeadServers)
}

func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) {
  t.Parallel()
  assert := assert.New(t)
  c, s := makeClient(t, nil, nil)
  defer s.Stop()

  operator := c.Operator()
  config, err := operator.AutopilotGetConfiguration(nil)
  assert.Nil(err)
  assert.True(config.CleanupDeadServers)

  // Pass an invalid ModifyIndex
  {
    newConf := &AutopilotConfiguration{
      CleanupDeadServers: false,
      ModifyIndex:        config.ModifyIndex - 1,
    }
    resp, err := operator.AutopilotCASConfiguration(newConf, nil)
    assert.Nil(err)
    assert.False(resp)
  }

  // Pass a valid ModifyIndex
  {
    newConf := &AutopilotConfiguration{
      CleanupDeadServers: false,
      ModifyIndex:        config.ModifyIndex,
    }
    resp, err := operator.AutopilotCASConfiguration(newConf, nil)
    assert.Nil(err)
    assert.True(resp)
  }
}

func TestAPI_OperatorAutopilotServerHealth(t *testing.T) {
  t.Parallel()
  c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
    c.AdvertiseAddrs.RPC = "127.0.0.1"
    c.Server.RaftProtocol = 3
  })
  defer s.Stop()

  operator := c.Operator()
  retry.Run(t, func(r *retry.R) {
    out, err := operator.AutopilotServerHealth(nil)
    if err != nil {
      r.Fatalf("err: %v", err)
    }

    if len(out.Servers) != 1 ||
      !out.Servers[0].Healthy ||
      out.Servers[0].Name != fmt.Sprintf("%s.global", s.Config.NodeName) {
      r.Fatalf("bad: %v", out)
    }
  })
}

@@ -36,3 +36,18 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
    t.Fatalf("err: %v", err)
  }
}
+
+func TestOperator_RaftRemovePeerByID(t *testing.T) {
+  t.Parallel()
+  c, s := makeClient(t, nil, nil)
+  defer s.Stop()
+
+  // If we get this error, it proves we sent the address all the way
+  // through.
+  operator := c.Operator()
+  err := operator.RaftRemovePeerByID("nope", nil)
+  if err == nil || !strings.Contains(err.Error(),
+    "id \"nope\" was not found in the Raft configuration") {
+    t.Fatalf("err: %v", err)
+  }
+}

api/tasks.go

@@ -128,15 +128,15 @@ func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
    return nc
  }

- if nc.Limit == 0 {
+ if o.Limit > 0 {
    nc.Limit = o.Limit
  }

- if nc.Grace == nil {
+ if o.Grace != nil {
    nc.Grace = o.Grace
  }

- if nc.IgnoreWarnings {
+ if o.IgnoreWarnings {
    nc.IgnoreWarnings = o.IgnoreWarnings
  }

@@ -185,13 +185,11 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
    s.AddressMode = "auto"
  }

- s.CheckRestart.Canonicalize()
-
  // Canonicallize CheckRestart on Checks and merge Service.CheckRestart
  // into each check.
- for _, c := range s.Checks {
-   c.CheckRestart.Canonicalize()
-   c.CheckRestart = c.CheckRestart.Merge(s.CheckRestart)
+ for i, check := range s.Checks {
+   s.Checks[i].CheckRestart = s.CheckRestart.Merge(check.CheckRestart)
+   s.Checks[i].CheckRestart.Canonicalize()
  }
}

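With the reordered Merge above, check-level settings win and service-level settings only fill in what a check leaves unset. A small sketch of the resulting precedence, using the api package types from this file and the nomad helper package seen elsewhere in the diff:

// Service-level defaults...
serviceLevel := &api.CheckRestart{
  Limit:          11,
  Grace:          helper.TimeToPtr(11 * time.Second),
  IgnoreWarnings: true,
}

// ...merged over a check that only sets Limit.
checkLevel := &api.CheckRestart{Limit: 33}

merged := serviceLevel.Merge(checkLevel)
// merged.Limit == 33                (check-level value wins)
// *merged.Grace == 11 * time.Second (inherited from the service)
// merged.IgnoreWarnings == true     (inherited from the service)
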
@@ -3,6 +3,7 @@ package api
import (
  "reflect"
  "testing"
+ "time"

  "github.com/hashicorp/nomad/helper"
  "github.com/stretchr/testify/assert"

@@ -266,3 +267,51 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) {
  tg.Canonicalize(job)
  assert.Nil(t, tg.Update)
}
+
+// TestService_CheckRestart asserts Service.CheckRestart settings are properly
+// inherited by Checks.
+func TestService_CheckRestart(t *testing.T) {
+  job := &Job{Name: helper.StringToPtr("job")}
+  tg := &TaskGroup{Name: helper.StringToPtr("group")}
+  task := &Task{Name: "task"}
+  service := &Service{
+    CheckRestart: &CheckRestart{
+      Limit:          11,
+      Grace:          helper.TimeToPtr(11 * time.Second),
+      IgnoreWarnings: true,
+    },
+    Checks: []ServiceCheck{
+      {
+        Name: "all-set",
+        CheckRestart: &CheckRestart{
+          Limit:          22,
+          Grace:          helper.TimeToPtr(22 * time.Second),
+          IgnoreWarnings: true,
+        },
+      },
+      {
+        Name: "some-set",
+        CheckRestart: &CheckRestart{
+          Limit: 33,
+          Grace: helper.TimeToPtr(33 * time.Second),
+        },
+      },
+      {
+        Name: "unset",
+      },
+    },
+  }
+
+  service.Canonicalize(task, tg, job)
+  assert.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
+  assert.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
+  assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)
+
+  assert.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
+  assert.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
+  assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)
+
+  assert.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
+  assert.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
+  assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
+}

@@ -31,6 +31,11 @@ const (
  // Config.Options map.
  lxcConfigOption = "driver.lxc.enable"

+ // lxcVolumesConfigOption is the key for enabling the use of
+ // custom bind volumes to arbitrary host paths
+ lxcVolumesConfigOption  = "lxc.volumes.enabled"
+ lxcVolumesConfigDefault = true
+
  // containerMonitorIntv is the interval at which the driver checks if the
  // container is still alive
  containerMonitorIntv = 2 * time.Second

@@ -69,6 +74,7 @@ type LxcDriverConfig struct {
  TemplateArgs []string `mapstructure:"template_args"`
  LogLevel     string   `mapstructure:"log_level"`
  Verbosity    string
+ Volumes      []string `mapstructure:"volumes"`
}

// NewLxcDriver returns a new instance of the LXC driver

@@ -137,6 +143,10 @@ func (d *LxcDriver) Validate(config map[string]interface{}) error {
        Type:     fields.TypeString,
        Required: false,
      },
+     "volumes": {
+       Type:     fields.TypeArray,
+       Required: false,
+     },
    },
  }

@@ -144,6 +154,21 @@ func (d *LxcDriver) Validate(config map[string]interface{}) error {
    return err
  }

+ volumes, _ := fd.GetOk("volumes")
+ for _, volDesc := range volumes.([]interface{}) {
+   volStr := volDesc.(string)
+   paths := strings.Split(volStr, ":")
+   if len(paths) != 2 {
+     return fmt.Errorf("invalid volume bind mount entry: '%s'", volStr)
+   }
+   if len(paths[0]) == 0 || len(paths[1]) == 0 {
+     return fmt.Errorf("invalid volume bind mount entry: '%s'", volStr)
+   }
+   if paths[1][0] == '/' {
+     return fmt.Errorf("unsupported absolute container mount point: '%s'", paths[1])
+   }
+ }
+
  return nil
}

@@ -170,6 +195,12 @@ func (d *LxcDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, e
  }
  node.Attributes["driver.lxc.version"] = version
  node.Attributes["driver.lxc"] = "1"
+
+ // Advertise if this node supports lxc volumes
+ if d.config.ReadBoolDefault(lxcVolumesConfigOption, lxcVolumesConfigDefault) {
+   node.Attributes["driver."+lxcVolumesConfigOption] = "1"
+ }
+
  return true, nil
}

@@ -250,6 +281,25 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
    fmt.Sprintf("%s alloc none rw,bind,create=dir", ctx.TaskDir.SharedAllocDir),
    fmt.Sprintf("%s secrets none rw,bind,create=dir", ctx.TaskDir.SecretsDir),
  }

+ volumesEnabled := d.config.ReadBoolDefault(lxcVolumesConfigOption, lxcVolumesConfigDefault)
+
+ for _, volDesc := range driverConfig.Volumes {
+   // the format was checked in Validate()
+   paths := strings.Split(volDesc, ":")
+
+   if filepath.IsAbs(paths[0]) {
+     if !volumesEnabled {
+       return nil, fmt.Errorf("absolute bind-mount volume in config but '%v' is false", lxcVolumesConfigOption)
+     }
+   } else {
+     // Relative source paths are treated as relative to alloc dir
+     paths[0] = filepath.Join(ctx.TaskDir.Dir, paths[0])
+   }
+
+   mounts = append(mounts, fmt.Sprintf("%s %s none rw,bind,create=dir", paths[0], paths[1]))
+ }
+
  for _, mnt := range mounts {
    if err := c.SetConfigItem("lxc.mount.entry", mnt); err != nil {
      return nil, fmt.Errorf("error setting bind mount %q error: %v", mnt, err)

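The validation above accepts host:container pairs whose container path is relative to the container rootfs, and Start() resolves relative host paths against the task directory. An illustrative task config sketch (the structs.Task value and paths are examples, not from this commit):

// Illustrative LXC task config using the new "volumes" option.
// Container paths must be relative to the container's rootfs; relative host
// paths are resolved against the task directory at start time.
task.Config = map[string]interface{}{
  "template": "/usr/share/lxc/templates/lxc-busybox",
  "volumes":  []string{"/opt/data:mnt/data", "local/cache:mnt/cache"},
}
// "volumes": []string{"/opt/data:/mnt/data"} would fail Validate():
// absolute container mount points are rejected.
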
@@ -3,8 +3,11 @@
package driver

import (
+ "bytes"
  "fmt"
+ "io/ioutil"
  "os"
+ "os/exec"
  "path/filepath"
  "testing"
  "time"

@@ -69,11 +72,25 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
    Driver: "lxc",
    Config: map[string]interface{}{
      "template": "/usr/share/lxc/templates/lxc-busybox",
+     "volumes":  []string{"/tmp/:mnt/tmp"},
    },
    KillTimeout: 10 * time.Second,
    Resources:   structs.DefaultResources(),
  }

+ testFileContents := []byte("this should be visible under /mnt/tmp")
+ tmpFile, err := ioutil.TempFile("/tmp", "testlxcdriver_start_wait")
+ if err != nil {
+   t.Fatalf("error writing temp file: %v", err)
+ }
+ defer os.Remove(tmpFile.Name())
+ if _, err := tmpFile.Write(testFileContents); err != nil {
+   t.Fatalf("error writing temp file: %v", err)
+ }
+ if err := tmpFile.Close(); err != nil {
+   t.Fatalf("error closing temp file: %v", err)
+ }
+
  ctx := testDriverContexts(t, task)
  defer ctx.AllocDir.Destroy()
  d := NewLxcDriver(ctx.DriverCtx)

@@ -106,7 +123,7 @@ func TestLxcDriver_Start_Wait(t *testing.T) {

  // Look for mounted directories in their proper location
  containerName := fmt.Sprintf("%s-%s", task.Name, ctx.DriverCtx.allocID)
- for _, mnt := range []string{"alloc", "local", "secrets"} {
+ for _, mnt := range []string{"alloc", "local", "secrets", "mnt/tmp"} {
    fullpath := filepath.Join(lxcHandle.lxcPath, containerName, "rootfs", mnt)
    stat, err := os.Stat(fullpath)
    if err != nil {

@@ -117,6 +134,16 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
    }
  }

+ // Test that /mnt/tmp/$tempFile exists in the container:
+ mountedContents, err := exec.Command("lxc-attach", "-n", containerName, "--", "cat", filepath.Join("/mnt/", tmpFile.Name())).Output()
+ if err != nil {
+   t.Fatalf("err reading temp file in bind mount: %v", err)
+ }
+
+ if !bytes.Equal(mountedContents, testFileContents) {
+   t.Fatalf("contents of temp bind mounted file did not match, was '%s'", mountedContents)
+ }
+
  // Desroy the container
  if err := sresp.Handle.Kill(); err != nil {
    t.Fatalf("err: %v", err)

@@ -200,3 +227,98 @@ func TestLxcDriver_Open_Wait(t *testing.T) {
func lxcPresent(t *testing.T) bool {
  return lxc.Version() != ""
}
+
+func TestLxcDriver_Volumes_ConfigValidation(t *testing.T) {
+  if !testutil.IsTravis() {
+    t.Parallel()
+  }
+  if !lxcPresent(t) {
+    t.Skip("lxc not present")
+  }
+  ctestutil.RequireRoot(t)
+
+  brokenVolumeConfigs := [][]string{
+    {
+      "foo:/var",
+    },
+    {
+      ":",
+    },
+    {
+      "abc:",
+    },
+    {
+      ":def",
+    },
+    {
+      "abc:def:ghi",
+    },
+  }
+
+  for _, bc := range brokenVolumeConfigs {
+    if err := testVolumeConfig(t, bc); err == nil {
+      t.Fatalf("error expected in validate for config %+v", bc)
+    }
+  }
+  if err := testVolumeConfig(t, []string{"abc:def"}); err != nil {
+    t.Fatalf("error in validate for syntactically valid config abc:def")
+  }
+}
+
+func testVolumeConfig(t *testing.T, volConfig []string) error {
+  task := &structs.Task{
+    Name:        "voltest",
+    Driver:      "lxc",
+    KillTimeout: 10 * time.Second,
+    Resources:   structs.DefaultResources(),
+    Config: map[string]interface{}{
+      "template": "busybox",
+    },
+  }
+  task.Config["volumes"] = volConfig
+
+  ctx := testDriverContexts(t, task)
+  defer ctx.AllocDir.Destroy()
+
+  driver := NewLxcDriver(ctx.DriverCtx)
+
+  err := driver.Validate(task.Config)
+  return err
+
+}
+
+func TestLxcDriver_Start_NoVolumes(t *testing.T) {
+  if !testutil.IsTravis() {
+    t.Parallel()
+  }
+  if !lxcPresent(t) {
+    t.Skip("lxc not present")
+  }
+  ctestutil.RequireRoot(t)
+
+  task := &structs.Task{
+    Name:   "foo",
+    Driver: "lxc",
+    Config: map[string]interface{}{
+      "template": "/usr/share/lxc/templates/lxc-busybox",
+      "volumes":  []string{"/tmp/:mnt/tmp"},
+    },
+    KillTimeout: 10 * time.Second,
+    Resources:   structs.DefaultResources(),
+  }
+
+  ctx := testDriverContexts(t, task)
+  defer ctx.AllocDir.Destroy()
+
+  ctx.DriverCtx.config.Options = map[string]string{lxcVolumesConfigOption: "false"}
+
+  d := NewLxcDriver(ctx.DriverCtx)
+
+  if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
+    t.Fatalf("prestart err: %v", err)
+  }
+  _, err := d.Start(ctx.ExecCtx, task)
+  if err == nil {
+    t.Fatalf("expected error in start, got nil.")
+  }
+}

@@ -160,6 +160,32 @@ func convertServerConfig(agentConfig *Config, logOutput io.Writer) (*nomad.Confi
  if agentConfig.Sentinel != nil {
    conf.SentinelConfig = agentConfig.Sentinel
  }
+ if agentConfig.Server.NonVotingServer {
+   conf.NonVoter = true
+ }
+ if agentConfig.Autopilot != nil {
+   if agentConfig.Autopilot.CleanupDeadServers != nil {
+     conf.AutopilotConfig.CleanupDeadServers = *agentConfig.Autopilot.CleanupDeadServers
+   }
+   if agentConfig.Autopilot.ServerStabilizationTime != 0 {
+     conf.AutopilotConfig.ServerStabilizationTime = agentConfig.Autopilot.ServerStabilizationTime
+   }
+   if agentConfig.Autopilot.LastContactThreshold != 0 {
+     conf.AutopilotConfig.LastContactThreshold = agentConfig.Autopilot.LastContactThreshold
+   }
+   if agentConfig.Autopilot.MaxTrailingLogs != 0 {
+     conf.AutopilotConfig.MaxTrailingLogs = uint64(agentConfig.Autopilot.MaxTrailingLogs)
+   }
+   if agentConfig.Autopilot.RedundancyZoneTag != "" {
+     conf.AutopilotConfig.RedundancyZoneTag = agentConfig.Autopilot.RedundancyZoneTag
+   }
+   if agentConfig.Autopilot.DisableUpgradeMigration != nil {
+     conf.AutopilotConfig.DisableUpgradeMigration = *agentConfig.Autopilot.DisableUpgradeMigration
+   }
+   if agentConfig.Autopilot.UpgradeVersionTag != "" {
+     conf.AutopilotConfig.UpgradeVersionTag = agentConfig.Autopilot.UpgradeVersionTag
+   }
+ }

  // Set up the bind addresses
  rpcAddr, err := net.ResolveTCPAddr("tcp", agentConfig.normalizedAddrs.RPC)

@@ -67,6 +67,7 @@ server {
  bootstrap_expect = 5
  data_dir = "/tmp/data"
  protocol_version = 3
+ raft_protocol = 3
  num_schedulers = 2
  enabled_schedulers = ["test"]
  node_gc_threshold = "12h"

@@ -81,6 +82,7 @@ server {
  retry_max = 3
  retry_interval = "15s"
  rejoin_after_leave = true
+ non_voting_server = true
  encrypt = "abc"
}
acl {

@@ -159,3 +161,12 @@ sentinel {
    args = ["x", "y", "z"]
  }
}
+autopilot {
+ cleanup_dead_servers = true
+ disable_upgrade_migration = true
+ last_contact_threshold = "12705s"
+ max_trailing_logs = 17849
+ redundancy_zone_tag = "foo"
+ server_stabilization_time = "23057s"
+ upgrade_version_tag = "bar"
+}

@@ -130,6 +130,9 @@ type Config struct {
  // Sentinel holds sentinel related settings
  Sentinel *config.SentinelConfig `mapstructure:"sentinel"`
+
+ // Autopilot contains the configuration for Autopilot behavior.
+ Autopilot *config.AutopilotConfig `mapstructure:"autopilot"`
}

// ClientConfig is configuration specific to the client mode

@@ -327,6 +330,10 @@ type ServerConfig struct {
  // true, we ignore the leave, and rejoin the cluster on start.
  RejoinAfterLeave bool `mapstructure:"rejoin_after_leave"`

+ // NonVotingServer is whether this server will act as a non-voting member
+ // of the cluster to help provide read scalability. (Enterprise-only)
+ NonVotingServer bool `mapstructure:"non_voting_server"`
+
  // Encryption key to use for the Serf communication
  EncryptKey string `mapstructure:"encrypt" json:"-"`
}

@@ -604,6 +611,7 @@ func DefaultConfig() *Config {
    TLSConfig: &config.TLSConfig{},
    Sentinel:  &config.SentinelConfig{},
    Version:   version.GetVersion(),
+   Autopilot: config.DefaultAutopilotConfig(),
  }
}

@@ -762,6 +770,13 @@ func (c *Config) Merge(b *Config) *Config {
    result.Sentinel = result.Sentinel.Merge(b.Sentinel)
  }

+ if result.Autopilot == nil && b.Autopilot != nil {
+   autopilot := *b.Autopilot
+   result.Autopilot = &autopilot
+ } else if b.Autopilot != nil {
+   result.Autopilot = result.Autopilot.Merge(b.Autopilot)
+ }
+
  // Merge config files lists
  result.Files = append(result.Files, b.Files...)

@@ -1016,6 +1031,9 @@ func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
  if b.RejoinAfterLeave {
    result.RejoinAfterLeave = true
  }
+ if b.NonVotingServer {
+   result.NonVotingServer = true
+ }
  if b.EncryptKey != "" {
    result.EncryptKey = b.EncryptKey
  }

@@ -98,6 +98,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
    "http_api_response_headers",
    "acl",
    "sentinel",
+   "autopilot",
  }
  if err := helper.CheckHCLKeys(list, valid); err != nil {
    return multierror.Prefix(err, "config:")

@@ -121,6 +122,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
  delete(m, "http_api_response_headers")
  delete(m, "acl")
  delete(m, "sentinel")
+ delete(m, "autopilot")

  // Decode the rest
  if err := mapstructure.WeakDecode(m, result); err != nil {

@@ -204,6 +206,13 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
    }
  }

+ // Parse Autopilot config
+ if o := list.Filter("autopilot"); len(o.Items) > 0 {
+   if err := parseAutopilot(&result.Autopilot, o); err != nil {
+     return multierror.Prefix(err, "autopilot->")
+   }
+ }
+
  // Parse out http_api_response_headers fields. These are in HCL as a list so
  // we need to iterate over them and merge them.
  if headersO := list.Filter("http_api_response_headers"); len(headersO.Items) > 0 {

@@ -509,6 +518,7 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
    "bootstrap_expect",
    "data_dir",
    "protocol_version",
+   "raft_protocol",
    "num_schedulers",
    "enabled_schedulers",
    "node_gc_threshold",

@@ -525,6 +535,7 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
    "rejoin_after_leave",
    "encrypt",
    "authoritative_region",
+   "non_voting_server",
  }
  if err := helper.CheckHCLKeys(listVal, valid); err != nil {
    return err

@@ -838,3 +849,49 @@ func parseSentinel(result **config.SentinelConfig, list *ast.ObjectList) error {
  *result = &config
  return nil
}
+
+func parseAutopilot(result **config.AutopilotConfig, list *ast.ObjectList) error {
+  list = list.Elem()
+  if len(list.Items) > 1 {
+    return fmt.Errorf("only one 'autopilot' block allowed")
+  }
+
+  // Get our Autopilot object
+  listVal := list.Items[0].Val
+
+  // Check for invalid keys
+  valid := []string{
+    "cleanup_dead_servers",
+    "server_stabilization_time",
+    "last_contact_threshold",
+    "max_trailing_logs",
+    "redundancy_zone_tag",
+    "disable_upgrade_migration",
+    "upgrade_version_tag",
+  }
+
+  if err := helper.CheckHCLKeys(listVal, valid); err != nil {
+    return err
+  }
+
+  var m map[string]interface{}
+  if err := hcl.DecodeObject(&m, listVal); err != nil {
+    return err
+  }
+
+  autopilotConfig := config.DefaultAutopilotConfig()
+  dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+    DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+    WeaklyTypedInput: true,
+    Result:           &autopilotConfig,
+  })
+  if err != nil {
+    return err
+  }
+  if err := dec.Decode(m); err != nil {
+    return err
+  }
+
+  *result = autopilotConfig
+  return nil
+}

@@ -88,6 +88,7 @@ func TestConfig_Parse(t *testing.T) {
      BootstrapExpect:   5,
      DataDir:           "/tmp/data",
      ProtocolVersion:   3,
+     RaftProtocol:      3,
      NumSchedulers:     2,
      EnabledSchedulers: []string{"test"},
      NodeGCThreshold:   "12h",

@@ -102,6 +103,7 @@ func TestConfig_Parse(t *testing.T) {
      RetryInterval:    "15s",
      RejoinAfterLeave: true,
      RetryMaxAttempts: 3,
+     NonVotingServer:  true,
      EncryptKey:       "abc",
    },
    ACL: &ACLConfig{

@@ -186,6 +188,15 @@ func TestConfig_Parse(t *testing.T) {
        },
      },
    },
+   Autopilot: &config.AutopilotConfig{
+     CleanupDeadServers:      &trueValue,
+     ServerStabilizationTime: 23057 * time.Second,
+     LastContactThreshold:    12705 * time.Second,
+     MaxTrailingLogs:         17849,
+     RedundancyZoneTag:       "foo",
+     DisableUpgradeMigration: &trueValue,
+     UpgradeVersionTag:       "bar",
+   },
  },
  false,
  },

@@ -35,6 +35,7 @@ func TestConfig_Merge(t *testing.T) {
    Vault:     &config.VaultConfig{},
    Consul:    &config.ConsulConfig{},
    Sentinel:  &config.SentinelConfig{},
+   Autopilot: &config.AutopilotConfig{},
  }

  c2 := &Config{

@@ -100,6 +101,7 @@ func TestConfig_Merge(t *testing.T) {
      BootstrapExpect: 1,
      DataDir:         "/tmp/data1",
      ProtocolVersion: 1,
+     RaftProtocol:    1,
      NumSchedulers:   1,
      NodeGCThreshold: "1h",
      HeartbeatGrace:  30 * time.Second,

@@ -158,6 +160,15 @@ func TestConfig_Merge(t *testing.T) {
      ClientAutoJoin:     &falseValue,
      ChecksUseAdvertise: &falseValue,
    },
+   Autopilot: &config.AutopilotConfig{
+     CleanupDeadServers:      &falseValue,
+     ServerStabilizationTime: 1 * time.Second,
+     LastContactThreshold:    1 * time.Second,
+     MaxTrailingLogs:         1,
+     RedundancyZoneTag:       "1",
+     DisableUpgradeMigration: &falseValue,
+     UpgradeVersionTag:       "1",
+   },
  }

  c3 := &Config{

@@ -248,6 +259,7 @@ func TestConfig_Merge(t *testing.T) {
      RetryJoin:     []string{"1.1.1.1"},
      RetryInterval: "10s",
      retryInterval: time.Second * 10,
+     NonVotingServer: true,
    },
    ACL: &ACLConfig{
      Enabled: true,

@@ -311,6 +323,15 @@ func TestConfig_Merge(t *testing.T) {
        },
      },
    },
+   Autopilot: &config.AutopilotConfig{
+     CleanupDeadServers:      &trueValue,
+     ServerStabilizationTime: 2 * time.Second,
+     LastContactThreshold:    2 * time.Second,
+     MaxTrailingLogs:         2,
+     RedundancyZoneTag:       "2",
+     DisableUpgradeMigration: &trueValue,
+     UpgradeVersionTag:       "2",
+   },
  }

  result := c0.Merge(c1)

@@ -18,6 +18,7 @@ import (
  assetfs "github.com/elazarl/go-bindata-assetfs"
  "github.com/hashicorp/nomad/helper/tlsutil"
  "github.com/hashicorp/nomad/nomad/structs"
+ "github.com/mitchellh/mapstructure"
  "github.com/rs/cors"
  "github.com/ugorji/go/codec"
)

@@ -183,7 +184,9 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
  s.mux.HandleFunc("/v1/search", s.wrap(s.SearchRequest))

- s.mux.HandleFunc("/v1/operator/", s.wrap(s.OperatorRequest))
+ s.mux.HandleFunc("/v1/operator/raft/", s.wrap(s.OperatorRequest))
+ s.mux.HandleFunc("/v1/operator/autopilot/configuration", s.wrap(s.OperatorAutopilotConfiguration))
+ s.mux.HandleFunc("/v1/operator/autopilot/health", s.wrap(s.OperatorServerHealth))

  s.mux.HandleFunc("/v1/system/gc", s.wrap(s.GarbageCollectRequest))
  s.mux.HandleFunc("/v1/system/reconcile/summaries", s.wrap(s.ReconcileJobSummaries))

@@ -337,6 +340,24 @@ func decodeBody(req *http.Request, out interface{}) error {
  return dec.Decode(&out)
}

+// decodeBodyFunc is used to decode a JSON request body invoking
+// a given callback function
+func decodeBodyFunc(req *http.Request, out interface{}, cb func(interface{}) error) error {
+  var raw interface{}
+  dec := json.NewDecoder(req.Body)
+  if err := dec.Decode(&raw); err != nil {
+    return err
+  }
+
+  // Invoke the callback prior to decode
+  if cb != nil {
+    if err := cb(raw); err != nil {
+      return err
+    }
+  }
+  return mapstructure.Decode(raw, out)
+}
+
// setIndex is used to set the index response header
func setIndex(resp http.ResponseWriter, index uint64) {
  resp.Header().Set("X-Nomad-Index", strconv.FormatUint(index, 10))

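decodeBodyFunc above exists so a handler can normalize the raw JSON before the strict mapstructure decode; the Autopilot endpoint later in this diff uses it to convert human-readable duration strings. A hypothetical handler-side sketch (the request type, field name, and callback are illustrative, not part of this change):

// Hypothetical request shape; the callback converts "30s"-style strings so the
// final mapstructure decode sees a time.Duration.
type maintenanceRequest struct {
  Enabled bool
  Grace   time.Duration
}

fixGrace := func(raw interface{}) error {
  if m, ok := raw.(map[string]interface{}); ok {
    if s, ok := m["Grace"].(string); ok {
      d, err := time.ParseDuration(s)
      if err != nil {
        return err
      }
      m["Grace"] = d
    }
  }
  return nil
}

var out maintenanceRequest
if err := decodeBodyFunc(req, &out, fixGrace); err != nil {
  resp.WriteHeader(http.StatusBadRequest)
  return nil, nil
}
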
@@ -1212,6 +1212,10 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
            Name:      "serviceA",
            Tags:      []string{"1", "2"},
            PortLabel: "foo",
+           CheckRestart: &api.CheckRestart{
+             Limit: 4,
+             Grace: helper.TimeToPtr(11 * time.Second),
+           },
            Checks: []api.ServiceCheck{
              {
                Id: "hello",

@@ -1228,10 +1232,17 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
                InitialStatus: "ok",
                CheckRestart: &api.CheckRestart{
                  Limit: 3,
-                 Grace: helper.TimeToPtr(10 * time.Second),
                  IgnoreWarnings: true,
                },
              },
+             {
+               Id:        "check2id",
+               Name:      "check2",
+               Type:      "tcp",
+               PortLabel: "foo",
+               Interval:  4 * time.Second,
+               Timeout:   2 * time.Second,
+             },
            },
          },
        },

@@ -1425,10 +1436,21 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
                InitialStatus: "ok",
                CheckRestart: &structs.CheckRestart{
                  Limit: 3,
-                 Grace: 10 * time.Second,
+                 Grace: 11 * time.Second,
                  IgnoreWarnings: true,
                },
              },
+             {
+               Name:      "check2",
+               Type:      "tcp",
+               PortLabel: "foo",
+               Interval:  4 * time.Second,
+               Timeout:   2 * time.Second,
+               CheckRestart: &structs.CheckRestart{
+                 Limit: 4,
+                 Grace: 11 * time.Second,
+               },
+             },
            },
          },
        },

@@ -4,6 +4,12 @@ import (
  "net/http"
  "strings"

+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/consul/agent/consul/autopilot"
+ "github.com/hashicorp/nomad/api"
  "github.com/hashicorp/nomad/nomad/structs"
  "github.com/hashicorp/raft"
)

@@ -49,21 +55,222 @@ func (s *HTTPServer) OperatorRaftPeer(resp http.ResponseWriter, req *http.Reques
    return nil, nil
  }

- var args structs.RaftPeerByAddressRequest
- s.parseWriteRequest(req, &args.WriteRequest)
-
  params := req.URL.Query()
- if _, ok := params["address"]; ok {
-   args.Address = raft.ServerAddress(params.Get("address"))
- } else {
+ _, hasID := params["id"]
+ _, hasAddress := params["address"]
+
+ if !hasID && !hasAddress {
    resp.WriteHeader(http.StatusBadRequest)
-   resp.Write([]byte("Must specify ?address with IP:port of peer to remove"))
+   fmt.Fprint(resp, "Must specify either ?id with the server's ID or ?address with IP:port of peer to remove")
+   return nil, nil
+ }
+ if hasID && hasAddress {
+   resp.WriteHeader(http.StatusBadRequest)
+   fmt.Fprint(resp, "Must specify only one of ?id or ?address")
    return nil, nil
  }

- var reply struct{}
- if err := s.agent.RPC("Operator.RaftRemovePeerByAddress", &args, &reply); err != nil {
-   return nil, err
+ if hasID {
+   var args structs.RaftPeerByIDRequest
+   s.parseWriteRequest(req, &args.WriteRequest)
+
+   var reply struct{}
+   args.ID = raft.ServerID(params.Get("id"))
+   if err := s.agent.RPC("Operator.RaftRemovePeerByID", &args, &reply); err != nil {
+     return nil, err
+   }
+ } else {
+   var args structs.RaftPeerByAddressRequest
+   s.parseWriteRequest(req, &args.WriteRequest)
+
+   var reply struct{}
+   args.Address = raft.ServerAddress(params.Get("address"))
+   if err := s.agent.RPC("Operator.RaftRemovePeerByAddress", &args, &reply); err != nil {
+     return nil, err
+   }
  }

  return nil, nil
}

+// OperatorAutopilotConfiguration is used to inspect the current Autopilot configuration.
+// This supports the stale query mode in case the cluster doesn't have a leader.
+func (s *HTTPServer) OperatorAutopilotConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
+  // Switch on the method
+  switch req.Method {
+  case "GET":
+    var args structs.GenericRequest
+    if done := s.parse(resp, req, &args.Region, &args.QueryOptions); done {
+      return nil, nil
+    }
+
+    var reply autopilot.Config
+    if err := s.agent.RPC("Operator.AutopilotGetConfiguration", &args, &reply); err != nil {
+      return nil, err
+    }
+
+    out := api.AutopilotConfiguration{
+      CleanupDeadServers:      reply.CleanupDeadServers,
+      LastContactThreshold:    api.NewReadableDuration(reply.LastContactThreshold),
+      MaxTrailingLogs:         reply.MaxTrailingLogs,
+      ServerStabilizationTime: api.NewReadableDuration(reply.ServerStabilizationTime),
+      RedundancyZoneTag:       reply.RedundancyZoneTag,
+      DisableUpgradeMigration: reply.DisableUpgradeMigration,
+      UpgradeVersionTag:       reply.UpgradeVersionTag,
+      CreateIndex:             reply.CreateIndex,
+      ModifyIndex:             reply.ModifyIndex,
+    }
+
+    return out, nil
+
+  case "PUT":
+    var args structs.AutopilotSetConfigRequest
+    s.parseRegion(req, &args.Region)
+    s.parseToken(req, &args.AuthToken)
+
+    var conf api.AutopilotConfiguration
+    durations := NewDurationFixer("lastcontactthreshold", "serverstabilizationtime")
+    if err := decodeBodyFunc(req, &conf, durations.FixupDurations); err != nil {
+      resp.WriteHeader(http.StatusBadRequest)
+      fmt.Fprintf(resp, "Error parsing autopilot config: %v", err)
+      return nil, nil
+    }
+
+    args.Config = autopilot.Config{
+      CleanupDeadServers:      conf.CleanupDeadServers,
+      LastContactThreshold:    conf.LastContactThreshold.Duration(),
+      MaxTrailingLogs:         conf.MaxTrailingLogs,
+      ServerStabilizationTime: conf.ServerStabilizationTime.Duration(),
+      RedundancyZoneTag:       conf.RedundancyZoneTag,
+      DisableUpgradeMigration: conf.DisableUpgradeMigration,
+      UpgradeVersionTag:       conf.UpgradeVersionTag,
+    }
+
+    // Check for cas value
+    params := req.URL.Query()
+    if _, ok := params["cas"]; ok {
+      casVal, err := strconv.ParseUint(params.Get("cas"), 10, 64)
+      if err != nil {
+        resp.WriteHeader(http.StatusBadRequest)
+        fmt.Fprintf(resp, "Error parsing cas value: %v", err)
+        return nil, nil
+      }
+      args.Config.ModifyIndex = casVal
+      args.CAS = true
+    }
+
+    var reply bool
+    if err := s.agent.RPC("Operator.AutopilotSetConfiguration", &args, &reply); err != nil {
+      return nil, err
+    }
+
+    // Only use the out value if this was a CAS
+    if !args.CAS {
+      return true, nil
+    }
+    return reply, nil
+
+  default:
+    resp.WriteHeader(http.StatusMethodNotAllowed)
+    return nil, nil
+  }
+}
+
+// OperatorServerHealth is used to get the health of the servers in the given Region.
+func (s *HTTPServer) OperatorServerHealth(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
+  if req.Method != "GET" {
+    resp.WriteHeader(http.StatusMethodNotAllowed)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var args structs.GenericRequest
|
||||||
|
if done := s.parse(resp, req, &args.Region, &args.QueryOptions); done {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var reply autopilot.OperatorHealthReply
|
||||||
|
if err := s.agent.RPC("Operator.ServerHealth", &args, &reply); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reply with status 429 if something is unhealthy
|
||||||
|
if !reply.Healthy {
|
||||||
|
resp.WriteHeader(http.StatusTooManyRequests)
|
||||||
|
}
|
||||||
|
|
||||||
|
out := &api.OperatorHealthReply{
|
||||||
|
Healthy: reply.Healthy,
|
||||||
|
FailureTolerance: reply.FailureTolerance,
|
||||||
|
}
|
||||||
|
for _, server := range reply.Servers {
|
||||||
|
out.Servers = append(out.Servers, api.ServerHealth{
|
||||||
|
ID: server.ID,
|
||||||
|
Name: server.Name,
|
||||||
|
Address: server.Address,
|
||||||
|
Version: server.Version,
|
||||||
|
Leader: server.Leader,
|
||||||
|
SerfStatus: server.SerfStatus.String(),
|
||||||
|
LastContact: api.NewReadableDuration(server.LastContact),
|
||||||
|
LastTerm: server.LastTerm,
|
||||||
|
LastIndex: server.LastIndex,
|
||||||
|
Healthy: server.Healthy,
|
||||||
|
Voter: server.Voter,
|
||||||
|
StableSince: server.StableSince.Round(time.Second).UTC(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type durationFixer map[string]bool
|
||||||
|
|
||||||
|
func NewDurationFixer(fields ...string) durationFixer {
|
||||||
|
d := make(map[string]bool)
|
||||||
|
for _, field := range fields {
|
||||||
|
d[field] = true
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// FixupDurations is used to handle parsing any field names in the map to time.Durations
|
||||||
|
func (d durationFixer) FixupDurations(raw interface{}) error {
|
||||||
|
rawMap, ok := raw.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for key, val := range rawMap {
|
||||||
|
switch val.(type) {
|
||||||
|
case map[string]interface{}:
|
||||||
|
if err := d.FixupDurations(val); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case []interface{}:
|
||||||
|
for _, v := range val.([]interface{}) {
|
||||||
|
if err := d.FixupDurations(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case []map[string]interface{}:
|
||||||
|
for _, v := range val.([]map[string]interface{}) {
|
||||||
|
if err := d.FixupDurations(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
if d[strings.ToLower(key)] {
|
||||||
|
// Convert a string value into an integer
|
||||||
|
if vStr, ok := val.(string); ok {
|
||||||
|
dur, err := time.ParseDuration(vStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rawMap[key] = dur
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
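Reviewer note: the new Autopilot endpoints added above are plain GET/PUT JSON handlers. A small sketch of exercising them; the agent address is a placeholder, the path and the CleanupDeadServers field are taken from the handlers and tests in this diff, and ?cas=<modify-index> can be appended to make the write check-and-set:

	package main

	import (
		"bytes"
		"log"
		"net/http"
	)

	func main() {
		// Read the current Autopilot configuration.
		getResp, err := http.Get("http://127.0.0.1:4646/v1/operator/autopilot/configuration")
		if err != nil {
			log.Fatal(err)
		}
		getResp.Body.Close()

		// Update a single field; unspecified fields decode to their zero values,
		// so real callers should round-trip the full configuration object.
		body := bytes.NewBufferString(`{"CleanupDeadServers": false}`)
		req, err := http.NewRequest("PUT",
			"http://127.0.0.1:4646/v1/operator/autopilot/configuration", body)
		if err != nil {
			log.Fatal(err)
		}
		putResp, err := http.DefaultClient.Do(req)
		if err != nil {
			log.Fatal(err)
		}
		putResp.Body.Close()
	}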
@@ -2,12 +2,18 @@ package agent
 
 import (
 	"bytes"
+	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"strings"
 	"testing"
+	"time"
+
+	"github.com/hashicorp/consul/agent/consul/autopilot"
+	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestHTTP_OperatorRaftConfiguration(t *testing.T) {
@@ -40,13 +46,12 @@ func TestHTTP_OperatorRaftConfiguration(t *testing.T) {
 }
 
 func TestHTTP_OperatorRaftPeer(t *testing.T) {
+	assert := assert.New(t)
 	t.Parallel()
 	httpTest(t, nil, func(s *TestAgent) {
 		body := bytes.NewBuffer(nil)
 		req, err := http.NewRequest("DELETE", "/v1/operator/raft/peer?address=nope", body)
-		if err != nil {
-			t.Fatalf("err: %v", err)
-		}
+		assert.Nil(err)
 
 		// If we get this error, it proves we sent the address all the
 		// way through.
@@ -57,4 +62,244 @@ func TestHTTP_OperatorRaftPeer(t *testing.T) {
 			t.Fatalf("err: %v", err)
 		}
 	})
+
+	httpTest(t, nil, func(s *TestAgent) {
+		body := bytes.NewBuffer(nil)
+		req, err := http.NewRequest("DELETE", "/v1/operator/raft/peer?id=nope", body)
+		assert.Nil(err)
+
+		// If we get this error, it proves we sent the address all the
+		// way through.
+		resp := httptest.NewRecorder()
+		_, err = s.Server.OperatorRaftPeer(resp, req)
+		if err == nil || !strings.Contains(err.Error(),
+			"id \"nope\" was not found in the Raft configuration") {
+			t.Fatalf("err: %v", err)
+		}
+	})
 }
|
|
||||||
|
func TestOperator_AutopilotGetConfiguration(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
httpTest(t, nil, func(s *TestAgent) {
|
||||||
|
body := bytes.NewBuffer(nil)
|
||||||
|
req, _ := http.NewRequest("GET", "/v1/operator/autopilot/configuration", body)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
obj, err := s.Server.OperatorAutopilotConfiguration(resp, req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if resp.Code != 200 {
|
||||||
|
t.Fatalf("bad code: %d", resp.Code)
|
||||||
|
}
|
||||||
|
out, ok := obj.(api.AutopilotConfiguration)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("unexpected: %T", obj)
|
||||||
|
}
|
||||||
|
if !out.CleanupDeadServers {
|
||||||
|
t.Fatalf("bad: %#v", out)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperator_AutopilotSetConfiguration(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
httpTest(t, nil, func(s *TestAgent) {
|
||||||
|
body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`))
|
||||||
|
req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
if _, err := s.Server.OperatorAutopilotConfiguration(resp, req); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if resp.Code != 200 {
|
||||||
|
t.Fatalf("bad code: %d", resp.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
args := structs.GenericRequest{
|
||||||
|
QueryOptions: structs.QueryOptions{
|
||||||
|
Region: s.Config.Region,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var reply autopilot.Config
|
||||||
|
if err := s.RPC("Operator.AutopilotGetConfiguration", &args, &reply); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if reply.CleanupDeadServers {
|
||||||
|
t.Fatalf("bad: %#v", reply)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperator_AutopilotCASConfiguration(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
httpTest(t, nil, func(s *TestAgent) {
|
||||||
|
body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`))
|
||||||
|
req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
if _, err := s.Server.OperatorAutopilotConfiguration(resp, req); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if resp.Code != 200 {
|
||||||
|
t.Fatalf("bad code: %d", resp.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
args := structs.GenericRequest{
|
||||||
|
QueryOptions: structs.QueryOptions{
|
||||||
|
Region: s.Config.Region,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var reply autopilot.Config
|
||||||
|
if err := s.RPC("Operator.AutopilotGetConfiguration", &args, &reply); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if reply.CleanupDeadServers {
|
||||||
|
t.Fatalf("bad: %#v", reply)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a CAS request, bad index
|
||||||
|
{
|
||||||
|
buf := bytes.NewBuffer([]byte(`{"CleanupDeadServers": true}`))
|
||||||
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/operator/autopilot/configuration?cas=%d", reply.ModifyIndex-1), buf)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
obj, err := s.Server.OperatorAutopilotConfiguration(resp, req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if res := obj.(bool); res {
|
||||||
|
t.Fatalf("should NOT work")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a CAS request, good index
|
||||||
|
{
|
||||||
|
buf := bytes.NewBuffer([]byte(`{"CleanupDeadServers": true}`))
|
||||||
|
req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/operator/autopilot/configuration?cas=%d", reply.ModifyIndex), buf)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
obj, err := s.Server.OperatorAutopilotConfiguration(resp, req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if res := obj.(bool); !res {
|
||||||
|
t.Fatalf("should work")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the update
|
||||||
|
if err := s.RPC("Operator.AutopilotGetConfiguration", &args, &reply); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if !reply.CleanupDeadServers {
|
||||||
|
t.Fatalf("bad: %#v", reply)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperator_ServerHealth(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
httpTest(t, func(c *Config) {
|
||||||
|
c.Server.RaftProtocol = 3
|
||||||
|
}, func(s *TestAgent) {
|
||||||
|
body := bytes.NewBuffer(nil)
|
||||||
|
req, _ := http.NewRequest("GET", "/v1/operator/autopilot/health", body)
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
obj, err := s.Server.OperatorServerHealth(resp, req)
|
||||||
|
if err != nil {
|
||||||
|
r.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if resp.Code != 200 {
|
||||||
|
r.Fatalf("bad code: %d", resp.Code)
|
||||||
|
}
|
||||||
|
out, ok := obj.(*api.OperatorHealthReply)
|
||||||
|
if !ok {
|
||||||
|
r.Fatalf("unexpected: %T", obj)
|
||||||
|
}
|
||||||
|
if len(out.Servers) != 1 ||
|
||||||
|
!out.Servers[0].Healthy ||
|
||||||
|
out.Servers[0].Name != s.server.LocalMember().Name ||
|
||||||
|
out.Servers[0].SerfStatus != "alive" ||
|
||||||
|
out.FailureTolerance != 0 {
|
||||||
|
r.Fatalf("bad: %v, %q", out, s.server.LocalMember().Name)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperator_ServerHealth_Unhealthy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
httpTest(t, func(c *Config) {
|
||||||
|
c.Server.RaftProtocol = 3
|
||||||
|
c.Autopilot.LastContactThreshold = -1 * time.Second
|
||||||
|
}, func(s *TestAgent) {
|
||||||
|
body := bytes.NewBuffer(nil)
|
||||||
|
req, _ := http.NewRequest("GET", "/v1/operator/autopilot/health", body)
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
obj, err := s.Server.OperatorServerHealth(resp, req)
|
||||||
|
if err != nil {
|
||||||
|
r.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if resp.Code != 429 {
|
||||||
|
r.Fatalf("bad code: %d, %v", resp.Code, obj.(*api.OperatorHealthReply))
|
||||||
|
}
|
||||||
|
out, ok := obj.(*api.OperatorHealthReply)
|
||||||
|
if !ok {
|
||||||
|
r.Fatalf("unexpected: %T", obj)
|
||||||
|
}
|
||||||
|
if len(out.Servers) != 1 ||
|
||||||
|
out.Healthy ||
|
||||||
|
out.Servers[0].Name != s.server.LocalMember().Name {
|
||||||
|
r.Fatalf("bad: %#v", out.Servers)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDurationFixer(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
obj := map[string]interface{}{
|
||||||
|
"key1": []map[string]interface{}{
|
||||||
|
{
|
||||||
|
"subkey1": "10s",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subkey2": "5d",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"key2": map[string]interface{}{
|
||||||
|
"subkey3": "30s",
|
||||||
|
"subkey4": "20m",
|
||||||
|
},
|
||||||
|
"key3": "11s",
|
||||||
|
"key4": "49h",
|
||||||
|
}
|
||||||
|
expected := map[string]interface{}{
|
||||||
|
"key1": []map[string]interface{}{
|
||||||
|
{
|
||||||
|
"subkey1": 10 * time.Second,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subkey2": "5d",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"key2": map[string]interface{}{
|
||||||
|
"subkey3": "30s",
|
||||||
|
"subkey4": 20 * time.Minute,
|
||||||
|
},
|
||||||
|
"key3": "11s",
|
||||||
|
"key4": 49 * time.Hour,
|
||||||
|
}
|
||||||
|
|
||||||
|
fixer := NewDurationFixer("key4", "subkey1", "subkey4")
|
||||||
|
if err := fixer.FixupDurations(obj); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure we only processed the intended fieldnames
|
||||||
|
assert.Equal(obj, expected)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -301,6 +301,11 @@ func (a *TestAgent) config() *Config {
 	config.RaftConfig.StartAsLeader = true
 	config.RaftTimeout = 500 * time.Millisecond
 
+	// Tighten the autopilot timing
+	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
+	config.ServerHealthInterval = 50 * time.Millisecond
+	config.AutopilotInterval = 100 * time.Millisecond
+
 	// Bootstrap ourselves
 	config.Bootstrap = true
 	config.BootstrapExpect = 1
@@ -310,7 +310,7 @@ job "example" {
       # https://www.nomadproject.io/docs/job-specification/service.html
       #
       service {
-        name = "global-redis-check"
+        name = "redis-cache"
         tags = ["global", "cache"]
         port = "db"
         check {
@@ -0,0 +1,29 @@
+package command
+
+import (
+	"strings"
+
+	"github.com/mitchellh/cli"
+)
+
+type OperatorAutopilotCommand struct {
+	Meta
+}
+
+func (c *OperatorAutopilotCommand) Run(args []string) int {
+	return cli.RunResultHelp
+}
+
+func (c *OperatorAutopilotCommand) Synopsis() string {
+	return "Provides tools for modifying Autopilot configuration"
+}
+
+func (c *OperatorAutopilotCommand) Help() string {
+	helpText := `
+Usage: nomad operator autopilot <subcommand> [options]
+
+  The Autopilot operator command is used to interact with Nomad's Autopilot
+  subsystem. The command can be used to view or modify the current configuration.
+`
+	return strings.TrimSpace(helpText)
+}
@ -0,0 +1,70 @@
|
||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/posener/complete"
|
||||||
|
)
|
||||||
|
|
||||||
|
type OperatorAutopilotGetCommand struct {
|
||||||
|
Meta
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotGetCommand) AutocompleteFlags() complete.Flags {
|
||||||
|
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotGetCommand) AutocompleteArgs() complete.Predictor {
|
||||||
|
return complete.PredictNothing
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotGetCommand) Run(args []string) int {
|
||||||
|
flags := c.Meta.FlagSet("autopilot", FlagSetClient)
|
||||||
|
flags.Usage = func() { c.Ui.Output(c.Help()) }
|
||||||
|
|
||||||
|
if err := flags.Parse(args); err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Failed to parse args: %v", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up a client.
|
||||||
|
client, err := c.Meta.Client()
|
||||||
|
if err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the current configuration.
|
||||||
|
config, err := client.Operator().AutopilotGetConfiguration(nil)
|
||||||
|
if err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Error querying Autopilot configuration: %s", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
c.Ui.Output(fmt.Sprintf("CleanupDeadServers = %v", config.CleanupDeadServers))
|
||||||
|
c.Ui.Output(fmt.Sprintf("LastContactThreshold = %v", config.LastContactThreshold.String()))
|
||||||
|
c.Ui.Output(fmt.Sprintf("MaxTrailingLogs = %v", config.MaxTrailingLogs))
|
||||||
|
c.Ui.Output(fmt.Sprintf("ServerStabilizationTime = %v", config.ServerStabilizationTime.String()))
|
||||||
|
c.Ui.Output(fmt.Sprintf("RedundancyZoneTag = %q", config.RedundancyZoneTag))
|
||||||
|
c.Ui.Output(fmt.Sprintf("DisableUpgradeMigration = %v", config.DisableUpgradeMigration))
|
||||||
|
c.Ui.Output(fmt.Sprintf("UpgradeVersionTag = %q", config.UpgradeVersionTag))
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotGetCommand) Synopsis() string {
|
||||||
|
return "Display the current Autopilot configuration"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotGetCommand) Help() string {
|
||||||
|
helpText := `
|
||||||
|
Usage: nomad operator autopilot get-config [options]
|
||||||
|
|
||||||
|
Displays the current Autopilot configuration.
|
||||||
|
|
||||||
|
General Options:
|
||||||
|
|
||||||
|
` + generalOptionsUsage()
|
||||||
|
|
||||||
|
return strings.TrimSpace(helpText)
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/mitchellh/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestOperator_Autopilot_GetConfig_Implements(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var _ cli.Command = &OperatorAutopilotGetCommand{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperatorAutopilotGetConfigCommand(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s, _, addr := testServer(t, false, nil)
|
||||||
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
ui := new(cli.MockUi)
|
||||||
|
c := &OperatorAutopilotGetCommand{Meta: Meta{Ui: ui}}
|
||||||
|
args := []string{"-address=" + addr}
|
||||||
|
|
||||||
|
code := c.Run(args)
|
||||||
|
if code != 0 {
|
||||||
|
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||||
|
}
|
||||||
|
output := strings.TrimSpace(ui.OutputWriter.String())
|
||||||
|
if !strings.Contains(output, "CleanupDeadServers = true") {
|
||||||
|
t.Fatalf("bad: %s", output)
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,156 @@
|
||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/command/flags"
|
||||||
|
"github.com/hashicorp/nomad/api"
|
||||||
|
"github.com/posener/complete"
|
||||||
|
)
|
||||||
|
|
||||||
|
type OperatorAutopilotSetCommand struct {
|
||||||
|
Meta
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotSetCommand) AutocompleteFlags() complete.Flags {
|
||||||
|
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
|
||||||
|
complete.Flags{
|
||||||
|
"-cleanup-dead-servers": complete.PredictAnything,
|
||||||
|
"-max-trailing-logs": complete.PredictAnything,
|
||||||
|
"-last-contact-threshold": complete.PredictAnything,
|
||||||
|
"-server-stabilization-time": complete.PredictAnything,
|
||||||
|
"-redundancy-zone-tag": complete.PredictAnything,
|
||||||
|
"-disable-upgrade-migration": complete.PredictAnything,
|
||||||
|
"-upgrade-version-tag": complete.PredictAnything,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotSetCommand) AutocompleteArgs() complete.Predictor {
|
||||||
|
return complete.PredictNothing
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotSetCommand) Run(args []string) int {
|
||||||
|
var cleanupDeadServers flags.BoolValue
|
||||||
|
var maxTrailingLogs flags.UintValue
|
||||||
|
var lastContactThreshold flags.DurationValue
|
||||||
|
var serverStabilizationTime flags.DurationValue
|
||||||
|
var redundancyZoneTag flags.StringValue
|
||||||
|
var disableUpgradeMigration flags.BoolValue
|
||||||
|
var upgradeVersionTag flags.StringValue
|
||||||
|
|
||||||
|
f := c.Meta.FlagSet("autopilot", FlagSetClient)
|
||||||
|
f.Usage = func() { c.Ui.Output(c.Help()) }
|
||||||
|
|
||||||
|
f.Var(&cleanupDeadServers, "cleanup-dead-servers", "")
|
||||||
|
f.Var(&maxTrailingLogs, "max-trailing-logs", "")
|
||||||
|
f.Var(&lastContactThreshold, "last-contact-threshold", "")
|
||||||
|
f.Var(&serverStabilizationTime, "server-stabilization-time", "")
|
||||||
|
f.Var(&redundancyZoneTag, "redundancy-zone-tag", "")
|
||||||
|
f.Var(&disableUpgradeMigration, "disable-upgrade-migration", "")
|
||||||
|
f.Var(&upgradeVersionTag, "upgrade-version-tag", "")
|
||||||
|
|
||||||
|
if err := f.Parse(args); err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Failed to parse args: %v", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up a client.
|
||||||
|
client, err := c.Meta.Client()
|
||||||
|
if err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the current configuration.
|
||||||
|
operator := client.Operator()
|
||||||
|
conf, err := operator.AutopilotGetConfiguration(nil)
|
||||||
|
if err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Error querying for Autopilot configuration: %s", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the config values based on the set flags.
|
||||||
|
cleanupDeadServers.Merge(&conf.CleanupDeadServers)
|
||||||
|
redundancyZoneTag.Merge(&conf.RedundancyZoneTag)
|
||||||
|
disableUpgradeMigration.Merge(&conf.DisableUpgradeMigration)
|
||||||
|
upgradeVersionTag.Merge(&conf.UpgradeVersionTag)
|
||||||
|
|
||||||
|
trailing := uint(conf.MaxTrailingLogs)
|
||||||
|
maxTrailingLogs.Merge(&trailing)
|
||||||
|
conf.MaxTrailingLogs = uint64(trailing)
|
||||||
|
|
||||||
|
last := time.Duration(*conf.LastContactThreshold)
|
||||||
|
lastContactThreshold.Merge(&last)
|
||||||
|
conf.LastContactThreshold = api.NewReadableDuration(last)
|
||||||
|
|
||||||
|
stabilization := time.Duration(*conf.ServerStabilizationTime)
serverStabilizationTime.Merge(&stabilization)
conf.ServerStabilizationTime = api.NewReadableDuration(stabilization)
|
||||||
|
|
||||||
|
// Check-and-set the new configuration.
|
||||||
|
result, err := operator.AutopilotCASConfiguration(conf, nil)
|
||||||
|
if err != nil {
|
||||||
|
c.Ui.Error(fmt.Sprintf("Error setting Autopilot configuration: %s", err))
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if result {
|
||||||
|
c.Ui.Output("Configuration updated!")
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
c.Ui.Output("Configuration could not be atomically updated, please try again")
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotSetCommand) Synopsis() string {
|
||||||
|
return "Modify the current Autopilot configuration"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OperatorAutopilotSetCommand) Help() string {
|
||||||
|
helpText := `
|
||||||
|
Usage: nomad operator autopilot set-config [options]
|
||||||
|
|
||||||
|
Modifies the current Autopilot configuration.
|
||||||
|
|
||||||
|
General Options:
|
||||||
|
|
||||||
|
` + generalOptionsUsage() + `
|
||||||
|
|
||||||
|
Set Config Options:
|
||||||
|
|
||||||
|
-cleanup-dead-servers=[true|false]
|
||||||
|
Controls whether Nomad will automatically remove dead servers when
|
||||||
|
new ones are successfully added. Must be one of [true|false].
|
||||||
|
|
||||||
|
-disable-upgrade-migration=[true|false]
|
||||||
|
(Enterprise-only) Controls whether Nomad will avoid promoting
|
||||||
|
new servers until it can perform a migration. Must be one of
|
||||||
|
"true|false".
|
||||||
|
|
||||||
|
-last-contact-threshold=200ms
|
||||||
|
Controls the maximum amount of time a server can go without contact
|
||||||
|
from the leader before being considered unhealthy. Must be a
|
||||||
|
duration value such as "200ms".
|
||||||
|
|
||||||
|
-max-trailing-logs=<value>
|
||||||
|
Controls the maximum number of log entries that a server can trail
|
||||||
|
the leader by before being considered unhealthy.
|
||||||
|
|
||||||
|
-redundancy-zone-tag=<value>
|
||||||
|
(Enterprise-only) Controls the node_meta tag name used for
|
||||||
|
separating servers into different redundancy zones.
|
||||||
|
|
||||||
|
-server-stabilization-time=<10s>
|
||||||
|
Controls the minimum amount of time a server must be stable in
|
||||||
|
the 'healthy' state before being added to the cluster. Only takes
|
||||||
|
effect if all servers are running Raft protocol version 3 or
|
||||||
|
higher. Must be a duration value such as "10s".
|
||||||
|
|
||||||
|
-upgrade-version-tag=<value>
|
||||||
|
(Enterprise-only) The node_meta tag to use for version info when
|
||||||
|
performing upgrade migrations. If left blank, the Nomad version
|
||||||
|
will be used.
|
||||||
|
`
|
||||||
|
return strings.TrimSpace(helpText)
|
||||||
|
}
|
|
@ -0,0 +1,62 @@
|
||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/mitchellh/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestOperator_Autopilot_SetConfig_Implements(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var _ cli.Command = &OperatorAutopilotSetCommand{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperatorAutopilotSetConfigCommand(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s, _, addr := testServer(t, false, nil)
|
||||||
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
ui := new(cli.MockUi)
|
||||||
|
c := &OperatorAutopilotSetCommand{Meta: Meta{Ui: ui}}
|
||||||
|
args := []string{
|
||||||
|
"-address=" + addr,
|
||||||
|
"-cleanup-dead-servers=false",
|
||||||
|
"-max-trailing-logs=99",
|
||||||
|
"-last-contact-threshold=123ms",
|
||||||
|
"-server-stabilization-time=123ms",
|
||||||
|
}
|
||||||
|
|
||||||
|
code := c.Run(args)
|
||||||
|
if code != 0 {
|
||||||
|
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||||
|
}
|
||||||
|
output := strings.TrimSpace(ui.OutputWriter.String())
|
||||||
|
if !strings.Contains(output, "Configuration updated") {
|
||||||
|
t.Fatalf("bad: %s", output)
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := c.Client()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
conf, err := client.Operator().AutopilotGetConfiguration(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if conf.CleanupDeadServers {
|
||||||
|
t.Fatalf("bad: %#v", conf)
|
||||||
|
}
|
||||||
|
if conf.MaxTrailingLogs != 99 {
|
||||||
|
t.Fatalf("bad: %#v", conf)
|
||||||
|
}
|
||||||
|
if conf.LastContactThreshold.Duration() != 123*time.Millisecond {
|
||||||
|
t.Fatalf("bad: %#v", conf)
|
||||||
|
}
|
||||||
|
if conf.ServerStabilizationTime.Duration() != 123*time.Millisecond {
|
||||||
|
t.Fatalf("bad: %#v", conf)
|
||||||
|
}
|
||||||
|
}
|
|
@@ -0,0 +1,12 @@
+package command
+
+import (
+	"testing"
+
+	"github.com/mitchellh/cli"
+)
+
+func TestOperator_Autopilot_Implements(t *testing.T) {
+	t.Parallel()
+	var _ cli.Command = &OperatorAutopilotCommand{}
+}
@@ -32,7 +32,10 @@ General Options:
 Remove Peer Options:
 
   -peer-address="IP:port"
     Remove a Nomad server with given address from the Raft configuration.
+
+  -peer-id="id"
+    Remove a Nomad server with the given ID from the Raft configuration.
 `
 	return strings.TrimSpace(helpText)
 }
@@ -41,6 +44,7 @@ func (c *OperatorRaftRemoveCommand) AutocompleteFlags() complete.Flags {
 	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
 		complete.Flags{
 			"-peer-address": complete.PredictAnything,
+			"-peer-id":      complete.PredictAnything,
 		})
 }
@@ -54,11 +58,13 @@ func (c *OperatorRaftRemoveCommand) Synopsis() string {
 
 func (c *OperatorRaftRemoveCommand) Run(args []string) int {
 	var peerAddress string
+	var peerID string
+
 	flags := c.Meta.FlagSet("raft", FlagSetClient)
 	flags.Usage = func() { c.Ui.Output(c.Help()) }
 
 	flags.StringVar(&peerAddress, "peer-address", "", "")
+	flags.StringVar(&peerID, "peer-id", "", "")
 	if err := flags.Parse(args); err != nil {
 		c.Ui.Error(fmt.Sprintf("Failed to parse args: %v", err))
 		return 1
@@ -72,20 +78,37 @@ func (c *OperatorRaftRemoveCommand) Run(args []string) int {
 	}
 	operator := client.Operator()
 
-	// TODO (alexdadgar) Once we expose IDs, add support for removing
-	// by ID, add support for that.
-	if len(peerAddress) == 0 {
-		c.Ui.Error(fmt.Sprintf("an address is required for the peer to remove"))
+	if err := raftRemovePeers(peerAddress, peerID, operator); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error removing peer: %v", err))
 		return 1
 	}
-
-	// Try to kick the peer.
-	w := &api.WriteOptions{}
-	if err := operator.RaftRemovePeerByAddress(peerAddress, w); err != nil {
-		c.Ui.Error(fmt.Sprintf("Failed to remove raft peer: %v", err))
-		return 1
+	if peerAddress != "" {
+		c.Ui.Output(fmt.Sprintf("Removed peer with address %q", peerAddress))
+	} else {
+		c.Ui.Output(fmt.Sprintf("Removed peer with id %q", peerID))
 	}
-	c.Ui.Output(fmt.Sprintf("Removed peer with address %q", peerAddress))
 
 	return 0
 }
+
+func raftRemovePeers(address, id string, operator *api.Operator) error {
+	if len(address) == 0 && len(id) == 0 {
+		return fmt.Errorf("an address or id is required for the peer to remove")
+	}
+	if len(address) > 0 && len(id) > 0 {
+		return fmt.Errorf("cannot give both an address and id")
+	}
+
+	// Try to kick the peer.
+	if len(address) > 0 {
+		if err := operator.RaftRemovePeerByAddress(address, nil); err != nil {
+			return err
+		}
+	} else {
+		if err := operator.RaftRemovePeerByID(id, nil); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
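Reviewer note: a short sketch of the new helper's contract, in the same package. The wrapper function name is hypothetical; raftRemovePeers and the api client calls are the ones introduced above. Exactly one of address or id may be set, and nil write options are passed through.

	package command

	import (
		"log"

		"github.com/hashicorp/nomad/api"
	)

	// removeByID is illustrative only: it funnels an ID-based removal through
	// the raftRemovePeers helper added in this change.
	func removeByID(client *api.Client, id string) {
		if err := raftRemovePeers("", id, client.Operator()); err != nil {
			log.Fatal(err)
		}
	}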
@ -1,10 +1,10 @@
|
||||||
package command
|
package command
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/mitchellh/cli"
|
"github.com/mitchellh/cli"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestOperator_Raft_RemovePeers_Implements(t *testing.T) {
|
func TestOperator_Raft_RemovePeers_Implements(t *testing.T) {
|
||||||
|
@ -14,6 +14,35 @@ func TestOperator_Raft_RemovePeers_Implements(t *testing.T) {
|
||||||
|
|
||||||
func TestOperator_Raft_RemovePeer(t *testing.T) {
|
func TestOperator_Raft_RemovePeer(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
assert := assert.New(t)
|
||||||
|
s, _, addr := testServer(t, false, nil)
|
||||||
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
ui := new(cli.MockUi)
|
||||||
|
c := &OperatorRaftRemoveCommand{Meta: Meta{Ui: ui}}
|
||||||
|
args := []string{"-address=" + addr, "-peer-address=nope", "-peer-id=nope"}
|
||||||
|
|
||||||
|
// Give both an address and ID
|
||||||
|
code := c.Run(args)
|
||||||
|
if code != 1 {
|
||||||
|
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Contains(ui.ErrorWriter.String(), "cannot give both an address and id")
|
||||||
|
|
||||||
|
// Neither address nor ID present
|
||||||
|
args = args[:1]
|
||||||
|
code = c.Run(args)
|
||||||
|
if code != 1 {
|
||||||
|
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Contains(ui.ErrorWriter.String(), "an address or id is required for the peer to remove")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOperator_Raft_RemovePeerAddress(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
assert := assert.New(t)
|
||||||
s, _, addr := testServer(t, false, nil)
|
s, _, addr := testServer(t, false, nil)
|
||||||
defer s.Shutdown()
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
@ -27,8 +56,24 @@ func TestOperator_Raft_RemovePeer(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we get this error, it proves we sent the address all the way through.
output := strings.TrimSpace(ui.ErrorWriter.String())
|
assert.Contains(ui.ErrorWriter.String(), "address \"nope\" was not found in the Raft configuration")
|
||||||
if !strings.Contains(output, "address \"nope\" was not found in the Raft configuration") {
|
}
|
||||||
t.Fatalf("bad: %s", output)
|
|
||||||
}
|
func TestOperator_Raft_RemovePeerID(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
assert := assert.New(t)
|
||||||
|
s, _, addr := testServer(t, false, nil)
|
||||||
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
ui := new(cli.MockUi)
|
||||||
|
c := &OperatorRaftRemoveCommand{Meta: Meta{Ui: ui}}
|
||||||
|
args := []string{"-address=" + addr, "-peer-id=nope"}
|
||||||
|
|
||||||
|
code := c.Run(args)
|
||||||
|
if code != 1 {
|
||||||
|
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we get this error, it proves we sent the address all the way through.
|
||||||
|
assert.Contains(ui.ErrorWriter.String(), "id \"nope\" was not found in the Raft configuration")
|
||||||
}
|
}
|
||||||
|
|
commands.go
@@ -275,6 +275,24 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
 			}, nil
 		},
 
+		"operator autopilot": func() (cli.Command, error) {
+			return &command.OperatorAutopilotCommand{
+				Meta: meta,
+			}, nil
+		},
+
+		"operator autopilot get-config": func() (cli.Command, error) {
+			return &command.OperatorAutopilotGetCommand{
+				Meta: meta,
+			}, nil
+		},
+
+		"operator autopilot set-config": func() (cli.Command, error) {
+			return &command.OperatorAutopilotSetCommand{
+				Meta: meta,
+			}, nil
+		},
+
 		"operator raft": func() (cli.Command, error) {
 			return &command.OperatorRaftCommand{
 				Meta: meta,
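Reviewer note: the three factories registered above make "nomad operator autopilot", "nomad operator autopilot get-config" and "nomad operator autopilot set-config" resolvable from the CLI. They can also be driven directly, the way the command tests in this diff do; the agent address below is a placeholder and assumes a running agent:

	package main

	import (
		"fmt"

		"github.com/hashicorp/nomad/command"
		"github.com/mitchellh/cli"
	)

	func main() {
		ui := new(cli.MockUi)
		c := &command.OperatorAutopilotGetCommand{Meta: command.Meta{Ui: ui}}
		exit := c.Run([]string{"-address=http://127.0.0.1:4646"})
		fmt.Println(exit, ui.OutputWriter.String())
	}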
@@ -912,6 +912,7 @@ func parseServices(jobName string, taskGroupName string, task *api.Task, service
 			"port",
 			"check",
 			"address_mode",
+			"check_restart",
 		}
 		if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
 			return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx))
@ -631,6 +631,42 @@ func TestParse(t *testing.T) {
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"service-check-restart.hcl",
|
||||||
|
&api.Job{
|
||||||
|
ID: helper.StringToPtr("service_check_restart"),
|
||||||
|
Name: helper.StringToPtr("service_check_restart"),
|
||||||
|
Type: helper.StringToPtr("service"),
|
||||||
|
TaskGroups: []*api.TaskGroup{
|
||||||
|
{
|
||||||
|
Name: helper.StringToPtr("group"),
|
||||||
|
Tasks: []*api.Task{
|
||||||
|
{
|
||||||
|
Name: "task",
|
||||||
|
Services: []*api.Service{
|
||||||
|
{
|
||||||
|
Name: "http-service",
|
||||||
|
CheckRestart: &api.CheckRestart{
|
||||||
|
Limit: 3,
|
||||||
|
Grace: helper.TimeToPtr(10 * time.Second),
|
||||||
|
IgnoreWarnings: true,
|
||||||
|
},
|
||||||
|
Checks: []api.ServiceCheck{
|
||||||
|
{
|
||||||
|
Name: "random-check",
|
||||||
|
Type: "tcp",
|
||||||
|
PortLabel: "9001",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range cases {
|
for _, tc := range cases {
|
||||||
|
|
|
@@ -0,0 +1,21 @@
+job "service_check_restart" {
+  type = "service"
+  group "group" {
+    task "task" {
+      service {
+        name = "http-service"
+        check_restart {
+          limit           = 3
+          grace           = "10s"
+          ignore_warnings = true
+        }
+        check {
+          name = "random-check"
+          type = "tcp"
+          port = "9001"
+        }
+      }
+    }
+  }
+}
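Reviewer note: the fixture above is asserted against the following API shape in the jobspec parse test in this diff; shown here, slightly condensed, as a reference for what check_restart maps to:

	package main

	import (
		"fmt"
		"time"

		"github.com/hashicorp/nomad/api"
		"github.com/hashicorp/nomad/helper"
	)

	func main() {
		// Expected mapping for the service in the fixture above.
		expected := &api.Service{
			Name: "http-service",
			CheckRestart: &api.CheckRestart{
				Limit:          3,
				Grace:          helper.TimeToPtr(10 * time.Second),
				IgnoreWarnings: true,
			},
			Checks: []api.ServiceCheck{{
				Name:      "random-check",
				Type:      "tcp",
				PortLabel: "9001",
			}},
		}
		fmt.Println(expected.Name)
	}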
@ -0,0 +1,69 @@
|
||||||
|
package nomad
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/armon/go-metrics"
|
||||||
|
"github.com/hashicorp/consul/agent/consul/autopilot"
|
||||||
|
"github.com/hashicorp/raft"
|
||||||
|
"github.com/hashicorp/serf/serf"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AutopilotDelegate is a Nomad delegate for autopilot operations.
|
||||||
|
type AutopilotDelegate struct {
|
||||||
|
server *Server
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AutopilotDelegate) AutopilotConfig() *autopilot.Config {
|
||||||
|
return d.server.getOrCreateAutopilotConfig()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AutopilotDelegate) FetchStats(ctx context.Context, servers []serf.Member) map[string]*autopilot.ServerStats {
|
||||||
|
return d.server.statsFetcher.Fetch(ctx, servers)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AutopilotDelegate) IsServer(m serf.Member) (*autopilot.ServerInfo, error) {
|
||||||
|
ok, parts := isNomadServer(m)
|
||||||
|
if !ok || parts.Region != d.server.Region() {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
server := &autopilot.ServerInfo{
|
||||||
|
Name: m.Name,
|
||||||
|
ID: parts.ID,
|
||||||
|
Addr: parts.Addr,
|
||||||
|
Build: parts.Build,
|
||||||
|
Status: m.Status,
|
||||||
|
}
|
||||||
|
return server, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyHealth heartbeats a metric for monitoring if we're the leader.
|
||||||
|
func (d *AutopilotDelegate) NotifyHealth(health autopilot.OperatorHealthReply) {
|
||||||
|
if d.server.raft.State() == raft.Leader {
|
||||||
|
metrics.SetGauge([]string{"nomad", "autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
|
||||||
|
if health.Healthy {
|
||||||
|
metrics.SetGauge([]string{"nomad", "autopilot", "healthy"}, 1)
|
||||||
|
} else {
|
||||||
|
metrics.SetGauge([]string{"nomad", "autopilot", "healthy"}, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AutopilotDelegate) PromoteNonVoters(conf *autopilot.Config, health autopilot.OperatorHealthReply) ([]raft.Server, error) {
|
||||||
|
future := d.server.raft.GetConfiguration()
|
||||||
|
if err := future.Error(); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get raft configuration: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return autopilot.PromoteStableServers(conf, health, future.Configuration().Servers), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AutopilotDelegate) Raft() *raft.Raft {
|
||||||
|
return d.server.raft
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AutopilotDelegate) Serf() *serf.Serf {
|
||||||
|
return d.server.serf
|
||||||
|
}
|
|
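Reviewer note: a hypothetical sketch of how the delegate above would be wired into the shared consul autopilot package on server start. The constructor name and the s.logger/s.autopilot fields are assumptions rather than part of this diff; the config fields are the ones added in nomad/config.go below.

	package nomad

	import (
		"github.com/hashicorp/consul/agent/consul/autopilot"
	)

	// setupAutopilot is illustrative wiring only, not code from this change.
	func (s *Server) setupAutopilot() {
		apDelegate := &AutopilotDelegate{server: s}
		s.autopilot = autopilot.NewAutopilot(s.logger, apDelegate,
			s.config.AutopilotInterval, s.config.ServerHealthInterval)
	}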
@ -0,0 +1,350 @@
|
||||||
|
package nomad
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/agent/consul/autopilot"
|
||||||
|
"github.com/hashicorp/consul/testutil/retry"
|
||||||
|
"github.com/hashicorp/nomad/testutil"
|
||||||
|
"github.com/hashicorp/raft"
|
||||||
|
"github.com/hashicorp/serf/serf"
|
||||||
|
)
|
||||||
|
|
||||||
|
// wantPeers determines whether the server has the given
|
||||||
|
// number of voting raft peers.
|
||||||
|
func wantPeers(s *Server, peers int) error {
|
||||||
|
future := s.raft.GetConfiguration()
|
||||||
|
if err := future.Error(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
n := autopilot.NumPeers(future.Configuration())
|
||||||
|
if got, want := n, peers; got != want {
|
||||||
|
return fmt.Errorf("got %d peers want %d", got, want)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wantRaft determines if the servers have all of each other in their
|
||||||
|
// Raft configurations,
|
||||||
|
func wantRaft(servers []*Server) error {
|
||||||
|
// Make sure all the servers are represented in the Raft config,
|
||||||
|
// and that there are no extras.
|
||||||
|
verifyRaft := func(c raft.Configuration) error {
|
||||||
|
want := make(map[raft.ServerID]bool)
|
||||||
|
for _, s := range servers {
|
||||||
|
want[s.config.RaftConfig.LocalID] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range c.Servers {
|
||||||
|
if !want[s.ID] {
|
||||||
|
return fmt.Errorf("don't want %q", s.ID)
|
||||||
|
}
|
||||||
|
delete(want, s.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(want) > 0 {
|
||||||
|
return fmt.Errorf("didn't find %v", want)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range servers {
|
||||||
|
future := s.raft.GetConfiguration()
|
||||||
|
if err := future.Error(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := verifyRaft(future.Configuration()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutopilot_CleanupDeadServer(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
for i := 1; i <= 3; i++ {
|
||||||
|
testCleanupDeadServer(t, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testCleanupDeadServer(t *testing.T, raftVersion int) {
|
||||||
|
conf := func(c *Config) {
|
||||||
|
c.DevDisableBootstrap = true
|
||||||
|
c.BootstrapExpect = 3
|
||||||
|
c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(raftVersion)
|
||||||
|
}
|
||||||
|
s1 := testServer(t, conf)
|
||||||
|
defer s1.Shutdown()
|
||||||
|
|
||||||
|
s2 := testServer(t, conf)
|
||||||
|
defer s2.Shutdown()
|
||||||
|
|
||||||
|
s3 := testServer(t, conf)
|
||||||
|
defer s3.Shutdown()
|
||||||
|
|
||||||
|
servers := []*Server{s1, s2, s3}
|
||||||
|
|
||||||
|
// Try to join
|
||||||
|
testJoin(t, s1, s2, s3)
|
||||||
|
|
||||||
|
for _, s := range servers {
|
||||||
|
retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bring up a new server
|
||||||
|
s4 := testServer(t, conf)
|
||||||
|
defer s4.Shutdown()
|
||||||
|
|
||||||
|
// Kill a non-leader server
|
||||||
|
s3.Shutdown()
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
alive := 0
|
||||||
|
for _, m := range s1.Members() {
|
||||||
|
if m.Status == serf.StatusAlive {
|
||||||
|
alive++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if alive != 2 {
|
||||||
|
r.Fatal(nil)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Join the new server
|
||||||
|
testJoin(t, s1, s4)
|
||||||
|
servers[2] = s4
|
||||||
|
|
||||||
|
// Make sure the dead server is removed and we're back to 3 total peers
|
||||||
|
for _, s := range servers {
|
||||||
|
retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s1 := testServer(t, nil)
|
||||||
|
defer s1.Shutdown()
|
||||||
|
|
||||||
|
conf := func(c *Config) {
|
||||||
|
c.DevDisableBootstrap = true
|
||||||
|
}
|
||||||
|
|
||||||
|
s2 := testServer(t, conf)
|
||||||
|
defer s2.Shutdown()
|
||||||
|
|
||||||
|
s3 := testServer(t, conf)
|
||||||
|
defer s3.Shutdown()
|
||||||
|
|
||||||
|
s4 := testServer(t, conf)
|
||||||
|
defer s4.Shutdown()
|
||||||
|
|
||||||
|
s5 := testServer(t, conf)
|
||||||
|
defer s5.Shutdown()
|
||||||
|
|
||||||
|
servers := []*Server{s1, s2, s3, s4, s5}
|
||||||
|
|
||||||
|
// Join the servers to s1, and wait until they are all promoted to
|
||||||
|
// voters.
|
||||||
|
testJoin(t, s1, servers[1:]...)
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
r.Check(wantRaft(servers))
|
||||||
|
for _, s := range servers {
|
||||||
|
r.Check(wantPeers(s, 5))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Kill a non-leader server
|
||||||
|
s4.Shutdown()
|
||||||
|
|
||||||
|
// Should be removed from the peers automatically
|
||||||
|
servers = []*Server{s1, s2, s3, s5}
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
r.Check(wantRaft(servers))
|
||||||
|
for _, s := range servers {
|
||||||
|
r.Check(wantPeers(s, 4))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutopilot_RollingUpdate(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s1 := testServer(t, func(c *Config) {
|
||||||
|
c.RaftConfig.ProtocolVersion = 3
|
||||||
|
})
|
||||||
|
defer s1.Shutdown()
|
||||||
|
|
||||||
|
conf := func(c *Config) {
|
||||||
|
c.DevDisableBootstrap = true
|
||||||
|
c.RaftConfig.ProtocolVersion = 3
|
||||||
|
}
|
||||||
|
|
||||||
|
s2 := testServer(t, conf)
|
||||||
|
defer s2.Shutdown()
|
||||||
|
|
||||||
|
s3 := testServer(t, conf)
|
||||||
|
defer s3.Shutdown()
|
||||||
|
|
||||||
|
// Join the servers to s1, and wait until they are all promoted to
|
||||||
|
// voters.
|
||||||
|
servers := []*Server{s1, s2, s3}
|
||||||
|
testJoin(t, s1, s2, s3)
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
r.Check(wantRaft(servers))
|
||||||
|
for _, s := range servers {
|
||||||
|
r.Check(wantPeers(s, 3))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Add one more server like we are doing a rolling update.
|
||||||
|
s4 := testServer(t, conf)
|
||||||
|
defer s4.Shutdown()
|
||||||
|
testJoin(t, s1, s4)
|
||||||
|
servers = append(servers, s4)
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
r.Check(wantRaft(servers))
|
||||||
|
for _, s := range servers {
|
||||||
|
r.Check(wantPeers(s, 3))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Now kill one of the "old" nodes like we are doing a rolling update.
|
||||||
|
s3.Shutdown()
|
||||||
|
|
||||||
|
isVoter := func() bool {
|
||||||
|
future := s1.raft.GetConfiguration()
|
||||||
|
if err := future.Error(); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
for _, s := range future.Configuration().Servers {
|
||||||
|
if string(s.ID) == string(s4.config.NodeID) {
|
||||||
|
return s.Suffrage == raft.Voter
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Fatalf("didn't find s4")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for s4 to stabilize, get promoted to a voter, and for s3 to be
|
||||||
|
// removed.
|
||||||
|
servers = []*Server{s1, s2, s4}
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
r.Check(wantRaft(servers))
|
||||||
|
for _, s := range servers {
|
||||||
|
r.Check(wantPeers(s, 3))
|
||||||
|
}
|
||||||
|
if !isVoter() {
|
||||||
|
r.Fatalf("should be a voter")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s1 := testServer(t, nil)
|
||||||
|
defer s1.Shutdown()
|
||||||
|
|
||||||
|
conf := func(c *Config) {
|
||||||
|
c.DevDisableBootstrap = true
|
||||||
|
}
|
||||||
|
s2 := testServer(t, conf)
|
||||||
|
defer s2.Shutdown()
|
||||||
|
|
||||||
|
s3 := testServer(t, conf)
|
||||||
|
defer s3.Shutdown()
|
||||||
|
|
||||||
|
s4 := testServer(t, conf)
|
||||||
|
defer s4.Shutdown()
|
||||||
|
|
||||||
|
servers := []*Server{s1, s2, s3}
|
||||||
|
|
||||||
|
// Join the servers to s1
|
||||||
|
testJoin(t, s1, s2, s3)
|
||||||
|
|
||||||
|
for _, s := range servers {
|
||||||
|
retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
|
||||||
|
}
|
||||||
|
|
||||||
|
testutil.WaitForLeader(t, s1.RPC)
|
||||||
|
|
||||||
|
// Add s4 to peers directly
|
||||||
|
addr := fmt.Sprintf("127.0.0.1:%d", s4.config.SerfConfig.MemberlistConfig.BindPort)
|
||||||
|
s1.raft.AddVoter(raft.ServerID(s4.config.NodeID), raft.ServerAddress(addr), 0, 0)
|
||||||
|
|
||||||
|
// Verify we have 4 peers
|
||||||
|
peers, err := s1.numPeers()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if peers != 4 {
|
||||||
|
t.Fatalf("bad: %v", peers)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for s4 to be removed
|
||||||
|
for _, s := range []*Server{s1, s2, s3} {
|
||||||
|
retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutopilot_PromoteNonVoter(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s1 := testServer(t, func(c *Config) {
|
||||||
|
c.RaftConfig.ProtocolVersion = 3
|
||||||
|
})
|
||||||
|
defer s1.Shutdown()
|
||||||
|
codec := rpcClient(t, s1)
|
||||||
|
defer codec.Close()
|
||||||
|
testutil.WaitForLeader(t, s1.RPC)
|
||||||
|
|
||||||
|
s2 := testServer(t, func(c *Config) {
|
||||||
|
c.DevDisableBootstrap = true
|
||||||
|
c.RaftConfig.ProtocolVersion = 3
|
||||||
|
})
|
||||||
|
defer s2.Shutdown()
|
||||||
|
testJoin(t, s1, s2)
|
||||||
|
|
||||||
|
// Make sure we see it as a nonvoter initially. We wait until half
|
||||||
|
// the stabilization period has passed.
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
future := s1.raft.GetConfiguration()
|
||||||
|
if err := future.Error(); err != nil {
|
||||||
|
r.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
servers := future.Configuration().Servers
|
||||||
|
if len(servers) != 2 {
|
||||||
|
r.Fatalf("bad: %v", servers)
|
||||||
|
}
|
||||||
|
if servers[1].Suffrage != raft.Nonvoter {
|
||||||
|
r.Fatalf("bad: %v", servers)
|
||||||
|
}
|
||||||
|
health := s1.autopilot.GetServerHealth(string(servers[1].ID))
|
||||||
|
if health == nil {
|
||||||
|
r.Fatalf("nil health, %v", s1.autopilot.GetClusterHealth())
|
||||||
|
}
|
||||||
|
if !health.Healthy {
|
||||||
|
r.Fatalf("bad: %v", health)
|
||||||
|
}
|
||||||
|
if time.Since(health.StableSince) < s1.config.AutopilotConfig.ServerStabilizationTime/2 {
|
||||||
|
r.Fatal("stable period not elapsed")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Make sure it ends up as a voter.
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
future := s1.raft.GetConfiguration()
|
||||||
|
if err := future.Error(); err != nil {
|
||||||
|
r.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
servers := future.Configuration().Servers
|
||||||
|
if len(servers) != 2 {
|
||||||
|
r.Fatalf("bad: %v", servers)
|
||||||
|
}
|
||||||
|
if servers[1].Suffrage != raft.Voter {
|
||||||
|
r.Fatalf("bad: %v", servers)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
|
@ -8,6 +8,7 @@ import (
	"runtime"
	"time"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/nomad/helper/tlsutil"
	"github.com/hashicorp/nomad/helper/uuid"

@ -93,6 +94,10 @@ type Config struct {
	// RaftTimeout is applied to any network traffic for raft. Defaults to 10s.
	RaftTimeout time.Duration

	// (Enterprise-only) NonVoter is used to prevent this server from being added
	// as a voting member of the Raft cluster.
	NonVoter bool

	// SerfConfig is the configuration for the serf cluster
	SerfConfig *serf.Config

@ -261,6 +266,19 @@ type Config struct {
	// BackwardsCompatibleMetrics determines whether to show methods of
	// displaying metrics for older verions, or to only show the new format
	BackwardsCompatibleMetrics bool

	// AutopilotConfig is used to apply the initial autopilot config when
	// bootstrapping.
	AutopilotConfig *autopilot.Config

	// ServerHealthInterval is the frequency with which the health of the
	// servers in the cluster will be updated.
	ServerHealthInterval time.Duration

	// AutopilotInterval is the frequency with which the leader will perform
	// autopilot tasks, such as promoting eligible non-voters and removing
	// dead servers.
	AutopilotInterval time.Duration
}

// CheckVersion is used to check if the ProtocolVersion is valid

@ -321,6 +339,14 @@ func DefaultConfig() *Config {
	TLSConfig:          &config.TLSConfig{},
	ReplicationBackoff: 30 * time.Second,
	SentinelGCInterval: 30 * time.Second,
	AutopilotConfig: &autopilot.Config{
		CleanupDeadServers:      true,
		LastContactThreshold:    200 * time.Millisecond,
		MaxTrailingLogs:         250,
		ServerStabilizationTime: 10 * time.Second,
	},
	ServerHealthInterval: 2 * time.Second,
	AutopilotInterval:    10 * time.Second,
}

// Enable all known schedulers by default

@ -344,8 +370,8 @@ func DefaultConfig() *Config {
	// Disable shutdown on removal
	c.RaftConfig.ShutdownOnRemove = false

	// Enable interoperability with raft protocol version 1, and don't
	// start using new ID-based features yet.
	// Enable interoperability with new raft APIs, requires all servers
	// to be on raft v1 or higher.
	c.RaftConfig.ProtocolVersion = 2

	return c

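For reference, a sketch (not part of this change) of how the new knobs above could be tuned on a server Config; all field names come from the additions to Config and DefaultConfig in this diff, and the chosen values are illustrative:

	// Hedged example: build a default config, then adjust autopilot timing.
	cfg := DefaultConfig()
	cfg.AutopilotConfig.CleanupDeadServers = false              // keep failed servers in the Raft config
	cfg.AutopilotConfig.ServerStabilizationTime = 30 * time.Second
	cfg.ServerHealthInterval = 1 * time.Second                  // refresh server health more often
	cfg.AutopilotInterval = 5 * time.Second                     // run promotion/cleanup tasks more often
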
19  nomad/fsm.go

@ -234,6 +234,8 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} {
		return n.applyACLTokenDelete(buf[1:], log.Index)
	case structs.ACLTokenBootstrapRequestType:
		return n.applyACLTokenBootstrap(buf[1:], log.Index)
	case structs.AutopilotRequestType:
		return n.applyAutopilotUpdate(buf[1:], log.Index)
	}

	// Check enterprise only message types.

@ -833,6 +835,23 @@ func (n *nomadFSM) applyACLTokenBootstrap(buf []byte, index uint64) interface{}
	return nil
}

func (n *nomadFSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} {
	var req structs.AutopilotSetConfigRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	defer metrics.MeasureSince([]string{"nomad", "fsm", "autopilot"}, time.Now())

	if req.CAS {
		act, err := n.state.AutopilotCASConfig(index, req.Config.ModifyIndex, &req.Config)
		if err != nil {
			return err
		}
		return act
	}
	return n.state.AutopilotSetConfig(index, &req.Config)
}

func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
	// Create a new snapshot
	snap, err := n.state.Snapshot()

@ -10,6 +10,7 @@ import (
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/hashicorp/consul/agent/consul/autopilot"
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/mock"

@ -2310,3 +2311,62 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
		t.Fatalf("Diff % #v", pretty.Diff(&expected, out2))
	}
}

func TestFSM_Autopilot(t *testing.T) {
	t.Parallel()
	fsm := testFSM(t)

	// Set the autopilot config using a request.
	req := structs.AutopilotSetConfigRequest{
		Datacenter: "dc1",
		Config: autopilot.Config{
			CleanupDeadServers:   true,
			LastContactThreshold: 10 * time.Second,
			MaxTrailingLogs:      300,
		},
	}
	buf, err := structs.Encode(structs.AutopilotRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	resp := fsm.Apply(makeLog(buf))
	if _, ok := resp.(error); ok {
		t.Fatalf("bad: %v", resp)
	}

	// Verify key is set directly in the state store.
	_, config, err := fsm.state.AutopilotConfig()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if config.CleanupDeadServers != req.Config.CleanupDeadServers {
		t.Fatalf("bad: %v", config.CleanupDeadServers)
	}
	if config.LastContactThreshold != req.Config.LastContactThreshold {
		t.Fatalf("bad: %v", config.LastContactThreshold)
	}
	if config.MaxTrailingLogs != req.Config.MaxTrailingLogs {
		t.Fatalf("bad: %v", config.MaxTrailingLogs)
	}

	// Now use CAS and provide an old index
	req.CAS = true
	req.Config.CleanupDeadServers = false
	req.Config.ModifyIndex = config.ModifyIndex - 1
	buf, err = structs.Encode(structs.AutopilotRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	resp = fsm.Apply(makeLog(buf))
	if _, ok := resp.(error); ok {
		t.Fatalf("bad: %v", resp)
	}

	_, config, err = fsm.state.AutopilotConfig()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !config.CleanupDeadServers {
		t.Fatalf("bad: %v", config.CleanupDeadServers)
	}
}

@ -13,7 +13,9 @@ import (
	"golang.org/x/time/rate"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/consul/agent/consul/autopilot"
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/go-version"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"

@ -37,6 +39,8 @@ const (
	barrierWriteTimeout = 2 * time.Minute
)

var minAutopilotVersion = version.Must(version.NewVersion("0.8.0"))

// monitorLeadership is used to monitor if we acquire or lose our role
// as the leader in the Raft cluster. There is some work the leader is
// expected to do, so we must react to changes

@ -168,6 +172,10 @@ func (s *Server) establishLeadership(stopCh chan struct{}) error {
		}
	}

	// Initialize and start the autopilot routine
	s.getOrCreateAutopilotConfig()
	s.autopilot.Start()

	// Enable the plan queue, since we are now the leader
	s.planQueue.SetEnabled(true)

@ -635,6 +643,9 @@ func (s *Server) revokeLeadership() error {
	// Clear the leader token since we are no longer the leader.
	s.setLeaderAcl("")

	// Disable autopilot
	s.autopilot.Stop()

	// Disable the plan queue, since we are no longer leader
	s.planQueue.SetEnabled(false)

@ -776,7 +787,7 @@ func (s *Server) addRaftPeer(m serf.Member, parts *serverParts) error {
	// but we want to avoid doing that if possible to prevent useless Raft
	// log entries. If the address is the same but the ID changed, remove the
	// old server before adding the new one.
	minRaftProtocol, err := MinRaftProtocol(s.config.Region, members)
	minRaftProtocol, err := s.autopilot.MinRaftProtocol()
	if err != nil {
		return err
	}

@ -810,8 +821,7 @@ func (s *Server) addRaftPeer(m serf.Member, parts *serverParts) error {
	// Attempt to add as a peer
	switch {
	case minRaftProtocol >= 3:
		// todo(kyhavlov): change this to AddNonVoter when adding autopilot
		addFuture := s.raft.AddVoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
		addFuture := s.raft.AddNonvoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
		if err := addFuture.Error(); err != nil {
			s.logger.Printf("[ERR] nomad: failed to add raft peer: %v", err)
			return err

@ -836,7 +846,6 @@ func (s *Server) addRaftPeer(m serf.Member, parts *serverParts) error {
// removeRaftPeer is used to remove a Raft peer when a Nomad server leaves
// or is reaped
func (s *Server) removeRaftPeer(m serf.Member, parts *serverParts) error {
	// TODO (alexdadgar) - This will need to be changed once we support node IDs.
	addr := (&net.TCPAddr{IP: m.Addr, Port: parts.Port}).String()

	// See if it's already in the configuration. It's harmless to re-remove it

@ -848,7 +857,7 @@ func (s *Server) removeRaftPeer(m serf.Member, parts *serverParts) error {
		return err
	}

	minRaftProtocol, err := MinRaftProtocol(s.config.Region, s.serf.Members())
	minRaftProtocol, err := s.autopilot.MinRaftProtocol()
	if err != nil {
		return err
	}

@ -1163,3 +1172,31 @@ func diffACLTokens(state *state.StateStore, minIndex uint64, remoteList []*struc
		}
	return
}

// getOrCreateAutopilotConfig is used to get the autopilot config, initializing it if necessary
func (s *Server) getOrCreateAutopilotConfig() *autopilot.Config {
	state := s.fsm.State()
	_, config, err := state.AutopilotConfig()
	if err != nil {
		s.logger.Printf("[ERR] autopilot: failed to get config: %v", err)
		return nil
	}
	if config != nil {
		return config
	}

	if !ServersMeetMinimumVersion(s.Members(), minAutopilotVersion) {
		s.logger.Printf("[INFO] autopilot: version %v", s.Members()[0].Tags)
		s.logger.Printf("[WARN] autopilot: can't initialize until all servers are >= %s", minAutopilotVersion.String())
		return nil
	}

	config = s.config.AutopilotConfig
	req := structs.AutopilotSetConfigRequest{Config: *config}
	if _, _, err = s.raftApply(structs.AutopilotRequestType, req); err != nil {
		s.logger.Printf("[ERR] autopilot: failed to initialize config: %v", err)
		return nil
	}

	return config
}

@ -6,6 +6,7 @@ import (
	"testing"
	"time"

	"github.com/hashicorp/consul/testutil/retry"
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"

@ -815,21 +816,18 @@ func TestLeader_DiffACLTokens(t *testing.T) {
func TestLeader_UpgradeRaftVersion(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 2
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 1
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 2
	})
	defer s3.Shutdown()

@ -854,7 +852,7 @@ func TestLeader_UpgradeRaftVersion(t *testing.T) {
	}

	for _, s := range []*Server{s1, s3} {
		minVer, err := MinRaftProtocol(s1.config.Region, s.Members())
		minVer, err := s.autopilot.MinRaftProtocol()
		if err != nil {
			t.Fatal(err)
		}

@ -902,3 +900,81 @@ func TestLeader_UpgradeRaftVersion(t *testing.T) {
		})
	}
}

func TestLeader_RollRaftServer(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.RaftConfig.ProtocolVersion = 2
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
		c.RaftConfig.ProtocolVersion = 1
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
		c.RaftConfig.ProtocolVersion = 2
	})
	defer s3.Shutdown()

	servers := []*Server{s1, s2, s3}

	// Try to join
	testJoin(t, s1, s2, s3)

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}

	// Kill the v1 server
	s2.Shutdown()

	for _, s := range []*Server{s1, s3} {
		retry.Run(t, func(r *retry.R) {
			minVer, err := s.autopilot.MinRaftProtocol()
			if err != nil {
				r.Fatal(err)
			}
			if got, want := minVer, 2; got != want {
				r.Fatalf("got min raft version %d want %d", got, want)
			}
		})
	}

	// Replace the dead server with one running raft protocol v3
	s4 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
		c.RaftConfig.ProtocolVersion = 3
	})
	defer s4.Shutdown()
	testJoin(t, s4, s1)
	servers[1] = s4

	// Make sure the dead server is removed and we're back to 3 total peers
	for _, s := range servers {
		retry.Run(t, func(r *retry.R) {
			addrs := 0
			ids := 0
			future := s.raft.GetConfiguration()
			if err := future.Error(); err != nil {
				r.Fatal(err)
			}
			for _, server := range future.Configuration().Servers {
				if string(server.ID) == string(server.Address) {
					addrs++
				} else {
					ids++
				}
			}
			if got, want := addrs, 2; got != want {
				r.Fatalf("got %d server addresses want %d", got, want)
			}
			if got, want := ids, 1; got != want {
				r.Fatalf("got %d server ids want %d", got, want)
			}
		})
	}
}

@ -4,6 +4,7 @@ import (
	"fmt"
	"net"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/serf/serf"

@ -124,3 +125,161 @@ REMOVE:
	op.srv.logger.Printf("[WARN] nomad.operator: Removed Raft peer %q", args.Address)
	return nil
}

// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port". The reply argument is not used, but is required to fulfill the RPC
// interface.
func (op *Operator) RaftRemovePeerByID(args *structs.RaftPeerByIDRequest, reply *struct{}) error {
	if done, err := op.srv.forward("Operator.RaftRemovePeerByID", args, args, reply); done {
		return err
	}

	// Check management permissions
	if aclObj, err := op.srv.ResolveToken(args.AuthToken); err != nil {
		return err
	} else if aclObj != nil && !aclObj.IsManagement() {
		return structs.ErrPermissionDenied
	}

	// Since this is an operation designed for humans to use, we will return
	// an error if the supplied id isn't among the peers since it's
	// likely they screwed up.
	var address raft.ServerAddress
	{
		future := op.srv.raft.GetConfiguration()
		if err := future.Error(); err != nil {
			return err
		}
		for _, s := range future.Configuration().Servers {
			if s.ID == args.ID {
				address = s.Address
				goto REMOVE
			}
		}
		return fmt.Errorf("id %q was not found in the Raft configuration",
			args.ID)
	}

REMOVE:
	// The Raft library itself will prevent various forms of foot-shooting,
	// like making a configuration with no voters. Some consideration was
	// given here to adding more checks, but it was decided to make this as
	// low-level and direct as possible. We've got ACL coverage to lock this
	// down, and if you are an operator, it's assumed you know what you are
	// doing if you are calling this. If you remove a peer that's known to
	// Serf, for example, it will come back when the leader does a reconcile
	// pass.
	minRaftProtocol, err := op.srv.autopilot.MinRaftProtocol()
	if err != nil {
		return err
	}

	var future raft.Future
	if minRaftProtocol >= 2 {
		future = op.srv.raft.RemoveServer(args.ID, 0, 0)
	} else {
		future = op.srv.raft.RemovePeer(address)
	}
	if err := future.Error(); err != nil {
		op.srv.logger.Printf("[WARN] nomad.operator: Failed to remove Raft peer with id %q: %v",
			args.ID, err)
		return err
	}

	op.srv.logger.Printf("[WARN] nomad.operator: Removed Raft peer with id %q", args.ID)
	return nil
}

// AutopilotGetConfiguration is used to retrieve the current Autopilot configuration.
func (op *Operator) AutopilotGetConfiguration(args *structs.GenericRequest, reply *autopilot.Config) error {
	if done, err := op.srv.forward("Operator.AutopilotGetConfiguration", args, args, reply); done {
		return err
	}

	// This action requires operator read access.
	rule, err := op.srv.ResolveToken(args.AuthToken)
	if err != nil {
		return err
	}
	if rule != nil && !rule.AllowOperatorRead() {
		return structs.ErrPermissionDenied
	}

	state := op.srv.fsm.State()
	_, config, err := state.AutopilotConfig()
	if err != nil {
		return err
	}
	if config == nil {
		return fmt.Errorf("autopilot config not initialized yet")
	}

	*reply = *config

	return nil
}

// AutopilotSetConfiguration is used to set the current Autopilot configuration.
func (op *Operator) AutopilotSetConfiguration(args *structs.AutopilotSetConfigRequest, reply *bool) error {
	if done, err := op.srv.forward("Operator.AutopilotSetConfiguration", args, args, reply); done {
		return err
	}

	// This action requires operator write access.
	rule, err := op.srv.ResolveToken(args.AuthToken)
	if err != nil {
		return err
	}
	if rule != nil && !rule.AllowOperatorWrite() {
		return structs.ErrPermissionDenied
	}

	// Apply the update
	resp, _, err := op.srv.raftApply(structs.AutopilotRequestType, args)
	if err != nil {
		op.srv.logger.Printf("[ERR] nomad.operator: Apply failed: %v", err)
		return err
	}
	if respErr, ok := resp.(error); ok {
		return respErr
	}

	// Check if the return type is a bool.
	if respBool, ok := resp.(bool); ok {
		*reply = respBool
	}
	return nil
}

// ServerHealth is used to get the current health of the servers.
func (op *Operator) ServerHealth(args *structs.GenericRequest, reply *autopilot.OperatorHealthReply) error {
	// This must be sent to the leader, so we fix the args since we are
	// re-using a structure where we don't support all the options.
	args.AllowStale = false
	if done, err := op.srv.forward("Operator.ServerHealth", args, args, reply); done {
		return err
	}

	// This action requires operator read access.
	rule, err := op.srv.ResolveToken(args.AuthToken)
	if err != nil {
		return err
	}
	if rule != nil && !rule.AllowOperatorRead() {
		return structs.ErrPermissionDenied
	}

	// Exit early if the min Raft version is too low
	minRaftProtocol, err := op.srv.autopilot.MinRaftProtocol()
	if err != nil {
		return fmt.Errorf("error getting server raft protocol versions: %s", err)
	}
	if minRaftProtocol < 3 {
		return fmt.Errorf("all servers must have raft_protocol set to 3 or higher to use this endpoint")
	}

	*reply = op.srv.autopilot.GetClusterHealth()

	return nil
}

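For reference, a sketch (not part of this change) of driving the new operator endpoints over RPC, modeled on the msgpackrpc pattern the tests below use; the region value and error handling are illustrative assumptions:

	// Hedged example: read the autopilot config, flip one field, write it back.
	get := structs.GenericRequest{}
	get.Region = "global" // assumption: default region name
	var conf autopilot.Config
	if err := msgpackrpc.CallWithCodec(codec, "Operator.AutopilotGetConfiguration", &get, &conf); err != nil {
		// handle error
	}

	set := structs.AutopilotSetConfigRequest{Config: conf}
	set.Config.CleanupDeadServers = false
	set.Region = "global"
	var applied bool
	if err := msgpackrpc.CallWithCodec(codec, "Operator.AutopilotSetConfiguration", &set, &applied); err != nil {
		// handle error
	}
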
@ -225,3 +225,111 @@ func TestOperator_RaftRemovePeerByAddress_ACL(t *testing.T) {
		assert.Nil(err)
	}
}

func TestOperator_RaftRemovePeerByID(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.RaftConfig.ProtocolVersion = 3
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Try to remove a peer that's not there.
	arg := structs.RaftPeerByIDRequest{
		ID: raft.ServerID("e35bde83-4e9c-434f-a6ef-453f44ee21ea"),
	}
	arg.Region = s1.config.Region
	var reply struct{}
	err := msgpackrpc.CallWithCodec(codec, "Operator.RaftRemovePeerByID", &arg, &reply)
	if err == nil || !strings.Contains(err.Error(), "not found in the Raft configuration") {
		t.Fatalf("err: %v", err)
	}

	// Add it manually to Raft.
	{
		future := s1.raft.AddVoter(arg.ID, raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", freeport.GetT(t, 1)[0])), 0, 0)
		if err := future.Error(); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Make sure it's there.
	{
		future := s1.raft.GetConfiguration()
		if err := future.Error(); err != nil {
			t.Fatalf("err: %v", err)
		}
		configuration := future.Configuration()
		if len(configuration.Servers) != 2 {
			t.Fatalf("bad: %v", configuration)
		}
	}

	// Remove it, now it should go through.
	if err := msgpackrpc.CallWithCodec(codec, "Operator.RaftRemovePeerByID", &arg, &reply); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure it's not there.
	{
		future := s1.raft.GetConfiguration()
		if err := future.Error(); err != nil {
			t.Fatalf("err: %v", err)
		}
		configuration := future.Configuration()
		if len(configuration.Servers) != 1 {
			t.Fatalf("bad: %v", configuration)
		}
	}
}

func TestOperator_RaftRemovePeerByID_ACL(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.RaftConfig.ProtocolVersion = 3
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	assert := assert.New(t)
	state := s1.fsm.State()

	// Create ACL token
	invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid", mock.NodePolicy(acl.PolicyWrite))

	arg := structs.RaftPeerByIDRequest{
		ID: raft.ServerID("e35bde83-4e9c-434f-a6ef-453f44ee21ea"),
	}
	arg.Region = s1.config.Region

	// Add peer manually to Raft.
	{
		future := s1.raft.AddVoter(arg.ID, raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", freeport.GetT(t, 1)[0])), 0, 0)
		assert.Nil(future.Error())
	}

	var reply struct{}

	// Try with no token and expect permission denied
	{
		err := msgpackrpc.CallWithCodec(codec, "Operator.RaftRemovePeerByID", &arg, &reply)
		assert.NotNil(err)
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with an invalid token and expect permission denied
	{
		arg.AuthToken = invalidToken.SecretID
		err := msgpackrpc.CallWithCodec(codec, "Operator.RaftRemovePeerByID", &arg, &reply)
		assert.NotNil(err)
		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
	}

	// Try with a management token
	{
		arg.AuthToken = root.SecretID
		err := msgpackrpc.CallWithCodec(codec, "Operator.RaftRemovePeerByID", &arg, &reply)
		assert.Nil(err)
	}
}

@ -184,7 +184,7 @@ func (s *Server) maybeBootstrap() {
	// Attempt a live bootstrap!
	var configuration raft.Configuration
	var addrs []string
	minRaftVersion, err := MinRaftProtocol(s.config.Region, members)
	minRaftVersion, err := s.autopilot.MinRaftProtocol()
	if err != nil {
		s.logger.Printf("[ERR] nomad: Failed to read server raft versions: %v", err)
	}

@ -17,6 +17,7 @@ import (
	"sync/atomic"
	"time"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	multierror "github.com/hashicorp/go-multierror"

@ -100,6 +101,9 @@ type Server struct {
	raftInmem     *raft.InmemStore
	raftTransport *raft.NetworkTransport

	// autopilot is the Autopilot instance for this server.
	autopilot *autopilot.Autopilot

	// fsm is the state machine used with Raft
	fsm *nomadFSM

@ -171,6 +175,10 @@ type Server struct {
	leaderAcl     string
	leaderAclLock sync.Mutex

	// statsFetcher is used by autopilot to check the status of the other
	// Nomad router.
	statsFetcher *StatsFetcher

	// EnterpriseState is used to fill in state for Pro/Ent builds
	EnterpriseState

@ -271,6 +279,9 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, logger *log.Logg
	// Create the periodic dispatcher for launching periodic jobs.
	s.periodicDispatcher = NewPeriodicDispatch(s.logger, s)

	// Initialize the stats fetcher that autopilot will use.
	s.statsFetcher = NewStatsFetcher(logger, s.connPool, s.config.Region)

	// Setup Vault
	if err := s.setupVaultClient(); err != nil {
		s.Shutdown()

@ -346,6 +357,9 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI, logger *log.Logg
	// Emit metrics
	go s.heartbeatStats()

	// Start the server health checking.
	go s.autopilot.ServerHealthLoop(s.shutdownCh)

	// Start enterprise background workers
	s.startEnterpriseBackground()

@ -425,8 +439,6 @@ func (s *Server) Leave() error {
		return err
	}

	// TODO (alexdadgar) - This will need to be updated before 0.8 release to
	// correctly handle using node IDs instead of address when raftProtocol = 3
	addr := s.raftTransport.LocalAddr()

	// If we are the current leader, and we have any other peers (cluster has multiple

@ -435,9 +447,21 @@ func (s *Server) Leave() error {
	// for some sane period of time.
	isLeader := s.IsLeader()
	if isLeader && numPeers > 1 {
		future := s.raft.RemovePeer(addr)
		if err := future.Error(); err != nil {
			s.logger.Printf("[ERR] nomad: failed to remove ourself as raft peer: %v", err)
		minRaftProtocol, err := s.autopilot.MinRaftProtocol()
		if err != nil {
			return err
		}

		if minRaftProtocol >= 2 && s.config.RaftConfig.ProtocolVersion >= 3 {
			future := s.raft.RemoveServer(raft.ServerID(s.config.NodeID), 0, 0)
			if err := future.Error(); err != nil {
				s.logger.Printf("[ERR] nomad: failed to remove ourself as raft peer: %v", err)
			}
		} else {
			future := s.raft.RemovePeer(addr)
			if err := future.Error(); err != nil {
				s.logger.Printf("[ERR] nomad: failed to remove ourself as raft peer: %v", err)
			}
		}
	}

@ -777,6 +801,8 @@ func (s *Server) setupRPC(tlsWrap tlsutil.RegionWrapper) error {
	}
	s.rpcListener = list

	s.logger.Printf("[INFO] nomad: RPC listening on %q", s.rpcListener.Addr().String())

	if s.config.RPCAdvertise != nil {
		s.rpcAdvertise = s.config.RPCAdvertise
	} else {

@ -935,8 +961,6 @@ func (s *Server) setupRaft() error {
		return err
	}
	if !hasState {
		// TODO (alexdadgar) - This will need to be updated when
		// we add support for node IDs.
		configuration := raft.Configuration{
			Servers: []raft.Server{
				{

@ -977,6 +1001,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
	conf.Tags["build"] = s.config.Build
	conf.Tags["raft_vsn"] = fmt.Sprintf("%d", s.config.RaftConfig.ProtocolVersion)
	conf.Tags["id"] = s.config.NodeID
	conf.Tags["rpc_addr"] = s.rpcAdvertise.(*net.TCPAddr).IP.String()
	conf.Tags["port"] = fmt.Sprintf("%d", s.rpcAdvertise.(*net.TCPAddr).Port)
	if s.config.Bootstrap || (s.config.DevMode && !s.config.DevDisableBootstrap) {
		conf.Tags["bootstrap"] = "1"

@ -985,6 +1010,9 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
	if bootstrapExpect != 0 {
		conf.Tags["expect"] = fmt.Sprintf("%d", bootstrapExpect)
	}
	if s.config.NonVoter {
		conf.Tags["nonvoter"] = "1"
	}
	conf.MemberlistConfig.LogOutput = s.config.LogOutput
	conf.LogOutput = s.config.LogOutput
	conf.EventCh = ch

@ -2,9 +2,15 @@

package nomad

import "github.com/hashicorp/consul/agent/consul/autopilot"

type EnterpriseState struct{}

func (s *Server) setupEnterprise(config *Config) error {
	// Set up the OSS version of autopilot
	apDelegate := &AutopilotDelegate{s}
	s.autopilot = autopilot.NewAutopilot(s.logger, apDelegate, config.AutopilotInterval, config.ServerHealthInterval)

	return nil
}

@ -55,7 +55,7 @@ func testACLServer(t *testing.T, cb func(*Config)) (*Server, *structs.ACLToken)
func testServer(t *testing.T, cb func(*Config)) *Server {
	// Setup the default settings
	config := DefaultConfig()
	config.Build = "0.7.0+unittest"
	config.Build = "0.8.0+unittest"
	config.DevMode = true
	nodeNum := atomic.AddUint32(&nodeNumber, 1)
	config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

@ -74,6 +74,11 @@ func testServer(t *testing.T, cb func(*Config)) *Server {
	config.RaftConfig.ElectionTimeout = 50 * time.Millisecond
	config.RaftTimeout = 500 * time.Millisecond

	// Tighten the autopilot timing
	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
	config.ServerHealthInterval = 50 * time.Millisecond
	config.AutopilotInterval = 100 * time.Millisecond

	// Disable Vault
	f := false
	config.VaultConfig.Enabled = &f

@ -0,0 +1,104 @@
package state

import (
	"fmt"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	"github.com/hashicorp/go-memdb"
)

// autopilotConfigTableSchema returns a new table schema used for storing
// the autopilot configuration
func autopilotConfigTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "autopilot-config",
		Indexes: map[string]*memdb.IndexSchema{
			"id": {
				Name:         "id",
				AllowMissing: true,
				Unique:       true,
				Indexer: &memdb.ConditionalIndex{
					Conditional: func(obj interface{}) (bool, error) { return true, nil },
				},
			},
		},
	}
}

// AutopilotConfig is used to get the current Autopilot configuration.
func (s *StateStore) AutopilotConfig() (uint64, *autopilot.Config, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

	// Get the autopilot config
	c, err := tx.First("autopilot-config", "id")
	if err != nil {
		return 0, nil, fmt.Errorf("failed autopilot config lookup: %s", err)
	}

	config, ok := c.(*autopilot.Config)
	if !ok {
		return 0, nil, nil
	}

	return config.ModifyIndex, config, nil
}

// AutopilotSetConfig is used to set the current Autopilot configuration.
func (s *StateStore) AutopilotSetConfig(idx uint64, config *autopilot.Config) error {
	tx := s.db.Txn(true)
	defer tx.Abort()

	s.autopilotSetConfigTxn(idx, tx, config)

	tx.Commit()
	return nil
}

// AutopilotCASConfig is used to try updating the Autopilot configuration with a
// given Raft index. If the CAS index specified is not equal to the last observed index
// for the config, then the call is a noop,
func (s *StateStore) AutopilotCASConfig(idx, cidx uint64, config *autopilot.Config) (bool, error) {
	tx := s.db.Txn(true)
	defer tx.Abort()

	// Check for an existing config
	existing, err := tx.First("autopilot-config", "id")
	if err != nil {
		return false, fmt.Errorf("failed autopilot config lookup: %s", err)
	}

	// If the existing index does not match the provided CAS
	// index arg, then we shouldn't update anything and can safely
	// return early here.
	e, ok := existing.(*autopilot.Config)
	if !ok || e.ModifyIndex != cidx {
		return false, nil
	}

	s.autopilotSetConfigTxn(idx, tx, config)

	tx.Commit()
	return true, nil
}

func (s *StateStore) autopilotSetConfigTxn(idx uint64, tx *memdb.Txn, config *autopilot.Config) error {
	// Check for an existing config
	existing, err := tx.First("autopilot-config", "id")
	if err != nil {
		return fmt.Errorf("failed autopilot config lookup: %s", err)
	}

	// Set the indexes.
	if existing != nil {
		config.CreateIndex = existing.(*autopilot.Config).CreateIndex
	} else {
		config.CreateIndex = idx
	}
	config.ModifyIndex = idx

	if err := tx.Insert("autopilot-config", config); err != nil {
		return fmt.Errorf("failed updating autopilot config: %s", err)
	}
	return nil
}

@ -0,0 +1,94 @@
package state

import (
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/consul/agent/consul/autopilot"
)

func TestStateStore_Autopilot(t *testing.T) {
	s := testStateStore(t)

	expected := &autopilot.Config{
		CleanupDeadServers:      true,
		LastContactThreshold:    5 * time.Second,
		MaxTrailingLogs:         500,
		ServerStabilizationTime: 100 * time.Second,
		RedundancyZoneTag:       "az",
		DisableUpgradeMigration: true,
		UpgradeVersionTag:       "build",
	}

	if err := s.AutopilotSetConfig(0, expected); err != nil {
		t.Fatal(err)
	}

	idx, config, err := s.AutopilotConfig()
	if err != nil {
		t.Fatal(err)
	}
	if idx != 0 {
		t.Fatalf("bad: %d", idx)
	}
	if !reflect.DeepEqual(expected, config) {
		t.Fatalf("bad: %#v, %#v", expected, config)
	}
}

func TestStateStore_AutopilotCAS(t *testing.T) {
	s := testStateStore(t)

	expected := &autopilot.Config{
		CleanupDeadServers: true,
	}

	if err := s.AutopilotSetConfig(0, expected); err != nil {
		t.Fatal(err)
	}
	if err := s.AutopilotSetConfig(1, expected); err != nil {
		t.Fatal(err)
	}

	// Do a CAS with an index lower than the entry
	ok, err := s.AutopilotCASConfig(2, 0, &autopilot.Config{
		CleanupDeadServers: false,
	})
	if ok || err != nil {
		t.Fatalf("expected (false, nil), got: (%v, %#v)", ok, err)
	}

	// Check that the index is untouched and the entry
	// has not been updated.
	idx, config, err := s.AutopilotConfig()
	if err != nil {
		t.Fatal(err)
	}
	if idx != 1 {
		t.Fatalf("bad: %d", idx)
	}
	if !config.CleanupDeadServers {
		t.Fatalf("bad: %#v", config)
	}

	// Do another CAS, this time with the correct index
	ok, err = s.AutopilotCASConfig(2, 1, &autopilot.Config{
		CleanupDeadServers: false,
	})
	if !ok || err != nil {
		t.Fatalf("expected (true, nil), got: (%v, %#v)", ok, err)
	}

	// Make sure the config was updated
	idx, config, err = s.AutopilotConfig()
	if err != nil {
		t.Fatal(err)
	}
	if idx != 2 {
		t.Fatalf("bad: %d", idx)
	}
	if config.CleanupDeadServers {
		t.Fatalf("bad: %#v", config)
	}
}

@ -43,6 +43,7 @@ func init() {
		vaultAccessorTableSchema,
		aclPolicyTableSchema,
		aclTokenTableSchema,
		autopilotConfigTableSchema,
	}...)
}

@ -0,0 +1,103 @@
package nomad

import (
	"context"
	"log"
	"sync"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	"github.com/hashicorp/serf/serf"
)

// StatsFetcher has two functions for autopilot. First, lets us fetch all the
// stats in parallel so we are taking a sample as close to the same time as
// possible, since we are comparing time-sensitive info for the health check.
// Second, it bounds the time so that one slow RPC can't hold up the health
// check loop; as a side effect of how it implements this, it also limits to
// a single in-flight RPC to any given server, so goroutines don't accumulate
// as we run the health check fairly frequently.
type StatsFetcher struct {
	logger       *log.Logger
	pool         *ConnPool
	region       string
	inflight     map[string]struct{}
	inflightLock sync.Mutex
}

// NewStatsFetcher returns a stats fetcher.
func NewStatsFetcher(logger *log.Logger, pool *ConnPool, region string) *StatsFetcher {
	return &StatsFetcher{
		logger:   logger,
		pool:     pool,
		region:   region,
		inflight: make(map[string]struct{}),
	}
}

// fetch does the RPC to fetch the server stats from a single server. We don't
// cancel this when the context is canceled because we only want one in-flight
// RPC to each server, so we let it finish and then clean up the in-flight
// tracking.
func (f *StatsFetcher) fetch(server *serverParts, replyCh chan *autopilot.ServerStats) {
	var args struct{}
	var reply autopilot.ServerStats
	err := f.pool.RPC(f.region, server.RPCAddr, server.MajorVersion, "Status.RaftStats", &args, &reply)
	if err != nil {
		f.logger.Printf("[WARN] nomad: error getting server health from %q: %v",
			server.Name, err)
	} else {
		replyCh <- &reply
	}

	f.inflightLock.Lock()
	delete(f.inflight, server.ID)
	f.inflightLock.Unlock()
}

// Fetch will attempt to query all the servers in parallel.
func (f *StatsFetcher) Fetch(ctx context.Context, members []serf.Member) map[string]*autopilot.ServerStats {
	type workItem struct {
		server  *serverParts
		replyCh chan *autopilot.ServerStats
	}
	var servers []*serverParts
	for _, s := range members {
		if ok, parts := isNomadServer(s); ok {
			servers = append(servers, parts)
		}
	}

	// Skip any servers that have inflight requests.
	var work []*workItem
	f.inflightLock.Lock()
	for _, server := range servers {
		if _, ok := f.inflight[server.ID]; ok {
			f.logger.Printf("[WARN] nomad: error getting server health from %q: last request still outstanding",
				server.Name)
		} else {
			workItem := &workItem{
				server:  server,
				replyCh: make(chan *autopilot.ServerStats, 1),
			}
			work = append(work, workItem)
			f.inflight[server.ID] = struct{}{}
			go f.fetch(workItem.server, workItem.replyCh)
		}
	}
	f.inflightLock.Unlock()

	// Now wait for the results to come in, or for the context to be
	// canceled.
	replies := make(map[string]*autopilot.ServerStats)
	for _, workItem := range work {
		select {
		case reply := <-workItem.replyCh:
			replies[workItem.server.ID] = reply

		case <-ctx.Done():
			f.logger.Printf("[WARN] nomad: error getting server health from %q: %v",
				workItem.server.Name, ctx.Err())
		}
	}
	return replies
}

@ -0,0 +1,95 @@
package nomad

import (
	"context"
	"testing"
	"time"

	"github.com/hashicorp/nomad/testutil"
)

func TestStatsFetcher(t *testing.T) {
	t.Parallel()

	conf := func(c *Config) {
		c.Region = "region-a"
		c.DevDisableBootstrap = true
		c.BootstrapExpect = 3
	}

	s1 := testServer(t, conf)
	defer s1.Shutdown()

	s2 := testServer(t, conf)
	defer s2.Shutdown()

	s3 := testServer(t, conf)
	defer s3.Shutdown()

	testJoin(t, s1, s2, s3)
	testutil.WaitForLeader(t, s1.RPC)

	members := s1.serf.Members()
	if len(members) != 3 {
		t.Fatalf("bad len: %d", len(members))
	}

	var servers []*serverParts
	for _, member := range members {
		ok, server := isNomadServer(member)
		if !ok {
			t.Fatalf("bad: %#v", member)
		}
		servers = append(servers, server)
	}

	// Do a normal fetch and make sure we get three responses.
	func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		stats := s1.statsFetcher.Fetch(ctx, s1.Members())
		if len(stats) != 3 {
			t.Fatalf("bad: %#v", stats)
		}
		for id, stat := range stats {
			switch id {
			case s1.config.NodeID, s2.config.NodeID, s3.config.NodeID:
				// OK
			default:
				t.Fatalf("bad: %s", id)
			}

			if stat == nil || stat.LastTerm == 0 {
				t.Fatalf("bad: %#v", stat)
			}
		}
	}()

	// Fake an in-flight request to server 3 and make sure we don't fetch
	// from it.
	func() {
		s1.statsFetcher.inflight[string(s3.config.NodeID)] = struct{}{}
		defer delete(s1.statsFetcher.inflight, string(s3.config.NodeID))

		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		stats := s1.statsFetcher.Fetch(ctx, s1.Members())
		if len(stats) != 2 {
			t.Fatalf("bad: %#v", stats)
		}
		for id, stat := range stats {
			switch id {
			case s1.config.NodeID, s2.config.NodeID:
				// OK
			case s3.config.NodeID:
				t.Fatalf("bad")
			default:
				t.Fatalf("bad: %s", id)
			}

			if stat == nil || stat.LastTerm == 0 {
				t.Fatalf("bad: %#v", stat)
			}
		}
	}()
}

@ -1,6 +1,10 @@
package nomad

import (
	"fmt"
	"strconv"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	"github.com/hashicorp/nomad/nomad/structs"
)

@ -104,3 +108,21 @@ func (s *Status) Members(args *structs.GenericRequest, reply *structs.ServerMemb
	}
	return nil
}

// Used by Autopilot to query the raft stats of the local server.
func (s *Status) RaftStats(args struct{}, reply *autopilot.ServerStats) error {
	stats := s.srv.raft.Stats()

	var err error
	reply.LastContact = stats["last_contact"]
	reply.LastIndex, err = strconv.ParseUint(stats["last_log_index"], 10, 64)
	if err != nil {
		return fmt.Errorf("error parsing server's last_log_index value: %s", err)
	}
	reply.LastTerm, err = strconv.ParseUint(stats["last_log_term"], 10, 64)
	if err != nil {
		return fmt.Errorf("error parsing server's last_log_term value: %s", err)
	}

	return nil
}

@@ -0,0 +1,98 @@
+package config
+
+import (
+	"time"
+
+	"github.com/hashicorp/nomad/helper"
+)
+
+type AutopilotConfig struct {
+	// CleanupDeadServers controls whether to remove dead servers when a new
+	// server is added to the Raft peers.
+	CleanupDeadServers *bool `mapstructure:"cleanup_dead_servers"`
+
+	// ServerStabilizationTime is the minimum amount of time a server must be
+	// in a stable, healthy state before it can be added to the cluster. Only
+	// applicable with Raft protocol version 3 or higher.
+	ServerStabilizationTime time.Duration `mapstructure:"server_stabilization_time"`
+
+	// LastContactThreshold is the limit on the amount of time a server can go
+	// without leader contact before being considered unhealthy.
+	LastContactThreshold time.Duration `mapstructure:"last_contact_threshold"`
+
+	// MaxTrailingLogs is the amount of entries in the Raft Log that a server can
+	// be behind before being considered unhealthy.
+	MaxTrailingLogs int `mapstructure:"max_trailing_logs"`
+
+	// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
+	// servers into zones for redundancy. If left blank, this feature will be disabled.
+	RedundancyZoneTag string `mapstructure:"redundancy_zone_tag"`
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration *bool `mapstructure:"disable_upgrade_migration"`
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Nomad version will be used.
+	UpgradeVersionTag string `mapstructure:"upgrade_version_tag"`
+}
+
+// DefaultAutopilotConfig() returns the canonical defaults for the Nomad
+// `autopilot` configuration.
+func DefaultAutopilotConfig() *AutopilotConfig {
+	return &AutopilotConfig{
+		CleanupDeadServers:      helper.BoolToPtr(true),
+		LastContactThreshold:    200 * time.Millisecond,
+		MaxTrailingLogs:         250,
+		ServerStabilizationTime: 10 * time.Second,
+	}
+}
+
+func (a *AutopilotConfig) Merge(b *AutopilotConfig) *AutopilotConfig {
+	result := a.Copy()
+
+	if b.CleanupDeadServers != nil {
+		result.CleanupDeadServers = helper.BoolToPtr(*b.CleanupDeadServers)
+	}
+	if b.ServerStabilizationTime != 0 {
+		result.ServerStabilizationTime = b.ServerStabilizationTime
+	}
+	if b.LastContactThreshold != 0 {
+		result.LastContactThreshold = b.LastContactThreshold
+	}
+	if b.MaxTrailingLogs != 0 {
+		result.MaxTrailingLogs = b.MaxTrailingLogs
+	}
+	if b.RedundancyZoneTag != "" {
+		result.RedundancyZoneTag = b.RedundancyZoneTag
+	}
+	if b.DisableUpgradeMigration != nil {
+		result.DisableUpgradeMigration = helper.BoolToPtr(*b.DisableUpgradeMigration)
+	}
+	if b.UpgradeVersionTag != "" {
+		result.UpgradeVersionTag = b.UpgradeVersionTag
+	}
+
+	return result
+}
+
+// Copy returns a copy of this Autopilot config.
+func (a *AutopilotConfig) Copy() *AutopilotConfig {
+	if a == nil {
+		return nil
+	}
+
+	nc := new(AutopilotConfig)
+	*nc = *a
+
+	// Copy the bools
+	if a.CleanupDeadServers != nil {
+		nc.CleanupDeadServers = helper.BoolToPtr(*a.CleanupDeadServers)
+	}
+	if a.DisableUpgradeMigration != nil {
+		nc.DisableUpgradeMigration = helper.BoolToPtr(*a.DisableUpgradeMigration)
+	}
+
+	return nc
+}
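Illustrative sketch (not part of this change): how the pieces above compose. A partially populated autopilot block is merged over DefaultAutopilotConfig(), with the pointer fields distinguishing "unset" from "explicitly false". The nomad/structs/config import path for the package defined above is assumed here, not shown in this diff.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/structs/config" // assumed import path for the package above
)

func main() {
	// Start from the canonical defaults defined above.
	defaults := config.DefaultAutopilotConfig()

	// Pretend this came from a parsed agent config file; only two keys were set.
	fromFile := &config.AutopilotConfig{
		CleanupDeadServers:   helper.BoolToPtr(false),
		LastContactThreshold: 500 * time.Millisecond,
	}

	// Merge returns a copy: file values win where set, defaults fill the rest.
	effective := defaults.Merge(fromFile)

	fmt.Println(*effective.CleanupDeadServers)     // false (explicitly overridden)
	fmt.Println(effective.LastContactThreshold)    // 500ms (overridden)
	fmt.Println(effective.MaxTrailingLogs)         // 250 (default retained)
	fmt.Println(effective.ServerStabilizationTime) // 10s (default retained)
}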
@@ -0,0 +1,46 @@
+package config
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestAutopilotConfig_Merge(t *testing.T) {
+	trueValue, falseValue := true, false
+
+	c1 := &AutopilotConfig{
+		CleanupDeadServers:      &falseValue,
+		ServerStabilizationTime: 1 * time.Second,
+		LastContactThreshold:    1 * time.Second,
+		MaxTrailingLogs:         1,
+		RedundancyZoneTag:       "1",
+		DisableUpgradeMigration: &falseValue,
+		UpgradeVersionTag:       "1",
+	}
+
+	c2 := &AutopilotConfig{
+		CleanupDeadServers:      &trueValue,
+		ServerStabilizationTime: 2 * time.Second,
+		LastContactThreshold:    2 * time.Second,
+		MaxTrailingLogs:         2,
+		RedundancyZoneTag:       "2",
+		DisableUpgradeMigration: nil,
+		UpgradeVersionTag:       "2",
+	}
+
+	e := &AutopilotConfig{
+		CleanupDeadServers:      &trueValue,
+		ServerStabilizationTime: 2 * time.Second,
+		LastContactThreshold:    2 * time.Second,
+		MaxTrailingLogs:         2,
+		RedundancyZoneTag:       "2",
+		DisableUpgradeMigration: &falseValue,
+		UpgradeVersionTag:       "2",
+	}
+
+	result := c1.Merge(c2)
+	if !reflect.DeepEqual(result, e) {
+		t.Fatalf("bad:\n%#v\n%#v", result, e)
+	}
+}
@@ -1,6 +1,7 @@
 package structs
 
 import (
+	"github.com/hashicorp/consul/agent/consul/autopilot"
 	"github.com/hashicorp/raft"
 )
 
@@ -50,3 +51,34 @@ type RaftPeerByAddressRequest struct {
 	// WriteRequest holds the Region for this request.
 	WriteRequest
 }
+
+// RaftPeerByIDRequest is used by the Operator endpoint to apply a Raft
+// operation on a specific Raft peer by ID.
+type RaftPeerByIDRequest struct {
+	// ID is the peer ID to remove.
+	ID raft.ServerID
+
+	// WriteRequest holds the Region for this request.
+	WriteRequest
+}
+
+// AutopilotSetConfigRequest is used by the Operator endpoint to update the
+// current Autopilot configuration of the cluster.
+type AutopilotSetConfigRequest struct {
+	// Datacenter is the target this request is intended for.
+	Datacenter string
+
+	// Config is the new Autopilot configuration to use.
+	Config autopilot.Config
+
+	// CAS controls whether to use check-and-set semantics for this request.
+	CAS bool
+
+	// WriteRequest holds the ACL token to go along with this request.
+	WriteRequest
+}
+
+// RequestDatacenter returns the datacenter for a given request.
+func (op *AutopilotSetConfigRequest) RequestDatacenter() string {
+	return op.Datacenter
+}
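Illustrative sketch (not part of this change): constructing a check-and-set Autopilot update with the request type defined above. The autopilot.Config field names come from the vendored Consul package and are assumed here rather than shown in this diff.

package main

import (
	"time"

	"github.com/hashicorp/consul/agent/consul/autopilot"
	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	// Illustrative only: a check-and-set update of the cluster's Autopilot settings.
	// The autopilot.Config field names are assumed from the vendored Consul package.
	req := structs.AutopilotSetConfigRequest{
		Datacenter: "dc1",
		Config: autopilot.Config{
			CleanupDeadServers:   true,
			LastContactThreshold: 200 * time.Millisecond,
			ModifyIndex:          42, // index the caller last read; honored only when CAS is true
		},
		CAS:          true,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	_ = req // would be forwarded to the Operator endpoint's set-configuration RPC
}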
@@ -78,6 +78,7 @@ const (
 	ACLTokenUpsertRequestType
 	ACLTokenDeleteRequestType
 	ACLTokenBootstrapRequestType
+	AutopilotRequestType
 )
 
 const (
@@ -46,7 +46,9 @@ type serverParts struct {
 	MinorVersion int
 	Build        version.Version
 	RaftVersion  int
+	NonVoter     bool
 	Addr         net.Addr
+	RPCAddr      net.Addr
 	Status       serf.MemberStatus
 }
 
@@ -69,24 +71,31 @@ func isNomadServer(m serf.Member) (bool, *serverParts) {
 	region := m.Tags["region"]
 	datacenter := m.Tags["dc"]
 	_, bootstrap := m.Tags["bootstrap"]
+	_, nonVoter := m.Tags["nonvoter"]
 
 	expect := 0
-	expect_str, ok := m.Tags["expect"]
+	expectStr, ok := m.Tags["expect"]
 	var err error
 	if ok {
-		expect, err = strconv.Atoi(expect_str)
+		expect, err = strconv.Atoi(expectStr)
 		if err != nil {
 			return false, nil
 		}
 	}
 
-	port_str := m.Tags["port"]
-	port, err := strconv.Atoi(port_str)
+	// If the server is missing the rpc_addr tag, default to the serf advertise addr
+	rpcIP := net.ParseIP(m.Tags["rpc_addr"])
+	if rpcIP == nil {
+		rpcIP = m.Addr
+	}
+
+	portStr := m.Tags["port"]
+	port, err := strconv.Atoi(portStr)
 	if err != nil {
 		return false, nil
 	}
 
-	build_version, err := version.NewVersion(m.Tags["build"])
+	buildVersion, err := version.NewVersion(m.Tags["build"])
 	if err != nil {
 		return false, nil
 	}
@@ -106,16 +115,17 @@ func isNomadServer(m serf.Member) (bool, *serverParts) {
 		minorVersion = 0
 	}
 
-	raft_vsn := 0
-	raft_vsn_str, ok := m.Tags["raft_vsn"]
+	raftVsn := 0
+	raftVsnString, ok := m.Tags["raft_vsn"]
 	if ok {
-		raft_vsn, err = strconv.Atoi(raft_vsn_str)
+		raftVsn, err = strconv.Atoi(raftVsnString)
 		if err != nil {
 			return false, nil
 		}
 	}
 
 	addr := &net.TCPAddr{IP: m.Addr, Port: port}
+	rpcAddr := &net.TCPAddr{IP: rpcIP, Port: port}
 	parts := &serverParts{
 		Name: m.Name,
 		ID:   id,
@@ -125,10 +135,12 @@ func isNomadServer(m serf.Member) (bool, *serverParts) {
 		Bootstrap:    bootstrap,
 		Expect:       expect,
 		Addr:         addr,
+		RPCAddr:      rpcAddr,
 		MajorVersion: majorVersion,
 		MinorVersion: minorVersion,
-		Build:        *build_version,
-		RaftVersion:  raft_vsn,
+		Build:        *buildVersion,
+		RaftVersion:  raftVsn,
+		NonVoter:     nonVoter,
 		Status:       m.Status,
 	}
 	return true, parts
@@ -139,7 +151,10 @@ func isNomadServer(m serf.Member) (bool, *serverParts) {
 func ServersMeetMinimumVersion(members []serf.Member, minVersion *version.Version) bool {
 	for _, member := range members {
 		if valid, parts := isNomadServer(member); valid && parts.Status == serf.StatusAlive {
-			if parts.Build.LessThan(minVersion) {
+			// Check if the versions match - version.LessThan will return true for
+			// 0.8.0-rc1 < 0.8.0, so we want to ignore the metadata
+			versionsMatch := slicesMatch(minVersion.Segments(), parts.Build.Segments())
+			if parts.Build.LessThan(minVersion) && !versionsMatch {
 				return false
 			}
 		}
@@ -148,34 +163,26 @@ func ServersMeetMinimumVersion(members []serf.Member, minVersion *version.Version) bool {
 	return true
 }
 
-// MinRaftProtocol returns the lowest supported Raft protocol among alive servers
-// in the given region.
-func MinRaftProtocol(region string, members []serf.Member) (int, error) {
-	minVersion := -1
-	for _, m := range members {
-		if m.Tags["role"] != "nomad" || m.Tags["region"] != region || m.Status != serf.StatusAlive {
-			continue
-		}
-
-		vsn, ok := m.Tags["raft_vsn"]
-		if !ok {
-			vsn = "1"
-		}
-		raftVsn, err := strconv.Atoi(vsn)
-		if err != nil {
-			return -1, err
-		}
-
-		if minVersion == -1 || raftVsn < minVersion {
-			minVersion = raftVsn
-		}
-	}
-
-	if minVersion == -1 {
-		return minVersion, fmt.Errorf("no servers found")
-	}
-
-	return minVersion, nil
+func slicesMatch(a, b []int) bool {
+	if a == nil && b == nil {
+		return true
+	}
+
+	if a == nil || b == nil {
+		return false
+	}
+
+	if len(a) != len(b) {
+		return false
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+
+	return true
 }
 
 // shuffleStrings randomly shuffles the list of strings
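Illustrative sketch (not part of this change): why ServersMeetMinimumVersion now pairs LessThan with slicesMatch. go-version treats a release candidate as older than the final release, so the numeric segments are compared separately to ignore the prerelease metadata.

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	minVersion, _ := version.NewVersion("0.8.0")
	build, _ := version.NewVersion("0.8.0-rc1")

	// Prerelease metadata makes 0.8.0-rc1 sort before 0.8.0 ...
	fmt.Println(build.LessThan(minVersion)) // true

	// ... but the numeric segments are identical, which is what slicesMatch detects.
	fmt.Println(build.Segments())      // [0 8 0]
	fmt.Println(minVersion.Segments()) // [0 8 0]
}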
@@ -1,7 +1,6 @@
 package nomad
 
 import (
-	"errors"
 	"net"
 	"reflect"
 	"testing"
@@ -18,12 +17,15 @@ func TestIsNomadServer(t *testing.T) {
 		Addr:   net.IP([]byte{127, 0, 0, 1}),
 		Status: serf.StatusAlive,
 		Tags: map[string]string{
 			"role":   "nomad",
 			"region": "aws",
 			"dc":     "east-aws",
-			"port":   "10000",
-			"vsn":    "1",
-			"build":  "0.7.0+ent",
+			"rpc_addr": "1.1.1.1",
+			"port":     "10000",
+			"vsn":      "1",
+			"raft_vsn": "2",
+			"nonvoter": "1",
+			"build":    "0.7.0+ent",
 		},
 	}
 	valid, parts := isNomadServer(m)
@@ -43,6 +45,15 @@ func TestIsNomadServer(t *testing.T) {
 	if parts.Status != serf.StatusAlive {
 		t.Fatalf("bad: %v", parts.Status)
 	}
+	if parts.RaftVersion != 2 {
+		t.Fatalf("bad: %v", parts.RaftVersion)
+	}
+	if parts.RPCAddr.String() != "1.1.1.1:10000" {
+		t.Fatalf("bad: %v", parts.RPCAddr.String())
+	}
+	if !parts.NonVoter {
+		t.Fatalf("bad: %v", parts.NonVoter)
+	}
 	if seg := parts.Build.Segments(); len(seg) != 3 {
 		t.Fatalf("bad: %v", parts.Build)
 	} else if seg[0] != 0 && seg[1] != 7 && seg[2] != 0 {
@@ -152,105 +163,6 @@ func TestServersMeetMinimumVersion(t *testing.T) {
 	}
 }
 
-func TestMinRaftProtocol(t *testing.T) {
-	t.Parallel()
-	makeMember := func(version, region string) serf.Member {
-		return serf.Member{
-			Name: "foo",
-			Addr: net.IP([]byte{127, 0, 0, 1}),
-			Tags: map[string]string{
-				"role":     "nomad",
-				"region":   region,
-				"dc":       "dc1",
-				"port":     "10000",
-				"vsn":      "1",
-				"raft_vsn": version,
-			},
-			Status: serf.StatusAlive,
-		}
-	}
-
-	cases := []struct {
-		members  []serf.Member
-		region   string
-		expected int
-		err      error
-	}{
-		// No servers, error
-		{
-			members:  []serf.Member{},
-			expected: -1,
-			err:      errors.New("no servers found"),
-		},
-		// One server
-		{
-			members: []serf.Member{
-				makeMember("1", "global"),
-			},
-			region:   "global",
-			expected: 1,
-		},
-		// One server, bad version formatting
-		{
-			members: []serf.Member{
-				makeMember("asdf", "global"),
-			},
-			region:   "global",
-			expected: -1,
-			err:      errors.New(`strconv.Atoi: parsing "asdf": invalid syntax`),
-		},
-		// One server, wrong datacenter
-		{
-			members: []serf.Member{
-				makeMember("1", "global"),
-			},
-			region:   "nope",
-			expected: -1,
-			err:      errors.New("no servers found"),
-		},
-		// Multiple servers, different versions
-		{
-			members: []serf.Member{
-				makeMember("1", "global"),
-				makeMember("2", "global"),
-			},
-			region:   "global",
-			expected: 1,
-		},
-		// Multiple servers, same version
-		{
-			members: []serf.Member{
-				makeMember("2", "global"),
-				makeMember("2", "global"),
-			},
-			region:   "global",
-			expected: 2,
-		},
-		// Multiple servers, multiple datacenters
-		{
-			members: []serf.Member{
-				makeMember("3", "r1"),
-				makeMember("2", "r1"),
-				makeMember("1", "r2"),
-			},
-			region:   "r1",
-			expected: 2,
-		},
-	}
-
-	for _, tc := range cases {
-		result, err := MinRaftProtocol(tc.region, tc.members)
-		if result != tc.expected {
-			t.Fatalf("bad: %v, %v, %v", result, tc.expected, tc)
-		}
-		if tc.err != nil {
-			if err == nil || tc.err.Error() != err.Error() {
-				t.Fatalf("bad: %v, %v, %v", err, tc.err, tc)
-			}
-		}
-	}
-}
-
 func TestShuffleStrings(t *testing.T) {
 	t.Parallel()
 	// Generate input
@@ -62,6 +62,7 @@ type PortsConfig struct {
 type ServerConfig struct {
 	Enabled         bool `json:"enabled"`
 	BootstrapExpect int  `json:"bootstrap_expect"`
+	RaftProtocol    int  `json:"raft_protocol,omitempty"`
 }
 
 // ClientConfig is used to configure the client
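Illustrative sketch (not part of this change): the JSON produced once raft_protocol is set on the test-harness ServerConfig above. The struct is mirrored locally here because its owning package is not named in this hunk.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the ServerConfig fields shown above; defined locally for the sketch.
type ServerConfig struct {
	Enabled         bool `json:"enabled"`
	BootstrapExpect int  `json:"bootstrap_expect"`
	RaftProtocol    int  `json:"raft_protocol,omitempty"`
}

func main() {
	out, _ := json.Marshal(ServerConfig{Enabled: true, BootstrapExpect: 3, RaftProtocol: 3})
	fmt.Println(string(out)) // {"enabled":true,"bootstrap_expect":3,"raft_protocol":3}
}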
@@ -0,0 +1,3 @@
+printWidth: 100
+singleQuote: true
+trailingComma: es5
@@ -1,21 +0,0 @@
----
-language: node_js
-node_js:
-  - "6"
-
-sudo: false
-
-cache:
-  directories:
-    - $HOME/.npm
-
-before_install:
-  - npm config set spin false
-  - npm install -g phantomjs-prebuilt
-  - phantomjs --version
-
-install:
-  - npm install
-
-script:
-  - npm test
@@ -1,15 +1,14 @@
-import Ember from 'ember';
+import { inject as service } from '@ember/service';
+import { computed, get } from '@ember/object';
 import RESTAdapter from 'ember-data/adapters/rest';
 import codesForError from '../utils/codes-for-error';
 
-const { get, computed, inject } = Ember;
-
 export const namespace = 'v1';
 
 export default RESTAdapter.extend({
   namespace,
 
-  token: inject.service(),
+  token: service(),
 
   headers: computed('token.secret', function() {
     const token = this.get('token.secret');
@@ -1,10 +1,10 @@
-import Ember from 'ember';
+import { inject as service } from '@ember/service';
+import RSVP from 'rsvp';
+import { assign } from '@ember/polyfills';
 import ApplicationAdapter from './application';
 
-const { RSVP, inject, assign } = Ember;
-
 export default ApplicationAdapter.extend({
-  system: inject.service(),
+  system: service(),
 
   shouldReloadAll: () => true,
 
@@ -1,10 +1,8 @@
-import Ember from 'ember';
+import { inject as service } from '@ember/service';
 import { default as ApplicationAdapter, namespace } from './application';
 
-const { inject } = Ember;
-
 export default ApplicationAdapter.extend({
-  store: inject.service(),
+  store: service(),
 
   namespace: namespace + '/acl',
 
@@ -1,16 +1,14 @@
-import Ember from 'ember';
+import Application from '@ember/application';
 import Resolver from './resolver';
 import loadInitializers from 'ember-load-initializers';
 import config from './config/environment';
 
 let App;
 
-Ember.MODEL_FACTORY_INJECTIONS = true;
-
-App = Ember.Application.extend({
+App = Application.extend({
   modulePrefix: config.modulePrefix,
   podModulePrefix: config.podModulePrefix,
-  Resolver
+  Resolver,
 });
 
 loadInitializers(App, config.modulePrefix);
@@ -1,10 +1,10 @@
-import Ember from 'ember';
+import { inject as service } from '@ember/service';
+import Component from '@ember/component';
+import { run } from '@ember/runloop';
 import { lazyClick } from '../helpers/lazy-click';
 
-const { Component, inject, run } = Ember;
-
 export default Component.extend({
-  store: inject.service(),
+  store: service(),
 
   tagName: 'tr',
 
@@ -1,8 +1,6 @@
-import Ember from 'ember';
+import { computed } from '@ember/object';
 import DistributionBar from './distribution-bar';
 
-const { computed } = Ember;
-
 export default DistributionBar.extend({
   layoutName: 'components/distribution-bar',
 
@@ -1,6 +1,4 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: '',
@@ -1,8 +1,6 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 import { lazyClick } from '../helpers/lazy-click';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: 'tr',
   classNames: ['client-node-row', 'is-interactive'],
@@ -1,10 +1,13 @@
-import Ember from 'ember';
+import Component from '@ember/component';
+import { computed } from '@ember/object';
+import { run } from '@ember/runloop';
+import { assign } from '@ember/polyfills';
+import { guidFor } from '@ember/object/internals';
 import d3 from 'npm:d3-selection';
 import 'npm:d3-transition';
 import WindowResizable from '../mixins/window-resizable';
 import styleStringProperty from '../utils/properties/style-string';
 
-const { Component, computed, run, assign, guidFor } = Ember;
-
 const sumAggregate = (total, val) => total + val;
 
 export default Component.extend(WindowResizable, {
@@ -96,7 +99,7 @@ export default Component.extend(WindowResizable, {
   });
 
   slices = slices.merge(slicesEnter);
-  slices.attr('class', d => d.className || `slice-${filteredData.indexOf(d)}`);
+  slices.attr('class', d => d.className || `slice-${_data.indexOf(d)}`);
 
   const setWidth = d => `${width * d.percent - (d.index === sliceCount - 1 || d.index === 0 ? 1 : 2)}px`
   const setOffset = d => `${width * d.offset + (d.index === 0 ? 0 : 1)}px`
@@ -0,0 +1,27 @@
+import Component from '@ember/component';
+import { computed } from '@ember/object';
+
+export default Component.extend({
+  variants: computed(() => [
+    {
+      key: 'Normal',
+      title: 'Normal',
+      slug: '',
+    },
+    {
+      key: 'Info',
+      title: 'Info',
+      slug: 'is-info',
+    },
+    {
+      key: 'Warning',
+      title: 'Warning',
+      slug: 'is-warning',
+    },
+    {
+      key: 'Danger',
+      title: 'Danger',
+      slug: 'is-danger',
+    },
+  ]),
+});
@@ -0,0 +1,97 @@
+import Component from '@ember/component';
+import { computed } from '@ember/object';
+
+export default Component.extend({
+  nomadTheme: computed(() => [
+    {
+      name: 'Primary',
+      base: '#25ba81',
+    },
+    {
+      name: 'Primary Dark',
+      base: '#1d9467',
+    },
+    {
+      name: 'Text',
+      base: '#0a0a0a',
+    },
+    {
+      name: 'Link',
+      base: '#1563ff',
+    },
+    {
+      name: 'Gray',
+      base: '#bbc4d1',
+    },
+    {
+      name: 'Off-white',
+      base: '#f5f5f5',
+    },
+  ]),
+
+  productColors: computed(() => [
+    {
+      name: 'Consul Pink',
+      base: '#ff0087',
+    },
+    {
+      name: 'Consul Pink Dark',
+      base: '#c62a71',
+    },
+    {
+      name: 'Packer Blue',
+      base: '#1daeff',
+    },
+    {
+      name: 'Packer Blue Dark',
+      base: '#1d94dd',
+    },
+    {
+      name: 'Terraform Purple',
+      base: '#5c4ee5',
+    },
+    {
+      name: 'Terraform Purple Dark',
+      base: '#4040b2',
+    },
+    {
+      name: 'Vagrant Blue',
+      base: '#1563ff',
+    },
+    {
+      name: 'Vagrant Blue Dark',
+      base: '#104eb2',
+    },
+    {
+      name: 'Nomad Green',
+      base: '#25ba81',
+    },
+    {
+      name: 'Nomad Green Dark',
+      base: '#1d9467',
+    },
+    {
+      name: 'Nomad Green Darker',
+      base: '#16704d',
+    },
+  ]),
+
+  emotiveColors: computed(() => [
+    {
+      name: 'Success',
+      base: '#23d160',
+    },
+    {
+      name: 'Warning',
+      base: '#fa8e23',
+    },
+    {
+      name: 'Danger',
+      base: '#c84034',
+    },
+    {
+      name: 'Info',
+      base: '#1563ff',
+    },
+  ]),
+});
|
@ -0,0 +1,13 @@
|
||||||
|
import Component from '@ember/component';
|
||||||
|
import { computed } from '@ember/object';
|
||||||
|
|
||||||
|
export default Component.extend({
|
||||||
|
distributionBarData: computed(() => {
|
||||||
|
return [
|
||||||
|
{ label: 'one', value: 10 },
|
||||||
|
{ label: 'two', value: 20 },
|
||||||
|
{ label: 'three', value: 0 },
|
||||||
|
{ label: 'four', value: 35 },
|
||||||
|
];
|
||||||
|
}),
|
||||||
|
});
|
|
@ -0,0 +1,43 @@
|
||||||
|
import Component from '@ember/component';
|
||||||
|
import { computed } from '@ember/object';
|
||||||
|
|
||||||
|
export default Component.extend({
|
||||||
|
timerTicks: 0,
|
||||||
|
|
||||||
|
startTimer: function() {
|
||||||
|
this.set(
|
||||||
|
'timer',
|
||||||
|
setInterval(() => {
|
||||||
|
this.incrementProperty('timerTicks');
|
||||||
|
}, 500)
|
||||||
|
);
|
||||||
|
}.on('init'),
|
||||||
|
|
||||||
|
willDestroy() {
|
||||||
|
clearInterval(this.get('timer'));
|
||||||
|
},
|
||||||
|
|
||||||
|
distributionBarData: computed(() => {
|
||||||
|
return [
|
||||||
|
{ label: 'one', value: 10 },
|
||||||
|
{ label: 'two', value: 20 },
|
||||||
|
{ label: 'three', value: 30 },
|
||||||
|
];
|
||||||
|
}),
|
||||||
|
|
||||||
|
distributionBarDataWithClasses: computed(() => {
|
||||||
|
return [
|
||||||
|
{ label: 'Queued', value: 10, className: 'queued' },
|
||||||
|
{ label: 'Complete', value: 20, className: 'complete' },
|
||||||
|
{ label: 'Failed', value: 30, className: 'failed' },
|
||||||
|
];
|
||||||
|
}),
|
||||||
|
|
||||||
|
distributionBarDataRotating: computed('timerTicks', () => {
|
||||||
|
return [
|
||||||
|
{ label: 'one', value: Math.round(Math.random() * 50) },
|
||||||
|
{ label: 'two', value: Math.round(Math.random() * 50) },
|
||||||
|
{ label: 'three', value: Math.round(Math.random() * 50) },
|
||||||
|
];
|
||||||
|
}),
|
||||||
|
});
|
|
@ -1,9 +1,9 @@
|
||||||
import Ember from 'ember';
|
import { inject as service } from '@ember/service';
|
||||||
|
import Component from '@ember/component';
|
||||||
const { Component, inject, computed } = Ember;
|
import { computed } from '@ember/object';
|
||||||
|
|
||||||
export default Component.extend({
|
export default Component.extend({
|
||||||
system: inject.service(),
|
system: service(),
|
||||||
|
|
||||||
sortedNamespaces: computed('system.namespaces.@each.name', function() {
|
sortedNamespaces: computed('system.namespaces.@each.name', function() {
|
||||||
const namespaces = this.get('system.namespaces').toArray() || [];
|
const namespaces = this.get('system.namespaces').toArray() || [];
|
||||||
|
|
|
@ -1,6 +1,4 @@
|
||||||
import Ember from 'ember';
|
import Component from '@ember/component';
|
||||||
|
|
||||||
const { Component } = Ember;
|
|
||||||
|
|
||||||
export default Component.extend({
|
export default Component.extend({
|
||||||
classNames: ['job-deployment', 'boxed-section'],
|
classNames: ['job-deployment', 'boxed-section'],
|
||||||
|
|
|
@ -1,6 +1,4 @@
|
||||||
import Ember from 'ember';
|
import Component from '@ember/component';
|
||||||
|
|
||||||
const { Component } = Ember;
|
|
||||||
|
|
||||||
export default Component.extend({
|
export default Component.extend({
|
||||||
tagName: '',
|
tagName: '',
|
||||||
|
|
|
@ -1,17 +1,16 @@
|
||||||
import Ember from 'ember';
|
import Component from '@ember/component';
|
||||||
|
import { computed } from '@ember/object';
|
||||||
import moment from 'moment';
|
import moment from 'moment';
|
||||||
|
|
||||||
const { Component, computed } = Ember;
|
|
||||||
|
|
||||||
export default Component.extend({
|
export default Component.extend({
|
||||||
tagName: 'ol',
|
tagName: 'ol',
|
||||||
classNames: ['timeline'],
|
classNames: ['timeline'],
|
||||||
|
|
||||||
deployments: computed(() => []),
|
deployments: computed(() => []),
|
||||||
|
|
||||||
sortedDeployments: computed('deployments.@each.version.submitTime', function() {
|
sortedDeployments: computed('deployments.@each.versionSubmitTime', function() {
|
||||||
return this.get('deployments')
|
return this.get('deployments')
|
||||||
.sortBy('version.submitTime')
|
.sortBy('versionSubmitTime')
|
||||||
.reverse();
|
.reverse();
|
||||||
}),
|
}),
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,4 @@
|
||||||
import Ember from 'ember';
|
import Component from '@ember/component';
|
||||||
|
|
||||||
const { Component } = Ember;
|
|
||||||
|
|
||||||
export default Component.extend({
|
export default Component.extend({
|
||||||
tagName: '',
|
tagName: '',
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
import Ember from 'ember';
|
import { equal } from '@ember/object/computed';
|
||||||
|
import Component from '@ember/component';
|
||||||
const { Component, computed } = Ember;
|
|
||||||
|
|
||||||
export default Component.extend({
|
export default Component.extend({
|
||||||
classNames: ['job-diff'],
|
classNames: ['job-diff'],
|
||||||
|
@ -10,7 +9,7 @@ export default Component.extend({
|
||||||
|
|
||||||
verbose: true,
|
verbose: true,
|
||||||
|
|
||||||
isEdited: computed.equal('diff.Type', 'Edited'),
|
isEdited: equal('diff.Type', 'Edited'),
|
||||||
isAdded: computed.equal('diff.Type', 'Added'),
|
isAdded: equal('diff.Type', 'Added'),
|
||||||
isDeleted: computed.equal('diff.Type', 'Deleted'),
|
isDeleted: equal('diff.Type', 'Deleted'),
|
||||||
});
|
});
|
||||||
|
|
|
@@ -1,8 +1,6 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 import { lazyClick } from '../helpers/lazy-click';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: 'tr',
   classNames: ['job-row', 'is-interactive'],
@@ -1,6 +1,5 @@
-import Ember from 'ember';
-
-const { Component, computed } = Ember;
+import Component from '@ember/component';
+import { computed } from '@ember/object';
 
 const changeTypes = ['Added', 'Deleted', 'Edited'];
 
@@ -1,8 +1,7 @@
-import Ember from 'ember';
+import Component from '@ember/component';
+import { computed } from '@ember/object';
 import moment from 'moment';
 
-const { Component, computed } = Ember;
-
 export default Component.extend({
   tagName: 'ol',
   classNames: ['timeline'],
@@ -1,8 +1,8 @@
-import Ember from 'ember';
+import Component from '@ember/component';
+import { computed } from '@ember/object';
+import { run } from '@ember/runloop';
 import JSONFormatterPkg from 'npm:json-formatter-js';
 
-const { Component, computed, run } = Ember;
-
 // json-formatter-js is packaged in a funny way that ember-cli-browserify
 // doesn't unwrap properly.
 const { default: JSONFormatter } = JSONFormatterPkg;
@@ -1,6 +1,5 @@
-import Ember from 'ember';
-
-const { Component, computed } = Ember;
+import Component from '@ember/component';
+import { computed } from '@ember/object';
 
 export default Component.extend({
   source: computed(() => []),
@@ -31,9 +30,11 @@ export default Component.extend({
     const lowerBound = Math.max(1, page - spread);
     const upperBound = Math.min(lastPage, page + spread) + 1;
 
-    return Array(upperBound - lowerBound).fill(null).map((_, index) => ({
-      pageNumber: lowerBound + index,
-    }));
+    return Array(upperBound - lowerBound)
+      .fill(null)
+      .map((_, index) => ({
+        pageNumber: lowerBound + index,
+      }));
   }),
 
   list: computed('source.[]', 'page', 'size', function() {
@@ -1,6 +1,4 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: '',
@@ -1,6 +1,5 @@
-import Ember from 'ember';
-
-const { Component, computed } = Ember;
+import Component from '@ember/component';
+import { computed } from '@ember/object';
 
 export default Component.extend({
   tagName: 'table',
@@ -1,6 +1,5 @@
-import Ember from 'ember';
-
-const { Component, computed } = Ember;
+import Component from '@ember/component';
+import { computed } from '@ember/object';
 
 export default Component.extend({
   tagName: 'th',
@@ -1,6 +1,4 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: 'tbody',
@@ -1,6 +1,4 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: 'thead',
@@ -1,13 +1,13 @@
-import Ember from 'ember';
-const { Component, computed, run } = Ember;
+import { reads } from '@ember/object/computed';
+import Component from '@ember/component';
+import { run } from '@ember/runloop';
 
 export default Component.extend({
   // Passed to the component (mutable)
   searchTerm: null,
 
   // Used as a debounce buffer
-  _searchTerm: computed.reads('searchTerm'),
+  _searchTerm: reads('searchTerm'),
 
   // Used to throttle sets to searchTerm
   debounce: 150,
 
@@ -1,13 +1,15 @@
-import Ember from 'ember';
+import { inject as service } from '@ember/service';
+import { alias } from '@ember/object/computed';
+import Component from '@ember/component';
+import { computed } from '@ember/object';
 import { lazyClick } from '../helpers/lazy-click';
 
-const { Component, inject, computed } = Ember;
-
 export default Component.extend({
-  // TODO Switch back to the router service style when it is no longer feature-flagged
+  // TODO Switch back to the router service once the service behaves more like Route
+  // https://github.com/emberjs/ember.js/issues/15801
   // router: inject.service('router'),
-  _router: inject.service('-routing'),
-  router: computed.alias('_router.router'),
+  _router: service('-routing'),
+  router: alias('_router.router'),
 
   tagName: 'tr',
   classNames: ['server-agent-row', 'is-interactive'],
@@ -15,7 +17,8 @@ export default Component.extend({
 
   agent: null,
   isActive: computed('agent', 'router.currentURL', function() {
-    // TODO Switch back to the router service style when it is no longer feature-flagged
+    // TODO Switch back to the router service once the service behaves more like Route
+    // https://github.com/emberjs/ember.js/issues/15801
     // const targetURL = this.get('router').urlFor('servers.server', this.get('agent'));
     // const currentURL = `${this.get('router.rootURL').slice(0, -1)}${this.get('router.currentURL')}`;
 
@@ -1,8 +1,6 @@
-import Ember from 'ember';
+import Component from '@ember/component';
 import { lazyClick } from '../helpers/lazy-click';
 
-const { Component } = Ember;
-
 export default Component.extend({
   tagName: 'tr',
 
@@ -1,12 +1,13 @@
-import Ember from 'ember';
+import { inject as service } from '@ember/service';
+import Component from '@ember/component';
+import { computed } from '@ember/object';
+import { run } from '@ember/runloop';
 import { task } from 'ember-concurrency';
 import { logger } from 'nomad-ui/utils/classes/log';
 import WindowResizable from 'nomad-ui/mixins/window-resizable';
 
-const { Component, computed, inject, run } = Ember;
-
 export default Component.extend(WindowResizable, {
-  token: inject.service(),
+  token: service(),
 
   classNames: ['boxed-section', 'task-log'],
 
@@ -1,5 +1,3 @@
-import Ember from 'ember';
-
-const { Controller } = Ember;
+import Controller from '@ember/controller';
 
 export default Controller.extend({});
@@ -1,8 +1,7 @@
-import Ember from 'ember';
+import { alias } from '@ember/object/computed';
+import Controller from '@ember/controller';
 import Sortable from 'nomad-ui/mixins/sortable';
 
-const { Controller, computed } = Ember;
-
 export default Controller.extend(Sortable, {
   queryParams: {
     sortProperty: 'sort',
@@ -12,6 +11,6 @@ export default Controller.extend(Sortable, {
   sortProperty: 'name',
   sortDescending: false,
 
-  listToSort: computed.alias('model.states'),
-  sortedStates: computed.alias('listSorted'),
+  listToSort: alias('model.states'),
+  sortedStates: alias('listSorted'),
 });
 