cd837b0b18
command/agent/*                  -> agent/*
command/consul/*                 -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go          -> command/base.go
command/base/*                   -> command/*
commands.go                      -> command/commands.go

The script which did the refactor is:

(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/

gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go

# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f

f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f

goimports -w command/*.go main.go
)
338 lines
9.7 KiB
Go
package agent

import (
    "fmt"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/hashicorp/consul/agent/consul/structs"
    "github.com/hashicorp/consul/api"
    multierror "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/raft"
)

// OperatorRaftConfiguration is used to inspect the current Raft configuration.
// This supports the stale query mode in case the cluster doesn't have a leader.
func (s *HTTPServer) OperatorRaftConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    if req.Method != "GET" {
        resp.WriteHeader(http.StatusMethodNotAllowed)
        return nil, nil
    }

    var args structs.DCSpecificRequest
    if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
        return nil, nil
    }

    var reply structs.RaftConfigurationResponse
    if err := s.agent.RPC("Operator.RaftGetConfiguration", &args, &reply); err != nil {
        return nil, err
    }

    return reply, nil
}
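
// Note (added for illustration, not part of the original file): the HTTP route
// for this handler is registered elsewhere, typically as
// GET /v1/operator/raft/configuration, with ?stale allowing the read to be
// served by a non-leader.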

// OperatorRaftPeer supports actions on Raft peers. Currently we only support
// removing peers by address or ID.
func (s *HTTPServer) OperatorRaftPeer(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    if req.Method != "DELETE" {
        resp.WriteHeader(http.StatusMethodNotAllowed)
        return nil, nil
    }

    var args structs.RaftRemovePeerRequest
    s.parseDC(req, &args.Datacenter)
    s.parseToken(req, &args.Token)

    params := req.URL.Query()
    _, hasID := params["id"]
    if hasID {
        args.ID = raft.ServerID(params.Get("id"))
    }
    _, hasAddress := params["address"]
    if hasAddress {
        args.Address = raft.ServerAddress(params.Get("address"))
    }

    if !hasID && !hasAddress {
        resp.WriteHeader(http.StatusBadRequest)
        fmt.Fprint(resp, "Must specify either ?id with the server's ID or ?address with IP:port of peer to remove")
        return nil, nil
    }
    if hasID && hasAddress {
        resp.WriteHeader(http.StatusBadRequest)
        fmt.Fprint(resp, "Must specify only one of ?id or ?address")
        return nil, nil
    }

    var reply struct{}
    method := "Operator.RaftRemovePeerByID"
    if hasAddress {
        method = "Operator.RaftRemovePeerByAddress"
    }
    if err := s.agent.RPC(method, &args, &reply); err != nil {
        return nil, err
    }

    return nil, nil
}
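
// Note (added for illustration): the corresponding route is registered elsewhere,
// typically as DELETE /v1/operator/raft/peer?address=<ip:port> or ?id=<server-id>.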

type keyringArgs struct {
    Key         string
    Token       string
    RelayFactor uint8
}

// OperatorKeyringEndpoint handles keyring operations (install, list, use, remove)
func (s *HTTPServer) OperatorKeyringEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    var args keyringArgs
    if req.Method == "POST" || req.Method == "PUT" || req.Method == "DELETE" {
        if err := decodeBody(req, &args, nil); err != nil {
            resp.WriteHeader(400)
            fmt.Fprintf(resp, "Request decode failed: %v", err)
            return nil, nil
        }
    }
    s.parseToken(req, &args.Token)

    // Parse relay factor
    if relayFactor := req.URL.Query().Get("relay-factor"); relayFactor != "" {
        n, err := strconv.Atoi(relayFactor)
        if err != nil {
            resp.WriteHeader(400)
            fmt.Fprintf(resp, "Error parsing relay factor: %v", err)
            return nil, nil
        }

        args.RelayFactor, err = ParseRelayFactor(n)
        if err != nil {
            resp.WriteHeader(400)
            fmt.Fprintf(resp, "Invalid relay factor: %v", err)
            return nil, nil
        }
    }

    // Switch on the method
    switch req.Method {
    case "GET":
        return s.KeyringList(resp, req, &args)
    case "POST":
        return s.KeyringInstall(resp, req, &args)
    case "PUT":
        return s.KeyringUse(resp, req, &args)
    case "DELETE":
        return s.KeyringRemove(resp, req, &args)
    default:
        resp.WriteHeader(http.StatusMethodNotAllowed)
        return nil, nil
    }
}
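
// Note (added for illustration): the HTTP method selects the keyring operation
// above (GET=list, POST=install, PUT=use, DELETE=remove). For the mutating
// methods the request body is decoded into keyringArgs, e.g.
// {"Key": "<base64 gossip key>"}.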

// KeyringInstall is used to install a new gossip encryption key into the cluster
func (s *HTTPServer) KeyringInstall(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
    responses, err := s.agent.InstallKey(args.Key, args.Token, args.RelayFactor)
    if err != nil {
        return nil, err
    }

    return nil, keyringErrorsOrNil(responses.Responses)
}

// KeyringList is used to list the keys installed in the cluster
func (s *HTTPServer) KeyringList(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
    responses, err := s.agent.ListKeys(args.Token, args.RelayFactor)
    if err != nil {
        return nil, err
    }

    return responses.Responses, keyringErrorsOrNil(responses.Responses)
}

// KeyringRemove is used to remove a gossip encryption key from the cluster
func (s *HTTPServer) KeyringRemove(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
    responses, err := s.agent.RemoveKey(args.Key, args.Token, args.RelayFactor)
    if err != nil {
        return nil, err
    }

    return nil, keyringErrorsOrNil(responses.Responses)
}

// KeyringUse is used to change the primary gossip encryption key
func (s *HTTPServer) KeyringUse(resp http.ResponseWriter, req *http.Request, args *keyringArgs) (interface{}, error) {
    responses, err := s.agent.UseKey(args.Key, args.Token, args.RelayFactor)
    if err != nil {
        return nil, err
    }

    return nil, keyringErrorsOrNil(responses.Responses)
}

func keyringErrorsOrNil(responses []*structs.KeyringResponse) error {
    var errs error
    for _, response := range responses {
        if response.Error != "" {
            pool := response.Datacenter + " (LAN)"
            if response.WAN {
                pool = "WAN"
            }
            errs = multierror.Append(errs, fmt.Errorf("%s error: %s", pool, response.Error))
            for key, message := range response.Messages {
                errs = multierror.Append(errs, fmt.Errorf("%s: %s", key, message))
            }
        }
    }
    return errs
}

// OperatorAutopilotConfiguration is used to inspect the current Autopilot configuration.
// This supports the stale query mode in case the cluster doesn't have a leader.
func (s *HTTPServer) OperatorAutopilotConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    // Switch on the method
    switch req.Method {
    case "GET":
        var args structs.DCSpecificRequest
        if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
            return nil, nil
        }

        var reply structs.AutopilotConfig
        if err := s.agent.RPC("Operator.AutopilotGetConfiguration", &args, &reply); err != nil {
            return nil, err
        }

        out := api.AutopilotConfiguration{
            CleanupDeadServers:      reply.CleanupDeadServers,
            LastContactThreshold:    api.NewReadableDuration(reply.LastContactThreshold),
            MaxTrailingLogs:         reply.MaxTrailingLogs,
            ServerStabilizationTime: api.NewReadableDuration(reply.ServerStabilizationTime),
            RedundancyZoneTag:       reply.RedundancyZoneTag,
            DisableUpgradeMigration: reply.DisableUpgradeMigration,
            CreateIndex:             reply.CreateIndex,
            ModifyIndex:             reply.ModifyIndex,
        }

        return out, nil

    case "PUT":
        var args structs.AutopilotSetConfigRequest
        s.parseDC(req, &args.Datacenter)
        s.parseToken(req, &args.Token)

        var conf api.AutopilotConfiguration
        if err := decodeBody(req, &conf, FixupConfigDurations); err != nil {
            resp.WriteHeader(400)
            fmt.Fprintf(resp, "Error parsing autopilot config: %v", err)
            return nil, nil
        }

        args.Config = structs.AutopilotConfig{
            CleanupDeadServers:      conf.CleanupDeadServers,
            LastContactThreshold:    conf.LastContactThreshold.Duration(),
            MaxTrailingLogs:         conf.MaxTrailingLogs,
            ServerStabilizationTime: conf.ServerStabilizationTime.Duration(),
            RedundancyZoneTag:       conf.RedundancyZoneTag,
            DisableUpgradeMigration: conf.DisableUpgradeMigration,
        }

        // Check for cas value
        params := req.URL.Query()
        if _, ok := params["cas"]; ok {
            casVal, err := strconv.ParseUint(params.Get("cas"), 10, 64)
            if err != nil {
                resp.WriteHeader(400)
                fmt.Fprintf(resp, "Error parsing cas value: %v", err)
                return nil, nil
            }
            args.Config.ModifyIndex = casVal
            args.CAS = true
        }

        var reply bool
        if err := s.agent.RPC("Operator.AutopilotSetConfiguration", &args, &reply); err != nil {
            return nil, err
        }

        // Only use the out value if this was a CAS
        if !args.CAS {
            return true, nil
        }
        return reply, nil

    default:
        resp.WriteHeader(http.StatusMethodNotAllowed)
        return nil, nil
    }
}
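
// Note (added for illustration): a check-and-set update is requested by adding
// ?cas=<ModifyIndex> to the PUT; in that case the boolean RPC reply indicates
// whether the CAS applied, otherwise the handler always returns true.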

// FixupConfigDurations is used to handle parsing the duration fields in
// the Autopilot config struct
func FixupConfigDurations(raw interface{}) error {
    rawMap, ok := raw.(map[string]interface{})
    if !ok {
        return nil
    }
    for key, val := range rawMap {
        if strings.ToLower(key) == "lastcontactthreshold" ||
            strings.ToLower(key) == "serverstabilizationtime" {
            // Convert a string value into a time.Duration
            if vStr, ok := val.(string); ok {
                dur, err := time.ParseDuration(vStr)
                if err != nil {
                    return err
                }
                rawMap[key] = dur
            }
        }
    }
    return nil
}
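
// Note (added for illustration): with this fixup a PUT body may express the
// duration fields as strings, e.g. {"LastContactThreshold": "200ms"}; they are
// parsed here before the raw map is decoded into api.AutopilotConfiguration.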

// OperatorServerHealth is used to get the health of the servers in the local DC
func (s *HTTPServer) OperatorServerHealth(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    if req.Method != "GET" {
        resp.WriteHeader(http.StatusMethodNotAllowed)
        return nil, nil
    }

    var args structs.DCSpecificRequest
    if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
        return nil, nil
    }

    var reply structs.OperatorHealthReply
    if err := s.agent.RPC("Operator.ServerHealth", &args, &reply); err != nil {
        return nil, err
    }

    // Reply with status 429 if something is unhealthy
    if !reply.Healthy {
        resp.WriteHeader(http.StatusTooManyRequests)
    }

    out := &api.OperatorHealthReply{
        Healthy:          reply.Healthy,
        FailureTolerance: reply.FailureTolerance,
    }
    for _, server := range reply.Servers {
        out.Servers = append(out.Servers, api.ServerHealth{
            ID:          server.ID,
            Name:        server.Name,
            Address:     server.Address,
            Version:     server.Version,
            Leader:      server.Leader,
            SerfStatus:  server.SerfStatus.String(),
            LastContact: api.NewReadableDuration(server.LastContact),
            LastTerm:    server.LastTerm,
            LastIndex:   server.LastIndex,
            Healthy:     server.Healthy,
            Voter:       server.Voter,
            StableSince: server.StableSince.Round(time.Second).UTC(),
        })
    }

    return out, nil
}
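
For context, a minimal sketch of how these operator endpoints might be exercised from the Go client in the same repository (github.com/hashicorp/consul/api). The default local agent address and the client method names are assumptions based on that client's API of the same era, not part of the file above.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    // Connect to a local agent using the client's defaults (assumed to be
    // http://127.0.0.1:8500 unless overridden by environment variables).
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    op := client.Operator()

    // GET /v1/operator/raft/configuration (served by OperatorRaftConfiguration above).
    raftCfg, err := op.RaftGetConfiguration(nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range raftCfg.Servers {
        fmt.Printf("raft peer %s at %s (leader=%v voter=%v)\n", s.Node, s.Address, s.Leader, s.Voter)
    }

    // GET /v1/operator/autopilot/configuration (served by OperatorAutopilotConfiguration above).
    apCfg, err := op.AutopilotGetConfiguration(nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("autopilot: cleanup_dead_servers=%v last_contact_threshold=%s\n",
        apCfg.CleanupDeadServers, apCfg.LastContactThreshold.Duration())
}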