update rest of consul packages
parent f492ab6d9e
commit 98ad59b1de
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	consulapi "github.com/hashicorp/consul/api"
-	ctestutil "github.com/hashicorp/consul/testutil"
+	ctestutil "github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/client/taskenv"

@@ -13,7 +13,7 @@ import (
 	"testing"
 
 	consulapi "github.com/hashicorp/consul/api"
-	consultest "github.com/hashicorp/consul/testutil"
+	consultest "github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/taskenv"

@@ -12,7 +12,7 @@ import (
 	"testing"
 	"time"
 
-	ctestutil "github.com/hashicorp/consul/testutil"
+	ctestutil "github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/nomad/client/config"
 	"github.com/hashicorp/nomad/client/taskenv"
 	"github.com/hashicorp/nomad/helper"

@@ -7,7 +7,7 @@ import (
 
-	"github.com/hashicorp/consul/api"
-	"github.com/hashicorp/consul/testutil"
+	consulapi "github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"

@@ -9,7 +9,7 @@ import (
 	"time"
 
 	consulapi "github.com/hashicorp/consul/api"
-	"github.com/hashicorp/consul/testutil"
+	"github.com/hashicorp/consul/sdk/testutil"
 	log "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"

@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/assert"

@@ -7,7 +7,7 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 )
 
 // reset will reverse the setup from initialize() and then redo it (for tests)

@@ -5,7 +5,7 @@ import (
 	"fmt"
 
-	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/stretchr/testify/require"

@@ -3,7 +3,7 @@ package apitests
 import (
 	"testing"
 
-	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/nomad/api"
 	"github.com/stretchr/testify/require"
 )

@@ -7,7 +7,7 @@ import (
 	"fmt"
 
 	"github.com/hashicorp/consul/agent/consul/autopilot"
-	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/hashicorp/raft"
 	"github.com/hashicorp/serf/serf"

@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/consul/testutil/retry"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/go-hclog"
 	memdb "github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-version"
@@ -18,15 +18,14 @@ const (
 	ACLManagementType = "management"
 )
 
-type ACLTokenPolicyLink struct {
-	ID   string
-	Name string
-}
-type ACLTokenRoleLink struct {
+type ACLLink struct {
 	ID   string
 	Name string
 }
+
+type ACLTokenPolicyLink = ACLLink
+type ACLTokenRoleLink = ACLLink
 
 // ACLToken represents an ACL Token
 type ACLToken struct {
 	CreateIndex uint64

@@ -46,6 +45,10 @@ type ACLToken struct {
 	// DEPRECATED (ACL-Legacy-Compat)
 	// Rules will only be present for legacy tokens returned via the new APIs
 	Rules string `json:",omitempty"`
+
+	// Namespace is the namespace the ACLToken is associated with.
+	// Namespaces is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 type ACLTokenListEntry struct {

@@ -61,6 +64,10 @@ type ACLTokenListEntry struct {
 	CreateTime time.Time
 	Hash       []byte
 	Legacy     bool
+
+	// Namespace is the namespace the ACLTokenListEntry is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 // ACLEntry is used to represent a legacy ACL token

@@ -105,6 +112,10 @@ type ACLPolicy struct {
 	Hash        []byte
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Namespace is the namespace the ACLPolicy is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 type ACLPolicyListEntry struct {

@@ -115,12 +126,13 @@ type ACLPolicyListEntry struct {
 	Hash        []byte
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Namespace is the namespace the ACLPolicyListEntry is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
-type ACLRolePolicyLink struct {
-	ID   string
-	Name string
-}
+type ACLRolePolicyLink = ACLLink
 
 // ACLRole represents an ACL Role.
 type ACLRole struct {

@@ -132,6 +144,10 @@ type ACLRole struct {
 	Hash        []byte
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Namespace is the namespace the ACLRole is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 // BindingRuleBindType is the type of binding rule mechanism used.

@@ -155,6 +171,10 @@ type ACLBindingRule struct {
 
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Namespace is the namespace the ACLBindingRule is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 type ACLAuthMethod struct {

@@ -169,6 +189,10 @@ type ACLAuthMethod struct {
 
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Namespace is the namespace the ACLAuthMethod is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 type ACLAuthMethodListEntry struct {

@@ -177,6 +201,10 @@ type ACLAuthMethodListEntry struct {
 	Description string
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Namespace is the namespace the ACLAuthMethodListEntry is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
 }
 
 // ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
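Because ACLTokenPolicyLink and ACLTokenRoleLink become aliases of the new ACLLink type, existing callers keep compiling unchanged; a minimal sketch (the token contents are illustrative, and the Namespace field needs Consul Enterprise):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// ACLTokenPolicyLink is now just an alias for ACLLink, so this composite
	// literal is identical to []*api.ACLLink{...} after the vendor update.
	token := &api.ACLToken{
		Description: "example token", // illustrative only
		Policies: []*api.ACLTokenPolicyLink{
			{ID: "policy-id", Name: "policy-name"},
		},
		Namespace: "default", // new field; Namespaces are Consul Enterprise only
	}
	fmt.Println(token.Policies[0].Name)
}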
@@ -23,23 +23,11 @@ const (
 	// service proxies another service within Consul and speaks the connect
 	// protocol.
 	ServiceKindConnectProxy ServiceKind = "connect-proxy"
-)
-
-// ProxyExecMode is the execution mode for a managed Connect proxy.
-type ProxyExecMode string
-
-const (
-	// ProxyExecModeDaemon indicates that the proxy command should be long-running
-	// and should be started and supervised by the agent until it's target service
-	// is deregistered.
-	ProxyExecModeDaemon ProxyExecMode = "daemon"
-
-	// ProxyExecModeScript indicates that the proxy command should be invoke to
-	// completion on each change to the configuration of lifecycle event. The
-	// script typically fetches the config and certificates from the agent API and
-	// then configures an externally managed daemon, perhaps starting and stopping
-	// it if necessary.
-	ProxyExecModeScript ProxyExecMode = "script"
+
+	// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
+	// service will proxy connections based off the SNI header set by other
+	// connect proxies
+	ServiceKindMeshGateway ServiceKind = "mesh-gateway"
 )
 
 // UpstreamDestType is the type of upstream discovery mechanism.
@@ -64,7 +52,9 @@ type AgentCheck struct {
 	Output      string
 	ServiceID   string
 	ServiceName string
+	Type        string
 	Definition  HealthCheckDefinition
+	Namespace   string `json:",omitempty"`
 }
 
 // AgentWeights represent optional weights for a service

@@ -82,15 +72,18 @@ type AgentService struct {
 	Meta              map[string]string
 	Port              int
 	Address           string
+	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
 	Weights           AgentWeights
 	EnableTagOverride bool
-	CreateIndex       uint64 `json:",omitempty" bexpr:"-"`
-	ModifyIndex       uint64 `json:",omitempty" bexpr:"-"`
-	ContentHash       string `json:",omitempty" bexpr:"-"`
-	// DEPRECATED (ProxyDestination) - remove this field
-	ProxyDestination string                          `json:",omitempty" bexpr:"-"`
-	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
-	Connect          *AgentServiceConnect            `json:",omitempty"`
+	CreateIndex       uint64                          `json:",omitempty" bexpr:"-"`
+	ModifyIndex       uint64                          `json:",omitempty" bexpr:"-"`
+	ContentHash       string                          `json:",omitempty" bexpr:"-"`
+	Proxy             *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect           *AgentServiceConnect            `json:",omitempty"`
+	// NOTE: If we ever set the ContentHash outside of singular service lookup then we may need
+	// to include the Namespace in the hash. When we do, then we are in for lots of fun with tests.
+	// For now though, ignoring it works well enough.
+	Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
 }
 
 // AgentServiceChecksInfo returns information about a Service and its checks
@@ -103,28 +96,20 @@ type AgentServiceChecksInfo struct {
 // AgentServiceConnect represents the Connect configuration of a service.
 type AgentServiceConnect struct {
 	Native         bool                       `json:",omitempty"`
-	Proxy          *AgentServiceConnectProxy  `json:",omitempty" bexpr:"-"`
 	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
 }
 
-// AgentServiceConnectProxy represents the Connect Proxy configuration of a
-// service.
-type AgentServiceConnectProxy struct {
-	ExecMode  ProxyExecMode          `json:",omitempty"`
-	Command   []string               `json:",omitempty"`
-	Config    map[string]interface{} `json:",omitempty" bexpr:"-"`
-	Upstreams []Upstream             `json:",omitempty"`
-}
-
 // AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
 // ServiceDefinition or response.
 type AgentServiceConnectProxyConfig struct {
-	DestinationServiceName string
+	DestinationServiceName string                 `json:",omitempty"`
 	DestinationServiceID   string                 `json:",omitempty"`
 	LocalServiceAddress    string                 `json:",omitempty"`
 	LocalServicePort       int                    `json:",omitempty"`
 	Config                 map[string]interface{} `json:",omitempty" bexpr:"-"`
-	Upstreams              []Upstream
+	Upstreams              []Upstream        `json:",omitempty"`
+	MeshGateway            MeshGatewayConfig `json:",omitempty"`
+	Expose                 ExposeConfig      `json:",omitempty"`
 }
 
 // AgentMember represents a cluster member known to the agent
@@ -157,21 +142,29 @@ type MembersOpts struct {
 
 // AgentServiceRegistration is used to register a new service
 type AgentServiceRegistration struct {
-	Kind              ServiceKind       `json:",omitempty"`
-	ID                string            `json:",omitempty"`
-	Name              string            `json:",omitempty"`
-	Tags              []string          `json:",omitempty"`
-	Port              int               `json:",omitempty"`
-	Address           string            `json:",omitempty"`
-	EnableTagOverride bool              `json:",omitempty"`
-	Meta              map[string]string `json:",omitempty"`
-	Weights           *AgentWeights     `json:",omitempty"`
+	Kind              ServiceKind               `json:",omitempty"`
+	ID                string                    `json:",omitempty"`
+	Name              string                    `json:",omitempty"`
+	Tags              []string                  `json:",omitempty"`
+	Port              int                       `json:",omitempty"`
+	Address           string                    `json:",omitempty"`
+	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
+	EnableTagOverride bool                      `json:",omitempty"`
+	Meta              map[string]string         `json:",omitempty"`
+	Weights           *AgentWeights             `json:",omitempty"`
 	Check             *AgentServiceCheck
 	Checks            AgentServiceChecks
-	// DEPRECATED (ProxyDestination) - remove this field
-	ProxyDestination string                          `json:",omitempty"`
-	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
-	Connect          *AgentServiceConnect            `json:",omitempty"`
+	Proxy             *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect           *AgentServiceConnect            `json:",omitempty"`
+	Namespace         string                          `json:",omitempty" bexpr:"-" hash:"ignore"`
 }
 
+//ServiceRegisterOpts is used to pass extra options to the service register.
+type ServiceRegisterOpts struct {
+	//Missing healthchecks will be deleted from the agent.
+	//Using this parameter allows to idempotently register a service and its checks without
+	//having to manually deregister checks.
+	ReplaceExistingChecks bool
+}
+
 // AgentCheckRegistration is used to register a new check
@@ -181,6 +174,7 @@ type AgentCheckRegistration struct {
 	Notes     string `json:",omitempty"`
 	ServiceID string `json:",omitempty"`
 	AgentServiceCheck
+	Namespace string `json:",omitempty"`
 }
 
 // AgentServiceCheck is used to define a node or service level check

@@ -196,6 +190,7 @@ type AgentServiceCheck struct {
 	HTTP   string              `json:",omitempty"`
 	Header map[string][]string `json:",omitempty"`
 	Method string              `json:",omitempty"`
+	Body   string              `json:",omitempty"`
 	TCP    string              `json:",omitempty"`
 	Status string              `json:",omitempty"`
 	Notes  string              `json:",omitempty"`

@@ -276,12 +271,8 @@ type ConnectProxyConfig struct {
 	TargetServiceID   string
 	TargetServiceName string
 	ContentHash       string
-	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
-	// but they don't need ExecMode or Command
-	ExecMode  ProxyExecMode          `json:",omitempty"`
-	Command   []string               `json:",omitempty"`
-	Config    map[string]interface{} `bexpr:"-"`
-	Upstreams []Upstream
+	Config            map[string]interface{} `bexpr:"-"`
+	Upstreams         []Upstream
 }
 
 // Upstream is the response structure for a proxy upstream configuration.

@@ -293,6 +284,7 @@ type Upstream struct {
 	LocalBindAddress string                 `json:",omitempty"`
 	LocalBindPort    int                    `json:",omitempty"`
 	Config           map[string]interface{} `json:",omitempty" bexpr:"-"`
+	MeshGateway      MeshGatewayConfig      `json:",omitempty"`
 }
 
 // Agent can be used to query the Agent endpoints
@@ -571,8 +563,25 @@ func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
 // ServiceRegister is used to register a new service with
 // the local agent
 func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+	opts := ServiceRegisterOpts{
+		ReplaceExistingChecks: false,
+	}
+
+	return a.serviceRegister(service, opts)
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent and can be passed additional options.
+func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
+	return a.serviceRegister(service, opts)
+}
+
+func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
 	r := a.c.newRequest("PUT", "/v1/agent/service/register")
 	r.obj = service
+	if opts.ReplaceExistingChecks {
+		r.params.Set("replace-existing-checks", "true")
+	}
 	_, resp, err := requireOK(a.c.doRequest(r))
 	if err != nil {
 		return err
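For consumers of the updated package, a minimal sketch of the new ServiceRegisterOpts entry point (the client setup, service name, and check endpoint are illustrative, not taken from this commit):

package main

import "github.com/hashicorp/consul/api"

func registerIdempotently() error {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		return err
	}
	reg := &api.AgentServiceRegistration{
		Name: "web", // hypothetical service name
		Port: 8080,
		Check: &api.AgentServiceCheck{
			HTTP:     "http://127.0.0.1:8080/health", // hypothetical endpoint
			Interval: "10s",
		},
	}
	// ReplaceExistingChecks asks the agent to drop checks that are no longer
	// part of this registration, so re-registering stays idempotent.
	return client.Agent().ServiceRegisterOpts(reg, api.ServiceRegisterOpts{
		ReplaceExistingChecks: true,
	})
}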
@@ -755,6 +764,19 @@ func (a *Agent) ForceLeave(node string) error {
 	return nil
 }
 
+// ForceLeavePrune is used to have a failed agent removed
+// from the list of members
+func (a *Agent) ForceLeavePrune(node string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+	r.params.Set("prune", "1")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
 // ConnectAuthorize is used to authorize an incoming connection
 // to a natively integrated Connect service.
 func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) {
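A small sketch of calling the new prune variant from operator tooling (the client wiring and node name are assumptions, not part of this commit):

package consulops

import (
	"log"

	"github.com/hashicorp/consul/api"
)

// pruneDeadAgent force-leaves a node and immediately prunes it from the
// member list instead of waiting for the usual reaping interval.
func pruneDeadAgent(client *api.Client, node string) {
	if err := client.Agent().ForceLeavePrune(node); err != nil {
		log.Printf("force-leave %s failed: %v", node, err)
	}
}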
@@ -815,31 +837,6 @@ func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) {
 	return &out, qm, nil
 }
 
-// ConnectProxyConfig gets the configuration for a local managed proxy instance.
-//
-// Note that this uses an unconventional blocking mechanism since it's
-// agent-local state. That means there is no persistent raft index so we block
-// based on object hash instead.
-func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out ConnectProxyConfig
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
 // EnableServiceMaintenance toggles service maintenance mode on
 // for the given service ID.
 func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
@@ -899,20 +896,29 @@ func (a *Agent) DisableNodeMaintenance() error {
 // log stream. An empty string will be sent down the given channel when there's
 // nothing left to stream, after which the caller should close the stopCh.
 func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+	return a.monitor(loglevel, false, stopCh, q)
+}
+
+// MonitorJSON is like Monitor except it returns logs in JSON format.
+func (a *Agent) MonitorJSON(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+	return a.monitor(loglevel, true, stopCh, q)
+}
+
+func (a *Agent) monitor(loglevel string, logJSON bool, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
 	r := a.c.newRequest("GET", "/v1/agent/monitor")
 	r.setQueryOptions(q)
 	if loglevel != "" {
 		r.params.Add("loglevel", loglevel)
 	}
+	if logJSON {
+		r.params.Set("logjson", "true")
+	}
 	_, resp, err := requireOK(a.c.doRequest(r))
 	if err != nil {
 		return nil, err
 	}
 
 	logCh := make(chan string, 64)
 	go func() {
 		defer resp.Body.Close()
 
 		scanner := bufio.NewScanner(resp.Body)
 		for {
 			select {

@@ -936,7 +942,6 @@ func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
 			}
 		}
 	}()
-
 	return logCh, nil
 }
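A hedged usage sketch of the new MonitorJSON streaming helper (the log level and timeout are arbitrary choices):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	stopCh := make(chan struct{})

	// MonitorJSON streams agent log lines (JSON-encoded) over the channel;
	// an empty string is sent when there is nothing left to stream.
	logCh, err := client.Agent().MonitorJSON("debug", stopCh, nil)
	if err != nil {
		panic(err)
	}
	deadline := time.After(5 * time.Second)
	for {
		select {
		case line := <-logCh:
			if line != "" {
				fmt.Println(line)
			}
		case <-deadline:
			close(stopCh) // callers close stopCh when done with the stream
			return
		}
	}
}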
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"net/url"

@@ -18,6 +17,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-rootcerts"
 )
 
@@ -71,10 +71,18 @@ const (
 	// client in this package but is defined here for consistency with all the
 	// other ENV names we use.
 	GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
+
+	// HTTPNamespaceEnvVar defines an environment variable name which sets
+	// the HTTP Namespace to be used by default. This can still be overridden.
+	HTTPNamespaceEnvName = "CONSUL_NAMESPACE"
 )
 
 // QueryOptions are used to parameterize a query
 type QueryOptions struct {
+	// Namespace overrides the `default` namespace
+	// Note: Namespaces are available only in Consul Enterprise
+	Namespace string
+
 	// Providing a datacenter overwrites the DC provided
 	// by the Config
 	Datacenter string
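The namespace plumbing added above can be set per client or per request; a minimal sketch (namespace names are illustrative and namespaces require Consul Enterprise):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Namespace = "team-a" // default namespace for every request (Enterprise only)

	client, err := api.NewClient(cfg)
	if err != nil {
		panic(err)
	}

	// A per-query namespace overrides the client-level default.
	services, _, err := client.Catalog().Services(&api.QueryOptions{Namespace: "team-b"})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(services))
}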
@ -89,7 +97,7 @@ type QueryOptions struct {
|
|||
RequireConsistent bool
|
||||
|
||||
// UseCache requests that the agent cache results locally. See
|
||||
// https://www.consul.io/api/index.html#agent-caching for more details on the
|
||||
// https://www.consul.io/api/features/caching.html for more details on the
|
||||
// semantics.
|
||||
UseCache bool
|
||||
|
||||
|
@ -99,14 +107,14 @@ type QueryOptions struct {
|
|||
// returned. Clients that wish to allow for stale results on error can set
|
||||
// StaleIfError to a longer duration to change this behavior. It is ignored
|
||||
// if the endpoint supports background refresh caching. See
|
||||
// https://www.consul.io/api/index.html#agent-caching for more details.
|
||||
// https://www.consul.io/api/features/caching.html for more details.
|
||||
MaxAge time.Duration
|
||||
|
||||
// StaleIfError specifies how stale the client will accept a cached response
|
||||
// if the servers are unavailable to fetch a fresh one. Only makes sense when
|
||||
// UseCache is true and MaxAge is set to a lower, non-zero value. It is
|
||||
// ignored if the endpoint supports background refresh caching. See
|
||||
// https://www.consul.io/api/index.html#agent-caching for more details.
|
||||
// https://www.consul.io/api/features/caching.html for more details.
|
||||
StaleIfError time.Duration
|
||||
|
||||
// WaitIndex is used to enable a blocking query. Waits
|
||||
|
@ -143,6 +151,10 @@ type QueryOptions struct {
|
|||
// a value from 0 to 5 (inclusive).
|
||||
RelayFactor uint8
|
||||
|
||||
// LocalOnly is used in keyring list operation to force the keyring
|
||||
// query to only hit local servers (no WAN traffic).
|
||||
LocalOnly bool
|
||||
|
||||
// Connect filters prepared query execution to only include Connect-capable
|
||||
// services. This currently affects prepared query execution.
|
||||
Connect bool
|
||||
|
@@ -174,6 +186,10 @@ func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
 
 // WriteOptions are used to parameterize a write
 type WriteOptions struct {
+	// Namespace overrides the `default` namespace
+	// Note: Namespaces are available only in Consul Enterprise
+	Namespace string
+
 	// Providing a datacenter overwrites the DC provided
 	// by the Config
 	Datacenter string
@@ -288,6 +304,10 @@ type Config struct {
 	// If provided it is read once at startup and never again.
 	TokenFile string
 
+	// Namespace is the name of the namespace to send along for the request
+	// when no other Namespace is present in the QueryOptions
+	Namespace string
+
 	TLSConfig TLSConfig
 }
@@ -307,14 +327,26 @@ type TLSConfig struct {
 	// Consul communication, defaults to the system bundle if not specified.
 	CAPath string
 
+	// CAPem is the optional PEM-encoded CA certificate used for Consul
+	// communication, defaults to the system bundle if not specified.
+	CAPem []byte
+
 	// CertFile is the optional path to the certificate for Consul
 	// communication. If this is set then you need to also set KeyFile.
 	CertFile string
 
+	// CertPEM is the optional PEM-encoded certificate for Consul
+	// communication. If this is set then you need to also set KeyPEM.
+	CertPEM []byte
+
 	// KeyFile is the optional path to the private key for Consul communication.
 	// If this is set then you need to also set CertFile.
 	KeyFile string
 
+	// KeyPEM is the optional PEM-encoded private key for Consul communication.
+	// If this is set then you need to also set CertPEM.
+	KeyPEM []byte
+
 	// InsecureSkipVerify if set to true will disable TLS host verification.
 	InsecureSkipVerify bool
 }
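The new PEM fields let TLS material be supplied in memory instead of by path; a sketch assuming the certificates are read from placeholder files:

package main

import (
	"io/ioutil"

	"github.com/hashicorp/consul/api"
)

func newTLSClient() (*api.Client, error) {
	// Load PEM material from anywhere (secrets store, env, embedded files);
	// the file names here are placeholders.
	caPEM, err := ioutil.ReadFile("ca.pem")
	if err != nil {
		return nil, err
	}
	certPEM, _ := ioutil.ReadFile("client.pem")
	keyPEM, _ := ioutil.ReadFile("client-key.pem")

	cfg := api.DefaultConfig()
	cfg.Scheme = "https"
	cfg.TLSConfig = api.TLSConfig{
		CAPem:   caPEM, // new in this vendor update: PEM blobs instead of file paths
		CertPEM: certPEM,
		KeyPEM:  keyPEM,
	}
	return api.NewClient(cfg)
}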
@@ -326,7 +358,14 @@ type TLSConfig struct {
 // is not recommended, then you may notice idle connections building up over
 // time. To avoid this, use the DefaultNonPooledConfig() instead.
 func DefaultConfig() *Config {
-	return defaultConfig(cleanhttp.DefaultPooledTransport)
+	return defaultConfig(nil, cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultConfigWithLogger returns a default configuration for the client. It
+// is exactly the same as DefaultConfig, but allows for a pre-configured logger
+// object to be passed through.
+func DefaultConfigWithLogger(logger hclog.Logger) *Config {
+	return defaultConfig(logger, cleanhttp.DefaultPooledTransport)
 }
 
 // DefaultNonPooledConfig returns a default configuration for the client which
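A short sketch of wiring a pre-built hclog logger through the new DefaultConfigWithLogger constructor (the logger name and level are arbitrary):

package main

import (
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "consul-client", // arbitrary name
		Level: hclog.Debug,
	})

	// DefaultConfigWithLogger routes the api package's own warnings
	// (e.g. an unparsable CONSUL_HTTP_SSL value) through this logger
	// instead of the standard library logger.
	client, err := api.NewClient(api.DefaultConfigWithLogger(logger))
	if err != nil {
		logger.Error("failed to create consul client", "error", err)
		return
	}
	_ = client
}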
@ -335,12 +374,18 @@ func DefaultConfig() *Config {
|
|||
// accumulation of idle connections if you make many client objects during the
|
||||
// lifetime of your application.
|
||||
func DefaultNonPooledConfig() *Config {
|
||||
return defaultConfig(cleanhttp.DefaultTransport)
|
||||
return defaultConfig(nil, cleanhttp.DefaultTransport)
|
||||
}
|
||||
|
||||
// defaultConfig returns the default configuration for the client, using the
|
||||
// given function to make the transport.
|
||||
func defaultConfig(transportFn func() *http.Transport) *Config {
|
||||
func defaultConfig(logger hclog.Logger, transportFn func() *http.Transport) *Config {
|
||||
if logger == nil {
|
||||
logger = hclog.New(&hclog.LoggerOptions{
|
||||
Name: "consul-api",
|
||||
})
|
||||
}
|
||||
|
||||
config := &Config{
|
||||
Address: "127.0.0.1:8500",
|
||||
Scheme: "http",
|
||||
|
@ -378,7 +423,7 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
|
|||
if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
|
||||
enabled, err := strconv.ParseBool(ssl)
|
||||
if err != nil {
|
||||
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
|
||||
logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLEnvName), "error", err)
|
||||
}
|
||||
|
||||
if enabled {
|
||||
|
@ -404,13 +449,17 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
|
|||
if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
|
||||
doVerify, err := strconv.ParseBool(v)
|
||||
if err != nil {
|
||||
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
|
||||
logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLVerifyEnvName), "error", err)
|
||||
}
|
||||
if !doVerify {
|
||||
config.TLSConfig.InsecureSkipVerify = true
|
||||
}
|
||||
}
|
||||
|
||||
if v := os.Getenv(HTTPNamespaceEnvName); v != "" {
|
||||
config.Namespace = v
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
|
@ -434,18 +483,31 @@ func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
|
|||
tlsClientConfig.ServerName = server
|
||||
}
|
||||
|
||||
if len(tlsConfig.CertPEM) != 0 && len(tlsConfig.KeyPEM) != 0 {
|
||||
tlsCert, err := tls.X509KeyPair(tlsConfig.CertPEM, tlsConfig.KeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
|
||||
} else if len(tlsConfig.CertPEM) != 0 || len(tlsConfig.KeyPEM) != 0 {
|
||||
return nil, fmt.Errorf("both client cert and client key must be provided")
|
||||
}
|
||||
|
||||
if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
|
||||
tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
|
||||
} else if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" {
|
||||
return nil, fmt.Errorf("both client cert and client key must be provided")
|
||||
}
|
||||
|
||||
if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" {
|
||||
if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" || len(tlsConfig.CAPem) != 0 {
|
||||
rootConfig := &rootcerts.Config{
|
||||
CAFile: tlsConfig.CAFile,
|
||||
CAPath: tlsConfig.CAPath,
|
||||
CAFile: tlsConfig.CAFile,
|
||||
CAPath: tlsConfig.CAPath,
|
||||
CACertificate: tlsConfig.CAPem,
|
||||
}
|
||||
if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
|
||||
return nil, err
|
||||
|
@ -620,6 +682,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
|
|||
if q == nil {
|
||||
return
|
||||
}
|
||||
if q.Namespace != "" {
|
||||
r.params.Set("ns", q.Namespace)
|
||||
}
|
||||
if q.Datacenter != "" {
|
||||
r.params.Set("dc", q.Datacenter)
|
||||
}
|
||||
|
@ -655,6 +720,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
|
|||
if q.RelayFactor != 0 {
|
||||
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
|
||||
}
|
||||
if q.LocalOnly {
|
||||
r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly))
|
||||
}
|
||||
if q.Connect {
|
||||
r.params.Set("connect", "true")
|
||||
}
|
||||
|
@ -672,6 +740,7 @@ func (r *request) setQueryOptions(q *QueryOptions) {
|
|||
r.header.Set("Cache-Control", strings.Join(cc, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
r.ctx = q.ctx
|
||||
}
|
||||
|
||||
|
@ -715,6 +784,9 @@ func (r *request) setWriteOptions(q *WriteOptions) {
|
|||
if q == nil {
|
||||
return
|
||||
}
|
||||
if q.Namespace != "" {
|
||||
r.params.Set("ns", q.Namespace)
|
||||
}
|
||||
if q.Datacenter != "" {
|
||||
r.params.Set("dc", q.Datacenter)
|
||||
}
|
||||
|
@ -779,6 +851,9 @@ func (c *Client) newRequest(method, path string) *request {
|
|||
if c.config.Datacenter != "" {
|
||||
r.params.Set("dc", c.config.Datacenter)
|
||||
}
|
||||
if c.config.Namespace != "" {
|
||||
r.params.Set("ns", c.config.Namespace)
|
||||
}
|
||||
if c.config.WaitTime != 0 {
|
||||
r.params.Set("wait", durToMsec(r.config.WaitTime))
|
||||
}
|
||||
|
|
|
@@ -1,5 +1,10 @@
 package api
 
+import (
+	"net"
+	"strconv"
+)
+
 type Weights struct {
 	Passing int
 	Warning int

@@ -16,6 +21,11 @@ type Node struct {
 	ModifyIndex uint64
 }
 
+type ServiceAddress struct {
+	Address string
+	Port    int
+}
+
 type CatalogService struct {
 	ID   string
 	Node string
@ -26,17 +36,17 @@ type CatalogService struct {
|
|||
ServiceID string
|
||||
ServiceName string
|
||||
ServiceAddress string
|
||||
ServiceTaggedAddresses map[string]ServiceAddress
|
||||
ServiceTags []string
|
||||
ServiceMeta map[string]string
|
||||
ServicePort int
|
||||
ServiceWeights Weights
|
||||
ServiceEnableTagOverride bool
|
||||
// DEPRECATED (ProxyDestination) - remove the next comment!
|
||||
// We forgot to ever add ServiceProxyDestination here so no need to deprecate!
|
||||
ServiceProxy *AgentServiceConnectProxyConfig
|
||||
CreateIndex uint64
|
||||
Checks HealthChecks
|
||||
ModifyIndex uint64
|
||||
ServiceProxy *AgentServiceConnectProxyConfig
|
||||
CreateIndex uint64
|
||||
Checks HealthChecks
|
||||
ModifyIndex uint64
|
||||
Namespace string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type CatalogNode struct {
|
||||
|
@@ -44,6 +54,11 @@ type CatalogNode struct {
 	Services map[string]*AgentService
 }
 
+type CatalogNodeServiceList struct {
+	Node     *Node
+	Services []*AgentService
+}
+
 type CatalogRegistration struct {
 	ID   string
 	Node string

@@ -59,10 +74,11 @@ type CatalogRegistration struct {
 
 type CatalogDeregistration struct {
 	Node       string
-	Address    string // Obsolete.
+	Address    string `json:",omitempty"` // Obsolete.
 	Datacenter string
 	ServiceID  string
 	CheckID    string
+	Namespace  string `json:",omitempty"`
 }
 
 // Catalog can be used to query the Catalog endpoints
@@ -242,3 +258,36 @@ func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
 	}
 	return out, qm, nil
 }
+
+// NodeServiceList is used to query for service information about a single node. It differs from
+// the Node function only in its return type which will contain a list of services as opposed to
+// a map of service ids to services. This different structure allows for using the wildcard specifier
+// '*' for the Namespace in the QueryOptions.
+func (c *Catalog) NodeServiceList(node string, q *QueryOptions) (*CatalogNodeServiceList, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/node-services/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *CatalogNodeServiceList
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+func ParseServiceAddr(addrPort string) (ServiceAddress, error) {
+	port := 0
+	host, portStr, err := net.SplitHostPort(addrPort)
+	if err == nil {
+		port, err = strconv.Atoi(portStr)
+	}
+	return ServiceAddress{Address: host, Port: port}, err
+}
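A usage sketch of the new NodeServiceList and ParseServiceAddr helpers (the node name and address are made up; the '*' namespace wildcard needs Consul Enterprise):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// NodeServiceList returns a flat service list, which permits the '*'
	// namespace wildcard, unlike Catalog().Node().
	out, _, err := client.Catalog().NodeServiceList("node-1", &api.QueryOptions{Namespace: "*"})
	if err != nil {
		panic(err)
	}
	for _, svc := range out.Services {
		fmt.Println(svc.Service, svc.Port)
	}

	// ParseServiceAddr splits "host:port" into the new ServiceAddress type.
	addr, err := api.ParseServiceAddr("10.0.0.1:8500")
	fmt.Println(addr.Address, addr.Port, err)
}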
@@ -12,8 +12,12 @@ import (
 )
 
 const (
-	ServiceDefaults string = "service-defaults"
-	ProxyDefaults   string = "proxy-defaults"
+	ServiceDefaults string = "service-defaults"
+	ProxyDefaults   string = "proxy-defaults"
+	ServiceRouter   string = "service-router"
+	ServiceSplitter string = "service-splitter"
+	ServiceResolver string = "service-resolver"
 
 	ProxyConfigGlobal string = "global"
 )
@@ -24,10 +28,71 @@ type ConfigEntry interface {
 	GetModifyIndex() uint64
 }
 
+type MeshGatewayMode string
+
+const (
+	// MeshGatewayModeDefault represents no specific mode and should
+	// be used to indicate that a different layer of the configuration
+	// chain should take precedence
+	MeshGatewayModeDefault MeshGatewayMode = ""
+
+	// MeshGatewayModeNone represents that the Upstream Connect connections
+	// should be direct and not flow through a mesh gateway.
+	MeshGatewayModeNone MeshGatewayMode = "none"
+
+	// MeshGatewayModeLocal represents that the Upstream Connect connections
+	// should be made to a mesh gateway in the local datacenter. This is
+	MeshGatewayModeLocal MeshGatewayMode = "local"
+
+	// MeshGatewayModeRemote represents that the Upstream Connect connections
+	// should be made to a mesh gateway in a remote datacenter.
+	MeshGatewayModeRemote MeshGatewayMode = "remote"
+)
+
+// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect
+// services
+type MeshGatewayConfig struct {
+	// Mode is the mode that should be used for the upstream connection.
+	Mode MeshGatewayMode `json:",omitempty"`
+}
+
+// ExposeConfig describes HTTP paths to expose through Envoy outside of Connect.
+// Users can expose individual paths and/or all HTTP/GRPC paths for checks.
+type ExposeConfig struct {
+	// Checks defines whether paths associated with Consul checks will be exposed.
+	// This flag triggers exposing all HTTP and GRPC check paths registered for the service.
+	Checks bool `json:",omitempty"`
+
+	// Paths is the list of paths exposed through the proxy.
+	Paths []ExposePath `json:",omitempty"`
+}
+
+type ExposePath struct {
+	// ListenerPort defines the port of the proxy's listener for exposed paths.
+	ListenerPort int `json:",omitempty"`
+
+	// Path is the path to expose through the proxy, ie. "/metrics."
+	Path string `json:",omitempty"`
+
+	// LocalPathPort is the port that the service is listening on for the given path.
+	LocalPathPort int `json:",omitempty"`
+
+	// Protocol describes the upstream's service protocol.
+	// Valid values are "http" and "http2", defaults to "http"
+	Protocol string `json:",omitempty"`
+
+	// ParsedFromCheck is set if this path was parsed from a registered check
+	ParsedFromCheck bool
+}
+
 type ServiceConfigEntry struct {
 	Kind        string
 	Name        string
-	Protocol    string
+	Namespace   string            `json:",omitempty"`
+	Protocol    string            `json:",omitempty"`
+	MeshGateway MeshGatewayConfig `json:",omitempty"`
+	Expose      ExposeConfig      `json:",omitempty"`
+	ExternalSNI string            `json:",omitempty"`
 	CreateIndex uint64
 	ModifyIndex uint64
 }
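A sketch of writing a service-defaults entry that exercises the new MeshGateway and Expose blocks (the service name and ports are illustrative):

package main

import "github.com/hashicorp/consul/api"

func writeServiceDefaults(client *api.Client) error {
	entry := &api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web", // hypothetical service
		Protocol: "http",
		MeshGateway: api.MeshGatewayConfig{
			Mode: api.MeshGatewayModeLocal, // route upstreams via the local mesh gateway
		},
		Expose: api.ExposeConfig{
			Checks: true, // expose registered HTTP/GRPC check paths through Envoy
			Paths: []api.ExposePath{
				{Path: "/metrics", LocalPathPort: 9102, ListenerPort: 21500, Protocol: "http"},
			},
		},
	}
	_, _, err := client.ConfigEntries().Set(entry, nil)
	return err
}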
@@ -51,7 +116,10 @@ func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
 type ProxyConfigEntry struct {
 	Kind        string
 	Name        string
-	Config      map[string]interface{}
+	Namespace   string                 `json:",omitempty"`
+	Config      map[string]interface{} `json:",omitempty"`
+	MeshGateway MeshGatewayConfig      `json:",omitempty"`
+	Expose      ExposeConfig           `json:",omitempty"`
 	CreateIndex uint64
 	ModifyIndex uint64
 }
@@ -80,14 +148,35 @@ type rawEntryListResponse struct {
 func makeConfigEntry(kind, name string) (ConfigEntry, error) {
 	switch kind {
 	case ServiceDefaults:
-		return &ServiceConfigEntry{Name: name}, nil
+		return &ServiceConfigEntry{Kind: kind, Name: name}, nil
 	case ProxyDefaults:
-		return &ProxyConfigEntry{Name: name}, nil
+		return &ProxyConfigEntry{Kind: kind, Name: name}, nil
+	case ServiceRouter:
+		return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil
+	case ServiceSplitter:
+		return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil
+	case ServiceResolver:
+		return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil
 	default:
 		return nil, fmt.Errorf("invalid config entry kind: %s", kind)
 	}
 }
+
+func MakeConfigEntry(kind, name string) (ConfigEntry, error) {
+	return makeConfigEntry(kind, name)
+}
 
 // DecodeConfigEntry will decode the result of using json.Unmarshal of a config
 // entry into a map[string]interface{}.
 //
 // Important caveats:
 //
 // - This will NOT work if the map[string]interface{} was produced using HCL
 // decoding as that requires more extensive parsing to work around the issues
 // with map[string][]interface{} that arise.
 //
 // - This will only decode fields using their camel case json field
 // representations.
 func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
 	var entry ConfigEntry
 
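MakeConfigEntry now covers the discovery-chain kinds as well; a minimal sketch building a service-resolver (the names and subset filter are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// MakeConfigEntry builds an empty, correctly-typed entry for any of the
	// exported kinds, including the new discovery-chain kinds.
	entry, err := api.MakeConfigEntry(api.ServiceResolver, "web") // "web" is illustrative
	if err != nil {
		panic(err)
	}
	resolver := entry.(*api.ServiceResolverConfigEntry)
	resolver.DefaultSubset = "v1"
	resolver.Subsets = map[string]api.ServiceResolverSubset{
		"v1": {Filter: "Service.Meta.version == v1"},
	}
	fmt.Println(resolver.GetKind(), resolver.GetName())
}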
@@ -132,7 +221,19 @@ func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
 	return DecodeConfigEntry(raw)
 }
 
-// Config can be used to query the Config endpoints
+func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) {
+	var entries []ConfigEntry
+	for _, rawEntry := range raw {
+		entry, err := DecodeConfigEntry(rawEntry)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, entry)
+	}
+	return entries, nil
+}
+
+// ConfigEntries can be used to query the Config endpoints
 type ConfigEntries struct {
 	c *Client
 }

@@ -195,13 +296,9 @@ func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
 		return nil, nil, err
 	}
 
-	var entries []ConfigEntry
-	for _, rawEntry := range raw {
-		entry, err := DecodeConfigEntry(rawEntry)
-		if err != nil {
-			return nil, nil, err
-		}
-		entries = append(entries, entry)
+	entries, err := decodeConfigEntrySlice(raw)
+	if err != nil {
+		return nil, nil, err
 	}
 
 	return entries, qm, nil
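Both DecodeConfigEntryFromJSON and ConfigEntries.List resolve the concrete entry type from the Kind field; a small sketch (the JSON payload is hand-written for illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// DecodeConfigEntryFromJSON picks the concrete type from the "Kind" field.
	raw := []byte(`{"Kind": "service-defaults", "Name": "web", "Protocol": "http"}`)
	entry, err := api.DecodeConfigEntryFromJSON(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s/%s\n", entry.GetKind(), entry.GetName())

	// Listing goes through the same decoding path (decodeConfigEntrySlice).
	client, _ := api.NewClient(api.DefaultConfig())
	entries, _, err := client.ConfigEntries().List(api.ServiceDefaults, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(entries))
}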
|
vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go (new file, 205 lines, generated, vendored)
@ -0,0 +1,205 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ServiceRouterConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
Routes []ServiceRoute `json:",omitempty"`
|
||||
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceRouterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceRoute struct {
|
||||
Match *ServiceRouteMatch `json:",omitempty"`
|
||||
Destination *ServiceRouteDestination `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceRouteMatch struct {
|
||||
HTTP *ServiceRouteHTTPMatch `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceRouteHTTPMatch struct {
|
||||
PathExact string `json:",omitempty"`
|
||||
PathPrefix string `json:",omitempty"`
|
||||
PathRegex string `json:",omitempty"`
|
||||
|
||||
Header []ServiceRouteHTTPMatchHeader `json:",omitempty"`
|
||||
QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"`
|
||||
Methods []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceRouteHTTPMatchHeader struct {
|
||||
Name string
|
||||
Present bool `json:",omitempty"`
|
||||
Exact string `json:",omitempty"`
|
||||
Prefix string `json:",omitempty"`
|
||||
Suffix string `json:",omitempty"`
|
||||
Regex string `json:",omitempty"`
|
||||
Invert bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceRouteHTTPMatchQueryParam struct {
|
||||
Name string
|
||||
Present bool `json:",omitempty"`
|
||||
Exact string `json:",omitempty"`
|
||||
Regex string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceRouteDestination struct {
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
PrefixRewrite string `json:",omitempty"`
|
||||
RequestTimeout time.Duration `json:",omitempty"`
|
||||
NumRetries uint32 `json:",omitempty"`
|
||||
RetryOnConnectFailure bool `json:",omitempty"`
|
||||
RetryOnStatusCodes []uint32 `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) {
|
||||
|
||||
type Alias ServiceRouteDestination
|
||||
exported := &struct {
|
||||
RequestTimeout string `json:",omitempty"`
|
||||
*Alias
|
||||
}{
|
||||
RequestTimeout: e.RequestTimeout.String(),
|
||||
Alias: (*Alias)(e),
|
||||
}
|
||||
if e.RequestTimeout == 0 {
|
||||
exported.RequestTimeout = ""
|
||||
}
|
||||
|
||||
return json.Marshal(exported)
|
||||
}
|
||||
|
||||
func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error {
|
||||
type Alias ServiceRouteDestination
|
||||
aux := &struct {
|
||||
RequestTimeout string
|
||||
*Alias
|
||||
}{
|
||||
Alias: (*Alias)(e),
|
||||
}
|
||||
if err := json.Unmarshal(data, &aux); err != nil {
|
||||
return err
|
||||
}
|
||||
var err error
|
||||
if aux.RequestTimeout != "" {
|
||||
if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServiceSplitterConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
Splits []ServiceSplit `json:",omitempty"`
|
||||
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceSplit struct {
|
||||
Weight float32
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceResolverConfigEntry struct {
|
||||
Kind string
|
||||
Name string
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
DefaultSubset string `json:",omitempty"`
|
||||
Subsets map[string]ServiceResolverSubset `json:",omitempty"`
|
||||
Redirect *ServiceResolverRedirect `json:",omitempty"`
|
||||
Failover map[string]ServiceResolverFailover `json:",omitempty"`
|
||||
ConnectTimeout time.Duration `json:",omitempty"`
|
||||
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
}
|
||||
|
||||
func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) {
|
||||
|
||||
type Alias ServiceResolverConfigEntry
|
||||
exported := &struct {
|
||||
ConnectTimeout string `json:",omitempty"`
|
||||
*Alias
|
||||
}{
|
||||
ConnectTimeout: e.ConnectTimeout.String(),
|
||||
Alias: (*Alias)(e),
|
||||
}
|
||||
if e.ConnectTimeout == 0 {
|
||||
exported.ConnectTimeout = ""
|
||||
}
|
||||
|
||||
return json.Marshal(exported)
|
||||
}
|
||||
|
||||
func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
|
||||
type Alias ServiceResolverConfigEntry
|
||||
aux := &struct {
|
||||
ConnectTimeout string
|
||||
*Alias
|
||||
}{
|
||||
Alias: (*Alias)(e),
|
||||
}
|
||||
if err := json.Unmarshal(data, &aux); err != nil {
|
||||
return err
|
||||
}
|
||||
var err error
|
||||
if aux.ConnectTimeout != "" {
|
||||
if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind }
|
||||
func (e *ServiceResolverConfigEntry) GetName() string { return e.Name }
|
||||
func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
|
||||
func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
|
||||
|
||||
type ServiceResolverSubset struct {
|
||||
Filter string `json:",omitempty"`
|
||||
OnlyPassing bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceResolverRedirect struct {
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Datacenter string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ServiceResolverFailover struct {
|
||||
Service string `json:",omitempty"`
|
||||
ServiceSubset string `json:",omitempty"`
|
||||
Namespace string `json:",omitempty"`
|
||||
Datacenters []string `json:",omitempty"`
|
||||
}
|
|
@@ -17,6 +17,12 @@ type CAConfig struct {
 	// and maps).
 	Config map[string]interface{}
 
+	// State is read-only data that the provider might have persisted for use
+	// after restart or leadership transition. For example this might include
+	// UUIDs of resources it has created. Setting this when writing a
+	// configuration is an error.
+	State map[string]string
+
 	CreateIndex uint64
 	ModifyIndex uint64
 }

@@ -33,9 +39,10 @@ type CommonCAProviderConfig struct {
 type ConsulCAProviderConfig struct {
 	CommonCAProviderConfig `mapstructure:",squash"`
 
-	PrivateKey     string
-	RootCert       string
-	RotationPeriod time.Duration
+	PrivateKey          string
+	RootCert            string
+	RotationPeriod      time.Duration
+	IntermediateCertTTL time.Duration
 }
 
 // ParseConsulCAConfig takes a raw config map and returns a parsed
@@ -54,6 +54,13 @@ type Intention struct {
 	// or modified.
 	CreatedAt, UpdatedAt time.Time
 
+	// Hash of the contents of the intention
+	//
+	// This is needed mainly for replication purposes. When replicating from
+	// one DC to another keeping the content Hash will allow us to detect
+	// content changes more efficiently than checking every single field
+	Hash []byte
+
 	CreateIndex uint64
 	ModifyIndex uint64
 }
@@ -84,7 +84,7 @@ func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
 	return wm, nil
 }
 
-// Node is used to return the coordinates of a single in the LAN pool.
+// Node is used to return the coordinates of a single node in the LAN pool.
 func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
 	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
 	r.setQueryOptions(q)
@ -0,0 +1,229 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DiscoveryChain can be used to query the discovery-chain endpoints
|
||||
type DiscoveryChain struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// DiscoveryChain returns a handle to the discovery-chain endpoints
|
||||
func (c *Client) DiscoveryChain() *DiscoveryChain {
|
||||
return &DiscoveryChain{c}
|
||||
}
|
||||
|
||||
func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) {
|
||||
if name == "" {
|
||||
return nil, nil, fmt.Errorf("Name parameter must not be empty")
|
||||
}
|
||||
|
||||
method := "GET"
|
||||
if opts != nil && opts.requiresPOST() {
|
||||
method = "POST"
|
||||
}
|
||||
|
||||
r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name))
|
||||
r.setQueryOptions(q)
|
||||
|
||||
if opts != nil {
|
||||
if opts.EvaluateInDatacenter != "" {
|
||||
r.params.Set("compile-dc", opts.EvaluateInDatacenter)
|
||||
}
|
||||
}
|
||||
|
||||
if method == "POST" {
|
||||
r.obj = opts
|
||||
}
|
||||
|
||||
rtt, resp, err := requireOK(d.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
var out DiscoveryChainResponse
|
||||
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &out, qm, nil
|
||||
}
|
||||
|
||||
type DiscoveryChainOptions struct {
|
||||
EvaluateInDatacenter string `json:"-"`
|
||||
|
||||
// OverrideMeshGateway allows for the mesh gateway setting to be overridden
|
||||
// for any resolver in the compiled chain.
|
||||
OverrideMeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
|
||||
// OverrideProtocol allows for the final protocol for the chain to be
|
||||
// altered.
|
||||
//
|
||||
// - If the chain ordinarily would be TCP and an L7 protocol is passed here
|
||||
// the chain will not include Routers or Splitters.
|
||||
//
|
||||
// - If the chain ordinarily would be L7 and TCP is passed here the chain
|
||||
// will not include Routers or Splitters.
|
||||
OverrideProtocol string `json:",omitempty"`
|
||||
|
||||
// OverrideConnectTimeout allows for the ConnectTimeout setting to be
|
||||
// overridden for any resolver in the compiled chain.
|
||||
OverrideConnectTimeout time.Duration `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (o *DiscoveryChainOptions) requiresPOST() bool {
|
||||
if o == nil {
|
||||
return false
|
||||
}
|
||||
return o.OverrideMeshGateway.Mode != "" ||
|
||||
o.OverrideProtocol != "" ||
|
||||
o.OverrideConnectTimeout != 0
|
||||
}
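Get switches from GET to POST only when overrides are supplied (see requiresPOST above); a usage sketch with illustrative service and datacenter names:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Passing overrides forces the request onto the POST code path.
	opts := &api.DiscoveryChainOptions{
		EvaluateInDatacenter:   "dc2",
		OverrideProtocol:       "grpc",
		OverrideConnectTimeout: 3 * time.Second,
	}
	resp, _, err := client.DiscoveryChain().Get("web", opts, nil)
	if err != nil {
		panic(err)
	}
	chain := resp.Chain
	fmt.Println(chain.Protocol, chain.StartNode, len(chain.Targets))
}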
|
||||
|
||||
type DiscoveryChainResponse struct {
|
||||
Chain *CompiledDiscoveryChain
|
||||
}
|
||||
|
||||
type CompiledDiscoveryChain struct {
|
||||
ServiceName string
|
||||
Namespace string
|
||||
Datacenter string
|
||||
|
||||
// CustomizationHash is a unique hash of any data that affects the
|
||||
// compilation of the discovery chain other than config entries or the
|
||||
// name/namespace/datacenter evaluation criteria.
|
||||
//
|
||||
// If set, this value should be used to prefix/suffix any generated load
|
||||
// balancer data plane objects to avoid sharing customized and
|
||||
// non-customized versions.
|
||||
CustomizationHash string
|
||||
|
||||
// Protocol is the overall protocol shared by everything in the chain.
|
||||
Protocol string
|
||||
|
||||
// StartNode is the first key into the Nodes map that should be followed
|
||||
// when walking the discovery chain.
|
||||
StartNode string
|
||||
|
||||
// Nodes contains all nodes available for traversal in the chain keyed by a
|
||||
// unique name. You can walk this by starting with StartNode.
|
||||
//
|
||||
// NOTE: The names should be treated as opaque values and are only
|
||||
// guaranteed to be consistent within a single compilation.
|
||||
Nodes map[string]*DiscoveryGraphNode
|
||||
|
||||
// Targets is a list of all targets used in this chain.
|
||||
//
|
||||
// NOTE: The names should be treated as opaque values and are only
|
||||
// guaranteed to be consistent within a single compilation.
|
||||
Targets map[string]*DiscoveryTarget
|
||||
}
|
||||
|
||||
const (
|
||||
DiscoveryGraphNodeTypeRouter = "router"
|
||||
DiscoveryGraphNodeTypeSplitter = "splitter"
|
||||
DiscoveryGraphNodeTypeResolver = "resolver"
|
||||
)
|
||||
|
||||
// DiscoveryGraphNode is a single node in the compiled discovery chain.
|
||||
type DiscoveryGraphNode struct {
|
||||
Type string
|
||||
Name string // this is NOT necessarily a service
|
||||
|
||||
// fields for Type==router
|
||||
Routes []*DiscoveryRoute
|
||||
|
||||
// fields for Type==splitter
|
||||
Splits []*DiscoverySplit
|
||||
|
||||
// fields for Type==resolver
|
||||
Resolver *DiscoveryResolver
|
||||
}
|
||||
|
||||
// compiled form of ServiceRoute
|
||||
type DiscoveryRoute struct {
|
||||
Definition *ServiceRoute
|
||||
NextNode string
|
||||
}
|
||||
|
||||
// compiled form of ServiceSplit
|
||||
type DiscoverySplit struct {
|
||||
Weight float32
|
||||
NextNode string
|
||||
}
|
||||
|
||||
// compiled form of ServiceResolverConfigEntry
|
||||
type DiscoveryResolver struct {
|
||||
Default bool
|
||||
ConnectTimeout time.Duration
|
||||
Target string
|
||||
Failover *DiscoveryFailover
|
||||
}
|
||||
|
||||
func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) {
|
||||
type Alias DiscoveryResolver
|
||||
exported := &struct {
|
||||
ConnectTimeout string `json:",omitempty"`
|
||||
*Alias
|
||||
}{
|
||||
ConnectTimeout: r.ConnectTimeout.String(),
|
||||
Alias: (*Alias)(r),
|
||||
}
|
||||
if r.ConnectTimeout == 0 {
|
||||
exported.ConnectTimeout = ""
|
||||
}
|
||||
|
||||
return json.Marshal(exported)
|
||||
}
|
||||
|
||||
func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error {
|
||||
type Alias DiscoveryResolver
|
||||
aux := &struct {
|
||||
ConnectTimeout string
|
||||
*Alias
|
||||
}{
|
||||
Alias: (*Alias)(r),
|
||||
}
|
||||
if err := json.Unmarshal(data, &aux); err != nil {
|
||||
return err
|
||||
}
|
||||
var err error
|
||||
if aux.ConnectTimeout != "" {
|
||||
if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// compiled form of ServiceResolverFailover
|
||||
type DiscoveryFailover struct {
|
||||
Targets []string
|
||||
}
|
||||
|
||||
// DiscoveryTarget represents all of the inputs necessary to use a resolver
|
||||
// config entry to execute a catalog query to generate a list of service
|
||||
// instances during discovery.
|
||||
type DiscoveryTarget struct {
|
||||
ID string
|
||||
|
||||
Service string
|
||||
ServiceSubset string
|
||||
Namespace string
|
||||
Datacenter string
|
||||
|
||||
MeshGateway MeshGatewayConfig
|
||||
Subset ServiceResolverSubset
|
||||
External bool
|
||||
SNI string
|
||||
Name string
|
||||
}
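// A minimal sketch (not part of the change) of how the compiled chain above is
// meant to be consumed: start at StartNode, follow NextNode links through
// router and splitter nodes, and collect the SNI names of the resolver
// targets. The helper name is an assumption for illustration.
func chainTargetSNIs(chain *CompiledDiscoveryChain) []string {
	var snis []string
	seen := make(map[string]bool)
	var walk func(name string)
	walk = func(name string) {
		node := chain.Nodes[name]
		if node == nil || seen[name] {
			return
		}
		seen[name] = true
		switch node.Type {
		case DiscoveryGraphNodeTypeRouter:
			for _, route := range node.Routes {
				walk(route.NextNode)
			}
		case DiscoveryGraphNodeTypeSplitter:
			for _, split := range node.Splits {
				walk(split.NextNode)
			}
		case DiscoveryGraphNodeTypeResolver:
			if target := chain.Targets[node.Resolver.Target]; target != nil {
				snis = append(snis, target.SNI)
			}
		}
	}
	walk(chain.StartNode)
	return snis
}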
@ -0,0 +1,16 @@
module github.com/hashicorp/consul/api

go 1.12

replace github.com/hashicorp/consul/sdk => ../sdk

require (
	github.com/hashicorp/consul/sdk v0.4.0
	github.com/hashicorp/go-cleanhttp v0.5.1
	github.com/hashicorp/go-hclog v0.12.0
	github.com/hashicorp/go-rootcerts v1.0.2
	github.com/hashicorp/go-uuid v1.0.1
	github.com/hashicorp/serf v0.8.2
	github.com/mitchellh/mapstructure v1.1.2
	github.com/stretchr/testify v1.4.0
)
@ -0,0 +1,111 @@
|
|||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
|
||||
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
@ -36,6 +36,8 @@ type HealthCheck struct {
	ServiceID   string
	ServiceName string
	ServiceTags []string
	Type        string
	Namespace   string `json:",omitempty"`

	Definition HealthCheckDefinition

@ -49,6 +51,7 @@ type HealthCheckDefinition struct {
	HTTP             string
	Header           map[string][]string
	Method           string
	Body             string
	TLSSkipVerify    bool
	TCP              string
	IntervalDuration time.Duration `json:"-"`
@ -94,40 +97,63 @@ func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
|
|||
return json.Marshal(out)
|
||||
}
|
||||
|
||||
func (d *HealthCheckDefinition) UnmarshalJSON(data []byte) error {
|
||||
func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
|
||||
type Alias HealthCheckDefinition
|
||||
aux := &struct {
|
||||
Interval string
|
||||
Timeout string
|
||||
DeregisterCriticalServiceAfter string
|
||||
IntervalDuration interface{}
|
||||
TimeoutDuration interface{}
|
||||
DeregisterCriticalServiceAfterDuration interface{}
|
||||
*Alias
|
||||
}{
|
||||
Alias: (*Alias)(d),
|
||||
Alias: (*Alias)(t),
|
||||
}
|
||||
if err := json.Unmarshal(data, &aux); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse the values into both the time.Duration and old ReadableDuration fields.
|
||||
var err error
|
||||
if aux.Interval != "" {
|
||||
if d.IntervalDuration, err = time.ParseDuration(aux.Interval); err != nil {
|
||||
return err
|
||||
|
||||
if aux.IntervalDuration == nil {
|
||||
t.IntervalDuration = time.Duration(t.Interval)
|
||||
} else {
|
||||
switch v := aux.IntervalDuration.(type) {
|
||||
case string:
|
||||
if t.IntervalDuration, err = time.ParseDuration(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case float64:
|
||||
t.IntervalDuration = time.Duration(v)
|
||||
}
|
||||
d.Interval = ReadableDuration(d.IntervalDuration)
|
||||
t.Interval = ReadableDuration(t.IntervalDuration)
|
||||
}
|
||||
if aux.Timeout != "" {
|
||||
if d.TimeoutDuration, err = time.ParseDuration(aux.Timeout); err != nil {
|
||||
return err
|
||||
|
||||
if aux.TimeoutDuration == nil {
|
||||
t.TimeoutDuration = time.Duration(t.Timeout)
|
||||
} else {
|
||||
switch v := aux.TimeoutDuration.(type) {
|
||||
case string:
|
||||
if t.TimeoutDuration, err = time.ParseDuration(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case float64:
|
||||
t.TimeoutDuration = time.Duration(v)
|
||||
}
|
||||
d.Timeout = ReadableDuration(d.TimeoutDuration)
|
||||
t.Timeout = ReadableDuration(t.TimeoutDuration)
|
||||
}
|
||||
if aux.DeregisterCriticalServiceAfter != "" {
|
||||
if d.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(aux.DeregisterCriticalServiceAfter); err != nil {
|
||||
return err
|
||||
if aux.DeregisterCriticalServiceAfterDuration == nil {
|
||||
t.DeregisterCriticalServiceAfterDuration = time.Duration(t.DeregisterCriticalServiceAfter)
|
||||
} else {
|
||||
switch v := aux.DeregisterCriticalServiceAfterDuration.(type) {
|
||||
case string:
|
||||
if t.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case float64:
|
||||
t.DeregisterCriticalServiceAfterDuration = time.Duration(v)
|
||||
}
|
||||
d.DeregisterCriticalServiceAfter = ReadableDuration(d.DeregisterCriticalServiceAfterDuration)
|
||||
t.DeregisterCriticalServiceAfter = ReadableDuration(t.DeregisterCriticalServiceAfterDuration)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
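// A minimal sketch of why the decoder above switches on the incoming type:
// callers may send either a Go-style duration string or a raw nanosecond
// count, and both should end up in IntervalDuration. The payloads are
// illustrative only, assuming this sits alongside the api package code above.
func decodeHealthCheckDurations() error {
	for _, payload := range []string{
		`{"IntervalDuration": "10s"}`,       // quoted duration string
		`{"IntervalDuration": 10000000000}`, // raw nanoseconds
	} {
		var def HealthCheckDefinition
		if err := json.Unmarshal([]byte(payload), &def); err != nil {
			return err
		}
		// def.IntervalDuration is 10s for both payloads.
		_ = def.IntervalDuration
	}
	return nil
}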
@ -40,6 +40,10 @@ type KVPair struct {
	// interactions with this key over the same session must specify the same
	// session ID.
	Session string

	// Namespace is the namespace the KVPair is associated with
	// Namespacing is a Consul Enterprise feature.
	Namespace string `json:",omitempty"`
}

// KVPairs is a list of KVPair objects
@ -79,6 +79,7 @@ type LockOptions struct {
|
|||
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
|
||||
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
|
||||
LockTryOnce bool // Optional, defaults to false which means try forever
|
||||
Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
|
||||
}
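// A minimal sketch of the new Namespace option in use: every KV and session
// call made on behalf of the lock is scoped to the same namespace. The client
// setup and key are assumptions for illustration; namespaces require Consul
// Enterprise.
func lockInNamespace() error {
	client, err := NewClient(DefaultConfig())
	if err != nil {
		return err
	}
	lock, err := client.LockOpts(&LockOptions{
		Key:       "service/leader",
		Namespace: "team-a",
	})
	if err != nil {
		return err
	}
	if _, err := lock.Lock(nil); err != nil {
		return err
	}
	defer lock.Unlock()
	// ... work while holding the lock ...
	return nil
}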
|
||||
|
||||
// LockKey returns a handle to a lock struct which can be used
|
||||
|
@ -140,6 +141,10 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
|||
return nil, ErrLockHeld
|
||||
}
|
||||
|
||||
wOpts := WriteOptions{
|
||||
Namespace: l.opts.Namespace,
|
||||
}
|
||||
|
||||
// Check if we need to create a session first
|
||||
l.lockSession = l.opts.Session
|
||||
if l.lockSession == "" {
|
||||
|
@ -150,8 +155,9 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
|||
|
||||
l.sessionRenew = make(chan struct{})
|
||||
l.lockSession = s
|
||||
|
||||
session := l.c.Session()
|
||||
go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
|
||||
go session.RenewPeriodic(l.opts.SessionTTL, s, &wOpts, l.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
|
@ -164,8 +170,9 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
|||
|
||||
// Setup the query options
|
||||
kv := l.c.KV()
|
||||
qOpts := &QueryOptions{
|
||||
WaitTime: l.opts.LockWaitTime,
|
||||
qOpts := QueryOptions{
|
||||
WaitTime: l.opts.LockWaitTime,
|
||||
Namespace: l.opts.Namespace,
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
@ -191,7 +198,7 @@ WAIT:
|
|||
attempts++
|
||||
|
||||
// Look for an existing lock, blocking until not taken
|
||||
pair, meta, err := kv.Get(l.opts.Key, qOpts)
|
||||
pair, meta, err := kv.Get(l.opts.Key, &qOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read lock: %v", err)
|
||||
}
|
||||
|
@ -209,7 +216,8 @@ WAIT:
|
|||
|
||||
// Try to acquire the lock
|
||||
pair = l.lockEntry(l.lockSession)
|
||||
locked, _, err = kv.Acquire(pair, nil)
|
||||
|
||||
locked, _, err = kv.Acquire(pair, &wOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire lock: %v", err)
|
||||
}
|
||||
|
@ -218,7 +226,7 @@ WAIT:
|
|||
if !locked {
|
||||
// Determine why the lock failed
|
||||
qOpts.WaitIndex = 0
|
||||
pair, meta, err = kv.Get(l.opts.Key, qOpts)
|
||||
pair, meta, err = kv.Get(l.opts.Key, &qOpts)
|
||||
if pair != nil && pair.Session != "" {
|
||||
//If the session is not null, this means that a wait can safely happen
|
||||
//using a long poll
|
||||
|
@ -277,7 +285,9 @@ func (l *Lock) Unlock() error {
|
|||
|
||||
// Release the lock explicitly
|
||||
kv := l.c.KV()
|
||||
_, _, err := kv.Release(lockEnt, nil)
|
||||
w := WriteOptions{Namespace: l.opts.Namespace}
|
||||
|
||||
_, _, err := kv.Release(lockEnt, &w)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to release lock: %v", err)
|
||||
}
|
||||
|
@ -298,7 +308,9 @@ func (l *Lock) Destroy() error {
|
|||
|
||||
// Look for an existing lock
|
||||
kv := l.c.KV()
|
||||
pair, _, err := kv.Get(l.opts.Key, nil)
|
||||
q := QueryOptions{Namespace: l.opts.Namespace}
|
||||
|
||||
pair, _, err := kv.Get(l.opts.Key, &q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read lock: %v", err)
|
||||
}
|
||||
|
@ -319,7 +331,8 @@ func (l *Lock) Destroy() error {
|
|||
}
|
||||
|
||||
// Attempt the delete
|
||||
didRemove, _, err := kv.DeleteCAS(pair, nil)
|
||||
w := WriteOptions{Namespace: l.opts.Namespace}
|
||||
didRemove, _, err := kv.DeleteCAS(pair, &w)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove lock: %v", err)
|
||||
}
|
||||
|
@ -339,7 +352,8 @@ func (l *Lock) createSession() (string, error) {
|
|||
TTL: l.opts.SessionTTL,
|
||||
}
|
||||
}
|
||||
id, _, err := session.Create(se, nil)
|
||||
w := WriteOptions{Namespace: l.opts.Namespace}
|
||||
id, _, err := session.Create(se, &w)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -361,11 +375,14 @@ func (l *Lock) lockEntry(session string) *KVPair {
|
|||
func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
|
||||
defer close(stopCh)
|
||||
kv := l.c.KV()
|
||||
opts := &QueryOptions{RequireConsistent: true}
|
||||
opts := QueryOptions{
|
||||
RequireConsistent: true,
|
||||
Namespace: l.opts.Namespace,
|
||||
}
|
||||
WAIT:
|
||||
retries := l.opts.MonitorRetries
|
||||
RETRY:
|
||||
pair, meta, err := kv.Get(l.opts.Key, opts)
|
||||
pair, meta, err := kv.Get(l.opts.Key, &opts)
|
||||
if err != nil {
|
||||
// If configured we can try to ride out a brief Consul unavailability
|
||||
// by doing retries. Note that we have to attempt the retry in a non-
|
||||
|
|
|
@ -0,0 +1,159 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Namespace is the configuration of a single namespace. Namespacing is a Consul Enterprise feature.
|
||||
type Namespace struct {
|
||||
// Name is the name of the Namespace. It must be unique and
|
||||
// must be a DNS hostname. There are also other reserved names
|
||||
// that may not be used.
|
||||
Name string `json:"Name"`
|
||||
|
||||
// Description is where the user puts any information they want
|
||||
// about the namespace. It is not used internally.
|
||||
Description string `json:"Description,omitempty"`
|
||||
|
||||
// ACLs is the configuration of ACLs for this namespace. It has its
|
||||
// own struct so that we can add more to it in the future.
|
||||
// This is nullable so that we can omit if empty when encoding in JSON
|
||||
ACLs *NamespaceACLConfig `json:"ACLs,omitempty"`
|
||||
|
||||
// Meta is a map that can be used to add kv metadata to the namespace definition
|
||||
Meta map[string]string `json:"Meta,omitempty"`
|
||||
|
||||
// DeletedAt is the time when the Namespace was marked for deletion
|
||||
// This is nullable so that we can omit if empty when encoding in JSON
|
||||
DeletedAt *time.Time `json:"DeletedAt,omitempty"`
|
||||
|
||||
// CreateIndex is the Raft index at which the Namespace was created
|
||||
CreateIndex uint64 `json:"CreateIndex,omitempty"`
|
||||
|
||||
// ModifyIndex is the latest Raft index at which the Namespace was modified.
|
||||
ModifyIndex uint64 `json:"ModifyIndex,omitempty"`
|
||||
}
|
||||
|
||||
// NamespaceACLConfig is the Namespace specific ACL configuration container
|
||||
type NamespaceACLConfig struct {
|
||||
// PolicyDefaults is the list of policies that should be used for the parent authorizer
|
||||
// of all tokens in the associated namespace.
|
||||
PolicyDefaults []ACLLink `json:"PolicyDefaults"`
|
||||
// RoleDefaults is the list of roles that should be used for the parent authorizer
|
||||
// of all tokens in the associated namespace.
|
||||
RoleDefaults []ACLLink `json:"RoleDefaults"`
|
||||
}
|
||||
|
||||
// Namespaces can be used to manage Namespaces in Consul Enterprise.
|
||||
type Namespaces struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Namespaces returns a handle to the namespaces endpoints.
|
||||
func (c *Client) Namespaces() *Namespaces {
|
||||
return &Namespaces{c}
|
||||
}
|
||||
|
||||
func (n *Namespaces) Create(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) {
|
||||
if ns.Name == "" {
|
||||
return nil, nil, fmt.Errorf("Must specify a Name for Namespace creation")
|
||||
}
|
||||
|
||||
r := n.c.newRequest("PUT", "/v1/namespace")
|
||||
r.setWriteOptions(q)
|
||||
r.obj = ns
|
||||
rtt, resp, err := requireOK(n.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
wm := &WriteMeta{RequestTime: rtt}
|
||||
var out Namespace
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &out, wm, nil
|
||||
}
|
||||
|
||||
func (n *Namespaces) Update(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) {
|
||||
if ns.Name == "" {
|
||||
return nil, nil, fmt.Errorf("Must specify a Name for Namespace updating")
|
||||
}
|
||||
|
||||
r := n.c.newRequest("PUT", "/v1/namespace/"+ns.Name)
|
||||
r.setWriteOptions(q)
|
||||
r.obj = ns
|
||||
rtt, resp, err := requireOK(n.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
wm := &WriteMeta{RequestTime: rtt}
|
||||
var out Namespace
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &out, wm, nil
|
||||
}
|
||||
|
||||
func (n *Namespaces) Read(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) {
|
||||
var out Namespace
|
||||
r := n.c.newRequest("GET", "/v1/namespace/"+name)
|
||||
r.setQueryOptions(q)
|
||||
found, rtt, resp, err := requireNotFoundOrOK(n.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
if !found {
|
||||
return nil, qm, nil
|
||||
}
|
||||
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &out, qm, nil
|
||||
}
|
||||
|
||||
func (n *Namespaces) Delete(name string, q *WriteOptions) (*WriteMeta, error) {
|
||||
r := n.c.newRequest("DELETE", "/v1/namespace/"+name)
|
||||
r.setWriteOptions(q)
|
||||
rtt, resp, err := requireOK(n.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
wm := &WriteMeta{RequestTime: rtt}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
|
||||
var out []*Namespace
|
||||
r := n.c.newRequest("GET", "/v1/namespaces")
|
||||
r.setQueryOptions(q)
|
||||
rtt, resp, err := requireOK(n.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
if err := decodeBody(resp, &out); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return out, qm, nil
|
||||
}
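// A minimal sketch of exercising the endpoints above end to end. The namespace
// name and description are illustrative only; namespaces require Consul
// Enterprise.
func namespaceRoundTrip(client *Client) error {
	ns := &Namespace{
		Name:        "team-a",
		Description: "namespace for team A services",
	}
	if _, _, err := client.Namespaces().Create(ns, nil); err != nil {
		return err
	}
	got, _, err := client.Namespaces().Read("team-a", nil)
	if err != nil {
		return err
	}
	if got == nil {
		return fmt.Errorf("namespace %q not found", ns.Name)
	}
	if _, _, err := client.Namespaces().List(nil); err != nil {
		return err
	}
	_, err = client.Namespaces().Delete("team-a", nil)
	return err
}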
@ -25,6 +25,10 @@ type AutopilotConfiguration struct {
	// be behind before being considered unhealthy.
	MaxTrailingLogs uint64

	// MinQuorum sets the minimum number of servers allowed in a cluster before
	// autopilot can prune dead servers.
	MinQuorum uint

	// ServerStabilizationTime is the minimum amount of time a server must be
	// in a stable, healthy state before it can be added to the cluster. Only
	// applicable with Raft protocol version 3 or higher.
@ -130,19 +134,28 @@ func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
|
|||
return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
|
||||
}
|
||||
|
||||
func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
|
||||
func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) {
|
||||
if d == nil {
|
||||
return fmt.Errorf("cannot unmarshal to nil pointer")
|
||||
}
|
||||
|
||||
var dur time.Duration
|
||||
str := string(raw)
|
||||
if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
|
||||
return fmt.Errorf("must be enclosed with quotes: %s", str)
|
||||
}
|
||||
dur, err := time.ParseDuration(str[1 : len(str)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' {
|
||||
// quoted string
|
||||
dur, err = time.ParseDuration(str[1 : len(str)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// no quotes, not a string
|
||||
v, err := strconv.ParseFloat(str, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dur = time.Duration(v)
|
||||
}
|
||||
|
||||
*d = ReadableDuration(dur)
|
||||
return nil
|
||||
}
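// A minimal sketch of what the added branch buys: a ReadableDuration now
// decodes from either a quoted duration string or a bare number of
// nanoseconds. The values are illustrative only.
func decodeReadableDurations() error {
	var quoted, bare ReadableDuration
	if err := quoted.UnmarshalJSON([]byte(`"90s"`)); err != nil {
		return err
	}
	if err := bare.UnmarshalJSON([]byte(`90000000000`)); err != nil {
		return err
	}
	// quoted.Duration() and bare.Duration() are both 1m30s.
	return nil
}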
|
@ -0,0 +1,111 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type License struct {
|
||||
// The unique identifier of the license
|
||||
LicenseID string `json:"license_id"`
|
||||
|
||||
// The customer ID associated with the license
|
||||
CustomerID string `json:"customer_id"`
|
||||
|
||||
// If set, an identifier that should be used to lock the license to a
|
||||
// particular site, cluster, etc.
|
||||
InstallationID string `json:"installation_id"`
|
||||
|
||||
// The time at which the license was issued
|
||||
IssueTime time.Time `json:"issue_time"`
|
||||
|
||||
// The time at which the license starts being valid
|
||||
StartTime time.Time `json:"start_time"`
|
||||
|
||||
// The time after which the license expires
|
||||
ExpirationTime time.Time `json:"expiration_time"`
|
||||
|
||||
// The time at which the license ceases to function and can
|
||||
// no longer be used in any capacity
|
||||
TerminationTime time.Time `json:"termination_time"`
|
||||
|
||||
// The product the license is valid for
|
||||
Product string `json:"product"`
|
||||
|
||||
// License Specific Flags
|
||||
Flags map[string]interface{} `json:"flags"`
|
||||
|
||||
// List of features enabled by the license
|
||||
Features []string `json:"features"`
|
||||
}
|
||||
|
||||
type LicenseReply struct {
|
||||
Valid bool
|
||||
License *License
|
||||
Warnings []string
|
||||
}
|
||||
|
||||
func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) {
|
||||
var reply LicenseReply
|
||||
if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return &reply, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
|
||||
r := op.c.newRequest("GET", "/v1/operator/license")
|
||||
r.params.Set("signed", "1")
|
||||
r.setQueryOptions(q)
|
||||
_, resp, err := requireOK(op.c.doRequest(r))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
// LicenseReset will reset the license to the builtin one if it is still valid.
|
||||
// If the builtin license is invalid, the current license stays active.
|
||||
func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
|
||||
var reply LicenseReply
|
||||
r := op.c.newRequest("DELETE", "/v1/operator/license")
|
||||
r.setWriteOptions(opts)
|
||||
_, resp, err := requireOK(op.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := decodeBody(resp, &reply); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &reply, nil
|
||||
}
|
||||
|
||||
func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
|
||||
var reply LicenseReply
|
||||
r := op.c.newRequest("PUT", "/v1/operator/license")
|
||||
r.setWriteOptions(opts)
|
||||
r.body = strings.NewReader(license)
|
||||
_, resp, err := requireOK(op.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := decodeBody(resp, &reply); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &reply, nil
|
||||
}
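// A minimal sketch of the license endpoints above: upload a signed license and
// read it back, including any validation warnings. The signedBlob argument is
// a placeholder, not a real license.
func applyLicense(client *Client, signedBlob string) (*LicenseReply, error) {
	if _, err := client.Operator().LicensePut(signedBlob, nil); err != nil {
		return nil, err
	}
	return client.Operator().LicenseGet(nil)
}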
@ -25,6 +25,9 @@ type ServiceQuery struct {
	// Service is the service to query.
	Service string

	// Namespace of the service to query
	Namespace string `json:",omitempty"`

	// Near allows baking in the name of a node to automatically distance-
	// sort from. The magic "_agent" value is supported, which sorts near
	// the agent which initiated the request by default.

@ -119,6 +122,9 @@ type PreparedQueryExecuteResponse struct {
	// Service is the service that was queried.
	Service string

	// Namespace of the service that was queried
	Namespace string `json:",omitempty"`

	// Nodes has the nodes that were output by the query.
	Nodes []ServiceEntry
@ -73,6 +73,7 @@ type SemaphoreOptions struct {
|
|||
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
|
||||
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
|
||||
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
|
||||
Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
|
||||
}
|
||||
|
||||
// semaphoreLock is written under the DefaultSemaphoreKey and
|
||||
|
@ -176,14 +177,17 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
|||
|
||||
// Create the contender entry
|
||||
kv := s.c.KV()
|
||||
made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
|
||||
wOpts := WriteOptions{Namespace: s.opts.Namespace}
|
||||
|
||||
made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), &wOpts)
|
||||
if err != nil || !made {
|
||||
return nil, fmt.Errorf("failed to make contender entry: %v", err)
|
||||
}
|
||||
|
||||
// Setup the query options
|
||||
qOpts := &QueryOptions{
|
||||
WaitTime: s.opts.SemaphoreWaitTime,
|
||||
qOpts := QueryOptions{
|
||||
WaitTime: s.opts.SemaphoreWaitTime,
|
||||
Namespace: s.opts.Namespace,
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
@ -209,7 +213,7 @@ WAIT:
|
|||
attempts++
|
||||
|
||||
// Read the prefix
|
||||
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
|
||||
pairs, meta, err := kv.List(s.opts.Prefix, &qOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read prefix: %v", err)
|
||||
}
|
||||
|
@ -247,7 +251,7 @@ WAIT:
|
|||
}
|
||||
|
||||
// Attempt the acquisition
|
||||
didSet, _, err := kv.CAS(newLock, nil)
|
||||
didSet, _, err := kv.CAS(newLock, &wOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update lock: %v", err)
|
||||
}
|
||||
|
@ -298,8 +302,12 @@ func (s *Semaphore) Release() error {
|
|||
// Remove ourselves as a lock holder
|
||||
kv := s.c.KV()
|
||||
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
|
||||
|
||||
wOpts := WriteOptions{Namespace: s.opts.Namespace}
|
||||
qOpts := QueryOptions{Namespace: s.opts.Namespace}
|
||||
|
||||
READ:
|
||||
pair, _, err := kv.Get(key, nil)
|
||||
pair, _, err := kv.Get(key, &qOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -320,7 +328,7 @@ READ:
|
|||
}
|
||||
|
||||
// Swap the locks
|
||||
didSet, _, err := kv.CAS(newLock, nil)
|
||||
didSet, _, err := kv.CAS(newLock, &wOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update lock: %v", err)
|
||||
}
|
||||
|
@ -331,7 +339,7 @@ READ:
|
|||
|
||||
// Destroy the contender entry
|
||||
contenderKey := path.Join(s.opts.Prefix, lockSession)
|
||||
if _, err := kv.Delete(contenderKey, nil); err != nil {
|
||||
if _, err := kv.Delete(contenderKey, &wOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -351,7 +359,9 @@ func (s *Semaphore) Destroy() error {
|
|||
|
||||
// List for the semaphore
|
||||
kv := s.c.KV()
|
||||
pairs, _, err := kv.List(s.opts.Prefix, nil)
|
||||
|
||||
q := QueryOptions{Namespace: s.opts.Namespace}
|
||||
pairs, _, err := kv.List(s.opts.Prefix, &q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read prefix: %v", err)
|
||||
}
|
||||
|
@ -380,7 +390,8 @@ func (s *Semaphore) Destroy() error {
|
|||
}
|
||||
|
||||
// Attempt the delete
|
||||
didRemove, _, err := kv.DeleteCAS(lockPair, nil)
|
||||
w := WriteOptions{Namespace: s.opts.Namespace}
|
||||
didRemove, _, err := kv.DeleteCAS(lockPair, &w)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove semaphore: %v", err)
|
||||
}
|
||||
|
@ -398,7 +409,9 @@ func (s *Semaphore) createSession() (string, error) {
|
|||
TTL: s.opts.SessionTTL,
|
||||
Behavior: SessionBehaviorDelete,
|
||||
}
|
||||
id, _, err := session.Create(se, nil)
|
||||
|
||||
w := WriteOptions{Namespace: s.opts.Namespace}
|
||||
id, _, err := session.Create(se, &w)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -483,11 +496,14 @@ func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
|
|||
func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
|
||||
defer close(stopCh)
|
||||
kv := s.c.KV()
|
||||
opts := &QueryOptions{RequireConsistent: true}
|
||||
opts := QueryOptions{
|
||||
RequireConsistent: true,
|
||||
Namespace: s.opts.Namespace,
|
||||
}
|
||||
WAIT:
|
||||
retries := s.opts.MonitorRetries
|
||||
RETRY:
|
||||
pairs, meta, err := kv.List(s.opts.Prefix, opts)
|
||||
pairs, meta, err := kv.List(s.opts.Prefix, &opts)
|
||||
if err != nil {
|
||||
// If configured we can try to ride out a brief Consul unavailability
|
||||
// by doing retries. Note that we have to attempt the retry in a non-
|
||||
|
|
|
@ -25,10 +25,23 @@ type SessionEntry struct {
|
|||
ID string
|
||||
Name string
|
||||
Node string
|
||||
Checks []string
|
||||
LockDelay time.Duration
|
||||
Behavior string
|
||||
TTL string
|
||||
Namespace string `json:",omitempty"`
|
||||
|
||||
// Deprecated for Consul Enterprise in v1.7.0.
|
||||
Checks []string
|
||||
|
||||
// NodeChecks and ServiceChecks are new in Consul 1.7.0.
|
||||
// When associating checks with sessions, namespaces can be specified for service checks.
|
||||
NodeChecks []string
|
||||
ServiceChecks []ServiceCheck
|
||||
}
|
||||
|
||||
type ServiceCheck struct {
|
||||
ID string
|
||||
Namespace string
|
||||
}
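// A minimal sketch of the new check fields: node checks and namespaced service
// checks can now be attached to a session explicitly instead of the deprecated
// flat Checks list. The check IDs and namespace are illustrative only.
func createScopedSession(client *Client) (string, error) {
	se := &SessionEntry{
		Name:       "db-leader",
		Behavior:   SessionBehaviorDelete,
		TTL:        "30s",
		NodeChecks: []string{"serfHealth"},
		ServiceChecks: []ServiceCheck{
			{ID: "db-health", Namespace: "team-a"},
		},
	}
	id, _, err := client.Session().Create(se, nil)
	return id, err
}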
|
||||
|
||||
// Session can be used to query the Session endpoints
|
||||
|
@ -45,7 +58,7 @@ func (c *Client) Session() *Session {
|
|||
// a session with no associated health checks.
|
||||
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
body := make(map[string]interface{})
|
||||
body["Checks"] = []string{}
|
||||
body["NodeChecks"] = []string{}
|
||||
if se != nil {
|
||||
if se.Name != "" {
|
||||
body["Name"] = se.Name
|
||||
|
@ -86,6 +99,12 @@ func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta,
|
|||
if len(se.Checks) > 0 {
|
||||
body["Checks"] = se.Checks
|
||||
}
|
||||
if len(se.NodeChecks) > 0 {
|
||||
body["NodeChecks"] = se.NodeChecks
|
||||
}
|
||||
if len(se.ServiceChecks) > 0 {
|
||||
body["ServiceChecks"] = se.ServiceChecks
|
||||
}
|
||||
if se.Behavior != "" {
|
||||
body["Behavior"] = se.Behavior
|
||||
}
|
||||
|
|
|
@ -75,12 +75,13 @@ const (
|
|||
|
||||
// KVTxnOp defines a single operation inside a transaction.
|
||||
type KVTxnOp struct {
|
||||
Verb KVOp
|
||||
Key string
|
||||
Value []byte
|
||||
Flags uint64
|
||||
Index uint64
|
||||
Session string
|
||||
Verb KVOp
|
||||
Key string
|
||||
Value []byte
|
||||
Flags uint64
|
||||
Index uint64
|
||||
Session string
|
||||
Namespace string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// KVTxnOps defines a set of operations to be performed inside a single
|
||||
|
@ -93,6 +94,19 @@ type KVTxnResponse struct {
|
|||
Errors TxnErrors
|
||||
}
|
||||
|
||||
// SessionOp constants give possible operations available in a transaction.
|
||||
type SessionOp string
|
||||
|
||||
const (
|
||||
SessionDelete SessionOp = "delete"
|
||||
)
|
||||
|
||||
// SessionTxnOp defines a single operation inside a transaction.
|
||||
type SessionTxnOp struct {
|
||||
Verb SessionOp
|
||||
Session Session
|
||||
}
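// A minimal sketch of a namespaced KV write inside a transaction using the new
// Namespace field on KVTxnOp. The key, value, and namespace are illustrative
// only; ok is false when the transaction was rolled back and resp.Errors says
// why.
func setFlagInNamespace(client *Client) (bool, *TxnResponse, error) {
	ops := TxnOps{
		&TxnOp{
			KV: &KVTxnOp{
				Verb:      KVSet,
				Key:       "config/feature-flag",
				Value:     []byte("on"),
				Namespace: "team-a",
			},
		},
	}
	return client.Txn().Txn(ops, nil)
}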
|
||||
|
||||
// NodeOp constants give possible operations available in a transaction.
|
||||
type NodeOp string
|
||||
|
||||
|
|
|
@ -2,6 +2,8 @@ package flags
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
@ -10,6 +12,7 @@ type HTTPFlags struct {
|
|||
// client api flags
|
||||
address StringValue
|
||||
token StringValue
|
||||
tokenFile StringValue
|
||||
caFile StringValue
|
||||
caPath StringValue
|
||||
certFile StringValue
|
||||
|
@ -19,6 +22,9 @@ type HTTPFlags struct {
|
|||
// server flags
|
||||
datacenter StringValue
|
||||
stale BoolValue
|
||||
|
||||
// namespace flags
|
||||
namespace StringValue
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) ClientFlags() *flag.FlagSet {
|
||||
|
@ -33,6 +39,10 @@ func (f *HTTPFlags) ClientFlags() *flag.FlagSet {
|
|||
"ACL token to use in the request. This can also be specified via the "+
|
||||
"CONSUL_HTTP_TOKEN environment variable. If unspecified, the query will "+
|
||||
"default to the token of the Consul agent at the HTTP address.")
|
||||
fs.Var(&f.tokenFile, "token-file",
|
||||
"File containing the ACL token to use in the request instead of one specified "+
|
||||
"via the -token argument or CONSUL_HTTP_TOKEN environment variable. "+
|
||||
"This can also be specified via the CONSUL_HTTP_TOKEN_FILE environment variable.")
|
||||
fs.Var(&f.caFile, "ca-file",
|
||||
"Path to a CA file to use for TLS when communicating with Consul. This "+
|
||||
"can also be specified via the CONSUL_CACERT environment variable.")
|
||||
|
@ -48,7 +58,6 @@ func (f *HTTPFlags) ClientFlags() *flag.FlagSet {
|
|||
fs.Var(&f.tlsServerName, "tls-server-name",
|
||||
"The server name to use as the SNI host when connecting via TLS. This "+
|
||||
"can also be specified via the CONSUL_TLS_SERVER_NAME environment variable.")
|
||||
|
||||
return fs
|
||||
}
|
||||
|
||||
|
@ -65,6 +74,15 @@ func (f *HTTPFlags) ServerFlags() *flag.FlagSet {
|
|||
return fs
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) NamespaceFlags() *flag.FlagSet {
|
||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
fs.Var(&f.namespace, "namespace",
|
||||
"Specifies the namespace to query. If not provided, the namespace will be inferred "+
|
||||
"from the request's ACL token, or will default to the `default` namespace. "+
|
||||
"Namespaces is a Consul Enterprise feature.")
|
||||
return fs
|
||||
}
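// A minimal sketch of exposing the new -namespace flag from a command: copy the
// namespace flag set into the command's own flag set next to the client flags,
// then fold the parsed values into the API config. The flag-set name and the
// copy-via-VisitAll pattern are assumptions for illustration.
func parseWithNamespace(args []string) (*api.Config, error) {
	httpFlags := &HTTPFlags{}
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	copyFlags := func(src *flag.FlagSet) {
		src.VisitAll(func(fl *flag.Flag) { fs.Var(fl.Value, fl.Name, fl.Usage) })
	}
	copyFlags(httpFlags.ClientFlags())
	copyFlags(httpFlags.NamespaceFlags())

	if err := fs.Parse(args); err != nil {
		return nil, err
	}
	conf := api.DefaultConfig()
	httpFlags.MergeOntoConfig(conf)
	return conf, nil
}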
|
||||
|
||||
func (f *HTTPFlags) Addr() string {
|
||||
return f.address.String()
|
||||
}
|
||||
|
@ -84,6 +102,32 @@ func (f *HTTPFlags) Token() string {
|
|||
return f.token.String()
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) SetToken(v string) error {
|
||||
return f.token.Set(v)
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) TokenFile() string {
|
||||
return f.tokenFile.String()
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) SetTokenFile(v string) error {
|
||||
return f.tokenFile.Set(v)
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) ReadTokenFile() (string, error) {
|
||||
tokenFile := f.tokenFile.String()
|
||||
if tokenFile == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(tokenFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(data)), nil
|
||||
}
|
||||
|
||||
func (f *HTTPFlags) APIClient() (*api.Client, error) {
|
||||
c := api.DefaultConfig()
|
||||
|
||||
|
@ -95,10 +139,12 @@ func (f *HTTPFlags) APIClient() (*api.Client, error) {
|
|||
func (f *HTTPFlags) MergeOntoConfig(c *api.Config) {
|
||||
f.address.Merge(&c.Address)
|
||||
f.token.Merge(&c.Token)
|
||||
f.tokenFile.Merge(&c.TokenFile)
|
||||
f.caFile.Merge(&c.TLSConfig.CAFile)
|
||||
f.caPath.Merge(&c.TLSConfig.CAPath)
|
||||
f.certFile.Merge(&c.TLSConfig.CertFile)
|
||||
f.keyFile.Merge(&c.TLSConfig.KeyFile)
|
||||
f.tlsServerName.Merge(&c.TLSConfig.Address)
|
||||
f.datacenter.Merge(&c.Datacenter)
|
||||
f.namespace.Merge(&c.Namespace)
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
	"io"
	"strings"

	text "github.com/tonnerre/golang-text"
	text "github.com/kr/text"
)

func Usage(txt string, flags *flag.FlagSet) string {
@ -1,139 +0,0 @@
|
|||
// Package freeport provides a helper for allocating free ports across multiple
|
||||
// processes on the same machine.
|
||||
package freeport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-testing-interface"
|
||||
)
|
||||
|
||||
const (
|
||||
// blockSize is the size of the allocated port block. ports are given out
|
||||
// consecutively from that block with roll-over for the lifetime of the
|
||||
// application/test run.
|
||||
blockSize = 1500
|
||||
|
||||
// maxBlocks is the number of available port blocks.
|
||||
// lowPort + maxBlocks * blockSize must be less than 65535.
|
||||
maxBlocks = 30
|
||||
|
||||
// lowPort is the lowest port number that should be used.
|
||||
lowPort = 10000
|
||||
|
||||
// attempts is how often we try to allocate a port block
|
||||
// before giving up.
|
||||
attempts = 10
|
||||
)
|
||||
|
||||
var (
|
||||
// firstPort is the first port of the allocated block.
|
||||
firstPort int
|
||||
|
||||
// lockLn is the system-wide mutex for the port block.
|
||||
lockLn net.Listener
|
||||
|
||||
// mu guards nextPort
|
||||
mu sync.Mutex
|
||||
|
||||
// once is used to do the initialization on the first call to retrieve free
|
||||
// ports
|
||||
once sync.Once
|
||||
|
||||
// port is the last allocated port.
|
||||
port int
|
||||
)
|
||||
|
||||
// initialize is used to initialize freeport.
|
||||
func initialize() {
|
||||
if lowPort+maxBlocks*blockSize > 65535 {
|
||||
panic("freeport: block size too big or too many blocks requested")
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
firstPort, lockLn = alloc()
|
||||
}
|
||||
|
||||
// alloc reserves a port block for exclusive use for the lifetime of the
|
||||
// application. lockLn serves as a system-wide mutex for the port block and is
|
||||
// implemented as a TCP listener which is bound to the firstPort and which will
|
||||
// be automatically released when the application terminates.
|
||||
func alloc() (int, net.Listener) {
|
||||
for i := 0; i < attempts; i++ {
|
||||
block := int(rand.Int31n(int32(maxBlocks)))
|
||||
firstPort := lowPort + block*blockSize
|
||||
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// log.Printf("[DEBUG] freeport: allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1)
|
||||
return firstPort, ln
|
||||
}
|
||||
panic("freeport: cannot allocate port block")
|
||||
}
|
||||
|
||||
func tcpAddr(ip string, port int) *net.TCPAddr {
|
||||
return &net.TCPAddr{IP: net.ParseIP(ip), Port: port}
|
||||
}
|
||||
|
||||
// Get wraps the Free function and panics on any failure retrieving ports.
|
||||
func Get(n int) (ports []int) {
|
||||
ports, err := Free(n)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return ports
|
||||
}
|
||||
|
||||
// GetT is suitable for use when retrieving unused ports in tests. If there is
|
||||
// an error retrieving free ports, the test will be failed.
|
||||
func GetT(t testing.T, n int) (ports []int) {
|
||||
ports, err := Free(n)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed retrieving free port: %v", err)
|
||||
}
|
||||
|
||||
return ports
|
||||
}
|
||||
|
||||
// Free returns a list of free ports from the allocated port block. It is safe
|
||||
// to call this method concurrently. Ports have been tested to be available on
|
||||
// 127.0.0.1 TCP but there is no guarantee that they will remain free in the
|
||||
// future.
|
||||
func Free(n int) (ports []int, err error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if n > blockSize-1 {
|
||||
return nil, fmt.Errorf("freeport: block size too small")
|
||||
}
|
||||
|
||||
// Reserve a port block
|
||||
once.Do(initialize)
|
||||
|
||||
for len(ports) < n {
|
||||
port++
|
||||
|
||||
// roll-over the port
|
||||
if port < firstPort+1 || port >= firstPort+blockSize {
|
||||
port = firstPort + 1
|
||||
}
|
||||
|
||||
// if the port is in use then skip it
|
||||
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
|
||||
if err != nil {
|
||||
// log.Println("[DEBUG] freeport: port already in use: ", port)
|
||||
continue
|
||||
}
|
||||
ln.Close()
|
||||
|
||||
ports = append(ports, port)
|
||||
}
|
||||
// log.Println("[DEBUG] freeport: free ports:", ports)
|
||||
return ports, nil
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// DecodeJSON is a convenience function to create a JSON decoder
|
||||
// set it up to disallow unknown fields and then decode into the
|
||||
// given value
|
||||
func DecodeJSON(data io.Reader, out interface{}) error {
|
||||
if data == nil {
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(data)
|
||||
decoder.DisallowUnknownFields()
|
||||
return decoder.Decode(&out)
|
||||
}
|
||||
|
||||
// UnmarshalJSON is a convenience function around calling
|
||||
// DecodeJSON. It will mainly be useful in many of our
|
||||
// UnmarshalJSON methods for structs.
|
||||
func UnmarshalJSON(data []byte, out interface{}) error {
|
||||
return DecodeJSON(bytes.NewReader(data), out)
|
||||
}
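// A minimal sketch of the strict decoding these helpers provide: unknown
// fields fail the decode instead of being silently dropped. The config type
// and payloads are illustrative only.
func decodeStrictExample() error {
	type serviceConfig struct {
		Name string
		Port int
	}

	var conf serviceConfig
	if err := UnmarshalJSON([]byte(`{"Name": "web", "Port": 8080}`), &conf); err != nil {
		return err
	}

	// The typo'd field is rejected thanks to DisallowUnknownFields; the
	// returned error reads: json: unknown field "Prot".
	return UnmarshalJSON([]byte(`{"Name": "web", "Prot": 8080}`), &conf)
}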
@ -0,0 +1,194 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/mitchellh/copystructure"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/mitchellh/reflectwalk"
|
||||
)
|
||||
|
||||
// MapWalk will traverse through the supplied input which should be a
|
||||
// map[string]interface{} (or something compatible that we can coerce
|
||||
// to a map[string]interface{}) and from it create a new map[string]interface{}
|
||||
// with all internal values coerced to JSON compatible types. i.e. a []uint8
|
||||
// can be converted (in most cases) to a string so it will not be base64 encoded
|
||||
// when output in JSON
|
||||
func MapWalk(input interface{}) (map[string]interface{}, error) {
|
||||
mapCopyRaw, err := copystructure.Copy(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mapCopy, ok := mapCopyRaw.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: input to MapWalk is not a map[string]interface{}")
|
||||
}
|
||||
|
||||
if err := reflectwalk.Walk(mapCopy, &mapWalker{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mapCopy, nil
|
||||
}
|
||||
|
||||
var typMapIfaceIface = reflect.TypeOf(map[interface{}]interface{}{})
|
||||
var typByteSlice = reflect.TypeOf([]byte{})
|
||||
|
||||
// mapWalker implements interfaces for the reflectwalk package
|
||||
// (github.com/mitchellh/reflectwalk) that can be used to automatically
|
||||
// make a JSON compatible map safe for JSON usage. This is currently
|
||||
// targeted at the map[string]interface{}
|
||||
//
|
||||
// Most of the implementation here is just keeping track of where we are
|
||||
// in the reflectwalk process, so that we can replace values. The key logic
|
||||
// is in Slice() and SliceElem().
|
||||
//
|
||||
// In particular we're looking to replace two cases the msgpack codec causes:
|
||||
//
|
||||
// 1.) String values get turned into byte slices. JSON will base64-encode
|
||||
// this and we don't want that, so we convert them back to strings.
|
||||
//
|
||||
// 2.) Nested maps turn into map[interface{}]interface{}. JSON cannot
|
||||
// encode this, so we need to turn it back into map[string]interface{}.
|
||||
//
|
||||
type mapWalker struct {
|
||||
lastValue reflect.Value // lastValue of map, required for replacement
|
||||
loc, lastLoc reflectwalk.Location // locations
|
||||
cs []reflect.Value // container stack
|
||||
csKey []reflect.Value // container keys (maps) stack
|
||||
csData interface{} // current container data
|
||||
sliceIndex []int // slice index stack (one for each slice in cs)
|
||||
}
|
||||
|
||||
func (w *mapWalker) Enter(loc reflectwalk.Location) error {
|
||||
w.lastLoc = w.loc
|
||||
w.loc = loc
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *mapWalker) Exit(loc reflectwalk.Location) error {
|
||||
w.loc = reflectwalk.None
|
||||
w.lastLoc = reflectwalk.None
|
||||
|
||||
switch loc {
|
||||
case reflectwalk.Map:
|
||||
w.cs = w.cs[:len(w.cs)-1]
|
||||
case reflectwalk.MapValue:
|
||||
w.csKey = w.csKey[:len(w.csKey)-1]
|
||||
case reflectwalk.Slice:
|
||||
// Split any values that need to be split
|
||||
w.cs = w.cs[:len(w.cs)-1]
|
||||
case reflectwalk.SliceElem:
|
||||
w.csKey = w.csKey[:len(w.csKey)-1]
|
||||
w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *mapWalker) Map(m reflect.Value) error {
|
||||
w.cs = append(w.cs, m)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *mapWalker) MapElem(m, k, v reflect.Value) error {
|
||||
w.csData = k
|
||||
w.csKey = append(w.csKey, k)
|
||||
w.lastValue = v
|
||||
|
||||
// We're looking specifically for map[interface{}]interface{}, but the
|
||||
// values in a map could be wrapped up in interface{} so we need to unwrap
|
||||
// that first. Therefore, we do three checks: 1.) is it valid? so we
|
||||
// don't panic, 2.) is it an interface{}? so we can unwrap it and 3.)
|
||||
// after unwrapping the interface do we have the map we expect?
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
if inner := v.Elem(); inner.Type() == typMapIfaceIface {
|
||||
// map[interface{}]interface{}, attempt to weakly decode into string keys
|
||||
var target map[string]interface{}
|
||||
if err := mapstructure.WeakDecode(v.Interface(), &target); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.SetMapIndex(k, reflect.ValueOf(target))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *mapWalker) Slice(v reflect.Value) error {
|
||||
// If we find a []byte slice, it is an HCL-string converted to []byte.
|
||||
// Convert it back to a Go string and replace the value so that JSON
|
||||
// doesn't base64-encode it.
|
||||
if v.Type() == typByteSlice {
|
||||
resultVal := reflect.ValueOf(string(v.Interface().([]byte)))
|
||||
switch w.lastLoc {
|
||||
case reflectwalk.MapKey:
|
||||
m := w.cs[len(w.cs)-1]
|
||||
|
||||
// Delete the old value
|
||||
var zero reflect.Value
|
||||
m.SetMapIndex(w.csData.(reflect.Value), zero)
|
||||
|
||||
// Set the new key with the existing value
|
||||
m.SetMapIndex(resultVal, w.lastValue)
|
||||
|
||||
// Set the key to be the new key
|
||||
w.csData = resultVal
|
||||
case reflectwalk.MapValue:
|
||||
// If we're in a map, then the only way to set a map value is
|
||||
// to set it directly.
|
||||
m := w.cs[len(w.cs)-1]
|
||||
mk := w.csData.(reflect.Value)
|
||||
m.SetMapIndex(mk, resultVal)
|
||||
case reflectwalk.Slice:
|
||||
s := w.cs[len(w.cs)-1]
|
||||
s.Index(w.sliceIndex[len(w.sliceIndex)-1]).Set(resultVal)
|
||||
default:
|
||||
return fmt.Errorf("cannot convert []byte")
|
||||
}
|
||||
}
|
||||
|
||||
w.cs = append(w.cs, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *mapWalker) SliceElem(i int, elem reflect.Value) error {
|
||||
w.csKey = append(w.csKey, reflect.ValueOf(i))
|
||||
w.sliceIndex = append(w.sliceIndex, i)
|
||||
|
||||
// We're looking specifically for map[interface{}]interface{}, but the
|
||||
// values in a slice are wrapped up in interface{} so we need to unwrap
|
||||
// that first. Therefore, we do three checks: 1.) is it valid? so we
|
||||
// don't panic, 2.) is it an interface{}? so we can unwrap it and 3.)
|
||||
// after unwrapping the interface do we have the map we expect?
|
||||
if !elem.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if elem.Kind() != reflect.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
if inner := elem.Elem(); inner.Type() == typMapIfaceIface {
|
||||
// map[interface{}]interface{}, attempt to weakly decode into string keys
|
||||
var target map[string]interface{}
|
||||
if err := mapstructure.WeakDecode(inner.Interface(), &target); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
elem.Set(reflect.ValueOf(target))
|
||||
} else if inner := elem.Elem(); inner.Type() == typByteSlice {
|
||||
elem.Set(reflect.ValueOf(string(inner.Interface().([]byte))))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
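// A minimal sketch of the two fixups the walker performs: []byte values become
// strings (so JSON does not base64-encode them) and map[interface{}]interface{}
// becomes map[string]interface{} (so JSON can encode it at all). The input is
// illustrative only.
func mapWalkExample() (map[string]interface{}, error) {
	raw := map[string]interface{}{
		"name": []byte("web"),
		"meta": map[interface{}]interface{}{
			"team": "a",
		},
	}
	// Result: {"name": "web", "meta": {"team": "a"}}, safe to json.Marshal.
	return MapWalk(raw)
}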
@ -0,0 +1,91 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func PatchSliceOfMaps(m map[string]interface{}, skip []string, skipTree []string) map[string]interface{} {
|
||||
lowerSkip := make([]string, len(skip))
|
||||
lowerSkipTree := make([]string, len(skipTree))
|
||||
|
||||
for i, val := range skip {
|
||||
lowerSkip[i] = strings.ToLower(val)
|
||||
}
|
||||
|
||||
for i, val := range skipTree {
|
||||
lowerSkipTree[i] = strings.ToLower(val)
|
||||
}
|
||||
|
||||
return patchValue("", m, lowerSkip, lowerSkipTree).(map[string]interface{})
|
||||
}
|
||||
|
||||
func patchValue(name string, v interface{}, skip []string, skipTree []string) interface{} {
|
||||
switch x := v.(type) {
|
||||
case map[string]interface{}:
|
||||
if len(x) == 0 {
|
||||
return x
|
||||
}
|
||||
mm := make(map[string]interface{})
|
||||
for k, v := range x {
|
||||
key := k
|
||||
if name != "" {
|
||||
key = name + "." + k
|
||||
}
|
||||
mm[k] = patchValue(key, v, skip, skipTree)
|
||||
}
|
||||
return mm
|
||||
|
||||
case []interface{}:
|
||||
if len(x) == 0 {
|
||||
return nil
|
||||
}
|
||||
if strSliceContains(name, skipTree) {
|
||||
return x
|
||||
}
|
||||
if strSliceContains(name, skip) {
|
||||
for i, y := range x {
|
||||
x[i] = patchValue(name, y, skip, skipTree)
|
||||
}
|
||||
return x
|
||||
}
|
||||
if _, ok := x[0].(map[string]interface{}); !ok {
|
||||
return x
|
||||
}
|
||||
if len(x) > 1 {
|
||||
panic(fmt.Sprintf("%s: []map[string]interface{} with more than one element not supported: %s", name, v))
|
||||
}
|
||||
return patchValue(name, x[0], skip, skipTree)
|
||||
|
||||
case []map[string]interface{}:
|
||||
if len(x) == 0 {
|
||||
return nil
|
||||
}
|
||||
if strSliceContains(name, skipTree) {
|
||||
return x
|
||||
}
|
||||
if strSliceContains(name, skip) {
|
||||
for i, y := range x {
|
||||
x[i] = patchValue(name, y, skip, skipTree).(map[string]interface{})
|
||||
}
|
||||
return x
|
||||
}
|
||||
if len(x) > 1 {
|
||||
panic(fmt.Sprintf("%s: []map[string]interface{} with more than one element not supported: %s", name, v))
|
||||
}
|
||||
return patchValue(name, x[0], skip, skipTree)
|
||||
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func strSliceContains(s string, v []string) bool {
|
||||
lower := strings.ToLower(s)
|
||||
for _, vv := range v {
|
||||
if lower == vv {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
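A minimal usage sketch of PatchSliceOfMaps, assuming the package is imported from its usual github.com/hashicorp/consul/lib path; the input map and skip list below are invented for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/lib"
)

func main() {
	// HCL decoding tends to wrap single blocks in []map[string]interface{};
	// PatchSliceOfMaps unwraps those wrappers except for the skipped keys.
	raw := map[string]interface{}{
		"service": []map[string]interface{}{
			{"name": "web", "port": 80},
		},
		// "checks" is listed in skip below, so its slice shape is preserved.
		"checks": []interface{}{
			map[string]interface{}{"ttl": "10s"},
		},
	}

	patched := lib.PatchSliceOfMaps(raw, []string{"checks"}, nil)

	// "service" is now a plain map; "checks" is still a slice.
	fmt.Printf("%#v\n", patched)
}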
|
|
@ -0,0 +1,156 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMinFailures = 0
|
||||
defaultMaxWait = 2 * time.Minute
|
||||
)
|
||||
|
||||
// Interface used for offloading jitter calculations from the RetryWaiter
|
||||
type Jitter interface {
|
||||
AddJitter(baseTime time.Duration) time.Duration
|
||||
}
|
||||
|
||||
// Calculates a random jitter between 0 and up to a specific percentage of the baseTime
|
||||
type JitterRandomStagger struct {
|
||||
// int64 because we are going to be doing math against an int64 to represent nanoseconds
|
||||
percent int64
|
||||
}
|
||||
|
||||
// Creates a new JitterRandomStagger
|
||||
func NewJitterRandomStagger(percent int) *JitterRandomStagger {
|
||||
if percent < 0 {
|
||||
percent = 0
|
||||
}
|
||||
|
||||
return &JitterRandomStagger{
|
||||
percent: int64(percent),
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the Jitter interface
|
||||
func (j *JitterRandomStagger) AddJitter(baseTime time.Duration) time.Duration {
|
||||
if j.percent == 0 {
|
||||
return baseTime
|
||||
}
|
||||
|
||||
// time.Duration is actually a type alias for int64 which is why casting
|
||||
// to the duration type and then dividing works
|
||||
return baseTime + RandomStagger((baseTime*time.Duration(j.percent))/100)
|
||||
}
|
||||
|
||||
// RetryWaiter will record failed and successful operations and provide
|
||||
// a channel to wait on before a failed operation can be retried.
|
||||
type RetryWaiter struct {
|
||||
minFailures uint
|
||||
minWait time.Duration
|
||||
maxWait time.Duration
|
||||
jitter Jitter
|
||||
failures uint
|
||||
}
|
||||
|
||||
// Creates a new RetryWaiter
|
||||
func NewRetryWaiter(minFailures int, minWait, maxWait time.Duration, jitter Jitter) *RetryWaiter {
|
||||
if minFailures < 0 {
|
||||
minFailures = defaultMinFailures
|
||||
}
|
||||
|
||||
if maxWait <= 0 {
|
||||
maxWait = defaultMaxWait
|
||||
}
|
||||
|
||||
if minWait <= 0 {
|
||||
minWait = 0 * time.Nanosecond
|
||||
}
|
||||
|
||||
return &RetryWaiter{
|
||||
minFailures: uint(minFailures),
|
||||
minWait: minWait,
|
||||
maxWait: maxWait,
|
||||
failures: 0,
|
||||
jitter: jitter,
|
||||
}
|
||||
}
|
||||
|
||||
// calculates the necessary wait time before the
|
||||
// next operation should be allowed.
|
||||
func (rw *RetryWaiter) calculateWait() time.Duration {
|
||||
waitTime := rw.minWait
|
||||
if rw.failures > rw.minFailures {
|
||||
shift := rw.failures - rw.minFailures - 1
|
||||
waitTime = rw.maxWait
|
||||
if shift < 31 {
|
||||
waitTime = (1 << shift) * time.Second
|
||||
}
|
||||
if waitTime > rw.maxWait {
|
||||
waitTime = rw.maxWait
|
||||
}
|
||||
|
||||
if rw.jitter != nil {
|
||||
waitTime = rw.jitter.AddJitter(waitTime)
|
||||
}
|
||||
}
|
||||
|
||||
if waitTime < rw.minWait {
|
||||
waitTime = rw.minWait
|
||||
}
|
||||
|
||||
return waitTime
|
||||
}
|
||||
|
||||
// calculates the waitTime and returns a chan
|
||||
// that will become selectable once that amount
|
||||
// of time has elapsed.
|
||||
func (rw *RetryWaiter) wait() <-chan struct{} {
|
||||
waitTime := rw.calculateWait()
|
||||
ch := make(chan struct{})
|
||||
if waitTime > 0 {
|
||||
time.AfterFunc(waitTime, func() { close(ch) })
|
||||
} else {
|
||||
// if there should be 0 wait time then we ensure
|
||||
// that the chan will be immediately selectable
|
||||
close(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// Marks that an operation is successful which resets the failure count.
|
||||
// The chan that is returned will be immediately selectable
|
||||
func (rw *RetryWaiter) Success() <-chan struct{} {
|
||||
rw.Reset()
|
||||
return rw.wait()
|
||||
}
|
||||
|
||||
// Marks that an operation failed. The chan returned will be selectable
|
||||
// once the calculated retry wait amount of time has elapsed
|
||||
func (rw *RetryWaiter) Failed() <-chan struct{} {
|
||||
rw.failures += 1
|
||||
ch := rw.wait()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Resets the internal failure counter
|
||||
func (rw *RetryWaiter) Reset() {
|
||||
rw.failures = 0
|
||||
}
|
||||
|
||||
// WaitIf is a convenience method to record whether the last
|
||||
// operation was a success or failure and return a chan that
|
||||
// will be selectable when the next operation can be done.
|
||||
func (rw *RetryWaiter) WaitIf(failure bool) <-chan struct{} {
|
||||
if failure {
|
||||
return rw.Failed()
|
||||
}
|
||||
return rw.Success()
|
||||
}
|
||||
|
||||
// WaitIfErr is a convenience method to record whether the last
|
||||
// operation was a success or failure based on whether the err
|
||||
// is nil and then return a chan that will be selectable when
|
||||
// the next operation can be done.
|
||||
func (rw *RetryWaiter) WaitIfErr(err error) <-chan struct{} {
|
||||
return rw.WaitIf(err != nil)
|
||||
}
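A hedged usage sketch of RetryWaiter combined with the random-stagger jitter above; doWork and all of the durations are placeholders.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/consul/lib"
)

// doWork is a stand-in operation that fails twice before succeeding.
func doWork(attempt int) error {
	if attempt < 2 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	// Wait between 500ms and 5s, with up to 10% jitter, and only start the
	// exponential backoff after the first failure (minFailures = 1).
	waiter := lib.NewRetryWaiter(1, 500*time.Millisecond, 5*time.Second, lib.NewJitterRandomStagger(10))

	for attempt := 0; attempt < 5; attempt++ {
		err := doWork(attempt)

		// Block until the waiter allows the next attempt; a nil error resets
		// the failure counter so the wait falls back to minWait.
		<-waiter.WaitIfErr(err)

		if err == nil {
			fmt.Println("succeeded after", attempt+1, "attempts")
			return
		}
	}
}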
|
|
@ -1,6 +1,8 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/serf/serf"
|
||||
)
|
||||
|
||||
|
@ -16,5 +18,27 @@ func SerfDefaultConfig() *serf.Config {
|
|||
// cluster size.
|
||||
base.MinQueueDepth = 4096
|
||||
|
||||
// This gives leaves some time to propagate through the cluster before
|
||||
// we shut down. The value was chosen to be reasonably short, but to
|
||||
// allow a leave to get to over 99.99% of the cluster with 100k nodes
|
||||
// (using https://www.serf.io/docs/internals/simulator.html).
|
||||
base.LeavePropagateDelay = 3 * time.Second
|
||||
|
||||
return base
|
||||
}
|
||||
|
||||
func GetSerfTags(serf *serf.Serf) map[string]string {
|
||||
tags := make(map[string]string)
|
||||
for tag, value := range serf.LocalMember().Tags {
|
||||
tags[tag] = value
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func UpdateSerfTag(serf *serf.Serf, tag, value string) {
|
||||
tags := GetSerfTags(serf)
|
||||
tags[tag] = value
|
||||
|
||||
serf.SetTags(tags)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StopChannelContext implements the context.Context interface
|
||||
// You provide the channel to select on to determine whether
|
||||
// the context should be canceled and other code such
|
||||
// as the rate.Limiter will automatically use the channel
|
||||
// appropriately
|
||||
type StopChannelContext struct {
|
||||
StopCh <-chan struct{}
|
||||
}
|
||||
|
||||
func (c *StopChannelContext) Deadline() (deadline time.Time, ok bool) {
|
||||
ok = false
|
||||
return
|
||||
}
|
||||
|
||||
func (c *StopChannelContext) Done() <-chan struct{} {
|
||||
return c.StopCh
|
||||
}
|
||||
|
||||
func (c *StopChannelContext) Err() error {
|
||||
select {
|
||||
case <-c.StopCh:
|
||||
return context.Canceled
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *StopChannelContext) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
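A brief sketch of adapting a plain shutdown channel to context-aware code with StopChannelContext; waitOrCancel and the timings are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/consul/lib"
)

// waitOrCancel stands in for any context-aware call (e.g. rate.Limiter.Wait).
func waitOrCancel(ctx context.Context) error {
	select {
	case <-time.After(5 * time.Second):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	stopCh := make(chan struct{})

	// Wrap the stop channel so it satisfies context.Context.
	var ctx context.Context = &lib.StopChannelContext{StopCh: stopCh}

	go func() {
		time.Sleep(100 * time.Millisecond)
		close(stopCh) // simulate shutdown
	}()

	fmt.Println(waitOrCancel(ctx)) // prints "context canceled"
}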
|
|
@ -0,0 +1,376 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
"github.com/armon/go-metrics/circonus"
|
||||
"github.com/armon/go-metrics/datadog"
|
||||
"github.com/armon/go-metrics/prometheus"
|
||||
)
|
||||
|
||||
// TelemetryConfig is embedded in config.RuntimeConfig and holds the
|
||||
// configuration variables for go-metrics. It is a separate struct to allow it
|
||||
// to be exported as JSON and passed to other process like managed connect
|
||||
// proxies so they can inherit the agent's telemetry config.
|
||||
//
|
||||
// It is in lib package rather than agent/config because we need to use it in
|
||||
// the shared InitTelemetry functions below, but we can't import agent/config
|
||||
// due to a dependency cycle.
|
||||
type TelemetryConfig struct {
|
||||
// Circonus*: see https://github.com/circonus-labs/circonus-gometrics
|
||||
// for more details on the various configuration options.
|
||||
// Valid configuration combinations:
|
||||
// - CirconusAPIToken
|
||||
// metric management enabled (search for existing check or create a new one)
|
||||
// - CirconusSubmissionUrl
|
||||
// metric management disabled (use check with specified submission_url,
|
||||
// broker must be using a public SSL certificate)
|
||||
// - CirconusAPIToken + CirconusCheckSubmissionURL
|
||||
// metric management enabled (use check with specified submission_url)
|
||||
// - CirconusAPIToken + CirconusCheckID
|
||||
// metric management enabled (use check with specified id)
|
||||
|
||||
// CirconusAPIApp is an app name associated with API token.
|
||||
// Default: "consul"
|
||||
//
|
||||
// hcl: telemetry { circonus_api_app = string }
|
||||
CirconusAPIApp string `json:"circonus_api_app,omitempty" mapstructure:"circonus_api_app"`
|
||||
|
||||
// CirconusAPIToken is a valid API Token used to create/manage check. If provided,
|
||||
// metric management is enabled.
|
||||
// Default: none
|
||||
//
|
||||
// hcl: telemetry { circonus_api_token = string }
|
||||
CirconusAPIToken string `json:"circonus_api_token,omitempty" mapstructure:"circonus_api_token"`
|
||||
|
||||
// CirconusAPIURL is the base URL to use for contacting the Circonus API.
|
||||
// Default: "https://api.circonus.com/v2"
|
||||
//
|
||||
// hcl: telemetry { circonus_api_url = string }
|
||||
CirconusAPIURL string `json:"circonus_apiurl,omitempty" mapstructure:"circonus_apiurl"`
|
||||
|
||||
// CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
|
||||
// of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
|
||||
// is provided, an attempt will be made to search for an existing check using Instance ID and
|
||||
// Search Tag. If one is not found, a new HTTPTRAP check will be created.
|
||||
// Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated
|
||||
// with the specified API token or the default Circonus Broker.
|
||||
// Default: none
|
||||
//
|
||||
// hcl: telemetry { circonus_broker_id = string }
|
||||
CirconusBrokerID string `json:"circonus_broker_id,omitempty" mapstructure:"circonus_broker_id"`
|
||||
|
||||
// CirconusBrokerSelectTag is a special tag which will be used to select a broker when
|
||||
// a Broker ID is not provided. The best use of this is as a hint for which broker
|
||||
// should be used based on *where* this particular instance is running.
|
||||
// (e.g. a specific geo location or datacenter, dc:sfo)
|
||||
// Default: none
|
||||
//
|
||||
// hcl: telemetry { circonus_broker_select_tag = string }
|
||||
CirconusBrokerSelectTag string `json:"circonus_broker_select_tag,omitempty" mapstructure:"circonus_broker_select_tag"`
|
||||
|
||||
// CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI.
|
||||
// Default: value of CirconusCheckInstanceID
|
||||
//
|
||||
// hcl: telemetry { circonus_check_display_name = string }
|
||||
CirconusCheckDisplayName string `json:"circonus_check_display_name,omitempty" mapstructure:"circonus_check_display_name"`
|
||||
|
||||
// CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
|
||||
// if the metric already exists and is NOT active. If check management is enabled, the default
|
||||
// behavior is to add new metrics as they are encountered. If the metric already exists in the
|
||||
// check, it will *NOT* be activated. This setting overrides that behavior.
|
||||
// Default: "false"
|
||||
//
|
||||
// hcl: telemetry { circonus_check_force_metric_activation = (true|false) }
|
||||
CirconusCheckForceMetricActivation string `json:"circonus_check_force_metric_activation,omitempty" mapstructure:"circonus_check_force_metric_activation"`
|
||||
|
||||
// CirconusCheckID is the check id (not check bundle id) from a previously created
|
||||
// HTTPTRAP check. The numeric portion of the check._cid field.
|
||||
// Default: none
|
||||
//
|
||||
// hcl: telemetry { circonus_check_id = string }
|
||||
CirconusCheckID string `json:"circonus_check_id,omitempty" mapstructure:"circonus_check_id"`
|
||||
|
||||
// CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance".
|
||||
// It can be used to maintain metric continuity with transient or ephemeral instances as
|
||||
// they move around within an infrastructure.
|
||||
// Default: hostname:app
|
||||
//
|
||||
// hcl: telemetry { circonus_check_instance_id = string }
|
||||
CirconusCheckInstanceID string `json:"circonus_check_instance_id,omitempty" mapstructure:"circonus_check_instance_id"`
|
||||
|
||||
// CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
|
||||
// narrow down the search results when neither a Submission URL or Check ID is provided.
|
||||
// Default: service:app (e.g. service:consul)
|
||||
//
|
||||
// hcl: telemetry { circonus_check_search_tag = string }
|
||||
CirconusCheckSearchTag string `json:"circonus_check_search_tag,omitempty" mapstructure:"circonus_check_search_tag"`
|
||||
|
||||
// CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
|
||||
// narrow down the search results when neither a Submission URL or Check ID is provided.
|
||||
// Default: service:app (e.g. service:consul)
|
||||
//
|
||||
// hcl: telemetry { circonus_check_tags = string }
|
||||
CirconusCheckTags string `json:"circonus_check_tags,omitempty" mapstructure:"circonus_check_tags"`
|
||||
|
||||
// CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
|
||||
// Default: 10s
|
||||
//
|
||||
// hcl: telemetry { circonus_submission_interval = "duration" }
|
||||
CirconusSubmissionInterval string `json:"circonus_submission_interval,omitempty" mapstructure:"circonus_submission_interval"`
|
||||
|
||||
// CirconusCheckSubmissionURL is the check.config.submission_url field from a
|
||||
// previously created HTTPTRAP check.
|
||||
// Default: none
|
||||
//
|
||||
// hcl: telemetry { circonus_submission_url = string }
|
||||
CirconusSubmissionURL string `json:"circonus_submission_url,omitempty" mapstructure:"circonus_submission_url"`
|
||||
|
||||
// DisableHostname will disable hostname prefixing for all metrics.
|
||||
//
|
||||
// hcl: telemetry { disable_hostname = (true|false) }
|
||||
DisableHostname bool `json:"disable_hostname,omitempty" mapstructure:"disable_hostname"`
|
||||
|
||||
// DogStatsdAddr is the address of a dogstatsd instance. If provided,
|
||||
// metrics will be sent to that instance
|
||||
//
|
||||
// hcl: telemetry { dogstatsd_addr = string }
|
||||
DogstatsdAddr string `json:"dogstatsd_addr,omitempty" mapstructure:"dogstatsd_addr"`
|
||||
|
||||
// DogStatsdTags are the global tags that should be sent with each packet to dogstatsd
|
||||
// It is a list of strings, where each string looks like "my_tag_name:my_tag_value"
|
||||
//
|
||||
// hcl: telemetry { dogstatsd_tags = []string }
|
||||
DogstatsdTags []string `json:"dogstatsd_tags,omitempty" mapstructure:"dogstatsd_tags"`
|
||||
|
||||
// PrometheusRetentionTime is the retention time for prometheus metrics if greater than 0.
|
||||
// A value of 0 disables Prometheus support. Regarding Prometheus, it is considered a good
|
||||
// practice to put large values here (such as a few days), and at least the interval between
|
||||
// prometheus requests.
|
||||
//
|
||||
// hcl: telemetry { prometheus_retention_time = "duration" }
|
||||
PrometheusRetentionTime time.Duration `json:"prometheus_retention_time,omitempty" mapstructure:"prometheus_retention_time"`
|
||||
|
||||
// FilterDefault is the default for whether to allow a metric that's not
|
||||
// covered by the filter.
|
||||
//
|
||||
// hcl: telemetry { filter_default = (true|false) }
|
||||
FilterDefault bool `json:"filter_default,omitempty" mapstructure:"filter_default"`
|
||||
|
||||
// AllowedPrefixes is a list of filter rules to apply for allowing metrics
|
||||
// by prefix. Use the 'prefix_filter' option and prefix rules with '+' to be
|
||||
// included.
|
||||
//
|
||||
// hcl: telemetry { prefix_filter = []string{"+<expr>", "+<expr>", ...} }
|
||||
AllowedPrefixes []string `json:"allowed_prefixes,omitempty" mapstructure:"allowed_prefixes"`
|
||||
|
||||
// BlockedPrefixes is a list of filter rules to apply for blocking metrics
|
||||
// by prefix. Use the 'prefix_filter' option and prefix rules with '-' to be
|
||||
// excluded.
|
||||
//
|
||||
// hcl: telemetry { prefix_filter = []string{"-<expr>", "-<expr>", ...} }
|
||||
BlockedPrefixes []string `json:"blocked_prefixes,omitempty" mapstructure:"blocked_prefixes"`
|
||||
|
||||
// MetricsPrefix is the prefix used to write stats values to.
|
||||
// Default: "consul."
|
||||
//
|
||||
// hcl: telemetry { metrics_prefix = string }
|
||||
MetricsPrefix string `json:"metrics_prefix,omitempty" mapstructure:"metrics_prefix"`
|
||||
|
||||
// StatsdAddr is the address of a statsd instance. If provided,
|
||||
// metrics will be sent to that instance.
|
||||
//
|
||||
// hcl: telemetry { statsd_address = string }
|
||||
StatsdAddr string `json:"statsd_address,omitempty" mapstructure:"statsd_address"`
|
||||
|
||||
// StatsiteAddr is the address of a statsite instance. If provided,
|
||||
// metrics will be streamed to that instance.
|
||||
//
|
||||
// hcl: telemetry { statsite_address = string }
|
||||
StatsiteAddr string `json:"statsite_address,omitempty" mapstructure:"statsite_address"`
|
||||
}
|
||||
|
||||
// MergeDefaults copies any non-zero field from defaults into the current
|
||||
// config.
|
||||
func (c *TelemetryConfig) MergeDefaults(defaults *TelemetryConfig) {
|
||||
if defaults == nil {
|
||||
return
|
||||
}
|
||||
cfgPtrVal := reflect.ValueOf(c)
|
||||
cfgVal := cfgPtrVal.Elem()
|
||||
otherVal := reflect.ValueOf(*defaults)
|
||||
for i := 0; i < cfgVal.NumField(); i++ {
|
||||
f := cfgVal.Field(i)
|
||||
if !f.IsValid() || !f.CanSet() {
|
||||
continue
|
||||
}
|
||||
// See if the current value is a zero-value, if _not_ skip it
|
||||
//
|
||||
// No built in way to check for zero-values for all types so only
|
||||
// implementing this for the types we actually have for now. Test failure
|
||||
// should catch the case where we add new types later.
|
||||
switch f.Kind() {
|
||||
case reflect.Slice:
|
||||
if !f.IsNil() {
|
||||
continue
|
||||
}
|
||||
case reflect.Int, reflect.Int64: // time.Duration == int64
|
||||
if f.Int() != 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.String:
|
||||
if f.String() != "" {
|
||||
continue
|
||||
}
|
||||
case reflect.Bool:
|
||||
if f.Bool() {
|
||||
continue
|
||||
}
|
||||
default:
|
||||
// Needs implementing, should be caught by tests.
|
||||
continue
|
||||
}
|
||||
|
||||
// It's zero, copy it from defaults
|
||||
f.Set(otherVal.Field(i))
|
||||
}
|
||||
}
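A tiny illustration of the zero-value merge above; the prefix and retention values are made up.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/lib"
)

func main() {
	cfg := lib.TelemetryConfig{
		MetricsPrefix: "myapp.", // explicitly set, so it is kept
	}
	defaults := lib.TelemetryConfig{
		MetricsPrefix:           "consul.",
		PrometheusRetentionTime: 60 * time.Second,
	}

	// Only zero-valued fields in cfg are copied over from defaults.
	cfg.MergeDefaults(&defaults)

	fmt.Println(cfg.MetricsPrefix)           // "myapp."
	fmt.Println(cfg.PrometheusRetentionTime) // 1m0s
}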
|
||||
|
||||
func statsiteSink(cfg TelemetryConfig, hostname string) (metrics.MetricSink, error) {
|
||||
addr := cfg.StatsiteAddr
|
||||
if addr == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return metrics.NewStatsiteSink(addr)
|
||||
}
|
||||
|
||||
func statsdSink(cfg TelemetryConfig, hostname string) (metrics.MetricSink, error) {
|
||||
addr := cfg.StatsdAddr
|
||||
if addr == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return metrics.NewStatsdSink(addr)
|
||||
}
|
||||
|
||||
func dogstatdSink(cfg TelemetryConfig, hostname string) (metrics.MetricSink, error) {
|
||||
addr := cfg.DogstatsdAddr
|
||||
if addr == "" {
|
||||
return nil, nil
|
||||
}
|
||||
sink, err := datadog.NewDogStatsdSink(addr, hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sink.SetTags(cfg.DogstatsdTags)
|
||||
return sink, nil
|
||||
}
|
||||
|
||||
func prometheusSink(cfg TelemetryConfig, hostname string) (metrics.MetricSink, error) {
|
||||
if cfg.PrometheusRetentionTime.Nanoseconds() < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
prometheusOpts := prometheus.PrometheusOpts{
|
||||
Expiration: cfg.PrometheusRetentionTime,
|
||||
}
|
||||
sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sink, nil
|
||||
}
|
||||
|
||||
func circonusSink(cfg TelemetryConfig, hostname string) (metrics.MetricSink, error) {
|
||||
token := cfg.CirconusAPIToken
|
||||
url := cfg.CirconusSubmissionURL
|
||||
if token == "" && url == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
conf := &circonus.Config{}
|
||||
conf.Interval = cfg.CirconusSubmissionInterval
|
||||
conf.CheckManager.API.TokenKey = token
|
||||
conf.CheckManager.API.TokenApp = cfg.CirconusAPIApp
|
||||
conf.CheckManager.API.URL = cfg.CirconusAPIURL
|
||||
conf.CheckManager.Check.SubmissionURL = url
|
||||
conf.CheckManager.Check.ID = cfg.CirconusCheckID
|
||||
conf.CheckManager.Check.ForceMetricActivation = cfg.CirconusCheckForceMetricActivation
|
||||
conf.CheckManager.Check.InstanceID = cfg.CirconusCheckInstanceID
|
||||
conf.CheckManager.Check.SearchTag = cfg.CirconusCheckSearchTag
|
||||
conf.CheckManager.Check.DisplayName = cfg.CirconusCheckDisplayName
|
||||
conf.CheckManager.Check.Tags = cfg.CirconusCheckTags
|
||||
conf.CheckManager.Broker.ID = cfg.CirconusBrokerID
|
||||
conf.CheckManager.Broker.SelectTag = cfg.CirconusBrokerSelectTag
|
||||
|
||||
if conf.CheckManager.Check.DisplayName == "" {
|
||||
conf.CheckManager.Check.DisplayName = "Consul"
|
||||
}
|
||||
|
||||
if conf.CheckManager.API.TokenApp == "" {
|
||||
conf.CheckManager.API.TokenApp = "consul"
|
||||
}
|
||||
|
||||
if conf.CheckManager.Check.SearchTag == "" {
|
||||
conf.CheckManager.Check.SearchTag = "service:consul"
|
||||
}
|
||||
|
||||
sink, err := circonus.NewCirconusSink(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sink.Start()
|
||||
return sink, nil
|
||||
}
|
||||
|
||||
// InitTelemetry configures go-metrics based on a map of telemetry config
|
||||
// values as returned by Runtimecfg.Config().
|
||||
func InitTelemetry(cfg TelemetryConfig) (*metrics.InmemSink, error) {
|
||||
// Setup telemetry
|
||||
// Aggregate on 10 second intervals for 1 minute. Expose the
|
||||
// metrics over stderr when there is a SIGUSR1 received.
|
||||
memSink := metrics.NewInmemSink(10*time.Second, time.Minute)
|
||||
metrics.DefaultInmemSignal(memSink)
|
||||
metricsConf := metrics.DefaultConfig(cfg.MetricsPrefix)
|
||||
metricsConf.EnableHostname = !cfg.DisableHostname
|
||||
metricsConf.FilterDefault = cfg.FilterDefault
|
||||
metricsConf.AllowedPrefixes = cfg.AllowedPrefixes
|
||||
metricsConf.BlockedPrefixes = cfg.BlockedPrefixes
|
||||
|
||||
var sinks metrics.FanoutSink
|
||||
addSink := func(name string, fn func(TelemetryConfig, string) (metrics.MetricSink, error)) error {
|
||||
s, err := fn(cfg, metricsConf.HostName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s != nil {
|
||||
sinks = append(sinks, s)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := addSink("statsite", statsiteSink); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := addSink("statsd", statsdSink); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := addSink("dogstatd", dogstatdSink); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := addSink("circonus", circonusSink); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := addSink("prometheus", prometheusSink); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(sinks) > 0 {
|
||||
sinks = append(sinks, memSink)
|
||||
metrics.NewGlobal(metricsConf, sinks)
|
||||
} else {
|
||||
metricsConf.EnableHostname = false
|
||||
metrics.NewGlobal(metricsConf, memSink)
|
||||
}
|
||||
return memSink, nil
|
||||
}
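A sketch of calling InitTelemetry at process start-up, assuming the lib import path used above; the prefix and retention values are arbitrary.

package main

import (
	"log"
	"time"

	metrics "github.com/armon/go-metrics"
	"github.com/hashicorp/consul/lib"
)

func main() {
	cfg := lib.TelemetryConfig{
		MetricsPrefix:           "myapp.",
		FilterDefault:           true,
		PrometheusRetentionTime: 2 * time.Minute, // > 0 enables the prometheus sink
	}

	// InitTelemetry wires up the global go-metrics state and returns the
	// in-memory sink used for the SIGUSR1 stderr dump.
	if _, err := lib.InitTelemetry(cfg); err != nil {
		log.Fatal(err)
	}

	// From here the usual go-metrics helpers report through the configured sinks.
	metrics.IncrCounter([]string{"startup"}, 1)
}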
|
|
@ -0,0 +1,98 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TranslateKeys recursively translates all keys from m in-place to their
|
||||
// canonical form as defined in dict which maps an alias name to the canonical
|
||||
// name. If m already has a value for the canonical name then that one is used
|
||||
// and the value for the alias name is discarded. Alias names are matched
|
||||
// case-insensitively.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// TranslateKeys(m, map[string]string{"snake_case": "CamelCase"})
|
||||
//
|
||||
// If the canonical string provided is the empty string, the effect is to stop
|
||||
// recursing into any key matching the left hand side. In this case the left
|
||||
// hand side must use periods to specify a full path e.g.
|
||||
// `connect.proxy.config`. The path must be the canonical key names (i.e.
|
||||
// CamelCase) AFTER translation so NodeName not node_name. These are still matched
|
||||
// in a case-insensitive way.
|
||||
//
|
||||
// This is needed for example because parts of the Service Definition are
|
||||
// "opaque" maps of metadata or config passed to another process or component.
|
||||
// If we allow translation to recurse we might mangle the "opaque" keys given
|
||||
// where they clash with key names in other parts of the definition :sob:
|
||||
//
|
||||
// Example:
|
||||
// TranslateKeys(m, map[string]string{
|
||||
// "foo_bar": "FooBar",
|
||||
// "widget.config": "",
|
||||
// // Assume widgets is an array, this will prevent recursing into any
|
||||
// // item's config field
|
||||
// "widgets.config": "",
|
||||
// })
|
||||
func TranslateKeys(v map[string]interface{}, dict map[string]string) {
|
||||
// Convert all dict keys for exclusions to lowercase so we can match against them
|
||||
// unambiguously with a single lookup.
|
||||
for k, v := range dict {
|
||||
if v == "" {
|
||||
dict[strings.ToLower(k)] = ""
|
||||
}
|
||||
}
|
||||
ck(v, dict, "")
|
||||
}
|
||||
|
||||
func ck(v interface{}, dict map[string]string, pathPfx string) interface{} {
|
||||
// In the array case we don't add a path segment for the item, as they are all
|
||||
// assumed to be the same, which is why we check the prefix doesn't already end in
|
||||
// a .
|
||||
if pathPfx != "" && !strings.HasSuffix(pathPfx, ".") {
|
||||
pathPfx += "."
|
||||
}
|
||||
switch x := v.(type) {
|
||||
case map[string]interface{}:
|
||||
for k, v := range x {
|
||||
lowerK := strings.ToLower(k)
|
||||
|
||||
// Check if this path has been excluded
|
||||
val, ok := dict[pathPfx+lowerK]
|
||||
if ok && val == "" {
|
||||
// Don't recurse into this key
|
||||
continue
|
||||
}
|
||||
|
||||
canonKey, ok := dict[lowerK]
|
||||
|
||||
// no canonical key? -> use this key
|
||||
if !ok {
|
||||
x[k] = ck(v, dict, pathPfx+lowerK)
|
||||
continue
|
||||
}
|
||||
|
||||
// delete the alias
|
||||
delete(x, k)
|
||||
|
||||
// if there is a value for the canonical key then keep it
|
||||
if _, ok := x[canonKey]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// otherwise translate to the canonical key
|
||||
x[canonKey] = ck(v, dict, pathPfx+strings.ToLower(canonKey))
|
||||
}
|
||||
return x
|
||||
|
||||
case []interface{}:
|
||||
var a []interface{}
|
||||
for _, xv := range x {
|
||||
a = append(a, ck(xv, dict, pathPfx))
|
||||
}
|
||||
return a
|
||||
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
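An illustrative call to TranslateKeys; the alias map and input are invented for the example.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/lib"
)

func main() {
	m := map[string]interface{}{
		"node_name": "web-1",
		"Meta": map[string]interface{}{
			"rack": "r2",
		},
	}

	// Rewrite snake_case aliases to their canonical names in place.
	lib.TranslateKeys(m, map[string]string{
		"node_name": "NodeName",
		// An empty canonical name stops recursion below that path.
		"meta": "",
	})

	fmt.Printf("%#v\n", m) // "node_name" becomes "NodeName"; "Meta" is left untouched
}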
|
|
@ -0,0 +1,29 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/hashicorp/consul/version"
|
||||
)
|
||||
|
||||
var (
|
||||
// projectURL is the project URL.
|
||||
projectURL = "https://www.consul.io/"
|
||||
|
||||
// rt is the runtime - variable for tests.
|
||||
rt = runtime.Version()
|
||||
|
||||
// versionFunc is the func that returns the current version. This is a
|
||||
// function to take into account the different build processes and distinguish
|
||||
// between enterprise and oss builds.
|
||||
versionFunc = func() string {
|
||||
return version.GetHumanVersion()
|
||||
}
|
||||
)
|
||||
|
||||
// UserAgent returns the consistent user-agent string for Consul.
|
||||
func UserAgent() string {
|
||||
return fmt.Sprintf("Consul/%s (+%s; %s)",
|
||||
versionFunc(), projectURL, rt)
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/go-uuid"
|
||||
)
|
||||
|
||||
// UUIDCheckFunc should determine whether the given UUID is actually
|
||||
// unique and allowed to be used
|
||||
type UUIDCheckFunc func(string) (bool, error)
|
||||
|
||||
func GenerateUUID(checkFn UUIDCheckFunc) (string, error) {
|
||||
for {
|
||||
id, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if checkFn == nil {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
if ok, err := checkFn(id); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return id, nil
|
||||
}
|
||||
}
|
||||
}
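A short sketch of GenerateUUID with a uniqueness callback; the in-memory seen set stands in for whatever store the caller actually consults.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/lib"
)

func main() {
	seen := map[string]struct{}{} // hypothetical record of IDs already handed out

	id, err := lib.GenerateUUID(func(candidate string) (bool, error) {
		_, exists := seen[candidate]
		return !exists, nil // true means the candidate may be used
	})
	if err != nil {
		log.Fatal(err)
	}

	seen[id] = struct{}{}
	fmt.Println("allocated id:", id)
}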
|
vendor/github.com/hashicorp/consul/sdk/freeport/ephemeral_fallback.go (generated, vendored, new file, 7 lines)
|
@ -0,0 +1,7 @@
|
|||
//+build !linux
|
||||
|
||||
package freeport
|
||||
|
||||
func getEphemeralPortRange() (int, int, error) {
|
||||
return 0, 0, nil
|
||||
}
|
vendor/github.com/hashicorp/consul/sdk/freeport/ephemeral_linux.go (generated, vendored, new file, 36 lines)
|
@ -0,0 +1,36 @@
|
|||
//+build linux
|
||||
|
||||
package freeport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const ephemeralPortRangeSysctlKey = "net.ipv4.ip_local_port_range"
|
||||
|
||||
var ephemeralPortRangePatt = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`)
|
||||
|
||||
func getEphemeralPortRange() (int, int, error) {
|
||||
cmd := exec.Command("/sbin/sysctl", "-n", ephemeralPortRangeSysctlKey)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
val := string(out)
|
||||
|
||||
m := ephemeralPortRangePatt.FindStringSubmatch(val)
|
||||
if m != nil {
|
||||
min, err1 := strconv.Atoi(m[1])
|
||||
max, err2 := strconv.Atoi(m[2])
|
||||
|
||||
if err1 == nil && err2 == nil {
|
||||
return min, max, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, 0, fmt.Errorf("unexpected sysctl value %q for key %q", val, ephemeralPortRangeSysctlKey)
|
||||
}
|
|
@ -0,0 +1,356 @@
|
|||
// Package freeport provides a helper for allocating free ports across multiple
|
||||
// processes on the same machine.
|
||||
package freeport
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-testing-interface"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxBlocks is the number of available port blocks before exclusions.
|
||||
maxBlocks = 30
|
||||
|
||||
// lowPort is the lowest port number that should be used.
|
||||
lowPort = 10000
|
||||
|
||||
// attempts is how often we try to allocate a port block
|
||||
// before giving up.
|
||||
attempts = 10
|
||||
)
|
||||
|
||||
var (
|
||||
// blockSize is the size of the allocated port block. ports are given out
|
||||
// consecutively from that block and after that point in a LRU fashion.
|
||||
blockSize int
|
||||
|
||||
// effectiveMaxBlocks is the number of available port blocks.
|
||||
// lowPort + effectiveMaxBlocks * blockSize must be less than 65535.
|
||||
effectiveMaxBlocks int
|
||||
|
||||
// firstPort is the first port of the allocated block.
|
||||
firstPort int
|
||||
|
||||
// lockLn is the system-wide mutex for the port block.
|
||||
lockLn net.Listener
|
||||
|
||||
// mu guards:
|
||||
// - pendingPorts
|
||||
// - freePorts
|
||||
// - total
|
||||
mu sync.Mutex
|
||||
|
||||
// once is used to do the initialization on the first call to retrieve free
|
||||
// ports
|
||||
once sync.Once
|
||||
|
||||
// condNotEmpty is a condition variable to wait for freePorts to be not
|
||||
// empty. Linked to 'mu'
|
||||
condNotEmpty *sync.Cond
|
||||
|
||||
// freePorts is a FIFO of all currently free ports. Take from the front,
|
||||
// and return to the back.
|
||||
freePorts *list.List
|
||||
|
||||
// pendingPorts is a FIFO of recently freed ports that have not yet passed
|
||||
// the not-in-use check.
|
||||
pendingPorts *list.List
|
||||
|
||||
// total is the total number of available ports in the block for use.
|
||||
total int
|
||||
)
|
||||
|
||||
// initialize is used to initialize freeport.
|
||||
func initialize() {
|
||||
var err error
|
||||
|
||||
blockSize = 1500
|
||||
limit, err := systemLimit()
|
||||
if err != nil {
|
||||
panic("freeport: error getting system limit: " + err.Error())
|
||||
}
|
||||
if limit > 0 && limit < blockSize {
|
||||
logf("INFO", "blockSize %d too big for system limit %d. Adjusting...", blockSize, limit)
|
||||
blockSize = limit - 3
|
||||
}
|
||||
|
||||
effectiveMaxBlocks, err = adjustMaxBlocks()
|
||||
if err != nil {
|
||||
panic("freeport: ephemeral port range detection failed: " + err.Error())
|
||||
}
|
||||
if effectiveMaxBlocks < 0 {
|
||||
panic("freeport: no blocks of ports available outside of ephemeral range")
|
||||
}
|
||||
if lowPort+effectiveMaxBlocks*blockSize > 65535 {
|
||||
panic("freeport: block size too big or too many blocks requested")
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
firstPort, lockLn = alloc()
|
||||
|
||||
condNotEmpty = sync.NewCond(&mu)
|
||||
freePorts = list.New()
|
||||
pendingPorts = list.New()
|
||||
|
||||
// fill with all available free ports
|
||||
for port := firstPort + 1; port < firstPort+blockSize; port++ {
|
||||
if used := isPortInUse(port); !used {
|
||||
freePorts.PushBack(port)
|
||||
}
|
||||
}
|
||||
total = freePorts.Len()
|
||||
|
||||
go checkFreedPorts()
|
||||
}
|
||||
|
||||
// reset will reverse the setup from initialize() and then redo it (for tests)
|
||||
func reset() {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
logf("INFO", "resetting the freeport package state")
|
||||
|
||||
effectiveMaxBlocks = 0
|
||||
firstPort = 0
|
||||
if lockLn != nil {
|
||||
lockLn.Close()
|
||||
lockLn = nil
|
||||
}
|
||||
|
||||
once = sync.Once{}
|
||||
|
||||
freePorts = nil
|
||||
pendingPorts = nil
|
||||
total = 0
|
||||
}
|
||||
|
||||
func checkFreedPorts() {
|
||||
ticker := time.NewTicker(250 * time.Millisecond)
|
||||
for {
|
||||
<-ticker.C
|
||||
checkFreedPortsOnce()
|
||||
}
|
||||
}
|
||||
|
||||
func checkFreedPortsOnce() {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
pending := pendingPorts.Len()
|
||||
remove := make([]*list.Element, 0, pending)
|
||||
for elem := pendingPorts.Front(); elem != nil; elem = elem.Next() {
|
||||
port := elem.Value.(int)
|
||||
if used := isPortInUse(port); !used {
|
||||
freePorts.PushBack(port)
|
||||
remove = append(remove, elem)
|
||||
}
|
||||
}
|
||||
|
||||
retained := pending - len(remove)
|
||||
|
||||
if retained > 0 {
|
||||
logf("WARN", "%d out of %d pending ports are still in use; something probably didn't wait around for the port to be closed!", retained, pending)
|
||||
}
|
||||
|
||||
if len(remove) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, elem := range remove {
|
||||
pendingPorts.Remove(elem)
|
||||
}
|
||||
|
||||
condNotEmpty.Broadcast()
|
||||
}
|
||||
|
||||
// adjustMaxBlocks avoids having the allocation ranges overlap the ephemeral
|
||||
// port range.
|
||||
func adjustMaxBlocks() (int, error) {
|
||||
ephemeralPortMin, ephemeralPortMax, err := getEphemeralPortRange()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if ephemeralPortMin <= 0 || ephemeralPortMax <= 0 {
|
||||
logf("INFO", "ephemeral port range detection not configured for GOOS=%q", runtime.GOOS)
|
||||
return maxBlocks, nil
|
||||
}
|
||||
|
||||
logf("INFO", "detected ephemeral port range of [%d, %d]", ephemeralPortMin, ephemeralPortMax)
|
||||
for block := 0; block < maxBlocks; block++ {
|
||||
min := lowPort + block*blockSize
|
||||
max := min + blockSize
|
||||
overlap := intervalOverlap(min, max-1, ephemeralPortMin, ephemeralPortMax)
|
||||
if overlap {
|
||||
logf("INFO", "reducing max blocks from %d to %d to avoid the ephemeral port range", maxBlocks, block)
|
||||
return block, nil
|
||||
}
|
||||
}
|
||||
return maxBlocks, nil
|
||||
}
|
||||
|
||||
// alloc reserves a port block for exclusive use for the lifetime of the
|
||||
// application. lockLn serves as a system-wide mutex for the port block and is
|
||||
// implemented as a TCP listener which is bound to the firstPort and which will
|
||||
// be automatically released when the application terminates.
|
||||
func alloc() (int, net.Listener) {
|
||||
for i := 0; i < attempts; i++ {
|
||||
block := int(rand.Int31n(int32(effectiveMaxBlocks)))
|
||||
firstPort := lowPort + block*blockSize
|
||||
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// logf("DEBUG", "allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1)
|
||||
return firstPort, ln
|
||||
}
|
||||
panic("freeport: cannot allocate port block")
|
||||
}
|
||||
|
||||
// MustTake is the same as Take except it panics on error.
|
||||
func MustTake(n int) (ports []int) {
|
||||
ports, err := Take(n)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
// Take returns a list of free ports from the allocated port block. It is safe
|
||||
// to call this method concurrently. Ports have been tested to be available on
|
||||
// 127.0.0.1 TCP but there is no guarantee that they will remain free in the
|
||||
// future.
|
||||
func Take(n int) (ports []int, err error) {
|
||||
if n <= 0 {
|
||||
return nil, fmt.Errorf("freeport: cannot take %d ports", n)
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
// Reserve a port block
|
||||
once.Do(initialize)
|
||||
|
||||
if n > total {
|
||||
return nil, fmt.Errorf("freeport: block size too small")
|
||||
}
|
||||
|
||||
for len(ports) < n {
|
||||
for freePorts.Len() == 0 {
|
||||
if total == 0 {
|
||||
return nil, fmt.Errorf("freeport: impossible to satisfy request; there are no actual free ports in the block anymore")
|
||||
}
|
||||
condNotEmpty.Wait()
|
||||
}
|
||||
|
||||
elem := freePorts.Front()
|
||||
freePorts.Remove(elem)
|
||||
port := elem.Value.(int)
|
||||
|
||||
if used := isPortInUse(port); used {
|
||||
// Something outside of the test suite has stolen this port, possibly
|
||||
// due to assignment to an ephemeral port, remove it completely.
|
||||
logf("WARN", "leaked port %d due to theft; removing from circulation", port)
|
||||
total--
|
||||
continue
|
||||
}
|
||||
|
||||
ports = append(ports, port)
|
||||
}
|
||||
|
||||
// logf("DEBUG", "free ports: %v", ports)
|
||||
return ports, nil
|
||||
}
|
||||
|
||||
// peekFree returns the next port that will be returned by Take to aid in testing.
|
||||
func peekFree() int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return freePorts.Front().Value.(int)
|
||||
}
|
||||
|
||||
// peekAllFree returns all free ports that could be returned by Take to aid in testing.
|
||||
func peekAllFree() []int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
var out []int
|
||||
for elem := freePorts.Front(); elem != nil; elem = elem.Next() {
|
||||
port := elem.Value.(int)
|
||||
out = append(out, port)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// stats returns diagnostic data to aid in testing
|
||||
func stats() (numTotal, numPending, numFree int) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return total, pendingPorts.Len(), freePorts.Len()
|
||||
}
|
||||
|
||||
// Return returns a block of ports back to the general pool. These ports should
|
||||
// have been returned from a call to Take().
|
||||
func Return(ports []int) {
|
||||
if len(ports) == 0 {
|
||||
return // convenience short circuit for test ergonomics
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
for _, port := range ports {
|
||||
if port > firstPort && port < firstPort+blockSize {
|
||||
pendingPorts.PushBack(port)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isPortInUse(port int) bool {
|
||||
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
ln.Close()
|
||||
return false
|
||||
}
|
||||
|
||||
func tcpAddr(ip string, port int) *net.TCPAddr {
|
||||
return &net.TCPAddr{IP: net.ParseIP(ip), Port: port}
|
||||
}
|
||||
|
||||
// intervalOverlap returns true if the doubly-inclusive integer intervals
|
||||
// represented by [min1, max1] and [min2, max2] overlap.
|
||||
func intervalOverlap(min1, max1, min2, max2 int) bool {
|
||||
if min1 > max1 {
|
||||
logf("WARN", "interval1 is not ordered [%d, %d]", min1, max1)
|
||||
return false
|
||||
}
|
||||
if min2 > max2 {
|
||||
logf("WARN", "interval2 is not ordered [%d, %d]", min2, max2)
|
||||
return false
|
||||
}
|
||||
return min1 <= max2 && min2 <= max1
|
||||
}
|
||||
|
||||
func logf(severity string, format string, a ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, "["+severity+"] freeport: "+format+"\n", a...)
|
||||
}
|
||||
|
||||
// Deprecated: Please use Take/Return calls instead.
|
||||
func Get(n int) (ports []int) { return MustTake(n) }
|
||||
|
||||
// Deprecated: Please use Take/Return calls instead.
|
||||
func GetT(t testing.T, n int) (ports []int) { return MustTake(n) }
|
||||
|
||||
// Deprecated: Please use Take/Return calls instead.
|
||||
func Free(n int) (ports []int, err error) { return MustTake(n), nil }
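A hedged example of the Take/Return flow that the deprecated helpers wrap; the listener only exists to show one of the ports being consumed.

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/hashicorp/consul/sdk/freeport"
)

func main() {
	// Reserve two ports from the shared block and give them back when done so
	// they can be recycled once they are observed to be free again.
	ports, err := freeport.Take(2)
	if err != nil {
		log.Fatal(err)
	}
	defer freeport.Return(ports)

	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ports[0]))
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	fmt.Println("listening on", ln.Addr(), "spare port:", ports[1])
}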
|
|
@ -0,0 +1,11 @@
|
|||
// +build !windows
|
||||
|
||||
package freeport
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
func systemLimit() (int, error) {
|
||||
var limit unix.Rlimit
|
||||
err := unix.Getrlimit(unix.RLIMIT_NOFILE, &limit)
|
||||
return int(limit.Cur), err
|
||||
}
|
vendor/github.com/hashicorp/consul/sdk/freeport/systemlimit_windows.go (generated, vendored, new file, 7 lines)
|
@ -0,0 +1,7 @@
|
|||
// +build windows
|
||||
|
||||
package freeport
|
||||
|
||||
func systemLimit() (int, error) {
|
||||
return 0, nil
|
||||
}
|
|
@ -22,12 +22,12 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
func TestFoo_bar(t *testing.T) {
|
||||
// Create a test Consul server
|
||||
srv1, err := testutil.NewTestServer()
|
||||
srv1, err := testutil.NewTestServerT(t)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -35,7 +35,7 @@ func TestFoo_bar(t *testing.T) {
|
|||
|
||||
// Create a secondary server, passing in configuration
|
||||
// to avoid bootstrapping as we are forming a cluster.
|
||||
srv2, err := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
|
||||
srv2, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
|
||||
c.Bootstrap = false
|
||||
})
|
||||
if err != nil {
|
|
@ -39,6 +39,9 @@ func TempDir(t *testing.T, name string) string {
|
|||
name = strings.Replace(name, "/", "_", -1)
|
||||
d, err := ioutil.TempDir(tmpdir, name)
|
||||
if err != nil {
|
||||
if t == nil {
|
||||
panic(err)
|
||||
}
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
return d
|
||||
|
@ -53,8 +56,12 @@ func TempFile(t *testing.T, name string) *os.File {
|
|||
if t != nil && t.Name() != "" {
|
||||
name = t.Name() + "-" + name
|
||||
}
|
||||
name = strings.Replace(name, "/", "_", -1)
|
||||
f, err := ioutil.TempFile(tmpdir, name)
|
||||
if err != nil {
|
||||
if t == nil {
|
||||
panic(err)
|
||||
}
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
return f
|
|
@ -56,6 +56,11 @@ func (r *R) Error(args ...interface{}) {
|
|||
r.fail = true
|
||||
}
|
||||
|
||||
func (r *R) Errorf(format string, args ...interface{}) {
|
||||
r.log(fmt.Sprintf(format, args...))
|
||||
r.fail = true
|
||||
}
|
||||
|
||||
func (r *R) Check(err error) {
|
||||
if err != nil {
|
||||
r.log(err.Error())
|
||||
|
@ -105,7 +110,7 @@ func dedup(a []string) string {
|
|||
delete(m, s)
|
||||
}
|
||||
}
|
||||
return string(b.Bytes())
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func run(r Retryer, t Failer, f func(r *R)) {
|
|
@ -22,13 +22,14 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/lib/freeport"
|
||||
"github.com/hashicorp/consul/testutil/retry"
|
||||
"github.com/hashicorp/consul/sdk/freeport"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -42,12 +43,14 @@ type TestPerformanceConfig struct {
|
|||
// TestPortConfig configures the various ports used for services
|
||||
// provided by the Consul server.
|
||||
type TestPortConfig struct {
|
||||
DNS int `json:"dns,omitempty"`
|
||||
HTTP int `json:"http,omitempty"`
|
||||
HTTPS int `json:"https,omitempty"`
|
||||
SerfLan int `json:"serf_lan,omitempty"`
|
||||
SerfWan int `json:"serf_wan,omitempty"`
|
||||
Server int `json:"server,omitempty"`
|
||||
DNS int `json:"dns,omitempty"`
|
||||
HTTP int `json:"http,omitempty"`
|
||||
HTTPS int `json:"https,omitempty"`
|
||||
SerfLan int `json:"serf_lan,omitempty"`
|
||||
SerfWan int `json:"serf_wan,omitempty"`
|
||||
Server int `json:"server,omitempty"`
|
||||
ProxyMinPort int `json:"proxy_min_port,omitempty"`
|
||||
ProxyMaxPort int `json:"proxy_max_port,omitempty"`
|
||||
}
|
||||
|
||||
// TestAddressConfig contains the bind addresses for various
|
||||
|
@ -83,8 +86,10 @@ type TestServerConfig struct {
|
|||
RaftProtocol int `json:"raft_protocol,omitempty"`
|
||||
ACLMasterToken string `json:"acl_master_token,omitempty"`
|
||||
ACLDatacenter string `json:"acl_datacenter,omitempty"`
|
||||
PrimaryDatacenter string `json:"primary_datacenter,omitempty"`
|
||||
ACLDefaultPolicy string `json:"acl_default_policy,omitempty"`
|
||||
ACLEnforceVersion8 bool `json:"acl_enforce_version_8"`
|
||||
ACL TestACLs `json:"acl,omitempty"`
|
||||
Encrypt string `json:"encrypt,omitempty"`
|
||||
CAFile string `json:"ca_file,omitempty"`
|
||||
CertFile string `json:"cert_file,omitempty"`
|
||||
|
@ -94,9 +99,32 @@ type TestServerConfig struct {
|
|||
VerifyIncomingHTTPS bool `json:"verify_incoming_https,omitempty"`
|
||||
VerifyOutgoing bool `json:"verify_outgoing,omitempty"`
|
||||
EnableScriptChecks bool `json:"enable_script_checks,omitempty"`
|
||||
Connect map[string]interface{} `json:"connect,omitempty"`
|
||||
EnableDebug bool `json:"enable_debug,omitempty"`
|
||||
ReadyTimeout time.Duration `json:"-"`
|
||||
Stdout, Stderr io.Writer `json:"-"`
|
||||
Args []string `json:"-"`
|
||||
ReturnPorts func() `json:"-"`
|
||||
}
|
||||
|
||||
type TestACLs struct {
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
TokenReplication bool `json:"enable_token_replication,omitempty"`
|
||||
PolicyTTL string `json:"policy_ttl,omitempty"`
|
||||
TokenTTL string `json:"token_ttl,omitempty"`
|
||||
DownPolicy string `json:"down_policy,omitempty"`
|
||||
DefaultPolicy string `json:"default_policy,omitempty"`
|
||||
EnableKeyListPolicy bool `json:"enable_key_list_policy,omitempty"`
|
||||
Tokens TestTokens `json:"tokens,omitempty"`
|
||||
DisabledTTL string `json:"disabled_ttl,omitempty"`
|
||||
}
|
||||
|
||||
type TestTokens struct {
|
||||
Master string `json:"master,omitempty"`
|
||||
Replication string `json:"replication,omitempty"`
|
||||
AgentMaster string `json:"agent_master,omitempty"`
|
||||
Default string `json:"default,omitempty"`
|
||||
Agent string `json:"agent,omitempty"`
|
||||
}
|
||||
|
||||
// ServerConfigCallback is a function interface which can be
|
||||
|
@ -111,7 +139,8 @@ func defaultServerConfig() *TestServerConfig {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
ports := freeport.Get(6)
|
||||
ports := freeport.MustTake(6)
|
||||
|
||||
return &TestServerConfig{
|
||||
NodeName: "node-" + nodeID,
|
||||
NodeID: nodeID,
|
||||
|
@ -133,6 +162,16 @@ func defaultServerConfig() *TestServerConfig {
|
|||
Server: ports[5],
|
||||
},
|
||||
ReadyTimeout: 10 * time.Second,
|
||||
Connect: map[string]interface{}{
|
||||
"enabled": true,
|
||||
"ca_config": map[string]interface{}{
|
||||
// const TestClusterID causes import cycle so hard code it here.
|
||||
"cluster_id": "11111111-2222-3333-4444-555555555555",
|
||||
},
|
||||
},
|
||||
ReturnPorts: func() {
|
||||
freeport.Return(ports)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -173,12 +212,20 @@ type TestServer struct {
|
|||
tmpdir string
|
||||
}
|
||||
|
||||
// NewTestServer is an easy helper method to create a new Consul
|
||||
// test server with the most basic configuration.
|
||||
// Deprecated: Use NewTestServerT instead.
|
||||
func NewTestServer() (*TestServer, error) {
|
||||
return NewTestServerConfigT(nil, nil)
|
||||
}
|
||||
|
||||
// NewTestServerT is an easy helper method to create a new Consul
|
||||
// test server with the most basic configuration.
|
||||
func NewTestServerT(t *testing.T) (*TestServer, error) {
|
||||
if t == nil {
|
||||
return nil, errors.New("testutil: a non-nil *testing.T is required")
|
||||
}
|
||||
return NewTestServerConfigT(t, nil)
|
||||
}
|
||||
|
||||
func NewTestServerConfig(cb ServerConfigCallback) (*TestServer, error) {
|
||||
return NewTestServerConfigT(nil, cb)
|
||||
}
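For context, a minimal test exercising the NewTestServerConfigT helper added above; the package name and assertions are illustrative, and the consul binary must be on $PATH as the helper itself requires.

package myapp_test

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
)

func TestWithConsulServer(t *testing.T) {
	// Start a throwaway Consul agent; the callback can tweak any TestServerConfig field.
	srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.EnableDebug = true
	})
	if err != nil {
		t.Fatalf("failed to start consul test server: %v", err)
	}
	defer srv.Stop()

	// Block until leader election has completed before running assertions.
	srv.WaitForLeader(t)

	// ... run the code under test against the server's HTTP API ...
}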
|
||||
|
@ -187,20 +234,33 @@ func NewTestServerConfig(cb ServerConfigCallback) (*TestServer, error) {
|
|||
// callback function to modify the configuration. If there is an error
|
||||
// configuring or starting the server, the server will NOT be running when the
|
||||
// function returns (thus you do not need to stop it).
|
||||
func NewTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, error) {
|
||||
func NewTestServerConfigT(t testing.TB, cb ServerConfigCallback) (*TestServer, error) {
|
||||
return newTestServerConfigT(t, cb)
|
||||
}
|
||||
|
||||
// newTestServerConfigT is the internal helper for NewTestServerConfigT.
|
||||
func newTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, error) {
|
||||
func newTestServerConfigT(t testing.TB, cb ServerConfigCallback) (*TestServer, error) {
|
||||
path, err := exec.LookPath("consul")
|
||||
if err != nil || path == "" {
|
||||
return nil, fmt.Errorf("consul not found on $PATH - download and install " +
|
||||
"consul or skip this test")
|
||||
}
|
||||
|
||||
tmpdir := TempDir(t, "consul")
|
||||
prefix := "consul"
|
||||
if t != nil {
|
||||
// Use test name for tmpdir if available
|
||||
prefix = strings.Replace(t.Name(), "/", "_", -1)
|
||||
}
|
||||
tmpdir, err := ioutil.TempDir("", prefix)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create tempdir")
|
||||
}
|
||||
|
||||
cfg := defaultServerConfig()
|
||||
testWriter := TestWriter(t)
|
||||
cfg.Stdout = testWriter
|
||||
cfg.Stderr = testWriter
|
||||
|
||||
cfg.DataDir = filepath.Join(tmpdir, "data")
|
||||
if cb != nil {
|
||||
cb(cfg)
|
||||
|
@ -208,20 +268,27 @@ func newTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, e
|
|||
|
||||
b, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
cfg.ReturnPorts()
|
||||
os.RemoveAll(tmpdir)
|
||||
return nil, errors.Wrap(err, "failed marshaling json")
|
||||
}
|
||||
|
||||
if t != nil {
|
||||
// if you really want this output, be sure to pass a valid t
|
||||
t.Logf("CONFIG JSON: %s", string(b))
|
||||
}
|
||||
configFile := filepath.Join(tmpdir, "config.json")
|
||||
if err := ioutil.WriteFile(configFile, b, 0644); err != nil {
|
||||
defer os.RemoveAll(tmpdir)
|
||||
cfg.ReturnPorts()
|
||||
os.RemoveAll(tmpdir)
|
||||
return nil, errors.Wrap(err, "failed writing config content")
|
||||
}
|
||||
|
||||
stdout := io.Writer(os.Stdout)
|
||||
stdout := testWriter
|
||||
if cfg.Stdout != nil {
|
||||
stdout = cfg.Stdout
|
||||
}
|
||||
stderr := io.Writer(os.Stderr)
|
||||
stderr := testWriter
|
||||
if cfg.Stderr != nil {
|
||||
stderr = cfg.Stderr
|
||||
}
|
||||
|
@ -233,6 +300,8 @@ func newTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, e
|
|||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Start(); err != nil {
|
||||
cfg.ReturnPorts()
|
||||
os.RemoveAll(tmpdir)
|
||||
return nil, errors.Wrap(err, "failed starting command")
|
||||
}
|
||||
|
||||
|
@ -262,21 +331,18 @@ func newTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, e
|
|||
}
|
||||
|
||||
// Wait for the server to be ready
|
||||
if cfg.Bootstrap {
|
||||
err = server.waitForLeader()
|
||||
} else {
|
||||
err = server.waitForAPI()
|
||||
}
|
||||
if err != nil {
|
||||
defer server.Stop()
|
||||
return nil, errors.Wrap(err, "failed waiting for server to start")
|
||||
if err := server.waitForAPI(); err != nil {
|
||||
server.Stop()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
// Stop stops the test Consul server, and removes the Consul data
|
||||
// directory once we are done.
|
||||
func (s *TestServer) Stop() error {
|
||||
defer s.Config.ReturnPorts()
|
||||
defer os.RemoveAll(s.tmpdir)
|
||||
|
||||
// There was no process
|
||||
|
@ -285,8 +351,14 @@ func (s *TestServer) Stop() error {
|
|||
}
|
||||
|
||||
if s.cmd.Process != nil {
|
||||
if err := s.cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
return errors.Wrap(err, "failed to kill consul server")
|
||||
if runtime.GOOS == "windows" {
|
||||
if err := s.cmd.Process.Kill(); err != nil {
|
||||
return errors.Wrap(err, "failed to kill consul server")
|
||||
}
|
||||
} else { // interrupt is not supported in windows
|
||||
if err := s.cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
return errors.Wrap(err, "failed to kill consul server")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -295,51 +367,49 @@ func (s *TestServer) Stop() error {
return s.cmd.Wait()
}

type failer struct {
failed bool
}

func (f *failer) Log(args ...interface{}) { fmt.Println(args) }
func (f *failer) FailNow() { f.failed = true }

// waitForAPI waits for only the agent HTTP endpoint to start
// responding. This is an indication that the agent has started,
// but will likely return before a leader is elected.
func (s *TestServer) waitForAPI() error {
f := &failer{}
retry.Run(f, func(r *retry.R) {
var failed bool

// This retry replicates the logic of retry.Run to allow for nested retries.
// By returning an error we can wrap TestServer creation with retry.Run
// in makeClientWithConfig.
timer := retry.TwoSeconds()
deadline := time.Now().Add(timer.Timeout)
for !time.Now().After(deadline) {
time.Sleep(timer.Wait)

resp, err := s.HTTPClient.Get(s.url("/v1/agent/self"))
if err != nil {
r.Fatal(err)
failed = true
continue
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
r.Fatal("failed OK response", err)
resp.Body.Close()

if err = s.requireOK(resp); err != nil {
failed = true
continue
}
})
if f.failed {
return errors.New("failed waiting for API")
failed = false
}
if failed {
return fmt.Errorf("api unavailable")
}
return nil
}

// waitForLeader waits for the Consul server's HTTP API to become
// available, and then waits for a known leader and an index of
// 1 or more to be observed to confirm leader election is done.
// It then waits to ensure the anti-entropy sync has completed.
func (s *TestServer) waitForLeader() error {
f := &failer{}
timer := &retry.Timer{
Timeout: s.Config.ReadyTimeout,
Wait: 250 * time.Millisecond,
}
var index int64
retry.RunWith(timer, f, func(r *retry.R) {
// 2 or more to be observed to confirm leader election is done.
func (s *TestServer) WaitForLeader(t *testing.T) {
retry.Run(t, func(r *retry.R) {
// Query the API and check the status code.
url := s.url(fmt.Sprintf("/v1/catalog/nodes?index=%d", index))
url := s.url("/v1/catalog/nodes")
resp, err := s.HTTPClient.Get(url)
if err != nil {
r.Fatal("failed http get", err)
r.Fatalf("failed http get '%s': %v", url, err)
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {

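Editor's note: the new waitForAPI replaces the retry.Run callback with a manual deadline loop built from the retry timer's Timeout and Wait fields, so failures surface as an error rather than a test failure. A minimal sketch of the same polling pattern against an arbitrary HTTP endpoint (the URL and function name are illustrative):

package example // illustrative only

import (
	"fmt"
	"net/http"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// pollUntilOK retries a GET until it returns 200 or the retry timer's
// deadline passes, mirroring the waitForAPI loop above.
func pollUntilOK(url string) error {
	timer := retry.TwoSeconds() // same Timeout/Wait helper used above
	deadline := time.Now().Add(timer.Timeout)
	for !time.Now().After(deadline) {
		time.Sleep(timer.Wait)

		resp, err := http.Get(url)
		if err != nil {
			continue
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			return nil
		}
	}
	return fmt.Errorf("endpoint %s unavailable", url)
}
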
@@ -350,33 +420,104 @@ func (s *TestServer) waitForLeader() error {
if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" {
r.Fatalf("Consul leader status: %#v", leader)
}
index, err = strconv.ParseInt(resp.Header.Get("X-Consul-Index"), 10, 64)
index, err := strconv.ParseInt(resp.Header.Get("X-Consul-Index"), 10, 64)
if err != nil {
r.Fatal("bad consul index", err)
}
if index == 0 {
r.Fatal("consul index is 0")
if index < 2 {
r.Fatal("consul index should be at least 2")
}
})
}

// WaitForActiveCARoot waits until the server can return a Connect CA meaning
// connect has completed bootstrapping and is ready to use.
func (s *TestServer) WaitForActiveCARoot(t *testing.T) {
// don't need to fully decode the response
type rootsResponse struct {
ActiveRootID string
TrustDomain string
Roots []interface{}
}

retry.Run(t, func(r *retry.R) {
// Query the API and check the status code.
url := s.url("/v1/agent/connect/ca/roots")
resp, err := s.HTTPClient.Get(url)
if err != nil {
r.Fatalf("failed http get '%s': %v", url, err)
}
defer resp.Body.Close()
// Roots will return an error status until it's been bootstrapped. We could
// parse the body and sanity check but that causes either import cycles
// since this is used in both `api` and consul test or duplication. The 200
// is all we really need to wait for.
if err := s.requireOK(resp); err != nil {
r.Fatal("failed OK response", err)
}

var roots rootsResponse

dec := json.NewDecoder(resp.Body)
if err := dec.Decode(&roots); err != nil {
r.Fatal(err)
}

if roots.ActiveRootID == "" || len(roots.Roots) < 1 {
r.Fatalf("/v1/agent/connect/ca/roots returned 200 but without roots: %+v", roots)
}
})
}

// WaitForSerfCheck ensures we have a node with serfHealth check registered
// Behavior mirrors testrpc.WaitForTestAgent but avoids the dependency cycle in api pkg
func (s *TestServer) WaitForSerfCheck(t *testing.T) {
retry.Run(t, func(r *retry.R) {
// Query the API and check the status code.
url := s.url("/v1/catalog/nodes?index=0")
resp, err := s.HTTPClient.Get(url)
if err != nil {
r.Fatal("failed http get", err)
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
r.Fatal("failed OK response", err)
}

// Watch for the anti-entropy sync to finish.
var v []map[string]interface{}
var payload []map[string]interface{}
dec := json.NewDecoder(resp.Body)
if err := dec.Decode(&v); err != nil {
if err := dec.Decode(&payload); err != nil {
r.Fatal(err)
}
if len(v) < 1 {
if len(payload) < 1 {
r.Fatal("No nodes")
}
taggedAddresses, ok := v[0]["TaggedAddresses"].(map[string]interface{})
if !ok {
r.Fatal("Missing tagged addresses")

// Ensure the serfHealth check is registered
url = s.url(fmt.Sprintf("/v1/health/node/%s", payload[0]["Node"]))
resp, err = s.HTTPClient.Get(url)
if err != nil {
r.Fatal("failed http get", err)
}
if _, ok := taggedAddresses["lan"]; !ok {
r.Fatal("No lan tagged addresses")
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
r.Fatal("failed OK response", err)
}
dec = json.NewDecoder(resp.Body)
if err = dec.Decode(&payload); err != nil {
r.Fatal(err)
}

var found bool
for _, check := range payload {
if check["CheckID"].(string) == "serfHealth" {
found = true
break
}
}
if !found {
r.Fatal("missing serfHealth registration")
}
})
if f.failed {
return errors.New("failed waiting for leader")
}
return nil
}

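Editor's note: a typical test combines the exported waiters added above before touching the catalog or agent APIs. A minimal sketch, assuming the TestServer's HTTPAddr field and the consul api client:

package example_test // hypothetical caller, not part of this change

import (
	"testing"

	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil"
)

func TestAgainstConsul(t *testing.T) {
	srv, err := testutil.NewTestServerConfigT(t, nil)
	if err != nil {
		t.Fatalf("failed to start consul: %v", err)
	}
	defer srv.Stop()

	// Block until leadership and the serfHealth registration are visible,
	// using the exported helpers added in this diff.
	srv.WaitForLeader(t)
	srv.WaitForSerfCheck(t)

	cfg := consulapi.DefaultConfig()
	cfg.Address = srv.HTTPAddr
	client, err := consulapi.NewClient(cfg)
	if err != nil {
		t.Fatalf("failed to build consul client: %v", err)
	}
	if _, err := client.Agent().Self(); err != nil {
		t.Fatalf("agent self query failed: %v", err)
	}
}
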
@@ -0,0 +1,89 @@
package testutil

import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"testing"

"github.com/hashicorp/go-hclog"
)

var sendTestLogsToStdout bool

func init() {
sendTestLogsToStdout = os.Getenv("NOLOGBUFFER") == "1"
}

// Deprecated: use Logger(t)
func TestLogger(t testing.TB) *log.Logger {
return log.New(&testWriter{t}, t.Name()+": ", log.LstdFlags)
}

func NewDiscardLogger() hclog.Logger {
return hclog.New(&hclog.LoggerOptions{
Level: 0,
Output: ioutil.Discard,
})
}

func Logger(t testing.TB) hclog.InterceptLogger {
return LoggerWithOutput(t, &testWriter{t})
}

func LoggerWithOutput(t testing.TB, output io.Writer) hclog.InterceptLogger {
return hclog.NewInterceptLogger(&hclog.LoggerOptions{
Name: t.Name(),
Level: hclog.Trace,
Output: output,
})
}

// Deprecated: use LoggerWithName(t)
func TestLoggerWithName(t testing.TB, name string) *log.Logger {
return log.New(&testWriter{t}, "test["+name+"]: ", log.LstdFlags)
}

func LoggerWithName(t testing.TB, name string) hclog.InterceptLogger {
return hclog.NewInterceptLogger(&hclog.LoggerOptions{
Name: "test[" + name + "]",
Level: hclog.Debug,
Output: &testWriter{t},
})
}

func TestWriter(t testing.TB) io.Writer {
return &testWriter{t}
}

type testWriter struct {
t testing.TB
}

func (tw *testWriter) Write(p []byte) (n int, err error) {
if tw.t != nil {
tw.t.Helper()
}
if sendTestLogsToStdout || tw.t == nil {
fmt.Fprint(os.Stdout, strings.TrimSpace(string(p))+"\n")
} else {
defer func() {
if r := recover(); r != nil {
if sr, ok := r.(string); ok {
if strings.HasPrefix(sr, "Log in goroutine after ") {
// These sorts of panics are undesirable, but requires
// total control over goroutine lifetimes to correct.
fmt.Fprint(os.Stdout, "SUPPRESSED PANIC: "+sr+"\n")
return
}
}
panic(r)
}
}()
tw.t.Log(strings.TrimSpace(string(p)))
}
return len(p), nil
}

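Editor's note: the new testWriter buffers output through t.Log per test, or writes straight to stdout when NOLOGBUFFER=1 is set. A minimal usage sketch of the helpers this file exports (test name is illustrative):

package example_test // hypothetical caller, not part of this change

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
)

func TestLogging(t *testing.T) {
	// hclog logger whose output is routed through t.Log,
	// or to stdout when NOLOGBUFFER=1.
	logger := testutil.Logger(t)
	logger.Info("component starting", "name", "example")

	// Raw io.Writer for code that only accepts a writer.
	w := testutil.TestWriter(t)
	fmt.Fprintln(w, "a raw line routed through the test log")
}
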
@@ -1,61 +0,0 @@
package porter

import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
)

var (
// DefaultAddr is the default bind address of a Porter server. This acts
// as the fallback address if the Porter server is not specified.
DefaultAddr = "127.0.0.1:7965"
)

const (
// porterErrPrefix is the string returned when displaying a porter error
porterErrPrefix = `Are you running porter?
Install with 'go install github.com/hashicorp/consul/test/porter/cmd/porter'
Then run 'porter go test ...'`
)

// PorterExistErr is used to wrap an error that is likely from Porter not being
// run.
type PorterExistErr struct {
Wrapped error
}

func (p *PorterExistErr) Error() string {
return fmt.Sprintf("%s:\n%s", porterErrPrefix, p.Wrapped)
}

func RandomPorts(n int) ([]int, error) {
addr := os.Getenv("PORTER_ADDR")
if addr == "" {
b, err := ioutil.ReadFile("/tmp/porter.addr")
if err == nil {
addr = string(b)
}
}
if addr == "" {
addr = DefaultAddr
}
resp, err := http.Get(fmt.Sprintf("http://%s/%d", addr, n))
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, &PorterExistErr{Wrapped: err}
}
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var p []int
err = json.Unmarshal(data, &p)
return p, err
}

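Editor's note: the vendored test/porter helper is removed, while the vendor manifest below pulls in github.com/hashicorp/consul/sdk/freeport as the port-allocation replacement. A rough sketch of that flow, assuming the sdk's MustTake/Return helpers (their API is not shown in this diff):

package example_test // hypothetical caller; freeport API assumed, not shown in this diff

import (
	"testing"

	"github.com/hashicorp/consul/sdk/freeport"
)

func TestNeedsPorts(t *testing.T) {
	// Reserve three ports from the shared block and give them back when done.
	ports := freeport.MustTake(3)
	defer freeport.Return(ports)

	for _, p := range ports {
		t.Logf("reserved port %d", p)
	}
}
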
@@ -0,0 +1,51 @@
package version

import (
"fmt"
"strings"
)

var (
// The git commit that was compiled. These will be filled in by the
// compiler.
GitCommit string
GitDescribe string

// The main version number that is being run at the moment.
//
// Version must conform to the format expected by github.com/hashicorp/go-version
// for tests to work.
Version = "1.7.0"

// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
VersionPrerelease = "dev"
)

// GetHumanVersion composes the parts of the version in a way that's suitable
// for displaying to humans.
func GetHumanVersion() string {
version := Version
if GitDescribe != "" {
version = GitDescribe
}

release := VersionPrerelease
if GitDescribe == "" && release == "" {
release = "dev"
}

if release != "" {
if !strings.HasSuffix(version, "-"+release) {
// if we tagged a prerelease version then the release is in the version already
version += fmt.Sprintf("-%s", release)
}
if GitCommit != "" {
version += fmt.Sprintf(" (%s)", GitCommit)
}
}

// Strip off any single quotes added by the git information.
return strings.Replace(version, "'", "", -1)
}

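Editor's note: with the defaults above (Version "1.7.0", VersionPrerelease "dev"), GetHumanVersion appends the prerelease marker and, when set, the commit hash. A small sketch of the expected composition (the commit value is illustrative):

package version

import "fmt"

// Illustrative only: with GitCommit set and the defaults above,
// GetHumanVersion yields "1.7.0-dev (abc123)".
func ExampleGetHumanVersion() {
	GitCommit = "abc123"
	fmt.Println(GetHumanVersion())
	// Output: 1.7.0-dev (abc123)
}
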
@@ -206,14 +206,14 @@
{"path":"github.com/hashicorp/consul-template/version","checksumSHA1":"CqEejkuDiTgPVrLg0xrMmAWvNwY=","revision":"f04989c64e9bd4c49a7217ac4635732dd8e0bb26","revisionTime":"2019-11-08T20:12:44Z","version":"v0.22.1","versionExact":"v0.22.1"},
{"path":"github.com/hashicorp/consul-template/watch","checksumSHA1":"cBIJewG416sFREUenIUK9v3zrUk=","revision":"f04989c64e9bd4c49a7217ac4635732dd8e0bb26","revisionTime":"2019-11-08T20:12:44Z","version":"v0.22.1","versionExact":"v0.22.1"},
{"path":"github.com/hashicorp/consul/agent/consul/autopilot","checksumSHA1":"lu2pzUDqU0jwul48T6IkTZK9Gxc=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/api","checksumSHA1":"7JPBtnIgLkdcJ0ldXMTEnVjNEjA=","revision":"40cec98468b829e5cdaacb0629b3e23a028db688","revisionTime":"2019-05-22T20:19:12Z"},
{"path":"github.com/hashicorp/consul/command/flags","checksumSHA1":"soNN4xaHTbeXFgNkZ7cX0gbFXQk=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
{"path":"github.com/hashicorp/consul/lib","checksumSHA1":"Nrh9BhiivRyJiuPzttstmq9xl/w=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
{"path":"github.com/hashicorp/consul/lib/freeport","checksumSHA1":"E28E4zR1FN2v1Xiq4FUER7KVN9M=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
{"path":"github.com/hashicorp/consul/api","checksumSHA1":"N8Br+KaZ4HRAmTYIpAKOzuk+hHo=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/command/flags","checksumSHA1":"wC8+MhTwWmFDdSOHv1anqBo06GA=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/lib","checksumSHA1":"qdXwmZL03jqKB4Q2Fhb+2jFw1iE=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/logging","checksumSHA1":"xddCR1b2SU/XPi3UrLGz7Ns5HiQ=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/test/porter","checksumSHA1":"5XjgqE4UIfwXvkq5VssGNc7uPhQ=","revision":"ad9425ca6353b8afcfebd19130a8cf768f7eac30","revisionTime":"2017-10-21T00:05:25Z"},
{"path":"github.com/hashicorp/consul/testutil","checksumSHA1":"T4CeQD+QRsjf1BJ1n7FSojS5zDQ=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
{"path":"github.com/hashicorp/consul/testutil/retry","checksumSHA1":"SCb2b91UYiB/23+SNDBlU5OZfFA=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
{"path":"github.com/hashicorp/consul/sdk/freeport","checksumSHA1":"KlKviCDDfFY+6YNXEt+DnNKCfOs=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/sdk/testutil","checksumSHA1":"BdbalXv3cKiFTZpRCy4fgIzHBEU=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/sdk/testutil/retry","checksumSHA1":"d3PJhffDKar25kzK0iEqssVMkck=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/consul/version","checksumSHA1":"fRbV3oycM2uY4oOkDoSXtP4o6Tc=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
{"path":"github.com/hashicorp/errwrap","checksumSHA1":"cdOCt0Yb+hdErz8NAQqayxPmRsY=","revision":"7554cd9344cec97297fa6649b055a8c98c2a1e55"},
{"path":"github.com/hashicorp/go-checkpoint","checksumSHA1":"D267IUMW2rcb+vNe3QU+xhfSrgY=","revision":"1545e56e46dec3bba264e41fde2c1e2aa65b5dd4","revisionTime":"2017-10-09T17:35:28Z"},
{"path":"github.com/hashicorp/go-cleanhttp","checksumSHA1":"6ihdHMkDfFx/rJ1A36com2F6bQk=","revision":"a45970658e51fea2c41445ff0f7e07106d007617","revisionTime":"2017-02-11T00:33:01Z"},