Merge branch 'main' of github.com:hashicorp/consul into derekm/split-grpc-ports

Derek Menteer 2022-09-08 14:53:08 -05:00
commit 8efe862b76
104 changed files with 3369 additions and 1432 deletions

.changelog/14285.txt (new file, 3 additions)

@ -0,0 +1,3 @@
```release-note:feature
connect: Server address changes are streamed to peers
```

.changelog/14495.txt (new file, 3 additions)

@ -0,0 +1,3 @@
```release-note:feature
ui: Detect a TokenSecretID cookie and passthrough to localStorage
```

.changelog/14521.txt (new file, 3 additions)

@ -0,0 +1,3 @@
```release-note:improvement
ui: Reuse connections for requests to /v1/internal/ui/metrics-proxy/
```
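The agent-side change backing this note is visible in the listenHTTP hunk below, which wires http.DefaultTransport into the HTTP handlers so the metrics proxy can reuse pooled keep-alive connections. A minimal sketch of the general pattern, assuming a hypothetical metrics backend; this is not Consul's actual handler code:

```go
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
)

// Sharing one RoundTripper gives the proxy a pooled set of keep-alive
// connections; allocating a fresh Transport per request would redial the
// upstream every time.
var sharedTransport http.RoundTripper = http.DefaultTransport

func newMetricsProxy(target *url.URL) http.Handler {
	rp := httputil.NewSingleHostReverseProxy(target)
	rp.Transport = sharedTransport // reused across all proxied requests
	return rp
}

func main() {
	target, _ := url.Parse("http://127.0.0.1:9090") // hypothetical metrics backend
	http.ListenAndServe(":8080", newMetricsProxy(target))
}
```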

View File

@ -972,8 +972,9 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
}
srv := &HTTPHandlers{
agent: a,
denylist: NewDenylist(a.config.HTTPBlockEndpoints),
+proxyTransport: http.DefaultTransport,
}
a.configReloaders = append(a.configReloaders, srv.ReloadConfig)
a.httpHandlers = srv
@ -2140,6 +2141,21 @@ func (a *Agent) AddService(req AddServiceRequest) error {
// addServiceLocked adds a service entry to the service manager if enabled, or directly
// to the local state if it is not. This function assumes the state lock is already held.
func (a *Agent) addServiceLocked(req addServiceLockedRequest) error {
// Must auto-assign the port and default checks (if needed) here to avoid race collisions.
if req.Service.LocallyRegisteredAsSidecar {
if req.Service.Port < 1 {
port, err := a.sidecarPortFromServiceIDLocked(req.Service.CompoundServiceID())
if err != nil {
return err
}
req.Service.Port = port
}
// Setup default check if none given.
if len(req.chkTypes) < 1 {
req.chkTypes = sidecarDefaultChecks(req.Service.ID, req.Service.Address, req.Service.Proxy.LocalServiceAddress, req.Service.Port)
}
}
req.Service.EnterpriseMeta.Normalize()
if err := a.validateService(req.Service, req.chkTypes); err != nil {
@ -3404,7 +3420,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
}
// Grab and validate sidecar if there is one too
-sidecar, sidecarChecks, sidecarToken, err := a.sidecarServiceFromNodeService(ns, service.Token)
+sidecar, sidecarChecks, sidecarToken, err := sidecarServiceFromNodeService(ns, service.Token)
if err != nil {
return fmt.Errorf("Failed to validate sidecar for service %q: %v", service.Name, err)
}
@ -4304,7 +4320,10 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources {
sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth))
sources.Intentions = proxycfgglue.ServerIntentions(deps)
sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps)
sources.IntentionUpstreamsDestination = proxycfgglue.ServerIntentionUpstreamsDestination(deps)
sources.InternalServiceDump = proxycfgglue.ServerInternalServiceDump(deps, proxycfgglue.CacheInternalServiceDump(a.cache))
sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps)
sources.ResolvedServiceConfig = proxycfgglue.ServerResolvedServiceConfig(deps, proxycfgglue.CacheResolvedServiceConfig(a.cache))
sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache))
sources.TrustBundle = proxycfgglue.ServerTrustBundle(deps)
sources.TrustBundleList = proxycfgglue.ServerTrustBundleList(deps)
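The addServiceLocked hunk above moves sidecar port assignment and default-check creation into the state-locked path to avoid race collisions. A hedged sketch of the fill-in-defaults pattern it follows; ensureSidecarDefaults and allocPort are hypothetical stand-ins for the agent's sidecarPortFromServiceIDLocked and sidecarDefaultChecks helpers:

```go
// ensureSidecarDefaults mirrors the two fallbacks above: auto-assign a port
// when the registration did not request one, and add a default check when
// none was given. Helper names are illustrative, not Consul's.
func ensureSidecarDefaults(port int, checks []string, allocPort func() (int, error)) (int, []string, error) {
	if port < 1 {
		p, err := allocPort()
		if err != nil {
			return 0, nil, err
		}
		port = p
	}
	if len(checks) == 0 {
		checks = append(checks, "sidecar-default-check") // stand-in for sidecarDefaultChecks
	}
	return port, checks, nil
}
```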

View File

@ -1180,7 +1180,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
}
// See if we have a sidecar to register too
-sidecar, sidecarChecks, sidecarToken, err := s.agent.sidecarServiceFromNodeService(ns, token)
+sidecar, sidecarChecks, sidecarToken, err := sidecarServiceFromNodeService(ns, token)
if err != nil {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid SidecarService: %s", err)}
}

View File

@ -2786,7 +2786,7 @@ func TestAgent_DeregisterPersistedSidecarAfterRestart(t *testing.T) {
},
}
-connectSrv, _, _, err := a.sidecarServiceFromNodeService(srv, "")
+connectSrv, _, _, err := sidecarServiceFromNodeService(srv, "")
require.NoError(t, err)
// First persist the check
@ -2959,11 +2959,24 @@ func testAgent_loadServices_sidecar(t *testing.T, extraHCL string) {
if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
-requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
+sidecarSvc := requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
// Verify default checks have been added
wantChecks := sidecarDefaultChecks(sidecarSvc.ID, sidecarSvc.Address, sidecarSvc.Proxy.LocalServiceAddress, sidecarSvc.Port)
gotChecks := a.State.ChecksForService(sidecarSvc.CompoundServiceID(), true)
gotChkNames := make(map[string]types.CheckID)
for _, check := range gotChecks {
requireCheckExists(t, a, check.CheckID)
gotChkNames[check.Name] = check.CheckID
}
for _, check := range wantChecks {
chkName := check.Name
require.NotNil(t, gotChkNames[chkName])
}
// Sanity check: the rabbitmq service should NOT have sidecar info in state since
// it's done its job and should be registration syntax sugar only.
assert.Nil(t, svc.Connect.SidecarService)

View File

@ -0,0 +1,229 @@
package configentry
import (
"fmt"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/agent/structs"
)
func ComputeResolvedServiceConfig(
args *structs.ServiceConfigRequest,
upstreamIDs []structs.ServiceID,
legacyUpstreams bool,
entries *ResolvedServiceConfigSet,
logger hclog.Logger,
) (*structs.ServiceConfigResponse, error) {
var thisReply structs.ServiceConfigResponse
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
// TODO(freddy) Refactor this into smaller set of state store functions
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
// blocking query, this function will be rerun and these state store lookups will both be current.
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
var proxyConfGlobalProtocol string
proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
if proxyConf != nil {
// Apply the proxy defaults to the sidecar's proxy config
mapCopy, err := copystructure.Copy(proxyConf.Config)
if err != nil {
return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
}
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
thisReply.Mode = proxyConf.Mode
thisReply.TransparentProxy = proxyConf.TransparentProxy
thisReply.MeshGateway = proxyConf.MeshGateway
thisReply.Expose = proxyConf.Expose
// Extract the global protocol from proxyConf for upstream configs.
rawProtocol := proxyConf.Config["protocol"]
if rawProtocol != nil {
var ok bool
proxyConfGlobalProtocol, ok = rawProtocol.(string)
if !ok {
return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
}
}
}
serviceConf := entries.GetServiceDefaults(
structs.NewServiceID(args.Name, &args.EnterpriseMeta),
)
if serviceConf != nil {
if serviceConf.Expose.Checks {
thisReply.Expose.Checks = true
}
if len(serviceConf.Expose.Paths) >= 1 {
thisReply.Expose.Paths = serviceConf.Expose.Paths
}
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
}
if serviceConf.Protocol != "" {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = make(map[string]interface{})
}
thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
}
if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
}
if serviceConf.TransparentProxy.DialedDirectly {
thisReply.TransparentProxy.DialedDirectly = serviceConf.TransparentProxy.DialedDirectly
}
if serviceConf.Mode != structs.ProxyModeDefault {
thisReply.Mode = serviceConf.Mode
}
if serviceConf.Destination != nil {
thisReply.Destination = *serviceConf.Destination
}
if serviceConf.MaxInboundConnections > 0 {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = map[string]interface{}{}
}
thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
}
thisReply.Meta = serviceConf.Meta
}
// First collect all upstreams into a set of seen upstreams.
// Upstreams can come from:
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
// - Implicitly from centralized upstream config in service-defaults
seenUpstreams := map[structs.ServiceID]struct{}{}
var (
noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0
// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
// will never be transparent because the service config request does not use the resolved value.
tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
)
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
if noUpstreamArgs && !tproxy {
return &thisReply, nil
}
// First store all upstreams that were provided in the request
for _, sid := range upstreamIDs {
if _, ok := seenUpstreams[sid]; !ok {
seenUpstreams[sid] = struct{}{}
}
}
// Then store upstreams inferred from service-defaults and mapify the overrides.
var (
upstreamConfigs = make(map[structs.ServiceID]*structs.UpstreamConfig)
upstreamDefaults *structs.UpstreamConfig
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
usConfigs = make(map[structs.ServiceID]map[string]interface{})
)
if serviceConf != nil && serviceConf.UpstreamConfig != nil {
for i, override := range serviceConf.UpstreamConfig.Overrides {
if override.Name == "" {
logger.Warn(
"Skipping UpstreamConfig.Overrides entry without a required name field",
"entryIndex", i,
"kind", serviceConf.GetKind(),
"name", serviceConf.GetName(),
"namespace", serviceConf.GetEnterpriseMeta().NamespaceOrEmpty(),
)
continue // skip this impossible condition
}
seenUpstreams[override.ServiceID()] = struct{}{}
upstreamConfigs[override.ServiceID()] = override
}
if serviceConf.UpstreamConfig.Defaults != nil {
upstreamDefaults = serviceConf.UpstreamConfig.Defaults
// Store the upstream defaults under a wildcard key so that they can be applied to
// upstreams that are inferred from intentions and do not have explicit upstream configuration.
cfgMap := make(map[string]interface{})
upstreamDefaults.MergeInto(cfgMap)
wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WithWildcardNamespace())
usConfigs[wildcard] = cfgMap
}
}
for upstream := range seenUpstreams {
resolvedCfg := make(map[string]interface{})
// The protocol of an upstream is resolved in this order:
// 1. Default protocol from proxy-defaults (how all services should be addressed)
// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
// 3. Protocol defined for the upstream in the service-defaults.(upstream_config.defaults|upstream_config.overrides) of the downstream
// (how the downstream wants to address it)
protocol := proxyConfGlobalProtocol
upstreamSvcDefaults := entries.GetServiceDefaults(
structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
)
if upstreamSvcDefaults != nil {
if upstreamSvcDefaults.Protocol != "" {
protocol = upstreamSvcDefaults.Protocol
}
}
if protocol != "" {
resolvedCfg["protocol"] = protocol
}
// Merge centralized defaults for all upstreams before configuration for specific upstreams
if upstreamDefaults != nil {
upstreamDefaults.MergeInto(resolvedCfg)
}
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
// because it is specific to the proxy instance.
//
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_config
// 3. Value from local upstream definition. This last step is done in the client's service manager.
if !args.MeshGateway.IsZero() {
resolvedCfg["mesh_gateway"] = args.MeshGateway
}
if upstreamConfigs[upstream] != nil {
upstreamConfigs[upstream].MergeInto(resolvedCfg)
}
if len(resolvedCfg) > 0 {
usConfigs[upstream] = resolvedCfg
}
}
// don't allocate the slices just to not fill them
if len(usConfigs) == 0 {
return &thisReply, nil
}
if legacyUpstreams {
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
thisReply.UpstreamConfigs = make(map[string]map[string]interface{})
for us, conf := range usConfigs {
thisReply.UpstreamConfigs[us.ID] = conf
}
} else {
thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
for us, conf := range usConfigs {
thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
}
}
return &thisReply, nil
}
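To make the protocol resolution order concrete, a hedged usage sketch, written as if inside this configentry package: proxy-defaults supplies the global protocol and the upstream's own service-defaults overrides it for that upstream. The ProxyDefaults partition key and the hclog.NewNullLogger call are assumptions based on the test file that follows.

```go
func exampleProtocolResolution() (*structs.ServiceConfigResponse, error) {
	upstream := structs.NewServiceID("billing", nil) // hypothetical upstream

	entries := &ResolvedServiceConfigSet{
		ProxyDefaults: map[string]*structs.ProxyConfigEntry{
			// Global default: everything speaks tcp.
			"default": {Config: map[string]interface{}{"protocol": "tcp"}},
		},
		ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
			// The upstream's own service-defaults wins for that upstream.
			upstream: {Protocol: "http"},
		},
	}

	// The resolved config for "billing" carries protocol "http".
	return ComputeResolvedServiceConfig(
		&structs.ServiceConfigRequest{Name: "web"},
		[]structs.ServiceID{upstream},
		false,
		entries,
		hclog.NewNullLogger(),
	)
}
```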

View File

@ -0,0 +1,56 @@
package configentry
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
)
func Test_ComputeResolvedServiceConfig(t *testing.T) {
type args struct {
scReq *structs.ServiceConfigRequest
upstreamIDs []structs.ServiceID
entries *ResolvedServiceConfigSet
}
sid := structs.ServiceID{
ID: "sid",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
tests := []struct {
name string
args args
want *structs.ServiceConfigResponse
}{
{
name: "proxy with maxinboundsconnections",
args: args{
scReq: &structs.ServiceConfigRequest{
Name: "sid",
},
entries: &ResolvedServiceConfigSet{
ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
sid: {
MaxInboundConnections: 20,
},
},
},
},
want: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"max_inbound_connections": 20,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ComputeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
false, tt.args.entries, nil)
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}

View File

@ -24,6 +24,8 @@ var (
`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
spiffeIDAgentRegexp = regexp.MustCompile(
`^(?:/ap/([^/]+))?/agent/client/dc/([^/]+)/id/([^/]+)$`)
spiffeIDServerRegexp = regexp.MustCompile(
`^/agent/server/dc/([^/]+)$`)
spiffeIDMeshGatewayRegexp = regexp.MustCompile(
`^(?:/ap/([^/]+))?/gateway/mesh/dc/([^/]+)$`)
)
@ -144,6 +146,19 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
Partition: ap,
Datacenter: dc,
}, nil
} else if v := spiffeIDServerRegexp.FindStringSubmatch(path); v != nil {
dc := v[1]
if input.RawPath != "" {
var err error
if dc, err = url.PathUnescape(v[1]); err != nil {
return nil, fmt.Errorf("Invalid datacenter: %s", err)
}
}
return &SpiffeIDServer{
Host: input.Host,
Datacenter: dc,
}, nil
}
// Test for signing ID

View File

@ -0,0 +1,20 @@
package connect
import (
"fmt"
"net/url"
)
type SpiffeIDServer struct {
Host string
Datacenter string
}
// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDServer) URI() *url.URL {
var result url.URL
result.Scheme = "spiffe"
result.Host = id.Host
result.Path = fmt.Sprintf("/agent/server/dc/%s", id.Datacenter)
return &result
}
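A small round-trip sketch connecting this type to the new ParseCertURI branch, written as if inside the connect package; the trust domain value is borrowed from the tests:

```go
func exampleServerIDRoundTrip() error {
	id := &SpiffeIDServer{Host: "1234.consul", Datacenter: "dc1"}
	uri := id.URI() // spiffe://1234.consul/agent/server/dc/dc1

	parsed, err := ParseCertURI(uri)
	if err != nil {
		return err
	}
	if _, ok := parsed.(*SpiffeIDServer); !ok {
		return fmt.Errorf("expected a server ID, got %T", parsed)
	}
	return nil
}
```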

View File

@ -54,6 +54,12 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool {
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.Host) == id.Host()
case *SpiffeIDServer:
// The host component of the service must be an exact match for now under
// ascii case folding (since hostnames are case-insensitive). Later we might
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.Host) == id.Host()
default:
return false
}
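A hedged sketch of the new branch in action, again as if inside the connect package; the datacenter value is illustrative:

```go
func exampleServerCanSign(signing *SpiffeIDSigning) bool {
	// Hostnames compare case-insensitively under ASCII folding, per the
	// comment above, so an upper-cased host still matches.
	id := &SpiffeIDServer{
		Host:       strings.ToUpper(signing.Host()),
		Datacenter: "dc1", // illustrative
	}
	return signing.CanSign(id) // true: same trust domain
}
```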

View File

@ -78,7 +78,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
want: true,
},
{
name: "service - good midex case",
name: "service - good mixed case",
id: testSigning,
input: &SpiffeIDService{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", Datacenter: "dc1", Service: "WEB"},
want: true,
@ -102,7 +102,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
want: true,
},
{
name: "mesh gateway - good midex case",
name: "mesh gateway - good mixed case",
id: testSigning,
input: &SpiffeIDMeshGateway{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
want: true,
@ -119,6 +119,30 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
input: &SpiffeIDMeshGateway{Host: TestClusterID + ".fake", Datacenter: "dc1"},
want: false,
},
{
name: "server - good",
id: testSigning,
input: &SpiffeIDServer{Host: TestClusterID + ".consul", Datacenter: "dc1"},
want: true,
},
{
name: "server - good mixed case",
id: testSigning,
input: &SpiffeIDServer{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
want: true,
},
{
name: "server - different cluster",
id: testSigning,
input: &SpiffeIDServer{Host: "55555555-4444-3333-2222-111111111111.consul", Datacenter: "dc1"},
want: false,
},
{
name: "server - different TLD",
id: testSigning,
input: &SpiffeIDServer{Host: TestClusterID + ".fake", Datacenter: "dc1"},
want: false,
},
}
for _, tt := range tests {

View File

@ -19,109 +19,118 @@ func TestParseCertURIFromString(t *testing.T) {
ParseError string
}{
{
"invalid scheme",
"http://google.com/",
nil,
"scheme",
Name: "invalid scheme",
URI: "http://google.com/",
Struct: nil,
ParseError: "scheme",
},
{
"basic service ID",
"spiffe://1234.consul/ns/default/dc/dc01/svc/web",
&SpiffeIDService{
Name: "basic service ID",
URI: "spiffe://1234.consul/ns/default/dc/dc01/svc/web",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "default",
Datacenter: "dc01",
Service: "web",
},
"",
ParseError: "",
},
{
"basic service ID with partition",
"spiffe://1234.consul/ap/bizdev/ns/default/dc/dc01/svc/web",
&SpiffeIDService{
Name: "basic service ID with partition",
URI: "spiffe://1234.consul/ap/bizdev/ns/default/dc/dc01/svc/web",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: "bizdev",
Namespace: "default",
Datacenter: "dc01",
Service: "web",
},
"",
ParseError: "",
},
{
"basic agent ID",
"spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
&SpiffeIDAgent{
Name: "basic agent ID",
URI: "spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
Struct: &SpiffeIDAgent{
Host: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Datacenter: "dc1",
Agent: "uuid",
},
"",
ParseError: "",
},
{
"basic agent ID with partition",
"spiffe://1234.consul/ap/bizdev/agent/client/dc/dc1/id/uuid",
&SpiffeIDAgent{
Name: "basic agent ID with partition",
URI: "spiffe://1234.consul/ap/bizdev/agent/client/dc/dc1/id/uuid",
Struct: &SpiffeIDAgent{
Host: "1234.consul",
Partition: "bizdev",
Datacenter: "dc1",
Agent: "uuid",
},
"",
ParseError: "",
},
{
"mesh-gateway with no partition",
"spiffe://1234.consul/gateway/mesh/dc/dc1",
&SpiffeIDMeshGateway{
Name: "basic server",
URI: "spiffe://1234.consul/agent/server/dc/dc1",
Struct: &SpiffeIDServer{
Host: "1234.consul",
Datacenter: "dc1",
},
ParseError: "",
},
{
Name: "mesh-gateway with no partition",
URI: "spiffe://1234.consul/gateway/mesh/dc/dc1",
Struct: &SpiffeIDMeshGateway{
Host: "1234.consul",
Partition: "default",
Datacenter: "dc1",
},
"",
ParseError: "",
},
{
"mesh-gateway with partition",
"spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
&SpiffeIDMeshGateway{
Name: "mesh-gateway with partition",
URI: "spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
Struct: &SpiffeIDMeshGateway{
Host: "1234.consul",
Partition: "bizdev",
Datacenter: "dc1",
},
"",
ParseError: "",
},
{
"service with URL-encoded values",
"spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
&SpiffeIDService{
Name: "service with URL-encoded values",
URI: "spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "foo/bar",
Datacenter: "bar/baz",
Service: "baz/qux",
},
"",
ParseError: "",
},
{
"service with URL-encoded values with partition",
"spiffe://1234.consul/ap/biz%2Fdev/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
&SpiffeIDService{
Name: "service with URL-encoded values with partition",
URI: "spiffe://1234.consul/ap/biz%2Fdev/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: "biz/dev",
Namespace: "foo/bar",
Datacenter: "bar/baz",
Service: "baz/qux",
},
"",
ParseError: "",
},
{
"signing ID",
"spiffe://1234.consul",
&SpiffeIDSigning{
Name: "signing ID",
URI: "spiffe://1234.consul",
Struct: &SpiffeIDSigning{
ClusterID: "1234",
Domain: "consul",
},
"",
ParseError: "",
},
}
@ -139,3 +148,12 @@ func TestParseCertURIFromString(t *testing.T) {
})
}
}
func TestSpiffeIDServer_URI(t *testing.T) {
srv := &SpiffeIDServer{
Host: "1234.consul",
Datacenter: "dc1",
}
require.Equal(t, "spiffe://1234.consul/agent/server/dc/dc1", srv.URI().String())
}

View File

@ -4,6 +4,8 @@ package autopilotevents
import (
acl "github.com/hashicorp/consul/acl"
memdb "github.com/hashicorp/go-memdb"
mock "github.com/stretchr/testify/mock"
structs "github.com/hashicorp/consul/agent/structs"
@ -48,6 +50,36 @@ func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _
return r0, r1, r2
}
// NodeService provides a mock function with given fields: ws, nodeName, serviceID, entMeta, peerName
func (_m *MockStateStore) NodeService(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error) {
ret := _m.Called(ws, nodeName, serviceID, entMeta, peerName)
var r0 uint64
if rf, ok := ret.Get(0).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) uint64); ok {
r0 = rf(ws, nodeName, serviceID, entMeta, peerName)
} else {
r0 = ret.Get(0).(uint64)
}
var r1 *structs.NodeService
if rf, ok := ret.Get(1).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) *structs.NodeService); ok {
r1 = rf(ws, nodeName, serviceID, entMeta, peerName)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*structs.NodeService)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) error); ok {
r2 = rf(ws, nodeName, serviceID, entMeta, peerName)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// NewMockStateStore creates a new instance of MockStateStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockStateStore(t testing.TB) *MockStateStore {
mock := &MockStateStore{}

View File

@ -4,9 +4,11 @@ import (
"fmt"
"net"
"sort"
"strconv"
"sync"
"time"
"github.com/hashicorp/go-memdb"
autopilot "github.com/hashicorp/raft-autopilot"
"github.com/hashicorp/consul/acl"
@ -26,6 +28,7 @@ type ReadyServerInfo struct {
ID string
Address string
TaggedAddresses map[string]string
ExtGRPCPort int
Version string
}
@ -122,6 +125,7 @@ func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
//go:generate mockery --name StateStore --inpackage --filename mock_StateStore_test.go
type StateStore interface {
GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
NodeService(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error)
}
//go:generate mockery --name Publisher --inpackage --filename mock_Publisher_test.go
@ -226,6 +230,7 @@ func (r *ReadyServersEventPublisher) autopilotStateToReadyServers(state *autopil
Address: host,
Version: srv.Server.Version,
TaggedAddresses: r.getTaggedAddresses(srv),
ExtGRPCPort: r.getGRPCPort(srv),
})
}
}
@ -254,7 +259,7 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta
// code and reason about and having those addresses be updated within 30s is good enough.
_, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
if err != nil || node == nil {
-// no catalog information means we should return a nil addres map
+// no catalog information means we should return a nil address map
return nil
}
@ -276,6 +281,38 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta
return addrs
}
// getGRPCPort will get the external gRPC port for a Consul server.
// Returns 0 if there is none assigned or if an error is encountered.
func (r *ReadyServersEventPublisher) getGRPCPort(srv *autopilot.ServerState) int {
if r.GetStore == nil {
return 0
}
_, n, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
if err != nil || n == nil {
return 0
}
_, ns, err := r.GetStore().NodeService(
nil,
n.Node,
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
)
if err != nil || ns == nil || ns.Meta == nil {
return 0
}
if str, ok := ns.Meta["grpc_port"]; ok {
grpcPort, err := strconv.Atoi(str)
if err == nil {
return grpcPort
}
}
return 0
}
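For reference, a hedged sketch of the meta convention getGRPCPort reads: each server's consul service entry advertises its external gRPC port under the grpc_port meta key. The port value here is illustrative.

```go
func exampleGRPCPortFromMeta() int {
	ns := &structs.NodeService{
		ID:   structs.ConsulServiceID,
		Meta: map[string]string{"grpc_port": "8502"}, // illustrative value
	}
	// getGRPCPort recovers the port with strconv.Atoi; a missing or
	// malformed value degrades to 0 rather than surfacing an error.
	port, err := strconv.Atoi(ns.Meta["grpc_port"])
	if err != nil {
		return 0
	}
	return port
}
```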
// newReadyServersEvent will create a stream.Event with the provided ready server info.
func (r *ReadyServersEventPublisher) newReadyServersEvent(servers EventPayloadReadyServers) stream.Event {
now := time.Now()

View File

@ -4,6 +4,7 @@ import (
"testing"
time "time"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/raft"
autopilot "github.com/hashicorp/raft-autopilot"
mock "github.com/stretchr/testify/mock"
@ -164,9 +165,21 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
-).Once().Return(
+).Times(2).Return(
uint64(0),
-&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
+&structs.Node{Node: "node-1", TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
nil,
)
+store.On("NodeService",
+memdb.WatchSet(nil),
+"node-1",
+structs.ConsulServiceID,
+structs.NodeEnterpriseMetaInDefaultPartition(),
+structs.DefaultPeerKeyword,
+).Once().Return(
+uint64(0),
+nil,
+nil,
+)
@ -174,9 +187,21 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
-).Once().Return(
+).Times(2).Return(
uint64(0),
-&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
+&structs.Node{Node: "node-2", TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
nil,
)
+store.On("NodeService",
+memdb.WatchSet(nil),
+"node-2",
+structs.ConsulServiceID,
+structs.NodeEnterpriseMetaInDefaultPartition(),
+structs.DefaultPeerKeyword,
+).Once().Return(
+uint64(0),
+nil,
+nil,
+)
@ -184,9 +209,119 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
-).Once().Return(
+).Times(2).Return(
uint64(0),
-&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
+&structs.Node{Node: "node-3", TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
nil,
)
+store.On("NodeService",
+memdb.WatchSet(nil),
+"node-3",
+structs.ConsulServiceID,
+structs.NodeEnterpriseMetaInDefaultPartition(),
+structs.DefaultPeerKeyword,
+).Once().Return(
+uint64(0),
+nil,
+nil,
+)
r := NewReadyServersEventPublisher(Config{
GetStore: func() StateStore { return store },
})
actual := r.autopilotStateToReadyServers(exampleState)
require.ElementsMatch(t, expected, actual)
}
func TestAutopilotStateToReadyServersWithExtGRPCPort(t *testing.T) {
expected := EventPayloadReadyServers{
{
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
Address: "198.18.0.2",
ExtGRPCPort: 1234,
Version: "v1.12.0",
},
{
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
Address: "198.18.0.3",
ExtGRPCPort: 2345,
Version: "v1.12.0",
},
{
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
Address: "198.18.0.4",
ExtGRPCPort: 3456,
Version: "v1.12.0",
},
}
store := &MockStateStore{}
t.Cleanup(func() { store.AssertExpectations(t) })
store.On("GetNodeID",
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-1"},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-1",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.NodeService{Meta: map[string]string{"grpc_port": "1234"}},
nil,
)
store.On("GetNodeID",
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-2"},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-2",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.NodeService{Meta: map[string]string{"grpc_port": "2345"}},
nil,
)
store.On("GetNodeID",
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-3"},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-3",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.NodeService{Meta: map[string]string{"grpc_port": "3456"}},
nil,
)
@ -493,9 +628,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
-).Once().Return(
+).Times(2).Return(
uint64(0),
-&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
+&structs.Node{Node: "node-1", TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
nil,
)
+store.On("NodeService",
+memdb.WatchSet(nil),
+"node-1",
+structs.ConsulServiceID,
+structs.NodeEnterpriseMetaInDefaultPartition(),
+structs.DefaultPeerKeyword,
+).Once().Return(
+uint64(0),
+nil,
+nil,
+)
@ -503,9 +650,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
-).Once().Return(
+).Times(2).Return(
uint64(0),
-&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
+&structs.Node{Node: "node-2", TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
nil,
)
+store.On("NodeService",
+memdb.WatchSet(nil),
+"node-2",
+structs.ConsulServiceID,
+structs.NodeEnterpriseMetaInDefaultPartition(),
+structs.DefaultPeerKeyword,
+).Once().Return(
+uint64(0),
+nil,
+nil,
+)
@ -513,9 +672,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
-).Once().Return(
+).Times(2).Return(
uint64(0),
-&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
+&structs.Node{Node: "node-3", TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
nil,
)
+store.On("NodeService",
+memdb.WatchSet(nil),
+"node-3",
+structs.ConsulServiceID,
+structs.NodeEnterpriseMetaInDefaultPartition(),
+structs.DefaultPeerKeyword,
+).Once().Return(
+uint64(0),
+nil,
+nil,
+)

View File

@ -12,6 +12,7 @@ import (
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
)
@ -510,7 +511,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
ranOnce = true
}
-thisReply, err := computeResolvedServiceConfig(
+thisReply, err := configentry.ComputeResolvedServiceConfig(
args,
upstreamIDs,
legacyUpstreams,

View File

@ -1451,6 +1451,19 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
case *connect.SpiffeIDServer:
// The authorizer passed in should have unlimited permissions.
if err := allow.ACLWriteAllowed(&authzContext); err != nil {
return nil, err
}
// Verify that the DC in the URI matches us.
// The request must have been issued by a local server.
dc := c.serverConf.Datacenter
if v.Datacenter != dc {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
default:
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID")
}
@ -1472,9 +1485,11 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
if err != nil {
return nil, err
}
signingID := connect.SpiffeIDSigningForCluster(config.ClusterID)
serviceID, isService := spiffeID.(*connect.SpiffeIDService)
agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent)
serverID, isServer := spiffeID.(*connect.SpiffeIDServer)
mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway)
var entMeta acl.EnterpriseMeta
@ -1493,6 +1508,12 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
}
entMeta.Merge(mgwID.GetEnterpriseMeta())
case isServer:
if !signingID.CanSign(spiffeID) {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
"we are %s", serverID.Host, signingID.Host())
}
entMeta.Normalize()
case isAgent:
// isAgent - if we support more ID types then this would need to be an else if
// here we are just automatically fixing the trust domain. For auto-encrypt and
@ -1519,7 +1540,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
entMeta.Merge(agentID.GetEnterpriseMeta())
default:
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, or mesh gateway ID")
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, server, or mesh gateway ID")
}
commonCfg, err := config.GetCommonConfig()
@ -1608,6 +1629,8 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
case isAgent:
reply.Agent = agentID.Agent
reply.AgentURI = cert.URIs[0].String()
case isServer:
reply.ServerURI = cert.URIs[0].String()
default:
return nil, errors.New("not possible")
}

View File

@ -1042,3 +1042,43 @@ func setupPrimaryCA(t *testing.T, client *vaultapi.Client, path string, rootPEM
require.NoError(t, err, "failed to set signed intermediate")
return lib.EnsureTrailingNewline(buf.String())
}
func TestCAManager_Sign_SpiffeIDServer(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
_, s1 := testServerWithConfig(t)
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
codec := rpcClient(t, s1)
roots := structs.IndexedCARoots{}
retry.Run(t, func(r *retry.R) {
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(r, err)
require.Len(r, roots.Roots, 1)
})
pk, _, err := connect.GeneratePrivateKey()
require.NoError(t, err)
// Request a leaf certificate for a server.
spiffeID := &connect.SpiffeIDServer{
Host: roots.TrustDomain,
Datacenter: "dc1",
}
csr, err := connect.CreateCSR(spiffeID, pk, nil, nil)
require.NoError(t, err)
req := structs.CASignRequest{CSR: csr}
cert := structs.IssuedCert{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
// Verify the chain of trust.
verifyLeafCert(t, roots.Roots[0], cert.CertPEM)
// Verify the Server's URI.
require.Equal(t, fmt.Sprintf("spiffe://%s/agent/server/dc/dc1", roots.TrustDomain), cert.ServerURI)
}

View File

@ -3,13 +3,14 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/imdario/mergo"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
)
// mergeNodeServiceWithCentralConfig merges a service instance (NodeService) with the
@ -66,7 +67,7 @@ func mergeNodeServiceWithCentralConfig(
ns.ID, err)
}
-defaults, err := computeResolvedServiceConfig(
+defaults, err := configentry.ComputeResolvedServiceConfig(
configReq,
upstreams,
false,
@ -87,225 +88,6 @@ func mergeNodeServiceWithCentralConfig(
return cfgIndex, mergedns, nil
}
func computeResolvedServiceConfig(
args *structs.ServiceConfigRequest,
upstreamIDs []structs.ServiceID,
legacyUpstreams bool,
entries *configentry.ResolvedServiceConfigSet,
logger hclog.Logger,
) (*structs.ServiceConfigResponse, error) {
var thisReply structs.ServiceConfigResponse
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
// TODO(freddy) Refactor this into smaller set of state store functions
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
// blocking query, this function will be rerun and these state store lookups will both be current.
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
var proxyConfGlobalProtocol string
proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
if proxyConf != nil {
// Apply the proxy defaults to the sidecar's proxy config
mapCopy, err := copystructure.Copy(proxyConf.Config)
if err != nil {
return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
}
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
thisReply.Mode = proxyConf.Mode
thisReply.TransparentProxy = proxyConf.TransparentProxy
thisReply.MeshGateway = proxyConf.MeshGateway
thisReply.Expose = proxyConf.Expose
// Extract the global protocol from proxyConf for upstream configs.
rawProtocol := proxyConf.Config["protocol"]
if rawProtocol != nil {
var ok bool
proxyConfGlobalProtocol, ok = rawProtocol.(string)
if !ok {
return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
}
}
}
serviceConf := entries.GetServiceDefaults(
structs.NewServiceID(args.Name, &args.EnterpriseMeta),
)
if serviceConf != nil {
if serviceConf.Expose.Checks {
thisReply.Expose.Checks = true
}
if len(serviceConf.Expose.Paths) >= 1 {
thisReply.Expose.Paths = serviceConf.Expose.Paths
}
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
}
if serviceConf.Protocol != "" {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = make(map[string]interface{})
}
thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
}
if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
}
if serviceConf.TransparentProxy.DialedDirectly {
thisReply.TransparentProxy.DialedDirectly = serviceConf.TransparentProxy.DialedDirectly
}
if serviceConf.Mode != structs.ProxyModeDefault {
thisReply.Mode = serviceConf.Mode
}
if serviceConf.Destination != nil {
thisReply.Destination = *serviceConf.Destination
}
if serviceConf.MaxInboundConnections > 0 {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = map[string]interface{}{}
}
thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
}
thisReply.Meta = serviceConf.Meta
}
// First collect all upstreams into a set of seen upstreams.
// Upstreams can come from:
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
// - Implicitly from centralized upstream config in service-defaults
seenUpstreams := map[structs.ServiceID]struct{}{}
var (
noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0
// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
// will never be transparent because the service config request does not use the resolved value.
tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
)
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
if noUpstreamArgs && !tproxy {
return &thisReply, nil
}
// First store all upstreams that were provided in the request
for _, sid := range upstreamIDs {
if _, ok := seenUpstreams[sid]; !ok {
seenUpstreams[sid] = struct{}{}
}
}
// Then store upstreams inferred from service-defaults and mapify the overrides.
var (
upstreamConfigs = make(map[structs.ServiceID]*structs.UpstreamConfig)
upstreamDefaults *structs.UpstreamConfig
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
usConfigs = make(map[structs.ServiceID]map[string]interface{})
)
if serviceConf != nil && serviceConf.UpstreamConfig != nil {
for i, override := range serviceConf.UpstreamConfig.Overrides {
if override.Name == "" {
logger.Warn(
"Skipping UpstreamConfig.Overrides entry without a required name field",
"entryIndex", i,
"kind", serviceConf.GetKind(),
"name", serviceConf.GetName(),
"namespace", serviceConf.GetEnterpriseMeta().NamespaceOrEmpty(),
)
continue // skip this impossible condition
}
seenUpstreams[override.ServiceID()] = struct{}{}
upstreamConfigs[override.ServiceID()] = override
}
if serviceConf.UpstreamConfig.Defaults != nil {
upstreamDefaults = serviceConf.UpstreamConfig.Defaults
// Store the upstream defaults under a wildcard key so that they can be applied to
// upstreams that are inferred from intentions and do not have explicit upstream configuration.
cfgMap := make(map[string]interface{})
upstreamDefaults.MergeInto(cfgMap)
wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WithWildcardNamespace())
usConfigs[wildcard] = cfgMap
}
}
for upstream := range seenUpstreams {
resolvedCfg := make(map[string]interface{})
// The protocol of an upstream is resolved in this order:
// 1. Default protocol from proxy-defaults (how all services should be addressed)
// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
// 3. Protocol defined for the upstream in the service-defaults.(upstream_config.defaults|upstream_config.overrides) of the downstream
// (how the downstream wants to address it)
protocol := proxyConfGlobalProtocol
upstreamSvcDefaults := entries.GetServiceDefaults(
structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
)
if upstreamSvcDefaults != nil {
if upstreamSvcDefaults.Protocol != "" {
protocol = upstreamSvcDefaults.Protocol
}
}
if protocol != "" {
resolvedCfg["protocol"] = protocol
}
// Merge centralized defaults for all upstreams before configuration for specific upstreams
if upstreamDefaults != nil {
upstreamDefaults.MergeInto(resolvedCfg)
}
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
// because it is specific to the proxy instance.
//
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_config
// 3. Value from local upstream definition. This last step is done in the client's service manager.
if !args.MeshGateway.IsZero() {
resolvedCfg["mesh_gateway"] = args.MeshGateway
}
if upstreamConfigs[upstream] != nil {
upstreamConfigs[upstream].MergeInto(resolvedCfg)
}
if len(resolvedCfg) > 0 {
usConfigs[upstream] = resolvedCfg
}
}
// don't allocate the slices just to not fill them
if len(usConfigs) == 0 {
return &thisReply, nil
}
if legacyUpstreams {
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
thisReply.UpstreamConfigs = make(map[string]map[string]interface{})
for us, conf := range usConfigs {
thisReply.UpstreamConfigs[us.ID] = conf
}
} else {
thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
for us, conf := range usConfigs {
thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
}
}
return &thisReply, nil
}
// MergeServiceConfig merges the service into defaults to produce the final effective
// config for the specified service.
func MergeServiceConfig(defaults *structs.ServiceConfigResponse, service *structs.NodeService) (*structs.NodeService, error) {

View File

@ -3,60 +3,13 @@ package consul
import (
"testing"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/structs"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
)
func Test_ComputeResolvedServiceConfig(t *testing.T) {
type args struct {
scReq *structs.ServiceConfigRequest
upstreamIDs []structs.ServiceID
entries *configentry.ResolvedServiceConfigSet
}
sid := structs.ServiceID{
ID: "sid",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
tests := []struct {
name string
args args
want *structs.ServiceConfigResponse
}{
{
name: "proxy with maxinboundsconnections",
args: args{
scReq: &structs.ServiceConfigRequest{
Name: "sid",
},
entries: &configentry.ResolvedServiceConfigSet{
ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
sid: {
MaxInboundConnections: 20,
},
},
},
},
want: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"max_inbound_connections": 20,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := computeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
false, tt.args.entries, nil)
require.NoError(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func Test_MergeServiceConfig_TransparentProxy(t *testing.T) {
type args struct {
defaults *structs.ServiceConfigResponse

View File

@ -5,12 +5,13 @@ import (
"fmt"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/golang/protobuf/proto"
"google.golang.org/genproto/googleapis/rpc/code"
newproto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
@ -35,7 +36,6 @@ import (
// Each cache.UpdateEvent will contain all instances for a service name.
// If there are no instances in the event, we consider that to be a de-registration.
func makeServiceResponse(
-logger hclog.Logger,
mst *MutableStatus,
update cache.UpdateEvent,
) (*pbpeerstream.ReplicationMessage_Response, error) {
@ -87,7 +87,6 @@ func makeServiceResponse(
}
func makeCARootsResponse(
-logger hclog.Logger,
update cache.UpdateEvent,
) (*pbpeerstream.ReplicationMessage_Response, error) {
any, _, err := marshalToProtoAny[*pbpeering.PeeringTrustBundle](update.Result)
@ -105,6 +104,24 @@ func makeCARootsResponse(
}, nil
}
func makeServerAddrsResponse(
update cache.UpdateEvent,
) (*pbpeerstream.ReplicationMessage_Response, error) {
any, _, err := marshalToProtoAny[*pbpeering.PeeringServerAddresses](update.Result)
if err != nil {
return nil, fmt.Errorf("failed to marshal: %w", err)
}
return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLPeeringServerAddresses,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: "server-addrs",
Operation: pbpeerstream.Operation_OPERATION_UPSERT,
Resource: any,
}, nil
}
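A hedged sketch of the event this function consumes: the subscription manager delivers server address changes as a cache.UpdateEvent whose Result holds a *pbpeering.PeeringServerAddresses. The correlation constant matches the subServerAddrs case in the stream handler below; the addresses are illustrative.

```go
func exampleServerAddrsResponse() (*pbpeerstream.ReplicationMessage_Response, error) {
	update := cache.UpdateEvent{
		CorrelationID: subServerAddrs,
		Result: &pbpeering.PeeringServerAddresses{
			Addresses: []string{"10.0.0.1:8502", "10.0.0.2:8502"}, // illustrative
		},
	}
	// Produces an UPSERT response with ResourceURL
	// pbpeerstream.TypeURLPeeringServerAddresses.
	return makeServerAddrsResponse(update)
}
```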
// marshalToProtoAny takes any input and returns:
// the protobuf.Any type, the asserted T type, and any errors
// during marshalling or type assertion.
@ -127,7 +144,6 @@ func (s *Server) processResponse(
partition string,
mutableStatus *MutableStatus,
resp *pbpeerstream.ReplicationMessage_Response,
-logger hclog.Logger,
) (*pbpeerstream.ReplicationMessage, error) {
if !pbpeerstream.KnownTypeURL(resp.ResourceURL) {
err := fmt.Errorf("received response for unknown resource type %q", resp.ResourceURL)
@ -151,7 +167,7 @@ func (s *Server) processResponse(
), err
}
-if err := s.handleUpsert(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, resp.Resource, logger); err != nil {
+if err := s.handleUpsert(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, resp.Resource); err != nil {
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
@ -163,7 +179,7 @@ func (s *Server) processResponse(
return makeACKReply(resp.ResourceURL, resp.Nonce), nil
case pbpeerstream.Operation_OPERATION_DELETE:
-if err := s.handleDelete(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, logger); err != nil {
+if err := s.handleDelete(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID); err != nil {
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
@ -196,7 +212,6 @@ func (s *Server) handleUpsert(
resourceURL string,
resourceID string,
resource *anypb.Any,
-logger hclog.Logger,
) error {
if resource.TypeUrl != resourceURL {
return fmt.Errorf("mismatched resourceURL %q and Any typeUrl %q", resourceURL, resource.TypeUrl)
@ -229,15 +244,23 @@ func (s *Server) handleUpsert(
return s.handleUpsertRoots(peerName, partition, roots)
case pbpeerstream.TypeURLPeeringServerAddresses:
addrs := &pbpeering.PeeringServerAddresses{}
if err := resource.UnmarshalTo(addrs); err != nil {
return fmt.Errorf("failed to unmarshal resource: %w", err)
}
return s.handleUpsertServerAddrs(peerName, partition, addrs)
default:
return fmt.Errorf("unexpected resourceURL: %s", resourceURL)
}
}
// handleUpdateService handles both deletion and upsert events for a service.
-// On an UPSERT event:
-// - All nodes, services, checks in the input pbNodes are re-applied through Raft.
-// - Any nodes, services, or checks in the catalog that were not in the input pbNodes get deleted.
+//
+// On an UPSERT event:
+//   - All nodes, services, checks in the input pbNodes are re-applied through Raft.
+//   - Any nodes, services, or checks in the catalog that were not in the input pbNodes get deleted.
+//
// On a DELETE event:
// - A reconciliation against nil or empty input pbNodes leads to deleting all stored catalog resources
@ -449,13 +472,39 @@ func (s *Server) handleUpsertRoots(
return s.Backend.PeeringTrustBundleWrite(req)
}
func (s *Server) handleUpsertServerAddrs(
peerName string,
partition string,
addrs *pbpeering.PeeringServerAddresses,
) error {
q := state.Query{
Value: peerName,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(partition),
}
_, existing, err := s.GetStore().PeeringRead(nil, q)
if err != nil {
return fmt.Errorf("failed to read peering: %w", err)
}
if existing == nil || !existing.IsActive() {
return fmt.Errorf("peering does not exist or has been marked for deletion")
}
// Clone to avoid mutating the existing data
p := proto.Clone(existing).(*pbpeering.Peering)
p.PeerServerAddresses = addrs.GetAddresses()
req := &pbpeering.PeeringWriteRequest{
Peering: p,
}
return s.Backend.PeeringWrite(req)
}
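One design note on handleUpsertServerAddrs: PeeringRead hands back a pointer into shared store state, so the handler clones before mutating. A minimal sketch of that clone-then-write discipline in isolation:

```go
// updateServerAddrs never mutates the object returned by the state store;
// it copies the peering, updates the copy, and persists the copy via Raft.
func updateServerAddrs(existing *pbpeering.Peering, addrs []string) *pbpeering.PeeringWriteRequest {
	p := proto.Clone(existing).(*pbpeering.Peering)
	p.PeerServerAddresses = addrs
	return &pbpeering.PeeringWriteRequest{Peering: p}
}
```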
func (s *Server) handleDelete(
peerName string,
partition string,
mutableStatus *MutableStatus,
resourceURL string,
resourceID string,
-logger hclog.Logger,
) error {
switch resourceURL {
case pbpeerstream.TypeURLExportedService:

View File

@ -105,6 +105,7 @@ type Backend interface {
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
CatalogRegister(req *structs.RegisterRequest) error
CatalogDeregister(req *structs.DeregisterRequest) error
PeeringWrite(req *pbpeering.PeeringWriteRequest) error
}
// StateStore provides a read-only interface for querying Peering data.

View File

@ -161,8 +161,22 @@ func (s *Server) StreamResources(stream pbpeerstream.PeerStreamService_StreamRes
if p == nil {
return grpcstatus.Error(codes.InvalidArgument, "initial subscription for unknown PeerID: "+req.PeerID)
}
-// TODO(peering): If the peering is marked as deleted, send a Terminated message and return
+if !p.IsActive() {
+// If peering is terminated, then our peer sent the termination message.
+// For other non-active states, send the termination message.
+if p.State != pbpeering.PeeringState_TERMINATED {
+term := &pbpeerstream.ReplicationMessage{
+Payload: &pbpeerstream.ReplicationMessage_Terminated_{
+Terminated: &pbpeerstream.ReplicationMessage_Terminated{},
+},
+}
+logTraceSend(logger, term)
+// we don't care if send fails; stream will be killed by termination message or grpc error
+_ = stream.Send(term)
+}
+return grpcstatus.Error(codes.Aborted, "peering is marked as deleted: "+req.PeerID)
+}
secrets, err := s.GetStore().PeeringSecretsRead(nil, req.PeerID)
if err != nil {
@ -347,6 +361,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
for _, resourceURL := range []string{
pbpeerstream.TypeURLExportedService,
pbpeerstream.TypeURLPeeringTrustBundle,
pbpeerstream.TypeURLPeeringServerAddresses,
} {
sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
ResourceURL: resourceURL,
@ -544,14 +559,11 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// At this point we have a valid ResourceURL and we are subscribed to it.
switch {
case req.ResponseNonce == "" && req.Error != nil:
return grpcstatus.Error(codes.InvalidArgument, "initial subscription request for a resource type must not contain an error")
case req.ResponseNonce != "" && req.Error == nil: // ACK
case req.Error == nil: // ACK
// TODO(peering): handle ACK fully
status.TrackAck()
case req.ResponseNonce != "" && req.Error != nil: // NACK
case req.Error != nil: // NACK
// TODO(peering): handle NACK fully
logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message)
status.TrackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message))
@ -567,7 +579,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
if resp := msg.GetResponse(); resp != nil {
// TODO(peering): Ensure there's a nonce
-reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp, logger)
+reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp)
if err != nil {
logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID)
status.TrackRecvError(err.Error())
@ -613,7 +625,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
var resp *pbpeerstream.ReplicationMessage_Response
switch {
case strings.HasPrefix(update.CorrelationID, subExportedService):
-resp, err = makeServiceResponse(logger, status, update)
+resp, err = makeServiceResponse(status, update)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to create service response", "error", err)
@ -624,13 +636,20 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// TODO(Peering): figure out how to sync this separately
case update.CorrelationID == subCARoot:
-resp, err = makeCARootsResponse(logger, update)
+resp, err = makeCARootsResponse(update)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to create ca roots response", "error", err)
continue
}
case update.CorrelationID == subServerAddrs:
resp, err = makeServerAddrsResponse(update)
if err != nil {
logger.Error("failed to create server address response", "error", err)
continue
}
default:
logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID)
continue
@ -641,6 +660,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
replResp := makeReplicationResponse(resp)
if err := streamSend(replResp); err != nil {
// note: govet warns of context leak but it is cleaned up in a defer
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
}
}

View File

@ -126,7 +126,7 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) {
// Receive a subscription from a peer. This message arrives while the
// server is a leader and should work.
-testutil.RunStep(t, "send subscription request to leader and consume its two requests", func(t *testing.T) {
+testutil.RunStep(t, "send subscription request to leader and consume its three requests", func(t *testing.T) {
sub := &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Open_{
Open: &pbpeerstream.ReplicationMessage_Open{
@ -145,6 +145,10 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) {
msg2, err := client.Recv()
require.NoError(t, err)
require.NotEmpty(t, msg2)
msg3, err := client.Recv()
require.NoError(t, err)
require.NotEmpty(t, msg3)
})
// The ACK will be a new request but at this point the server is not the
@ -1126,7 +1130,7 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
}
srv, store := newTestServer(t, func(c *Config) {
c.incomingHeartbeatTimeout = 5 * time.Millisecond
c.incomingHeartbeatTimeout = 50 * time.Millisecond
})
srv.Tracker.setClock(it.Now)
@ -1312,7 +1316,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
// makeClient sets up a *MockClient with the initial subscription
// message handshake.
func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID string) *MockClient {
func makeClient(t *testing.T, srv *testServer, peerID string) *MockClient {
t.Helper()
client := NewMockClient(context.Background())
@ -1324,7 +1328,7 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s
// Pass errors from server handler into ErrCh so that they can be seen by the client on Recv().
// This matches gRPC's behavior when an error is returned by a server.
if err := srv.StreamResources(client.ReplicationStream); err != nil {
errCh <- srv.StreamResources(client.ReplicationStream)
errCh <- err
}
}()
@ -1343,11 +1347,19 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s
require.NoError(t, err)
receivedSub2, err := client.Recv()
require.NoError(t, err)
receivedSub3, err := client.Recv()
require.NoError(t, err)
// Issue a services and roots subscription pair to server
// This is required when the client subscribes to server address replication messages.
// We assert that the handler is called at least once, but the data doesn't matter.
srv.mockSnapshotHandler.expect("", 0, 0, nil)
// Issue services, roots, and server address subscriptions to the server.
// Note that the server address message may not arrive as one of the initial messages.
for _, resourceURL := range []string{
pbpeerstream.TypeURLExportedService,
pbpeerstream.TypeURLPeeringTrustBundle,
pbpeerstream.TypeURLPeeringServerAddresses,
} {
init := &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Request_{
@ -1383,10 +1395,22 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s
},
},
},
{
Payload: &pbpeerstream.ReplicationMessage_Request_{
Request: &pbpeerstream.ReplicationMessage_Request{
ResourceURL: pbpeerstream.TypeURLPeeringServerAddresses,
// The PeerID field is only set for messages coming FROM
// the establishing side and will be empty from the
// other side.
PeerID: "",
},
},
},
}
got := []*pbpeerstream.ReplicationMessage{
receivedSub1,
receivedSub2,
receivedSub3,
}
prototest.AssertElementsMatch(t, expect, got)
@ -1443,6 +1467,10 @@ func (b *testStreamBackend) PeeringSecretsWrite(req *pbpeering.SecretsWriteReque
return b.store.PeeringSecretsWrite(1, req)
}
func (b *testStreamBackend) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
return b.store.PeeringWrite(1, req)
}
// CatalogRegister mocks catalog registrations through Raft by copying the logic of FSM.applyRegister.
func (b *testStreamBackend) CatalogRegister(req *structs.RegisterRequest) error {
return b.store.EnsureRegistration(1, req)
@ -1496,7 +1524,7 @@ func Test_makeServiceResponse_ExportedServicesCount(t *testing.T) {
},
},
}}
_, err := makeServiceResponse(srv.Logger, mst, update)
_, err := makeServiceResponse(mst, update)
require.NoError(t, err)
require.Equal(t, 1, mst.GetExportedServicesCount())
@ -1508,7 +1536,7 @@ func Test_makeServiceResponse_ExportedServicesCount(t *testing.T) {
Result: &pbservice.IndexedCheckServiceNodes{
Nodes: []*pbservice.CheckServiceNode{},
}}
_, err := makeServiceResponse(srv.Logger, mst, update)
_, err := makeServiceResponse(mst, update)
require.NoError(t, err)
require.Equal(t, 0, mst.GetExportedServicesCount())
@ -1539,7 +1567,7 @@ func Test_processResponse_Validation(t *testing.T) {
require.NoError(t, err)
run := func(t *testing.T, tc testCase) {
reply, err := srv.processResponse(peerName, "", mst, tc.in, srv.Logger)
reply, err := srv.processResponse(peerName, "", mst, tc.in)
if tc.wantErr {
require.Error(t, err)
} else {
@ -1865,7 +1893,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) {
}
// Simulate an update arriving for billing/api.
_, err = srv.processResponse(peerName, acl.DefaultPartitionName, mst, in, srv.Logger)
_, err = srv.processResponse(peerName, acl.DefaultPartitionName, mst, in)
require.NoError(t, err)
for svc, expect := range tc.expect {
@ -2731,11 +2759,16 @@ func requireEqualInstances(t *testing.T, expect, got structs.CheckServiceNodes)
type testServer struct {
*Server
// mockSnapshotHandler is solely used for handling autopilot events
// which don't come from the state store.
mockSnapshotHandler *mockSnapshotHandler
}
func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.Store) {
t.Helper()
publisher := stream.NewEventPublisher(10 * time.Second)
store := newStateStore(t, publisher)
store, handler := newStateStore(t, publisher)
ports := freeport.GetN(t, 1) // {grpc}
@ -2771,7 +2804,8 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
t.Cleanup(grpcServer.Stop)
return &testServer{
Server:              srv,
mockSnapshotHandler: handler,
}, store
}

View File

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
@ -12,6 +13,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/autopilotevents"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs"
@ -42,6 +44,7 @@ type subscriptionManager struct {
getStore func() StateStore
serviceSubReady <-chan struct{}
trustBundlesSubReady <-chan struct{}
serverAddrsSubReady <-chan struct{}
}
// TODO(peering): Maybe centralize so that there is a single manager per datacenter, rather than per peering.
@ -67,6 +70,7 @@ func newSubscriptionManager(
getStore: getStore,
serviceSubReady: remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLExportedService),
trustBundlesSubReady: remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLPeeringTrustBundle),
serverAddrsSubReady: remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLPeeringServerAddresses),
}
}
@ -83,6 +87,7 @@ func (m *subscriptionManager) subscribe(ctx context.Context, peerID, peerName, p
// Wrap our bare state store queries in goroutines that emit events.
go m.notifyExportedServicesForPeerID(ctx, state, peerID)
go m.notifyServerAddrUpdates(ctx, state.updateCh)
if m.config.ConnectEnabled {
go m.notifyMeshGatewaysForPartition(ctx, state, state.partition)
// If connect is enabled, watch for updates to CA roots.
@ -262,6 +267,17 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
state.sendPendingEvents(ctx, m.logger, pending)
case u.CorrelationID == subServerAddrs:
addrs, ok := u.Result.(*pbpeering.PeeringServerAddresses)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
pending := &pendingPayload{}
if err := pending.Add(serverAddrsPayloadID, u.CorrelationID, addrs); err != nil {
return err
}
state.sendPendingEvents(ctx, m.logger, pending)
default:
return fmt.Errorf("unknown correlation ID: %s", u.CorrelationID)
}
@ -333,6 +349,8 @@ func (m *subscriptionManager) notifyRootCAUpdatesForPartition(
}
}
const subCARoot = "roots"
// subscribeCARoots subscribes to state.EventTopicCARoots for changes to CA roots.
// Upon receiving an event it will send the payload in updateCh.
func (m *subscriptionManager) subscribeCARoots(
@ -414,8 +432,6 @@ func (m *subscriptionManager) subscribeCARoots(
}
}
const subCARoot = "roots"
func (m *subscriptionManager) syncNormalServices(
ctx context.Context,
state *subscriptionState,
@ -721,3 +737,112 @@ const syntheticProxyNameSuffix = "-sidecar-proxy"
func generateProxyNameForDiscoveryChain(sn structs.ServiceName) structs.ServiceName {
return structs.NewServiceName(sn.Name+syntheticProxyNameSuffix, &sn.EnterpriseMeta)
}
const subServerAddrs = "server-addrs"
func (m *subscriptionManager) notifyServerAddrUpdates(
ctx context.Context,
updateCh chan<- cache.UpdateEvent,
) {
// Wait until this is subscribed-to.
select {
case <-m.serverAddrsSubReady:
case <-ctx.Done():
return
}
var idx uint64
// TODO(peering): retry logic; fail past a threshold
for {
var err error
// Typically, this function will block inside `m.subscribeServerAddrs` and only return on error.
// Errors are logged and the watch is retried.
idx, err = m.subscribeServerAddrs(ctx, idx, updateCh)
if errors.Is(err, stream.ErrSubForceClosed) {
m.logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt resume")
} else if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
m.logger.Warn("failed to subscribe to server addresses, will attempt resume", "error", err.Error())
} else {
m.logger.Trace(err.Error())
}
select {
case <-ctx.Done():
return
default:
}
}
}
func (m *subscriptionManager) subscribeServerAddrs(
ctx context.Context,
idx uint64,
updateCh chan<- cache.UpdateEvent,
) (uint64, error) {
// following code adapted from serverdiscovery/watch_servers.go
sub, err := m.backend.Subscribe(&stream.SubscribeRequest{
Topic: autopilotevents.EventTopicReadyServers,
Subject: stream.SubjectNone,
Token: "", // using anonymous token for now
Index: idx,
})
if err != nil {
return 0, fmt.Errorf("failed to subscribe to ReadyServers events: %w", err)
}
defer sub.Unsubscribe()
for {
event, err := sub.Next(ctx)
switch {
case errors.Is(err, context.Canceled):
return 0, err
case err != nil:
return idx, err
}
// We do not send framing events (e.g. EndOfSnapshot, NewSnapshotToFollow)
// because we send a full list of ready servers on every event, rather than expecting
// clients to maintain a state-machine in the way they do for service health.
if event.IsFramingEvent() {
continue
}
// Note: this check isn't strictly necessary because the event publishing
// machinery will ensure the index increases monotonically, but it can be
// tricky to faithfully reproduce this in tests (e.g. the EventPublisher
// garbage collects topic buffers and snapshots aggressively when streams
// disconnect) so this avoids a bunch of confusing setup code.
if event.Index <= idx {
continue
}
idx = event.Index
payload, ok := event.Payload.(autopilotevents.EventPayloadReadyServers)
if !ok {
return 0, fmt.Errorf("unexpected event payload type: %T", payload)
}
var serverAddrs = make([]string, 0, len(payload))
for _, srv := range payload {
if srv.ExtGRPCPort == 0 {
continue
}
grpcAddr := srv.Address + ":" + strconv.Itoa(srv.ExtGRPCPort)
serverAddrs = append(serverAddrs, grpcAddr)
}
if len(serverAddrs) == 0 {
m.logger.Warn("did not find any server addresses with external gRPC ports to publish")
continue
}
updateCh <- cache.UpdateEvent{
CorrelationID: subServerAddrs,
Result: &pbpeering.PeeringServerAddresses{
Addresses: serverAddrs,
},
}
}
}

View File

@ -3,14 +3,17 @@ package peerstream
import (
"context"
"sort"
"sync"
"testing"
"time"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/autopilotevents"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs"
@ -627,20 +630,100 @@ func TestSubscriptionManager_CARoots(t *testing.T) {
})
}
func TestSubscriptionManager_ServerAddrs(t *testing.T) {
backend := newTestSubscriptionBackend(t)
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
// Create a peering
_, id := backend.ensurePeering(t, "my-peering")
partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty()
payload := autopilotevents.EventPayloadReadyServers{
autopilotevents.ReadyServerInfo{
ID: "9aeb73f6-e83e-43c1-bdc9-ca5e43efe3e4",
Address: "198.18.0.1",
Version: "1.13.1",
ExtGRPCPort: 8502,
},
}
// mock handler only gets called once during the initial subscription
backend.handler.expect("", 0, 1, payload)
// Only configure a tracker for server address events.
tracker := newResourceSubscriptionTracker()
tracker.Subscribe(pbpeerstream.TypeURLPeeringServerAddresses)
mgr := newSubscriptionManager(ctx,
testutil.Logger(t),
Config{
Datacenter: "dc1",
ConnectEnabled: true,
},
connect.TestTrustDomain,
backend,
func() StateStore {
return backend.store
},
tracker)
subCh := mgr.subscribe(ctx, id, "my-peering", partition)
testutil.RunStep(t, "initial events", func(t *testing.T) {
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, subServerAddrs, got.CorrelationID)
addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
require.True(t, ok)
require.Equal(t, []string{"198.18.0.1:8502"}, addrs.GetAddresses())
},
)
})
testutil.RunStep(t, "added server", func(t *testing.T) {
payload = append(payload, autopilotevents.ReadyServerInfo{
ID: "eec8721f-c42b-48da-a5a5-07565158015e",
Address: "198.18.0.2",
Version: "1.13.1",
ExtGRPCPort: 9502,
})
backend.Publish([]stream.Event{
{
Topic: autopilotevents.EventTopicReadyServers,
Index: 2,
Payload: payload,
},
})
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, subServerAddrs, got.CorrelationID)
addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
require.True(t, ok)
require.Equal(t, []string{"198.18.0.1:8502", "198.18.0.2:9502"}, addrs.GetAddresses())
},
)
})
}
type testSubscriptionBackend struct {
state.EventPublisher
store   *state.Store
handler *mockSnapshotHandler
lastIdx uint64
}
func newTestSubscriptionBackend(t *testing.T) *testSubscriptionBackend {
publisher := stream.NewEventPublisher(10 * time.Second)
store := newStateStore(t, publisher)
store, handler := newStateStore(t, publisher)
backend := &testSubscriptionBackend{
EventPublisher: publisher,
store: store,
handler: handler,
}
backend.ensureCAConfig(t, &structs.CAConfiguration{
@ -739,20 +822,35 @@ func setupTestPeering(t *testing.T, store *state.Store, name string, index uint6
return p.ID
}
func newStateStore(t *testing.T, publisher *stream.EventPublisher) *state.Store {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
func newStateStore(t *testing.T, publisher *stream.EventPublisher) (*state.Store, *mockSnapshotHandler) {
gc, err := state.NewTombstoneGC(time.Second, time.Millisecond)
require.NoError(t, err)
handler := newMockSnapshotHandler(t)
store := state.NewStateStoreWithEventPublisher(gc, publisher)
require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealth, store.ServiceHealthSnapshot, false))
require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealthConnect, store.ServiceHealthSnapshot, false))
require.NoError(t, publisher.RegisterHandler(state.EventTopicCARoots, store.CARootsSnapshot, false))
go publisher.Run(ctx)
require.NoError(t, publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, handler.handle, false))
return store
// WaitGroup used to make sure that the publisher returns
// before handler's t.Cleanup is called (otherwise an event
// might fire during an assertion and cause a data race).
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(func() {
cancel()
wg.Wait()
})
wg.Add(1)
go func() {
publisher.Run(ctx)
wg.Done()
}()
return store, handler
}
func expectEvents(
@ -870,3 +968,39 @@ func pbCheck(node, svcID, svcName, status string, entMeta *pbcommon.EnterpriseMe
EnterpriseMeta: entMeta,
}
}
// mockSnapshotHandler is copied from server_discovery/server_test.go
type mockSnapshotHandler struct {
mock.Mock
}
func newMockSnapshotHandler(t *testing.T) *mockSnapshotHandler {
handler := &mockSnapshotHandler{}
t.Cleanup(func() {
handler.AssertExpectations(t)
})
return handler
}
func (m *mockSnapshotHandler) handle(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
ret := m.Called(req, buf)
return ret.Get(0).(uint64), ret.Error(1)
}
func (m *mockSnapshotHandler) expect(token string, requestIndex uint64, eventIndex uint64, payload autopilotevents.EventPayloadReadyServers) {
m.On("handle", stream.SubscribeRequest{
Topic: autopilotevents.EventTopicReadyServers,
Subject: stream.SubjectNone,
Token: token,
Index: requestIndex,
}, mock.Anything).Run(func(args mock.Arguments) {
buf := args.Get(1).(stream.SnapshotAppender)
buf.Append([]stream.Event{
{
Topic: autopilotevents.EventTopicReadyServers,
Index: eventIndex,
Payload: payload,
},
})
}).Return(eventIndex, nil)
}

View File

@ -93,6 +93,9 @@ func (s *subscriptionState) cleanupEventVersions(logger hclog.Logger) {
case id == caRootsPayloadID:
keep = true
case id == serverAddrsPayloadID:
keep = true
case strings.HasPrefix(id, servicePayloadIDPrefix):
name := strings.TrimPrefix(id, servicePayloadIDPrefix)
sn := structs.ServiceNameFromString(name)
@ -129,6 +132,7 @@ type pendingEvent struct {
}
const (
serverAddrsPayloadID = "server-addrs"
caRootsPayloadID = "roots"
meshGatewayPayloadID = "mesh-gateway"
servicePayloadIDPrefix = "service:"

View File

@ -81,6 +81,10 @@ type HTTPHandlers struct {
configReloaders []ConfigReloader
h http.Handler
metricsProxyCfg atomic.Value
// proxyTransport is used by UIMetricsProxy to keep
// a managed pool of connections.
proxyTransport http.RoundTripper
}
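
As a rough illustration (the actual UIMetricsProxy handler is not shown in this diff), sharing a single http.RoundTripper lets Go's http.Transport pool keep-alive connections across proxied requests instead of paying a TCP/TLS handshake each time. A minimal sketch with a hypothetical helper, newMetricsProxy:

```go
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
)

// newMetricsProxy is a hypothetical helper: it wires a shared RoundTripper
// into a reverse proxy so idle connections to the metrics backend are reused.
func newMetricsProxy(target *url.URL, rt http.RoundTripper) *httputil.ReverseProxy {
	p := httputil.NewSingleHostReverseProxy(target)
	p.Transport = rt // e.g. http.DefaultTransport, shared for connection reuse
	return p
}

func main() {
	target, _ := url.Parse("http://127.0.0.1:9090")
	proxy := newMetricsProxy(target, http.DefaultTransport)
	_ = proxy // would be mounted under /v1/internal/ui/metrics-proxy/
}
```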
// endpoint is a Consul-specific HTTP handler that takes the usual arguments in

View File

@ -4,11 +4,9 @@ import (
"context"
"fmt"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/consul/stream"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/submatview"
@ -17,15 +15,16 @@ import (
"github.com/hashicorp/consul/proto/pbsubscribe"
)
// ServerDataSourceDeps contains the dependencies needed for sourcing data from
// server-local sources (e.g. materialized views).
type ServerDataSourceDeps struct {
Datacenter string
ViewStore *submatview.Store
EventPublisher *stream.EventPublisher
Logger hclog.Logger
ACLResolver submatview.ACLResolver
GetStore func() Store
}
// CacheConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing
// data from the agent cache.
func CacheConfigEntry(c *cache.Cache) proxycfg.ConfigEntry {
return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryName}
}
// CacheConfigEntryList satisfies the proxycfg.ConfigEntryList interface by
// sourcing data from the agent cache.
func CacheConfigEntryList(c *cache.Cache) proxycfg.ConfigEntryList {
return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryListName}
}
// ServerConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing

View File

@ -3,20 +3,35 @@ package proxycfgglue
import (
"context"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/consul/watch"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/submatview"
)
// ServerDataSourceDeps contains the dependencies needed for sourcing data from
// server-local sources (e.g. materialized views).
type ServerDataSourceDeps struct {
Datacenter string
ViewStore *submatview.Store
EventPublisher *stream.EventPublisher
Logger hclog.Logger
ACLResolver submatview.ACLResolver
GetStore func() Store
}
// Store is the state store interface required for server-local data sources.
type Store interface {
watch.StateStore
@ -25,7 +40,9 @@ type Store interface {
FederationStateList(ws memdb.WatchSet) (uint64, []*structs.FederationState, error)
GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error)
IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error)
ReadResolvedServiceConfigEntries(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, upstreamIDs []structs.ServiceID, proxyMode structs.ProxyMode) (uint64, *configentry.ResolvedServiceConfigSet, error)
ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error)
ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error)
PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error)
PeeringTrustBundleList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
@ -34,24 +51,18 @@ type Store interface {
// CacheCARoots satisfies the proxycfg.CARoots interface by sourcing data from
// the agent cache.
//
// Note: there isn't a server-local equivalent of this data source because
// "agentless" proxies obtain certificates via SDS served by consul-dataplane.
func CacheCARoots(c *cache.Cache) proxycfg.CARoots {
return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.ConnectCARootName}
}
// CacheConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing
// data from the agent cache.
func CacheConfigEntry(c *cache.Cache) proxycfg.ConfigEntry {
return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryName}
}
// CacheConfigEntryList satisfies the proxycfg.ConfigEntryList interface by
// sourcing data from the agent cache.
func CacheConfigEntryList(c *cache.Cache) proxycfg.ConfigEntryList {
return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryListName}
}
// CacheDatacenters satisfies the proxycfg.Datacenters interface by sourcing
// data from the agent cache.
//
// Note: there isn't a server-local equivalent of this data source because it
// relies on polling (so a more efficient method isn't available).
func CacheDatacenters(c *cache.Cache) proxycfg.Datacenters {
return &cacheProxyDataSource[*structs.DatacentersRequest]{c, cachetype.CatalogDatacentersName}
}
@ -64,46 +75,31 @@ func CacheServiceGateways(c *cache.Cache) proxycfg.GatewayServices {
// CacheHTTPChecks satisfies the proxycfg.HTTPChecks interface by sourcing
// data from the agent cache.
//
// Note: there isn't a server-local equivalent of this data source because only
// services registered to the local agent can be health checked by it.
func CacheHTTPChecks(c *cache.Cache) proxycfg.HTTPChecks {
return &cacheProxyDataSource[*cachetype.ServiceHTTPChecksRequest]{c, cachetype.ServiceHTTPChecksName}
}
// CacheIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface
// by sourcing data from the agent cache.
func CacheIntentionUpstreams(c *cache.Cache) proxycfg.IntentionUpstreams {
return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsName}
}
// CacheIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreamsDestination interface
// by sourcing data from the agent cache.
func CacheIntentionUpstreamsDestination(c *cache.Cache) proxycfg.IntentionUpstreams {
return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsDestinationName}
}
// CacheInternalServiceDump satisfies the proxycfg.InternalServiceDump
// interface by sourcing data from the agent cache.
func CacheInternalServiceDump(c *cache.Cache) proxycfg.InternalServiceDump {
return &cacheProxyDataSource[*structs.ServiceDumpRequest]{c, cachetype.InternalServiceDumpName}
}
// CacheLeafCertificate satisfies the proxycfg.LeafCertificate interface by
// sourcing data from the agent cache.
//
// Note: there isn't a server-local equivalent of this data source because
// "agentless" proxies obtain certificates via SDS served by consul-dataplane.
func CacheLeafCertificate(c *cache.Cache) proxycfg.LeafCertificate {
return &cacheProxyDataSource[*cachetype.ConnectCALeafRequest]{c, cachetype.ConnectCALeafName}
}
// CachePrepraredQuery satisfies the proxycfg.PreparedQuery interface by
// sourcing data from the agent cache.
//
// Note: there isn't a server-local equivalent of this data source because it
// relies on polling (so a more efficient method isn't available).
func CachePrepraredQuery(c *cache.Cache) proxycfg.PreparedQuery {
return &cacheProxyDataSource[*structs.PreparedQueryExecuteRequest]{c, cachetype.PreparedQueryName}
}
// CacheResolvedServiceConfig satisfies the proxycfg.ResolvedServiceConfig
// interface by sourcing data from the agent cache.
func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig {
return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName}
}
// cacheProxyDataSource implements a generic wrapper around the agent cache to
// provide data to the proxycfg.Manager.
type cacheProxyDataSource[ReqType cache.Request] struct {
@ -131,6 +127,15 @@ func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback {
}
}
func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
return func(ctx context.Context, correlationID string, result ResultType, err error) {
select {
case ch <- newUpdateEvent(correlationID, result, err):
case <-ctx.Done():
}
}
}
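
A self-contained sketch of the same generic adapter shape, with a local UpdateEvent stand-in, showing how a typed blocking-query callback fans results onto a channel without blocking after the watcher's context is cancelled:

```go
package main

import (
	"context"
	"fmt"
)

type UpdateEvent struct {
	CorrelationID string
	Result        any
	Err           error
}

// dispatchUpdate mirrors dispatchBlockingQueryUpdate above: it adapts a
// typed callback onto an UpdateEvent channel, dropping the send once the
// context is done so a departed watcher cannot wedge the query loop.
func dispatchUpdate[T any](ch chan<- UpdateEvent) func(context.Context, string, T, error) {
	return func(ctx context.Context, correlationID string, result T, err error) {
		select {
		case ch <- UpdateEvent{correlationID, result, err}:
		case <-ctx.Done():
		}
	}
}

func main() {
	ch := make(chan UpdateEvent, 1)
	cb := dispatchUpdate[[]string](ch)
	cb(context.Background(), "services", []string{"web", "db"}, nil)
	fmt.Println((<-ch).CorrelationID) // services
}
```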
func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent {
// This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError.
if acl.IsErrNotFound(err) {

View File

@ -5,20 +5,45 @@ import (
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/consul/watch"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
)
// CacheIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface
// by sourcing upstreams for the given service, inferred from intentions, from
// the agent cache.
func CacheIntentionUpstreams(c *cache.Cache) proxycfg.IntentionUpstreams {
return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsName}
}
// CacheIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreams
// interface by sourcing upstreams for the given destination, inferred from
// intentions, from the agent cache.
func CacheIntentionUpstreamsDestination(c *cache.Cache) proxycfg.IntentionUpstreams {
return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsDestinationName}
}
// ServerIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface
// by sourcing data from a blocking query against the server's state store.
// by sourcing upstreams for the given service, inferred from intentions, from
// the server's state store.
func ServerIntentionUpstreams(deps ServerDataSourceDeps) proxycfg.IntentionUpstreams {
return serverIntentionUpstreams{deps}
return serverIntentionUpstreams{deps, structs.IntentionTargetService}
}
// ServerIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreams
// interface by sourcing upstreams for the given destination, inferred from
// intentions, from the server's state store.
func ServerIntentionUpstreamsDestination(deps ServerDataSourceDeps) proxycfg.IntentionUpstreams {
return serverIntentionUpstreams{deps, structs.IntentionTargetDestination}
}
type serverIntentionUpstreams struct {
deps   ServerDataSourceDeps
target structs.IntentionTargetType
}
func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
@ -32,7 +57,7 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
}
defaultDecision := authz.IntentionDefaultAllow(nil)
index, services, err := store.IntentionTopology(ws, target, false, defaultDecision, structs.IntentionTargetService)
index, services, err := store.IntentionTopology(ws, target, false, defaultDecision, s.target)
if err != nil {
return 0, nil, err
}
@ -51,12 +76,3 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
dispatchBlockingQueryUpdate[*structs.IndexedServiceList](ch),
)
}
func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
return func(ctx context.Context, correlationID string, result ResultType, err error) {
select {
case ch <- newUpdateEvent(correlationID, result, err):
case <-ctx.Done():
}
}
}

View File

@ -0,0 +1,99 @@
package proxycfgglue
import (
"context"
"fmt"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/consul/watch"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
)
// CacheInternalServiceDump satisfies the proxycfg.InternalServiceDump
// interface by sourcing data from the agent cache.
func CacheInternalServiceDump(c *cache.Cache) proxycfg.InternalServiceDump {
return &cacheInternalServiceDump{c}
}
// cacheInternalServiceDump wraps the underlying cache-type to return a simpler
// subset of the response (as this is all we use in proxycfg).
type cacheInternalServiceDump struct {
c *cache.Cache
}
func (c *cacheInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
dispatch := dispatchCacheUpdate(ch)
return c.c.NotifyCallback(ctx, cachetype.InternalServiceDumpName, req, correlationID,
func(ctx context.Context, event cache.UpdateEvent) {
if r, _ := event.Result.(*structs.IndexedNodesWithGateways); r != nil {
event.Result = &structs.IndexedCheckServiceNodes{
Nodes: r.Nodes,
QueryMeta: r.QueryMeta,
}
}
dispatch(ctx, event)
})
}
// ServerInternalServiceDump satisfies the proxycfg.InternalServiceDump
// interface by sourcing data from a blocking query against the server's
// state store.
func ServerInternalServiceDump(deps ServerDataSourceDeps, remoteSource proxycfg.InternalServiceDump) proxycfg.InternalServiceDump {
return &serverInternalServiceDump{deps, remoteSource}
}
type serverInternalServiceDump struct {
deps ServerDataSourceDeps
remoteSource proxycfg.InternalServiceDump
}
func (s *serverInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
if req.Datacenter != s.deps.Datacenter {
return s.remoteSource.Notify(ctx, req, correlationID, ch)
}
filter, err := bexpr.CreateFilter(req.Filter, nil, structs.CheckServiceNodes{})
if err != nil {
return err
}
// This is just the small subset of the Internal.ServiceDump RPC handler used
// by proxycfg.
return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore,
func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedCheckServiceNodes, error) {
authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil)
if err != nil {
return 0, nil, err
}
idx, nodes, err := store.ServiceDump(ws, req.ServiceKind, req.UseServiceKind, &req.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {
return 0, nil, err
}
raw, err := filter.Execute(nodes)
if err != nil {
return 0, nil, fmt.Errorf("could not filter local service dump: %w", err)
}
nodes = raw.(structs.CheckServiceNodes)
aclfilter.New(authz, s.deps.Logger).Filter(&nodes)
return idx, &structs.IndexedCheckServiceNodes{
Nodes: nodes,
QueryMeta: structs.QueryMeta{
Index: idx,
Backend: structs.QueryBackendBlocking,
},
}, nil
},
dispatchBlockingQueryUpdate[*structs.IndexedCheckServiceNodes](ch),
)
}

View File

@ -0,0 +1,139 @@
package proxycfgglue
import (
"context"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
)
func TestServerInternalServiceDump(t *testing.T) {
t.Run("remote queries are delegated to the remote source", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
var (
req = &structs.ServiceDumpRequest{Datacenter: "dc2"}
correlationID = "correlation-id"
ch = make(chan<- proxycfg.UpdateEvent)
result = errors.New("KABOOM")
)
remoteSource := newMockInternalServiceDump(t)
remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result)
dataSource := ServerInternalServiceDump(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource)
err := dataSource.Notify(ctx, req, correlationID, ch)
require.Equal(t, result, err)
})
t.Run("local queries are served from the state store", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
nextIndex := indexGenerator()
store := state.NewStateStore(nil)
services := []*structs.NodeService{
{
Service: "mgw",
Kind: structs.ServiceKindMeshGateway,
},
{
Service: "web",
Kind: structs.ServiceKindTypical,
},
{
Service: "db",
Kind: structs.ServiceKindTypical,
},
}
for idx, service := range services {
require.NoError(t, store.EnsureRegistration(nextIndex(), &structs.RegisterRequest{
Node: fmt.Sprintf("node-%d", idx),
Service: service,
}))
}
authz := newStaticResolver(
policyAuthorizer(t, `
service "mgw" { policy = "read" }
service "web" { policy = "read" }
service "db" { policy = "read" }
node_prefix "node-" { policy = "read" }
`),
)
dataSource := ServerInternalServiceDump(ServerDataSourceDeps{
GetStore: func() Store { return store },
ACLResolver: authz,
}, nil)
t.Run("filter by kind", func(t *testing.T) {
eventCh := make(chan proxycfg.UpdateEvent)
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{
ServiceKind: structs.ServiceKindMeshGateway,
UseServiceKind: true,
}, "", eventCh))
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
require.Len(t, result.Nodes, 1)
require.Equal(t, "mgw", result.Nodes[0].Service.Service)
})
t.Run("bexpr filtering", func(t *testing.T) {
eventCh := make(chan proxycfg.UpdateEvent)
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{
QueryOptions: structs.QueryOptions{Filter: `Service.Service == "web"`},
}, "", eventCh))
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
require.Len(t, result.Nodes, 1)
require.Equal(t, "web", result.Nodes[0].Service.Service)
})
t.Run("all services", func(t *testing.T) {
eventCh := make(chan proxycfg.UpdateEvent)
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{}, "", eventCh))
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
require.Len(t, result.Nodes, 3)
})
t.Run("access denied", func(t *testing.T) {
authz.SwapAuthorizer(acl.DenyAll())
eventCh := make(chan proxycfg.UpdateEvent)
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{}, "", eventCh))
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
require.Empty(t, result.Nodes)
})
})
}
func newMockInternalServiceDump(t *testing.T) *mockInternalServiceDump {
mock := &mockInternalServiceDump{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
type mockInternalServiceDump struct {
mock.Mock
}
func (m *mockInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
return m.Called(ctx, req, correlationID, ch).Error(0)
}

View File

@ -0,0 +1,70 @@
package proxycfgglue
import (
"context"
"errors"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/watch"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
)
// CacheResolvedServiceConfig satisfies the proxycfg.ResolvedServiceConfig
// interface by sourcing data from the agent cache.
func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig {
return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName}
}
// ServerResolvedServiceConfig satisfies the proxycfg.ResolvedServiceConfig
// interface by sourcing data from a blocking query against the server's state
// store.
func ServerResolvedServiceConfig(deps ServerDataSourceDeps, remoteSource proxycfg.ResolvedServiceConfig) proxycfg.ResolvedServiceConfig {
return &serverResolvedServiceConfig{deps, remoteSource}
}
type serverResolvedServiceConfig struct {
deps ServerDataSourceDeps
remoteSource proxycfg.ResolvedServiceConfig
}
func (s *serverResolvedServiceConfig) Notify(ctx context.Context, req *structs.ServiceConfigRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
if req.Datacenter != s.deps.Datacenter {
return s.remoteSource.Notify(ctx, req, correlationID, ch)
}
if len(req.Upstreams) != 0 {
return errors.New("ServerResolvedServiceConfig does not support the legacy Upstreams parameter")
}
return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore,
func(ws memdb.WatchSet, store Store) (uint64, *structs.ServiceConfigResponse, error) {
authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil)
if err != nil {
return 0, nil, err
}
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(req.Name, nil); err != nil {
return 0, nil, err
}
idx, entries, err := store.ReadResolvedServiceConfigEntries(ws, req.Name, &req.EnterpriseMeta, req.UpstreamIDs, req.Mode)
if err != nil {
return 0, nil, err
}
reply, err := configentry.ComputeResolvedServiceConfig(req, req.UpstreamIDs, false, entries, s.deps.Logger)
if err != nil {
return 0, nil, err
}
reply.Index = idx
return idx, reply, nil
},
dispatchBlockingQueryUpdate[*structs.ServiceConfigResponse](ch),
)
}
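
Both server-local sources above (serverInternalServiceDump and serverResolvedServiceConfig) share the same delegation shape: queries for the local datacenter are served from the state store, and anything else falls through to the remote, cache-backed source. A stripped-down sketch of that pattern with stand-in types:

```go
package main

import (
	"context"
	"fmt"
)

type Notifier interface {
	Notify(ctx context.Context, dc string) error
}

type remote struct{}

func (remote) Notify(ctx context.Context, dc string) error {
	return fmt.Errorf("forwarded %q to remote source", dc)
}

// localOrRemote mirrors the delegation used by the server data sources:
// serve the local datacenter from the state store, delegate the rest.
type localOrRemote struct {
	localDC string
	remote  Notifier
}

func (s localOrRemote) Notify(ctx context.Context, dc string) error {
	if dc != s.localDC {
		return s.remote.Notify(ctx, dc)
	}
	return nil // would run the blocking query against the local store
}

func main() {
	src := localOrRemote{localDC: "dc1", remote: remote{}}
	fmt.Println(src.Notify(context.Background(), "dc2"))
}
```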

View File

@ -0,0 +1,116 @@
package proxycfgglue
import (
"context"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil"
)
func TestServerResolvedServiceConfig(t *testing.T) {
t.Run("remote queries are delegated to the remote source", func(t *testing.T) {
var (
ctx = context.Background()
req = &structs.ServiceConfigRequest{Datacenter: "dc2"}
correlationID = "correlation-id"
ch = make(chan<- proxycfg.UpdateEvent)
result = errors.New("KABOOM")
)
remoteSource := newMockResolvedServiceConfig(t)
remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result)
dataSource := ServerResolvedServiceConfig(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource)
err := dataSource.Notify(ctx, req, correlationID, ch)
require.Equal(t, result, err)
})
t.Run("local queries are served from the state store", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
const (
serviceName = "web"
datacenter = "dc1"
)
store := state.NewStateStore(nil)
nextIndex := indexGenerator()
require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ServiceConfigEntry{
Name: serviceName,
Protocol: "http",
}))
authz := newStaticResolver(
policyAuthorizer(t, fmt.Sprintf(`service "%s" { policy = "read" }`, serviceName)),
)
dataSource := ServerResolvedServiceConfig(ServerDataSourceDeps{
Datacenter: datacenter,
ACLResolver: authz,
GetStore: func() Store { return store },
}, nil)
eventCh := make(chan proxycfg.UpdateEvent)
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceConfigRequest{Datacenter: datacenter, Name: serviceName}, "", eventCh))
testutil.RunStep(t, "initial state", func(t *testing.T) {
result := getEventResult[*structs.ServiceConfigResponse](t, eventCh)
require.Equal(t, map[string]any{"protocol": "http"}, result.ProxyConfig)
})
testutil.RunStep(t, "write proxy defaults", func(t *testing.T) {
require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ProxyConfigEntry{
Name: structs.ProxyConfigGlobal,
Mode: structs.ProxyModeDirect,
}))
result := getEventResult[*structs.ServiceConfigResponse](t, eventCh)
require.Equal(t, structs.ProxyModeDirect, result.Mode)
})
testutil.RunStep(t, "delete service config", func(t *testing.T) {
require.NoError(t, store.DeleteConfigEntry(nextIndex(), structs.ServiceDefaults, serviceName, nil))
result := getEventResult[*structs.ServiceConfigResponse](t, eventCh)
require.Empty(t, result.ProxyConfig)
})
testutil.RunStep(t, "revoke access", func(t *testing.T) {
authz.SwapAuthorizer(acl.DenyAll())
require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ServiceConfigEntry{
Name: serviceName,
Protocol: "http",
}))
expectNoEvent(t, eventCh)
})
})
}
func newMockResolvedServiceConfig(t *testing.T) *mockResolvedServiceConfig {
mock := &mockResolvedServiceConfig{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
type mockResolvedServiceConfig struct {
mock.Mock
}
func (m *mockResolvedServiceConfig) Notify(ctx context.Context, req *structs.ServiceConfigRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
return m.Called(ctx, req, correlationID, ch).Error(0)
}

View File

@ -89,10 +89,10 @@ type DataSources struct {
// IntentionUpstreamsDestination provides intention-inferred upstream updates on a
// notification channel.
IntentionUpstreamsDestination IntentionUpstreamsDestination
IntentionUpstreamsDestination IntentionUpstreams
// InternalServiceDump provides updates about a (gateway) service on a
// notification channel.
// InternalServiceDump provides updates about services of a given kind (e.g.
// mesh gateways) on a notification channel.
InternalServiceDump InternalServiceDump
// LeafCertificate provides updates about the service's leaf certificate on a
@ -197,14 +197,8 @@ type IntentionUpstreams interface {
Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error
}
// IntentionUpstreamsDestination is the interface used to consume updates about upstreams destination
// inferred from service intentions.
type IntentionUpstreamsDestination interface {
Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error
}
// InternalServiceDump is the interface used to consume updates about a (gateway)
// service via the internal ServiceDump RPC.
// InternalServiceDump is the interface used to consume updates about services
// of a given kind (e.g. mesh gateways).
type InternalServiceDump interface {
Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- UpdateEvent) error
}

View File

@ -491,7 +491,7 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u UpdateEvent, sn
}
case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
resp, ok := u.Result.(*structs.IndexedNodesWithGateways)
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}

View File

@ -927,7 +927,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
events: []UpdateEvent{
{
CorrelationID: "mesh-gateway:dc4",
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC4Hostname(t),
},
Err: nil,

View File

@ -974,7 +974,7 @@ func NewTestDataSources() *TestDataSources {
Intentions: NewTestDataSource[*structs.ServiceSpecificRequest, structs.Intentions](),
IntentionUpstreams: NewTestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList](),
IntentionUpstreamsDestination: NewTestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList](),
InternalServiceDump: NewTestDataSource[*structs.ServiceDumpRequest, *structs.IndexedNodesWithGateways](),
InternalServiceDump: NewTestDataSource[*structs.ServiceDumpRequest, *structs.IndexedCheckServiceNodes](),
LeafCertificate: NewTestDataSource[*cachetype.ConnectCALeafRequest, *structs.IssuedCert](),
PreparedQuery: NewTestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse](),
ResolvedServiceConfig: NewTestDataSource[*structs.ServiceConfigRequest, *structs.ServiceConfigResponse](),
@ -1000,7 +1000,7 @@ type TestDataSources struct {
Intentions *TestDataSource[*structs.ServiceSpecificRequest, structs.Intentions]
IntentionUpstreams *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList]
IntentionUpstreamsDestination *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList]
InternalServiceDump *TestDataSource[*structs.ServiceDumpRequest, *structs.IndexedNodesWithGateways]
InternalServiceDump *TestDataSource[*structs.ServiceDumpRequest, *structs.IndexedCheckServiceNodes]
LeafCertificate *TestDataSource[*cachetype.ConnectCALeafRequest, *structs.IssuedCert]
PeeredUpstreams *TestDataSource[*structs.PartitionSpecificRequest, *structs.IndexedPeeredServiceList]
PreparedQuery *TestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse]

View File

@ -316,19 +316,19 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st
baseEvents = testSpliceEvents(baseEvents, []UpdateEvent{
{
CorrelationID: "mesh-gateway:dc2",
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC2(t),
},
},
{
CorrelationID: "mesh-gateway:dc4",
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC4Hostname(t),
},
},
{
CorrelationID: "mesh-gateway:dc6",
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC6Hostname(t),
},
},
@ -376,7 +376,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st
// Have the cross-dc query mechanism not work for dc2 so
// fedstates will infill.
CorrelationID: "mesh-gateway:dc2",
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: nil,
},
},

View File

@ -69,7 +69,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
})
events = append(events, UpdateEvent{
CorrelationID: "mesh-gateway:dc2:" + dbUID.String(),
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC2(t),
},
})
@ -114,13 +114,13 @@ func setupTestVariationConfigEntriesAndSnapshot(
})
events = append(events, UpdateEvent{
CorrelationID: "mesh-gateway:dc2:" + dbUID.String(),
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC2(t),
},
})
events = append(events, UpdateEvent{
CorrelationID: "mesh-gateway:dc3:" + dbUID.String(),
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC3(t),
},
})
@ -141,7 +141,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
})
events = append(events, UpdateEvent{
CorrelationID: "mesh-gateway:dc1:" + dbUID.String(),
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC1(t),
},
})
@ -168,7 +168,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
})
events = append(events, UpdateEvent{
CorrelationID: "mesh-gateway:dc1:" + dbUID.String(),
Result: &structs.IndexedNodesWithGateways{
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestGatewayNodesDC1(t),
},
})

View File

@ -186,7 +186,7 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
}
case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
resp, ok := u.Result.(*structs.IndexedNodesWithGateways)
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}

View File

@ -2,6 +2,7 @@ package agent
import (
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/ipaddr"
@ -13,6 +14,10 @@ func sidecarServiceID(serviceID string) string {
return serviceID + "-sidecar-proxy"
}
func serviceIDFromSidecarID(sidecarServiceID string) string {
	// Trim the "-sidecar-proxy" suffix rather than splitting on "-", which
	// would truncate service IDs that themselves contain hyphens.
	return strings.TrimSuffix(sidecarServiceID, "-sidecar-proxy")
}
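
A quick round-trip check of the two helpers, assuming the "-sidecar-proxy" suffix convention above (local copies, for illustration only):

```go
package main

import (
	"fmt"
	"strings"
)

// Local copies of the helpers above; illustration only.
func sidecarServiceID(serviceID string) string { return serviceID + "-sidecar-proxy" }

func serviceIDFromSidecarID(id string) string { return strings.TrimSuffix(id, "-sidecar-proxy") }

func main() {
	id := sidecarServiceID("web-1")
	fmt.Println(id)                         // web-1-sidecar-proxy
	fmt.Println(serviceIDFromSidecarID(id)) // web-1
}
```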
// sidecarServiceFromNodeService returns a *structs.NodeService representing a
// sidecar service with all defaults populated based on the current agent
// config.
@ -30,7 +35,7 @@ func sidecarServiceID(serviceID string) string {
// registration. This will be the same as the token parameter passed unless the
// SidecarService definition contains a distinct one.
// TODO: return AddServiceRequest
func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token string) (*structs.NodeService, []*structs.CheckType, string, error) {
func sidecarServiceFromNodeService(ns *structs.NodeService, token string) (*structs.NodeService, []*structs.CheckType, string, error) {
if ns.Connect.SidecarService == nil {
return nil, nil, "", nil
}
@ -114,41 +119,18 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
}
}
if sidecar.Port < 1 {
port, err := a.sidecarPortFromServiceID(sidecar.CompoundServiceID())
if err != nil {
return nil, nil, "", err
}
sidecar.Port = port
}
// Setup checks
checks, err := ns.Connect.SidecarService.CheckTypes()
if err != nil {
return nil, nil, "", err
}
// Setup default check if none given.
if len(checks) < 1 {
// The check should use the sidecar's address because it makes a request to the sidecar.
// If the sidecar's address is empty, we fall back to the address of the local service, as set in
// sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address
// (which in most cases it is because it's running as a sidecar in the same network).
// We could instead fall back to the address of the service as set by (ns.Address), but I've kept it using
// sidecar.Proxy.LocalServiceAddress so as to not change things too much in the
// process of fixing #14433.
checkAddress := sidecar.Address
if checkAddress == "" {
checkAddress = sidecar.Proxy.LocalServiceAddress
}
checks = sidecarDefaultChecks(ns.ID, checkAddress, sidecar.Port)
}
return sidecar, checks, token, nil
}
// sidecarPortFromServiceID is used to allocate a unique port for a sidecar proxy.
// sidecarPortFromServiceIDLocked is used to allocate a unique port for a sidecar proxy.
// This is called immediately before registration to avoid value collisions. This function assumes the state lock is already held.
func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.ServiceID) (int, error) {
func (a *Agent) sidecarPortFromServiceIDLocked(sidecarCompoundServiceID structs.ServiceID) (int, error) {
sidecarPort := 0
// Allocate port if needed (min and max inclusive).
@ -213,11 +195,23 @@ func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.Servic
return sidecarPort, nil
}
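
The body of the allocation scan is elided from this hunk; a hedged approximation of what it does, assuming an inclusive [sidecar_min_port, sidecar_max_port] range and a set of ports already claimed by locally registered sidecars:

```go
package main

import "fmt"

// pickSidecarPort sketches the scan: return the first port in the inclusive
// range that no other locally registered sidecar is using. The real agent
// also honors an explicitly configured port and errors when the range is
// unset or exhausted; this is an illustrative approximation, not the
// actual implementation.
func pickSidecarPort(minPort, maxPort int, used map[int]bool) (int, error) {
	for p := minPort; p <= maxPort; p++ {
		if !used[p] {
			return p, nil
		}
	}
	return 0, fmt.Errorf("no free ports in sidecar range [%d, %d]", minPort, maxPort)
}

func main() {
	port, err := pickSidecarPort(2222, 2224, map[int]bool{2222: true})
	fmt.Println(port, err) // 2223 <nil>
}
```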
func sidecarDefaultChecks(serviceID string, address string, port int) []*structs.CheckType {
func sidecarDefaultChecks(sidecarID string, sidecarAddress string, proxyServiceAddress string, port int) []*structs.CheckType {
// The check should use the sidecar's address because it makes a request to the sidecar.
// If the sidecar's address is empty, we fall back to the address of the local service, as set in
// sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address
// (which in most cases it is because it's running as a sidecar in the same network).
// We could instead fall back to the address of the service as set by (ns.Address), but I've kept it using
// sidecar.Proxy.LocalServiceAddress so as to not change things too much in the
// process of fixing #14433.
checkAddress := sidecarAddress
if checkAddress == "" {
checkAddress = proxyServiceAddress
}
serviceID := serviceIDFromSidecarID(sidecarID)
return []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: ipaddr.FormatAddressPort(address, port),
TCP: ipaddr.FormatAddressPort(checkAddress, port),
Interval: 10 * time.Second,
},
{

View File

@ -54,7 +54,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Port: 0,
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
@ -63,18 +63,8 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "127.0.0.1:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
wantChecks: nil,
wantToken: "foo",
},
{
name: "all the allowed overrides",
@ -157,7 +147,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Port: 0,
Tags: []string{"foo"},
Meta: map[string]string{"foo": "bar"},
LocallyRegisteredAsSidecar: true,
@ -168,17 +158,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "127.0.0.1:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantChecks: nil,
},
{
name: "invalid check type",
@ -215,158 +195,14 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
token: "foo",
wantErr: "reserved for internal use",
},
{
name: "uses proxy address for check",
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{
Address: "123.123.123.123",
Proxy: &structs.ConnectProxyConfig{
LocalServiceAddress: "255.255.255.255",
},
},
},
Address: "255.255.255.255",
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Address: "123.123.123.123",
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "255.255.255.255",
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "123.123.123.123:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
},
{
name: "uses proxy.local_service_address for check if proxy address is empty",
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{
Address: "", // Proxy address empty.
Proxy: &structs.ConnectProxyConfig{
LocalServiceAddress: "1.2.3.4",
},
},
},
Address: "", // Service address empty.
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Address: "",
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "1.2.3.4",
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "1.2.3.4:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
},
{
name: "uses 127.0.0.1 for check if proxy and proxy.local_service_address are empty",
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{
Address: "",
Proxy: &structs.ConnectProxyConfig{
LocalServiceAddress: "",
},
},
},
Address: "",
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Address: "",
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "127.0.0.1:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
hcl := `
ports {
sidecar_min_port = 2222
sidecar_max_port = 2222
}
`
a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
defer a.Shutdown()
ns := tt.sd.NodeService()
err := ns.Validate()
require.NoError(t, err, "Invalid test case - NodeService must validate")
gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token)
gotNS, gotChecks, gotToken, err := sidecarServiceFromNodeService(ns, tt.token)
if tt.wantErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.wantErr)
@ -464,7 +300,7 @@ func TestAgent_SidecarPortFromServiceID(t *testing.T) {
}
`
}
a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
a := NewTestAgent(t, hcl)
defer a.Shutdown()
if tt.preRegister != nil {
@ -472,7 +308,7 @@ func TestAgent_SidecarPortFromServiceID(t *testing.T) {
require.NoError(t, err)
}
gotPort, err := a.sidecarPortFromServiceID(structs.ServiceID{ID: tt.serviceID, EnterpriseMeta: tt.enterpriseMeta})
gotPort, err := a.sidecarPortFromServiceIDLocked(structs.ServiceID{ID: tt.serviceID, EnterpriseMeta: tt.enterpriseMeta})
if tt.wantErr != "" {
require.Error(t, err)
@ -485,3 +321,52 @@ func TestAgent_SidecarPortFromServiceID(t *testing.T) {
})
}
}
func TestAgent_SidecarDefaultChecks(t *testing.T) {
tests := []struct {
name string
svcAddress string
proxyLocalSvcAddress string
port int
wantChecks []*structs.CheckType
}{{
name: "uses proxy address for check",
svcAddress: "123.123.123.123",
proxyLocalSvcAddress: "255.255.255.255",
port: 2222,
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "123.123.123.123:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
},
{
name: "uses proxy.local_service_address for check if proxy address is empty",
proxyLocalSvcAddress: "1.2.3.4",
port: 2222,
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "1.2.3.4:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotChecks := sidecarDefaultChecks("web1", tt.svcAddress, tt.proxyLocalSvcAddress, tt.port)
require.Equal(t, tt.wantChecks, gotChecks)
})
}
}
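For reference, here is a minimal sketch of the address-fallback logic these test cases exercise. The fallback order (sidecar service address, then `proxy.local_service_address`, then `127.0.0.1`) is inferred from the expectations above; the function name and body are a sketch, not the agent's actual implementation:

```go
// sidecarDefaultChecksSketch mirrors the behavior the test cases above
// expect: pick the first non-empty address among the sidecar service
// address, the proxy's local service address, and 127.0.0.1, then build
// a TCP listening check plus an alias check for the parent service.
func sidecarDefaultChecksSketch(serviceID, svcAddress, proxyLocalSvcAddress string, port int) []*structs.CheckType {
	addr := svcAddress
	if addr == "" {
		addr = proxyLocalSvcAddress
	}
	if addr == "" {
		addr = "127.0.0.1"
	}
	return []*structs.CheckType{
		{
			Name:     "Connect Sidecar Listening",
			TCP:      fmt.Sprintf("%s:%d", addr, port),
			Interval: 10 * time.Second,
		},
		{
			Name:         "Connect Sidecar Aliasing " + serviceID,
			AliasService: serviceID,
		},
	}
}
```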

View File

@ -224,6 +224,10 @@ type IssuedCert struct {
// AgentURI is the cert URI value.
AgentURI string `json:",omitempty"`
// ServerURI is the URI value of a cert issued for a server agent.
// The same URI is shared by all servers in a Consul datacenter.
ServerURI string `json:",omitempty"`
// Kind is the kind of service for which the cert was issued.
Kind ServiceKind `json:",omitempty"`
// KindURI is the cert URI value.

View File

@ -1257,8 +1257,9 @@ type NodeService struct {
// a pointer so that we never have to nil-check this.
Connect ServiceConnect
// TODO: rename to reflect that this is used to express future intent to register.
// LocallyRegisteredAsSidecar is private as it is only used by a local agent
// state to track if the service was registered from a nested sidecar_service
// state to track if the service was or will be registered from a nested sidecar_service
// block. We need to track that so we can know whether we need to deregister
// it automatically too if it's removed from the service definition or if the
// parent service is deregistered. Relying only on ID would cause us to

View File

@ -771,6 +771,7 @@ func (s *HTTPHandlers) UIMetricsProxy(resp http.ResponseWriter, req *http.Reques
Director: func(r *http.Request) {
r.URL = u
},
Transport: s.proxyTransport,
ErrorLog: log.StandardLogger(&hclog.StandardLoggerOptions{
InferLevels: true,
}),

3
go.mod
View File

@ -6,6 +6,8 @@ replace github.com/hashicorp/consul/api => ./api
replace github.com/hashicorp/consul/sdk => ./sdk
replace github.com/hashicorp/consul/proto-public => ./proto-public
replace launchpad.net/gocheck => github.com/go-check/check v0.0.0-20140225173054-eb6ee6f84d0a
require (
@ -28,6 +30,7 @@ require (
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706
github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4
github.com/hashicorp/consul/api v1.13.1
github.com/hashicorp/consul/proto-public v0.1.0
github.com/hashicorp/consul/sdk v0.10.0
github.com/hashicorp/go-bexpr v0.1.2
github.com/hashicorp/go-checkpoint v0.5.0

16
proto-public/go.mod Normal file
View File

@ -0,0 +1,16 @@
module github.com/hashicorp/consul/proto-public
go 1.19
require (
github.com/golang/protobuf v1.5.0
google.golang.org/grpc v1.37.1
google.golang.org/protobuf v1.27.1
)
require (
golang.org/x/net v0.0.0-20190311183353-d8887717615a // indirect
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a // indirect
golang.org/x/text v0.3.0 // indirect
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
)

88
proto-public/go.sum Normal file
View File

@ -0,0 +1,88 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.37.1 h1:ARnQJNWxGyYJpdf/JXscNlQr/uv607ZPU9Z7ogHi+iI=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -93,6 +93,7 @@ func IssuedCertToStructsIssuedCert(s *IssuedCert, t *structs.IssuedCert) {
t.ServiceURI = s.ServiceURI
t.Agent = s.Agent
t.AgentURI = s.AgentURI
t.ServerURI = s.ServerURI
t.Kind = structs.ServiceKind(s.Kind)
t.KindURI = s.KindURI
t.ValidAfter = structs.TimeFromProto(s.ValidAfter)
@ -111,6 +112,7 @@ func IssuedCertFromStructsIssuedCert(t *structs.IssuedCert, s *IssuedCert) {
s.ServiceURI = t.ServiceURI
s.Agent = t.Agent
s.AgentURI = t.AgentURI
s.ServerURI = t.ServerURI
s.Kind = string(t.Kind)
s.KindURI = t.KindURI
s.ValidAfter = structs.TimeToProto(t.ValidAfter)

View File

@ -377,6 +377,9 @@ type IssuedCert struct {
Kind string `protobuf:"bytes,12,opt,name=Kind,proto3" json:"Kind,omitempty"`
// KindURI is the cert URI value.
KindURI string `protobuf:"bytes,13,opt,name=KindURI,proto3" json:"KindURI,omitempty"`
// ServerURI is the URI value of a cert issued for a server agent.
// The same URI is shared by all servers in a Consul datacenter.
ServerURI string `protobuf:"bytes,14,opt,name=ServerURI,proto3" json:"ServerURI,omitempty"`
// ValidAfter and ValidBefore are the validity periods for the
// certificate.
// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto
@ -485,6 +488,13 @@ func (x *IssuedCert) GetKindURI() string {
return ""
}
func (x *IssuedCert) GetServerURI() string {
if x != nil {
return x.ServerURI
}
return ""
}
func (x *IssuedCert) GetValidAfter() *timestamppb.Timestamp {
if x != nil {
return x.ValidAfter
@ -579,7 +589,7 @@ var file_proto_pbconnect_connect_proto_rawDesc = []byte{
0x32, 0x2b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e,
0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52,
0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xa9, 0x04, 0x0a, 0x0a, 0x49, 0x73, 0x73,
0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xc7, 0x04, 0x0a, 0x0a, 0x49, 0x73, 0x73,
0x75, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x69, 0x61,
0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x53,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x43,
@ -596,42 +606,44 @@ var file_proto_pbconnect_connect_proto_rawDesc = []byte{
0x67, 0x65, 0x6e, 0x74, 0x55, 0x52, 0x49, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18,
0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x4b,
0x69, 0x6e, 0x64, 0x55, 0x52, 0x49, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4b, 0x69,
0x6e, 0x64, 0x55, 0x52, 0x49, 0x12, 0x3a, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66,
0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66, 0x74, 0x65,
0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65,
0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12,
0x58, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74,
0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72,
0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72,
0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x09, 0x52, 0x61, 0x66,
0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x68,
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e,
0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x42, 0x8a, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73,
0x6e, 0x64, 0x55, 0x52, 0x49, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55,
0x52, 0x49, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
0x55, 0x52, 0x49, 0x12, 0x3a, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66, 0x74, 0x65,
0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12,
0x3c, 0x0a, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x09,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x52, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x58, 0x0a,
0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18,
0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x0c,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0xa2, 0x02, 0x04, 0x48, 0x43,
0x49, 0x43, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43,
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x43,
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0xca, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f,
0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5c, 0x47,
0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a,
0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61,
0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64,
0x65, 0x78, 0x42, 0x8a, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x0c, 0x43, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f,
0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x43,
0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e,
0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e,
0x6e, 0x65, 0x63, 0x74, 0xca, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5c, 0x47, 0x50, 0x42,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -165,6 +165,10 @@ message IssuedCert {
// KindURI is the cert URI value.
string KindURI = 13;
// ServerURI is the URI value of a cert issued for a server agent.
// The same URI is shared by all servers in a Consul datacenter.
string ServerURI = 14;
// ValidAfter and ValidBefore are the validity periods for the
// certificate.
// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto

View File

@ -107,6 +107,16 @@ func (msg *PeeringTrustBundle) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *PeeringServerAddresses) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *PeeringServerAddresses) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *PeeringReadRequest) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)

File diff suppressed because it is too large

View File

@ -225,6 +225,12 @@ message PeeringTrustBundle {
uint64 ModifyIndex = 7;
}
// PeeringServerAddresses contains the latest snapshot of all known
// server addresses for a peer.
message PeeringServerAddresses {
repeated string Addresses = 1;
}
// @consul-rpc-glue: LeaderReadTODO
message PeeringReadRequest {
string Name = 1;

View File

@ -3,13 +3,14 @@ package pbpeerstream
const (
apiTypePrefix = "type.googleapis.com/"
TypeURLExportedService = apiTypePrefix + "hashicorp.consul.internal.peerstream.ExportedService"
TypeURLPeeringTrustBundle = apiTypePrefix + "hashicorp.consul.internal.peering.PeeringTrustBundle"
TypeURLExportedService = apiTypePrefix + "hashicorp.consul.internal.peerstream.ExportedService"
TypeURLPeeringTrustBundle = apiTypePrefix + "hashicorp.consul.internal.peering.PeeringTrustBundle"
TypeURLPeeringServerAddresses = apiTypePrefix + "hashicorp.consul.internal.peering.PeeringServerAddresses"
)
func KnownTypeURL(s string) bool {
switch s {
case TypeURLExportedService, TypeURLPeeringTrustBundle:
case TypeURLExportedService, TypeURLPeeringTrustBundle, TypeURLPeeringServerAddresses:
return true
}
return false

View File

@ -2,7 +2,7 @@
class="consul-hcp-home"
...attributes
>
<a href={{env 'CONSUL_HCP_URL'}}>
<a href={{env 'CONSUL_HCP_URL'}} data-native-href="true">
Back to HCP
</a>
</div>

View File

@ -0,0 +1,38 @@
import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { render } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
import ConsulHcpHome from 'consul-ui/components/consul/hcp/home';
module('Integration | Component | consul hcp home', function(hooks) {
setupRenderingTest(hooks);
test('it prints the value of CONSUL_HCP_URL', async function(assert) {
// temporary registration until we are running as separate applications
this.owner.register(
'component:consul/hcp/home',
ConsulHcpHome
);
//
const Helper = this.owner.resolveRegistration('helper:env');
this.owner.register(
'helper:env',
class extends Helper {
compute([name, def]) {
switch(name) {
case 'CONSUL_HCP_URL':
return 'http://hcp';
}
return super.compute(...arguments);
}
}
);
await render(hbs`<Consul::Hcp::Home />`);
assert.dom('a').hasAttribute('href', 'http://hcp');
});
});

View File

@ -58,6 +58,9 @@
{{else}}
<div class="dc-name" data-test-datacenter-single>
{{@dcs.firstObject.Name}}
{{#if (env 'CONSUL_HCP_MANAGED_RUNTIME')}}
<span>Self-managed</span>
{{/if}}
</div>
{{/if}}
</li>

View File

@ -13,6 +13,17 @@
nav .dcs li.is-local span {
@extend %menu-panel-badge;
}
nav .dcs .dc-name {
color: rgb(var(--tone-gray-600));
padding: 3.25px 0px;
font-weight: var(--typo-weight-semibold)
}
nav .dcs .dc-name span {
@extend %pill-200;
margin-left: 1rem;
background-color: rgb(var(--tone-gray-300));
color: rgb(var(--tone-gray-999));
}
nav li.partitions,
nav li.nspaces {
@extend %main-nav-vertical-popover-menu;
@ -50,11 +61,6 @@
top: 2px;
margin-left: 2px;
}
.dc-name {
color: rgb(var(--tone-gray-600));
padding: 3.25px 0px;
font-weight: var(--typo-weight-semibold)
}
}
.hashicorp-consul {
@extend %hashicorp-consul;

View File

@ -1,4 +1,4 @@
import { is, clickable, attribute } from 'ember-cli-page-object';
import { is, clickable, attribute, isVisible } from 'ember-cli-page-object';
import ucfirst from 'consul-ui/utils/ucfirst';
export default function(name, items, blankKey = 'all') {
return items.reduce(function(prev, item, i, arr) {
@ -19,6 +19,7 @@ export default function(name, items, blankKey = 'all') {
[`${key}IsSelected`]: is('.selected', `[data-test-tab="${name}_${item}"]`),
[`${key}Url`]: attribute('href', `[data-test-tab="${name}_${item}"] a`),
[key]: clickable(`[data-test-tab="${name}_${item}"] a`),
[`${key}IsVisible`]: isVisible(`[data-test-tab="${name}_${item}"] a`),
},
};
}, {});

View File

@ -1,14 +1,44 @@
import Route from 'consul-ui/routing/route';
import { action } from '@ember/object';
import { inject as service } from '@ember/service';
import { runInDebug } from '@ember/debug';
import WithBlockingActions from 'consul-ui/mixins/with-blocking-actions';
export default class ApplicationRoute extends Route.extend(WithBlockingActions) {
@service('client/http') client;
@service('env') env;
@service('repository/token') tokenRepo;
@service('settings') settings;
data;
async model() {
if(this.env.var('CONSUL_ACLS_ENABLED')) {
const secret = this.env.var('CONSUL_HTTP_TOKEN');
const existing = await this.settings.findBySlug('token');
if(!existing.AccessorID && secret) {
try {
const token = await this.tokenRepo.self({
secret: secret,
dc: this.env.var('CONSUL_DATACENTER_LOCAL')
});
await this.settings.persist({
token: {
AccessorID: token.AccessorID,
SecretID: token.SecretID,
Namespace: token.Namespace,
Partition: token.Partition,
}
});
} catch(e) {
runInDebug(_ => console.error(e));
}
}
}
return {};
}
@action
onClientChanged(e) {
let data = e.data;

View File

@ -132,12 +132,16 @@ export default function(config = {}, win = window, doc = document) {
return operatorConfig.LocalDatacenter;
case 'CONSUL_DATACENTER_PRIMARY':
return operatorConfig.PrimaryDatacenter;
case 'CONSUL_HCP_MANAGED_RUNTIME':
return operatorConfig.HCPManagedRuntime;
case 'CONSUL_API_PREFIX':
// we want the API prefix to look like an env var in case we ever change
// operator config to be an API request; we need this variable before we
// make an API request, so this specific variable should never be
// retrieved via an API request
return operatorConfig.APIPrefix;
case 'CONSUL_HCP_URL':
return operatorConfig.HCPURL;
case 'CONSUL_UI_CONFIG':
dashboards = {
service: undefined,
@ -190,7 +194,7 @@ export default function(config = {}, win = window, doc = document) {
}
};
const ui = function(key) {
let $;
let $ = {};
switch (config.environment) {
case 'development':
case 'staging':
@ -225,15 +229,28 @@ export default function(config = {}, win = window, doc = document) {
case 'CONSUL_UI_CONFIG':
prev['CONSUL_UI_CONFIG'] = JSON.parse(value);
break;
case 'TokenSecretID':
prev['CONSUL_HTTP_TOKEN'] = value;
break;
default:
prev[key] = value;
}
return prev;
}, {});
if (typeof $[key] !== 'undefined') {
return $[key];
}
break;
case 'production':
$ = dev().reduce(function(prev, [key, value]) {
switch (key) {
case 'TokenSecretID':
prev['CONSUL_HTTP_TOKEN'] = value;
break;
}
return prev;
}, {});
break;
}
if (typeof $[key] !== 'undefined') {
return $[key];
}
return config[key];
};
@ -252,7 +269,9 @@ export default function(config = {}, win = window, doc = document) {
case 'CONSUL_UI_CONFIG':
case 'CONSUL_DATACENTER_LOCAL':
case 'CONSUL_DATACENTER_PRIMARY':
case 'CONSUL_HCP_MANAGED_RUNTIME':
case 'CONSUL_API_PREFIX':
case 'CONSUL_HCP_URL':
case 'CONSUL_ACLS_ENABLED':
case 'CONSUL_NSPACES_ENABLED':
case 'CONSUL_PEERINGS_ENABLED':

View File

@ -108,7 +108,7 @@
"ember-cli-flash": "^2.1.1",
"ember-cli-htmlbars": "^5.2.0",
"ember-cli-inject-live-reload": "^2.0.2",
"ember-cli-page-object": "^1.16.2",
"ember-cli-page-object": "^1.17.10",
"ember-cli-sass": "^10.0.1",
"ember-cli-sri": "^2.1.1",
"ember-cli-string-helpers": "^5.0.0",

View File

@ -53,11 +53,11 @@ Feature: dc / nodes / show: Show node
dc: dc1
node: node-0
---
And I see healthChecks on the tabs
And I see serviceInstances on the tabs
And I don't see roundTripTime on the tabs
And I see lockSessions on the tabs
And I see serviceInstancesIsSelected on the tabs
And I see healthChecksIsVisible on the tabs
And I see serviceInstancesIsVisible on the tabs
And I don't see roundTripTime on the tabs
And I see lockSessionsIsVisible on the tabs
Scenario: A node warns when deregistered whilst blocking
Given 1 node model from yaml
---

View File

@ -33,7 +33,7 @@ Feature: dc / services / instances / Exposed Paths
id: service-0-with-id
---
Then the url should be /dc1/services/service-0/instances/node-0/service-0-with-id/health-checks
And I see exposedPaths on the tabs
And I see exposedPathsIsVisible on the tabs
When I click exposedPaths on the tabs

View File

@ -33,7 +33,7 @@ Feature: dc / services / instances / Upstreams
id: service-0-with-id
---
Then the url should be /dc1/services/service-0/instances/node-0/service-0-with-id/health-checks
And I see upstreams on the tabs
And I see upstreamsIsVisible on the tabs
When I click upstreams on the tabs

View File

@ -15,7 +15,7 @@ Feature: dc / services / show-routing: Show Routing for Service
service: service-0
---
And the title should be "service-0 - Consul"
And I see routing on the tabs
And I see routingIsVisible on the tabs
Scenario: Given connect is disabled, the Routing tab should not display or error
Given 2 datacenter models from yaml
---
@ -51,14 +51,7 @@ Feature: dc / services / show-routing: Show Routing for Service
dc: dc2
service: service-1
---
And I see routing on the tabs
# something weird is going on with this test
# without waiting we issue a url reload that
# will make the test timeout.
# waiting will "fix" this - we should look into
# the underlying reason for this soon. This is
# only a quick-fix to land ember-qunit v5.
And pause for 1000
And I see routingIsVisible on the tabs
And I visit the service page for yaml
---
dc: dc1

View File

@ -14,7 +14,7 @@ Feature: dc / services / show-topology: Show Topology tab for Service
dc: dc1
service: service-0
---
And I see topology on the tabs
And I see topologyIsVisible on the tabs
Then the url should be /dc1/services/service-0/topology
Scenario: Given connect is disabled, the Topology tab should not display or error
Given 1 datacenter model with the value "dc1"

View File

@ -38,7 +38,7 @@ Feature: dc / services / show / intentions / index: Intentions per service
service: service-0
---
And the title should be "service-0 - Consul"
And I see intentions on the tabs
And I see intentionsIsVisible on the tabs
When I click intentions on the tabs
And I see intentionsIsSelected on the tabs
Scenario: I can see intentions

View File

@ -16,7 +16,7 @@ Feature: dc / services / show / services
service: terminating-gateway-1
---
And the title should be "terminating-gateway-1 - Consul"
And I see linkedServices on the tabs
And I see linkedServicesIsVisible on the tabs
When I click linkedServices on the tabs
And I see linkedServicesIsSelected on the tabs
Scenario: Seeing the list of Linked Services

View File

@ -19,7 +19,7 @@ Feature: dc / services / show / tags
dc: dc1
service: service
---
And I see tags on the tabs
And I see tagsIsVisible on the tabs
When I click tags on the tabs
And I see tagsIsSelected on the tabs
And I see 3 tag models on the tabs.tagsTab component
@ -42,7 +42,7 @@ Feature: dc / services / show / tags
dc: dc1
service: service
---
And I see tags on the tabs
And I see tagsIsVisible on the tabs
When I click tags on the tabs
And I see tagsIsSelected on the tabs
And I see 3 tag models on the tabs.tagsTab component

View File

@ -24,7 +24,7 @@ Feature: dc / services / show / topology / routing-config
dc: dc1
service: service-0
---
And I see topology on the tabs
And I see topologyIsVisible on the tabs
Scenario: Given the Source is routing config, show Source Type
Then I see the text "Routing configuration" in "[data-test-topology-metrics-source-type]"
Scenario: Given the Source is routing config, redirect to Routing Config page

View File

@ -14,7 +14,7 @@ Feature: dc / services / show / topology / stats
dc: dc1
service: service-0
---
And I see topology on the tabs
And I see topologyIsVisible on the tabs
And I don't see the "[data-test-topology-metrics-stats]" element
Scenario: Given metrics is enabled, the Topology tab should display metrics
Given 1 datacenter model with the value "dc1"
@ -31,7 +31,7 @@ Feature: dc / services / show / topology / stats
dc: dc1
service: service-0
---
And I see topology on the tabs
And I see topologyIsVisible on the tabs
And I see the "[data-test-topology-metrics-stats]" element
Scenario: Given metrics is enabled, metrics stats are disabled for an ingress gateway Topology
Given 1 datacenter model with the value "dc1"
@ -49,7 +49,7 @@ Feature: dc / services / show / topology / stats
dc: dc1
service: ingress-gateway
---
And I see topology on the tabs
And I see topologyIsVisible on the tabs
And I don't see the "[data-test-topology-metrics-stats]" element
And I see the "[data-test-topology-metrics-status]" element
Scenario: Given metrics is enabled, metric stats are disabled for ingress gateway as downstream services
@ -77,7 +77,7 @@ Feature: dc / services / show / topology / stats
dc: dc1
service: service-0
---
And I see topology on the tabs
And I see topologyIsVisible on the tabs
And I see the "[data-test-sparkline]" element
And I don't see the "[data-test-topology-metrics-downstream-stats]" element

View File

@ -16,7 +16,7 @@ Feature: dc / services / show / upstreams
service: ingress-gateway-1
---
And the title should be "ingress-gateway-1 - Consul"
And I see upstreams on the tabs
And I see upstreamsIsVisible on the tabs
When I click upstreams on the tabs
And I see upstreamsIsSelected on the tabs
Scenario: Seeing the list of Upstreams

View File

@ -0,0 +1,2 @@
// temporary import until we are running as separate applications
import 'consul-ui/components/consul/hcp/home/index.test';

View File

@ -184,7 +184,9 @@ export default function(scenario, assert, find, currentPage, $) {
}
}
assert[isNegative ? 'notOk' : 'ok'](target, message);
return Promise.resolve();
// always return promise and handle the fact that `target` could be async
return Promise.resolve().then(() => target);
})
.then(
[

View File

@ -6530,7 +6530,7 @@ ember-cli-babel@^6.0.0, ember-cli-babel@^6.0.0-beta.4, ember-cli-babel@^6.11.0,
ember-cli-version-checker "^2.1.2"
semver "^5.5.0"
ember-cli-babel@^7.13.2, ember-cli-babel@^7.26.3, ember-cli-babel@^7.26.5:
ember-cli-babel@^7.13.2, ember-cli-babel@^7.26.1, ember-cli-babel@^7.26.3, ember-cli-babel@^7.26.5:
version "7.26.11"
resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.26.11.tgz#50da0fe4dcd99aada499843940fec75076249a9f"
integrity sha512-JJYeYjiz/JTn34q7F5DSOjkkZqy8qwFOOxXfE6pe9yEJqWGu4qErKxlz8I22JoVEQ/aBUO+OcKTpmctvykM9YA==
@ -6781,15 +6781,15 @@ ember-cli-normalize-entity-name@^1.0.0:
dependencies:
silent-error "^1.0.0"
ember-cli-page-object@^1.16.2:
version "1.17.7"
resolved "https://registry.yarnpkg.com/ember-cli-page-object/-/ember-cli-page-object-1.17.7.tgz#a35c4cc1ece147e9752604cbc2266038660a84f6"
integrity sha512-sp7lunZa9p57cNm6og86F12SBx5Tt/7dWndfIKGE9Kol3QP2/72qiUVauhbfoUDSjfYLG2xEAFWDbiLHPMPYsg==
ember-cli-page-object@^1.17.10:
version "1.17.10"
resolved "https://registry.yarnpkg.com/ember-cli-page-object/-/ember-cli-page-object-1.17.10.tgz#a3145c7b0341e6180dab28e10c858f8b6535e66a"
integrity sha512-J7OQZ4IWftHLunsCicFbaVb/GrI+/DMWMPO5EAca9+0x9K+rxml351Tl1Z/Fpr8UmHg1q/KGwqGx9xGodrRxbg==
dependencies:
broccoli-file-creator "^2.1.1"
broccoli-merge-trees "^2.0.0"
ceibo "~2.0.0"
ember-cli-babel "^6.16.0"
ember-cli-babel "^7.26.1"
ember-cli-node-assets "^0.2.2"
ember-native-dom-helpers "^0.7.0"
jquery "^3.4.1"

View File

@ -12,8 +12,9 @@ Command: `consul license`
<EnterpriseAlert />
The `license` command provides a datacenter-level view of the Consul Enterprise license. This was added
in Consul 1.1.0 but Consul 1.10.0 removed the ability to set and reset the license using the CLI.
The `license` command provides a list of all datacenters that use the Consul Enterprise license applied to the current datacenter.
~> **Warning**: Consul 1.10.0 removed the ability to set and reset the license using the CLI.
See the [licensing documentation](/docs/enterprise/license/overview) for more information about
Consul Enterprise license management.

View File

@ -90,3 +90,4 @@ items which are reloaded include:
Consul will issue the following warning, `Static Runtime config has changed and need a manual config reload to be applied`.
You must manually issue the `consul reload` command or send a `SIGHUP` to the Consul process to reload the new values.
- Watches
- [License](/docs/enterprise/license/overview)

View File

@ -40,6 +40,13 @@ may also be licensed in the very same manner.
However, to avoid the need to configure the license on many client agents and snapshot agents,
those agents have the capability to retrieve the license automatically under the conditions described below.
Updating the license for an agent depends on the method you used to apply the license.
- **If you used the `CONSUL_LICENSE` environment variable**: After updating the environment variable, restart the affected agents.
- **If you used the `CONSUL_LICENSE_PATH` environment variable**: Update the license file first. Then, restart the affected agents.
- **If you used the `license_path` configuration item**: Update the license file first. Then, run [`consul reload`](/commands/reload) for the affected agents.
#### Client Agent License Retrieval
When a client agent starts without a license in its configuration or environment, it will try to retrieve the

View File

@ -6,7 +6,7 @@ description: Single Consul Datacenter deployed in multiple Kubernetes clusters
# Single Consul Datacenter in Multiple Kubernetes Clusters
~> **Note:** For running Consul across multiple Kubernetes, it is generally recommended to utilize [Admin Partitions](/docs/enterprise/admin-partitions) for production environments. This Consul Enterprise feature allows for the ability to accommodate for multiple tenants without concerns of resource collisions when administering a cluster at scale, and for the ability to run Consul on Kubernetes clusters across a non-flat network.
~> **Note:** When running Consul across multiple Kubernetes clusters, we recommend using [admin partitions](/docs/enterprise/admin-partitions) for production environments. This Consul Enterprise feature allows you to accommodate multiple tenants without resource collisions when administering a cluster at scale. Admin partitions also enable you to run Consul on Kubernetes clusters across a non-flat network.
This page describes deploying a single Consul datacenter in multiple Kubernetes clusters,
with servers and clients running in one cluster and only clients in the rest of the clusters.
@ -76,7 +76,7 @@ which are likely going to change.
To deploy, first generate the Gossip encryption key and save it as a Kubernetes secret.
```shell
```shell-session
$ kubectl create secret generic consul-gossip-encryption-key --from-literal=key=$(consul keygen)
```
@ -163,7 +163,7 @@ which can be seen by running `kubectl get nodes --output wide`.
Set `externalServers.httpsPort` to the `nodePort` of the `cluster1-consul-ui` service.
In our example, the port is `31557`.
```shell
```shell-session
$ kubectl get service cluster1-consul-ui --context cluster1
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
cluster1-consul-ui NodePort 10.0.240.80 <none> 443:31557/TCP 40h

View File

@ -109,7 +109,7 @@ NAME: consul
### Enable the Consul CNI plugin
By default, Consul generates a `connect-inject init` container as part of the Kubernetes pod startup process when Consul is in [transparent proxy mode](/docs/connect/transparent-proxy). The container configures traffic redirection in the service mesh through the sidecar proxy. To configure redirection, the container requires elevated CAP_NET_ADMIN privileges, which may not be compatible with security policies in your organization.
By default, Consul generates a `connect-inject init` container as part of the Kubernetes pod startup process when Consul is in [transparent proxy mode](/docs/connect/transparent-proxy). The container configures traffic redirection in the service mesh through the sidecar proxy. To configure redirection, the container requires elevated `CAP_NET_ADMIN` privileges, which may not be compatible with security policies in your organization.
Instead, you can enable the Consul container network interface (CNI) plugin to perform traffic redirection. Because the plugin is executed by the Kubernetes kubelet, the plugin already has the elevated privileges necessary to configure the network.
@ -125,10 +125,10 @@ global:
connectInject:
enabled: true
cni:
enabled: true
logLevel: info
cniBinDir: "/opt/cni/bin"
cniNetDir: "/etc/cni/net.d"
enabled: true
logLevel: info
cniBinDir: "/opt/cni/bin"
cniNetDir: "/etc/cni/net.d"
```
</CodeBlockConfig>
@ -140,10 +140,10 @@ global:
connectInject:
enabled: true
cni:
enabled: true
logLevel: info
cniBinDir: "/home/kubernetes/bin"
cniNetDir: "/etc/cni/net.d"
enabled: true
logLevel: info
cniBinDir: "/home/kubernetes/bin"
cniNetDir: "/etc/cni/net.d"
```
</CodeBlockConfig>

View File

@ -2,32 +2,29 @@
layout: docs
page_title: Consul-Terraform-Sync API
description: >-
How to use the Consul-Terraform-Sync API
Consul-Terraform-Sync provides an API interface that lets you query CTS instance health, check CTS instance status, and run CTS tasks.
---
# Consul-Terraform-Sync API
# Consul-Terraform-Sync API Overview
When running in [daemon mode](/docs/nia/cli#daemon-mode), Consul-Terraform-Sync (CTS) serves an HTTP API interface. Details of the available API endpoints is available in the navigation to the left.
Consul-Terraform-Sync (CTS) provides an HTTP API interface for querying CTS instances and running and managing tasks.
### Port
## Port
The API is served at the default port `8558` or a different port if set with [`port` configuration](/docs/nia/configuration#port)
### Version Prefix
## Version prefix
All API routes are prefixed with `/v1/`. This documentation is for v1 of the API, which is currently the only version.
Example: `localhost:8558/v1/status`
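As a quick illustration, the sketch below queries the overall status endpoint with Go's standard library; it assumes a CTS instance running locally on the default port:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Default CTS port 8558; all routes carry the /v1/ prefix.
	resp, err := http.Get("http://localhost:8558/v1/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}
```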
### Error
## Request ID
Successful API requests will receive a 2XX success status code. For other unsuccessful status codes, when possible, more details will be provided in a response body containing an error object.
Each call to a CTS API endpoint returns a `request_id` field. The field is a string generated by the API. Example:
Example: Status 400 Bad Request
```json
{
"error": {
"message": "example error message: unsupported status parameter value"
}
}
```
`"request_id": "e1bc2236-3d0e-5f5e-dc51-164a1cf6da88"`
## Error messages
The API sends a response code in the 200 range if the call is successful. If the call is unsuccessful, the API sends an error message that includes additional information when possible. Refer to [Error Messages](/docs/nia/usage/errors-ref) for additional information.

View File

@ -6,28 +6,27 @@ description: >-
---
# Status
The `/status` endpoints share status-related information for tasks. This information is available for understanding the status of individual tasks and across tasks.
The `/status` endpoints return status-related information for tasks, including a count of successful and failed task events that are recorded whenever tasks execute an automation. Currently, only the five most recent events are stored in Consul-Terraform-Sync (CTS). For more information on the hierarchy of status information and how it is collected, refer to [Status Information](/docs/nia/tasks#status-information).
The health status value is determined by aggregating the success or failure of the event of a task detecting changes in Consul services and then updating network infrastructure. Currently, only the 5 most recent events are stored in Consul-Terraform-Sync (CTS). For more information on the hierarchy of status information and how it is collected, see [Status Information](/docs/nia/tasks#status-information).
If CTS is configured [for high availability](/docs/nia/usage/run-ha), you can send requests to the [`/status/cluster` endpoint path](#cluster-status) on any CTS cluster member instance to receive information about the entire cluster. Calling the `status` endpoint path (without `/cluster`), however, returns a 400 error if the request is sent to a follower instance. The error message depends on what information the follower instance is able to obtain about the leader. Refer to [Error Messages](/docs/nia/usage/errors-ref) for more information.
## Overall Status
## Status for all tasks
This endpoint currently returns the overall status information for all tasks.
This endpoint returns the overall status information for all tasks.
| Method | Path | Produces |
| ------ | ------------------- | ------------------ |
| `GET` | `/status` | `application/json` |
### Request Parameters
Currently no request parameters are offered for the overall status API.
### Request parameters
### Response Statuses
No request parameters are available for this endpoint.
| Status | Reason |
| ------ | ---------------- |
| 200 | Successfully retrieved the status |
### Response statuses
### Response Fields
- `200`: Successfully retrieved the status.
### Response fields
| Name | Type | Description |
| ----- | ------ | ------------------ |
@ -65,7 +64,7 @@ Response:
}
```
## Task Status
## Task status
This endpoint returns the individual task status information for a single specified task or for all tasks.
@ -79,7 +78,7 @@ Task health status value is determined by the success or failure of all stored [
| ------ | ------------------- | ------------------ |
| `GET` | `/status/tasks/:task_name` | `application/json` |
### Request Parameters
### Request parameters
| Name | Required | Type | Description | Default |
| -------- | -------- | ------ | ------------------ | ------- |
@ -87,14 +86,12 @@ Task health status value is determined by the success or failure of all stored [
|`include` | Optional | string | Only accepts the value `"events"`. Use to include stored event information in response. | none
|`status` | Optional | string | Only accepts health status values `"successful"`, `"errored"`, `"critical"`, or `"unknown"`. Use to filter response by tasks that have the specified health status value. Recommend setting this parameter when requesting all tasks i.e. no `task` parameter is set. | none
### Response Statuses
### Response statuses
| Status | Reason |
| ------ | ---------------- |
| 200 | Successfully retrieved the task status |
| 404 | Task with the given name not found |
- `200`: Successfully retrieved the task status
- `404`: Task with the given name not found
### Response Fields
### Response fields
The response is a JSON map of task name to a status information structure with the following fields.
@ -126,7 +123,11 @@ Event represents the process of updating network infrastructure of a task. The d
|`config.source` | string | **Deprecated in CTS 0.5.0 and will be removed in v0.8.0.** Module configured for the task.
|`config.providers` | list[string] | **Deprecated in CTS 0.5.0 and will be removed in v0.8.0.** List of the providers configured for the task.
### Example: All Task Statuses
### Examples
The following examples call `status` endpoints to retrieve information about tasks.
#### All task statuses
Request:
```shell-session
@ -163,7 +164,7 @@ Response:
}
```
### Example: Individual Task Status with Events
#### Individual task status with events
Request:
```shell-session
@ -225,3 +226,78 @@ Response:
}
}
```
## Cluster status
The `/v1/status/cluster` API endpoint returns information about a high-availability cluster and its members, such as health status and leadership. This endpoint is only supported when using CTS in [high availability mode](/docs/nia/usage/run-ha). If you call the endpoint without configuring CTS for high availability, then CTS prints an error to the console. Refer to [Error Messages](/docs/nia/usage/errors-ref) for information about CTS error messages.
| Method | Path | Response format |
| ------ | ----------------- | ------------------ |
| `GET` | `/status/cluster` | `application/json` |
### Request parameters
No request parameters are available for this endpoint.
### Response statuses
- `200`: Successfully retrieved the cluster status
### Response fields
The following table describes the fields of the response that the `status/cluster` endpoint sends.
| Field | Type | Description |
| --- | ---- | --- |
| `cluster_name` | string | Identifies the name of the cluster. |
| `members` | list[[member](#member-objects)] | Contains a list of [member objects](#member-objects). Each object in the `members` list represents a CTS instance. |
#### Member objects
The following table describes the fields available for objects in the `members` array.
| Field | Type | Description |
| --- | ---- | --- |
| `address` | string | Indicates the location of the instance. The address is only included in the response if the `high_availability.instance.address` option is configured on the leader instance. Refer to the [high availability instance configuration](/docs/nia/configuration#high-availability-instance) reference for additional information. |
| `healthy` | boolean | Indicates the health of the service instance. Refer to [Service Registration](/docs/nia/configuration#service-registration) for additional information. |
| `id` | string | Indicates the service registration ID. Refer to [Service Registration](/docs/nia/configuration#service-registration) for additional information. |
| `leader` | boolean | Identifies the cluster leader. |
| `service_name` | string | Identifies the name of the service that the instance represents. The value is set by the `service_name` field in the [Service Registration](/docs/nia/configuration#service-registration) configuration. |
### Example
The following command calls the `/status/cluster` endpoint:
```shell-session
$ curl --request GET http://localhost:8558/v1/status/cluster
```
The following example response shows three CTS instances. The cluster leader is `cts-02` and is advertising that its address is `cts-02.example.com`:
```json
{
"cluster_name": "cluster-a",
"members": [
{
"address": "cts-02.example.com",
"healthy": true,
"id": "cts-02",
"leader": true,
"service_name": "consul-terraform-sync"
},
{
"healthy": true,
"id": "cts-03",
"leader": false,
"service_name": "consul-terraform-sync"
},
{
"healthy": true,
"id": "cts-01",
"leader": false,
"service_name": "consul-terraform-sync"
}
],
"request_id": "e1bc2236-3d0e-5f5e-dc51-164a1cf6da88"
}
```

View File

@ -9,6 +9,8 @@ description: >-
The `/tasks` endpoints interact with the tasks that Consul-Terraform-Sync (CTS) is responsible for running.
If you [run CTS with high availability enabled](/docs/nia/usage/run-ha), you can send requests to the `/tasks` endpoint on CTS leader or follower instances. Requests to a follower instance, however, return a 400 Bad Request and an error message. The error message depends on what information the follower instance is able to obtain about the leader. Refer to [Error Messages](/docs/nia/usage/errors-ref) for more information.
## Get Tasks
This endpoint returns information about all existing tasks.

View File

@ -2,37 +2,29 @@
layout: docs
page_title: Architecture
description: >-
Consul-Terraform-Sync Architecture
Learn about the Consul-Terraform-Sync architecture and high-level CTS components, such as the Terraform driver and tasks.
---
# Consul-Terraform-Sync Architecture
Consul-Terraform-Sync (CTS) is a service-oriented tool for managing
network infrastructure near real-time. CTS runs as a daemon
and integrates the network topology maintained by your Consul cluster with your
network infrastructure to dynamically secure and connect services.
Consul-Terraform-Sync (CTS) is a service-oriented tool for managing network infrastructure in near real time. CTS runs as a daemon and integrates the network topology maintained by your Consul cluster with your network infrastructure to dynamically secure and connect services.
## 10,000 Foot View
## CTS workflow
The following diagram shows the CTS workflow as it monitors the Consul service catalog for updates.
[![Consul-Terraform-Sync Architecture](/img/nia-highlevel-diagram.svg)](/img/nia-highlevel-diagram.svg)
The diagram shows CTS monitoring the Consul service catalog
for updates and utilizing Terraform to update the state of the infrastructure.
1. CTS monitors the state of Consul's service catalog and its KV store. This process is described in [Watcher and views](#watcher-and-views).
1. CTS detects a change.
1. CTS prompts Terraform to update the state of the infrastructure.
There are two principal aspects of Sync to know about corresponding to the
lines to Consul and Terraform in the diagram above. The line to Consul
represents the Watchers monitoring the state of Consul's service catalog (and
possibly KV store) while the line to Terraform represents tasks being run to
update the infrastructure.
## Watcher and Views
## Watcher and views
CTS monitors Consul for updates utilizing Consul's [Blocking
Queries](/api-docs/features/blocking) whenever supported, falling back on
polling when not. The watcher maintains a separate thread (known internally as
a view) for each value monitored, running any tasks that depend on that watched
value whenever it's updated. Say, for example, running a task to update a
proxy when an instance goes unhealthy.
CTS uses Consul's [blocking queries](/api-docs/features/blocking) functionality to monitor Consul for updates. If an endpoint does not support blocking queries, CTS uses polling to watch for changes. These mechanisms are referred to in CTS as *watchers*.
The watcher maintains a separate thread for each value monitored and runs any tasks that depend on the watched value whenever it is updated. These threads are referred to as _views_. For example, a thread may run a task to update a proxy when the watcher detects that an instance has become unhealthy.
## Tasks
@ -53,8 +45,35 @@ network infrastructure. The following [drivers](/docs/nia/network-drivers#terraf
Each driver includes a set of providers that [enables support](/docs/nia/terraform-modules) for a wide variety of infrastructure applications.
## Security Guidelines
## State storage and persistence
The [Secure Consul-Terraform-Sync for Production](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS)
tutorial contains a checklist of best practices to secure your
CTS installation for a production environment.
The following types of state information are associated with CTS.
### Terraform state information
By default, CTS stores [Terraform state data](https://www.terraform.io/docs/state/index.html) in the Consul KV, but you can specify where this information is stored by configuring the `backend` setting in the [Terraform driver configuration](/docs/nia/configuration#backend). The data persists if CTS stops and the backend is configured to a remote location.
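For example, the following sketch stores Terraform state in the Consul KV under an assumed base path. When the `backend` block is omitted, CTS generates equivalent defaults from the [`consul` block](/docs/nia/configuration#consul):
```hcl
driver "terraform" {
  backend "consul" {
    # Hypothetical KV base path for illustration only.
    path = "consul-terraform-sync/terraform"
  }
}
```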
### CTS task and event data
By default, CTS stores task and event data in memory. This data is transient and does not persist. If you configure [CTS to run with high availability enabled](/docs/nia/usage/run-ha), CTS stores the data in the Consul KV. High availability is an enterprise feature that promotes CTS resiliency. When high availability is enabled, CTS stores and persists task changes and events that occur when an instance stops.
The data stored when operating in high availability mode includes task changes made using the task API or CLI. Examples of task changes include creating a new task, deleting a task, and enabling or disabling a task. You can empty the leader's stored state information by starting CTS with the [`-reset-storage` flag](/docs/nia/cli/start#options).
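For example, the following hypothetical invocation starts an instance that overwrites the stored state if it is elected leader:
```shell-session
$ consul-terraform-sync start -config-file ha-config.hcl -reset-storage
```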
## Instance compatibility checks (high availability)
If you [run CTS with high availability enabled](/docs/nia/usage/run-ha), CTS performs instance compatibility checks to ensure that all instances in the cluster behave consistently. Consistent instance behavior enables CTS to properly perform automations configured in the state storage.
The CTS instance compatibility check reports an error if the task [module](/docs/nia/configuration#module) is configured with a local module, but the module does not exist on the CTS instance. Refer to the [Terraform documentation](https://www.terraform.io/language/modules/sources#module-sources) for additional information about module sources. Example log:
```shell-session
[ERROR] ha.compat: error="compatibility check failure: stat ./example-module: no such file or directory"
```
Refer to [Error Messages](/docs/nia/usage/errors-ref) for additional information.
CTS instances perform a compatibility check based on the stored state on start-up and every five minutes thereafter. If the check detects an incompatible CTS instance, it generates a log so that an operator can address the incompatibility.
CTS logs the error message and continues to run when it finds an incompatibility. CTS can still elect an incompatible instance to be the leader, but tasks affected by the incompatibility do not run successfully. This can happen when all active CTS instances enter [`once-mode`](/docs/nia/cli/start#modes) and run their tasks once upon initial election.
## Security guidelines
We recommend following the network security guidelines described in the [Secure Consul-Terraform-Sync for Production](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial. The tutorial contains a checklist of best practices to secure your CTS installation for a production environment.
@ -2,61 +2,42 @@
layout: docs
page_title: Start Command
description: >-
Consul-Terraform-Sync supports start command for starting CTS as a daemon
Run the Consul-Terraform-Sync start command to start the CTS daemon.
---
# start
The `start` command starts Consul-Terraform-Sync (CTS) as a daemon. When CTS runs as a daemon, there is no default configuration to start CTS. You must set a configuration flag `-config-file` or `-config-dir`. For example:
The `start` command starts the Consul-Terraform-Sync (CTS) daemon.
## Usage
```shell-session
$ consul-terraform-sync start -config-file=config.hcl
$ consul-terraform-sync start -config-file <instance-configuration-file> [OPTIONS]
```
To review a list of available flags, use the `-help` or `-h` flag.
## Modes
CTS can be run as a daemon in different modes.
### Long-running Mode
Flag: none
Behavior: This is the default mode in which CTS passes through a once-mode phase and then turns into a long-running process. During the once-mode phase, the daemon will exit with a non-zero status if it encounters an error. After successfully passing through once-mode phase, it will begin a long-running process in which errors are logged and exiting is not expected behavior. When the long-running process begins, CTS daemon starts serving API and command requests.
### Inspect Mode
Flag: `-inspect`
Behavior: CTS will display the proposed state changes for all tasks once and exit. No changes are applied in this mode. On encountering an error before completing, CTS will exit with a non-zero status.
Usage: Intended to be run before long-running mode in order to confirm configuration is accurate and tasks would update network infrastructure as expected.
---
Flag: `-inspect-task [task-name]`
Behavior: This has similar behavior as `-inspect` mode for the selected task. The flag can be specified multiple times to inspect multiple tasks. No changes are applied in this mode.
Usage: Useful to debug one or more tasks to confirm configuration is accurate and the selected tasks would update network infrastructure as expected.
### Once Mode
Flag: `-once`
Behavior: CTS will run all tasks once with buffer periods disabled and exit. On encountering an error before completing, CTS will exit with a non-zero status.
Usage: Intended to be run before long-running mode in order to confirm configuration is accurate and tasks update network infrastructure as expected.
The `-config-file` or `-config-dir` flag is required. Use the flag to specify the [CTS instance configuration file](/docs/nia/configuration) or directory containing several configuration files to start a CTS cluster.
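For example, the following hypothetical invocation loads every `.hcl` and `.json` configuration file from a local `config/` directory:
```shell-session
$ consul-terraform-sync start -config-dir ./config
```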
## Options
The `start` command supports the following options:
The following table describes all of the available flags.
| Name | Required | Type | Description |Default |
| --------------- | -------- | ------- | ------------------------------- | ----------------------- |
| `-config-dir ` &nbsp; &nbsp; &nbsp;| Required when`-config-file` is not set | string | A directory to load files for configuring CTS. Configuration files require an .hcl or .json file extension in order to specify their format. This option can be specified multiple times to load different directories. | none |
| `-config-file` &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;| Required when `-config-dir` is not set | string | A file to load for configuring CTS. Configuration file requires an .hcl or .json extension in order to specify their format. This option can be specified multiple times to load different configuration files. | none |
| `-inspect` | Optional | boolean | Run CTS in Inspect mode to print the proposed state changes for all tasks, and then exit. No changes are applied in this mode. | false |
| `-inspect-task` &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | Optional | string | Run CTS in Inspect mode to print the proposed state changes for the task, and then exit. The flag can be specified multiple times to inspect multiple tasks. No changes are applied in this mode. | none |
| `-once` | Optional | boolean | Render templates and run tasks once. Does not start the process in long-running mode and disables buffer periods. | false |
| Name | Required | Type | Description | Default |
| --- | ---- | ---- | ---- | ---- |
| <nobr>`-config-dir`</nobr>| Required when <nobr>`-config-file`</nobr> is not set | string | Specifies a directory containing CTS instance configuration files to load on startup. Files must be in HCL or JSON format. You can specify the flag multiple times to load more than one directory of files. | none |
| <nobr>`-config-file`</nobr> | Required when `-config-dir` is not set | string | Specifies the CTS instance configuration file to load on startup. Files must be in HCL or JSON format. You can specify the flag multiple times to load more than one file. | none |
| <nobr>`-inspect`</nobr> | Optional | boolean | Starts CTS in inspect mode. In inspect mode, CTS displays the proposed state changes for all tasks once and exits. No changes are applied. Refer to [Modes](#modes) for additional information. If an error occurs before displaying all changes, CTS exits with a non-zero status. | `false` |
| <nobr>`-inspect-task`</nobr> | Optional | string | Starts CTS in inspect mode for the specified task. CTS displays the proposed state changes for the specified task and exits. No changes are applied. <br/>You can specify the flag multiple times to display more than one task. <br/>If an error occurs before displaying all changes, CTS exits with a non-zero status. | none |
| <nobr>`-once`</nobr> | Optional | boolean | Starts CTS in once-mode. In once-mode, CTS renders templates, runs tasks once, and disables buffer periods. Refer to [Modes](#modes) for additional information. | `false` |
| <nobr>`-reset-storage`</nobr> | Optional | boolean | <EnterpriseAlert inline /> Directs CTS to overwrite the state storage with new state information when the instance you are starting is elected the cluster leader. <br/>Only use this flag when running CTS in [high availability mode](/docs/nia/usage/run-ha). | `false` |
| `-h`, `-help` | Optional | boolean | Prints the CTS command line help. | `false` |
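For example, the following sketch inspects a single hypothetical task named `web-task` without applying any changes:
```shell-session
$ consul-terraform-sync start -config-file config.hcl -inspect-task web-task
```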
## Modes
By default, CTS starts in long-running mode. The following table describes all available CTS modes.
| Name | Description | How to start |
| --- | --- | --- |
| <nobr>Long-running mode</nobr> | CTS starts in once-mode and switches to a long-running process. <p>During the once-mode phase, the daemon exits with a non-zero status if it encounters an error.</p><p>After successfully operating in once-mode, CTS begins a long-running process.</p><p> When the long-running process begins, the CTS daemon serves API and command requests.</p><p>If an error occurs, CTS logs it and continues running.</p> | No additional flags. <br/>This is the default mode. |
| Once-mode | In once-mode, CTS renders templates and runs tasks once. CTS does not start the process in long-running mode and disables buffer periods. <p>Use once-mode before starting CTS in long-running mode to verify that your configuration is accurate and tasks update network infrastructure as expected.</p> | Add the `-once` flag when starting CTS. |
| Inspect mode | CTS displays the proposed state changes for all tasks once and exits. No changes are applied. If an error occurs before displaying all changes, CTS exits with a non-zero status. <p>Use inspect mode before starting CTS in long-running mode to debug one or more tasks and to verify that your tasks will update network infrastructure as expected.</p> | Add the `-inspect` flag to verify all tasks. <p>Add the `-inspect-task` flag to inspect a single task. Use multiple flags to verify more than one task.</p> |
| <nobr>High availability mode</nobr> | A long-running process that ensures that all changes to Consul that occur during a failover transition are processed and that CTS continues to operate as expected. CTS logs the errors and continues to operate without interruption. Refer to [Run Consul-Terraform-Sync with High Availability](/docs/nia/usage/run-ha) for additional information. | Add the `high_availability` block to your CTS instance configuration. <p>Refer to [Run Consul-Terraform-Sync with High Availability](/docs/nia/usage/run-ha) for additional information.</p> |
@ -7,7 +7,7 @@ description: >-
# Compatibility
The following tables list the Consul-Terraform-Sync (CTS) version compatibility for Consul, Terraform, and Vault.
This topic describes Consul-Terraform-Sync (CTS) cross-compatibility with Consul, Terraform, and Vault.
## Consul
@ -26,8 +26,10 @@ CTS integration with Terraform is supported for the following:
| CTS Version | Terraform CLI Version | Terraform Cloud Version | Terraform Enterprise Version |
| :------------------ | :-------------------- | :---------------------- | :--------------------------- |
| CTS Enterprise 0.7 | 0.13 - 1.2 | Latest | v202010-2 - Latest |
| CTS Enterprise 0.4+ | 0.13 - 1.1 | Latest | v202010-2 - Latest |
| CTS Enterprise 0.3 | 0.13 - 1.1 | N/A | v202010-2 - Latest |
| CTS OSS 0.7 | 0.13 - 1.2 | N/A | N/A |
| CTS OSS 0.3+ | 0.13 - 1.1 | N/A | N/A |
| CTS OSS 0.2 | 0.13 - 1.0 | N/A | N/A |
| CTS OSS 0.1 | 0.13 - 0.14 | N/A | N/A |
@ -5,11 +5,11 @@ description: >-
Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module and a running Consul Cluster outside of the consul-terraform-sync daemon.
---
# Configuration Options for Consul-Terraform-Sync
# Consul-Terraform-Sync Configuration
The Consul-Terraform-Sync (CTS) daemon is configured using configuration files and supports [HashiCorp Configuration Language](https://github.com/hashicorp/hcl) (HCL) and JSON file formats.
This topic contains configuration reference information for Consul-Terraform-Sync (CTS). Pass configuration settings in an HCL or JSON configuration file to configure the CTS daemon. Refer to the [HashiCorp Configuration Language](https://github.com/hashicorp/hcl) to learn the HCL syntax.
## Global Config Options
## Global configurations
Top-level options are reserved for configuring CTS.
@ -44,7 +44,7 @@ tls {
- `max` - (string: "20s") The maximum period of time to wait after changes are detected before triggering related tasks. If `min` is set, the default period for `max` is 4 times the value of `min`.
- `log_level` - (string: "INFO") The log level to use for CTS logging. This defaults to "INFO". The available log levels are "TRACE", "DEBUG", "INFO", "WARN", and "ERR".
- `port` - (int: 8558) The port for CTS to use to serve API requests.
- `id` (string: Generated ID with the format `cts-<hostname>`) The ID of the CTS instance. Used as the service ID for CTS if service registration is enabled.
- `id` (string: Generated ID with the format `cts-<hostname>`) The ID of the CTS instance. CTS uses the ID as the service ID for CTS if service registration is enabled. CTS also uses the ID to identify the instance in a high availability cluster.
- `syslog` - Specifies the syslog server for logging.
- `enabled` - (bool) Enable syslog logging. Specifying any other `syslog` option also enables syslog logging.
- `facility` - (string: "local0") Name of the syslog facility to log to.
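As an illustration, the following sketch combines several of these global options. All values are hypothetical:
```hcl
log_level = "INFO"
port      = 8558
id        = "cts-01"

buffer_period {
  min = "5s"
  max = "20s"
}

syslog {
  enabled  = true
  facility = "local0"
}
```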
@ -82,7 +82,7 @@ license {
| `auto_retrieval` | Optional | object | Configures the license auto-retrieval used by CTS. To learn more, review [Auto-Retrieval](/docs/nia/configuration#auto-retrieval) for details | Review [Auto-Retrieval](/docs/nia/configuration#auto-retrieval) for defaults. |
#### Auto-Retrieval
### Auto-retrieval
You can use the `auto_retrieval` block to configure automatic license retrieval in CTS. When enabled, CTS attempts to retrieve a new license from its configured Consul Enterprise backend once a day. If CTS cannot retrieve a license and the current license is reaching its expiration date, CTS attempts to retrieve a license with increased frequency, as defined by the [License Expiration Date Handling](/docs/nia/enterprise/license#license-expiration-handling).
@ -92,7 +92,7 @@ You can use the `auto_retrieval` block to configure the the automatic license re
| --------- | -------- | ---- | ----------- | ------- |
| `enabled` | Optional | string | If set to true, enables license auto-retrieval | true |
## Consul
## Consul connection
The `consul` block configures the CTS connection with a Consul agent so that CTS can perform queries for task execution. It also configures the automatic registration of CTS as a service with Consul.
@ -127,7 +127,7 @@ consul {
| `transport` | Optional | [transport](/docs/nia/configuration#transport) | Low-level network connection details ||
| `service_registration` | Optional| [service_registration](/docs/nia/configuration#service-registration) | Options for how CTS will register itself as a service with a health check to Consul. ||
##### ACL Requirements
### ACL requirements
The following table describes the ACL policies required by CTS. For more information, refer to the [Secure Consul-Terraform-Sync for Production](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS#configure-acl-privileges-for-consul-terraform-sync) tutorial.
| Policy | Resources |
@ -140,7 +140,7 @@ The following table describes the ACL policies required by CTS. For more informa
| `keys:write` | `consul-terraform-sync/` Only required when using Consul as the [Terraform backend](/docs/nia/configuration#backend). |
#### Auth
### Auth
Configures HTTP basic authentication for communicating with Consul.
| Parameter | Required | Type | Description | Default |
@ -149,7 +149,8 @@ Configures HTTP basic authentication for communicating with Consul.
| `username` | Optional | string | Username for authentication | none |
| `password` | Optional | string | Password for authentication | none |
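The following sketch shows basic authentication settings nested in the `consul` block. The credentials are placeholders, and the `enabled` field is an assumption consistent with the other `consul` sub-blocks:
```hcl
consul {
  auth {
    enabled  = true
    # Placeholder credentials for illustration only.
    username = "cts-user"
    password = "example-password"
  }
}
```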
#### TLS
### TLS
Configure TLS to use a secure client connection with Consul. Using HTTP/2 can solve issues related to hitting Consul's maximum connection limits, as well as improve efficiency when processing many blocking queries. This option is required for Consul-Terraform-Sync when connecting to a [Consul agent with TLS verification enabled for HTTPS connections](/docs/agent/config/config-files#verify_incoming).
If Consul is using a self-signed certificate that you have not added to the global CA chain, you can set this certificate with `ca_cert` or `ca_path`. Alternatively, you can disable SSL verification by setting `verify` to false. However, disabling verification is a potential security vulnerability.
@ -164,7 +165,7 @@ If Consul is using a self-signed certificate that you have not added to the glob
| `key` | Optional | string | The path to the PEM-encoded private key file used with the client certificate configured by `cert`. Required if Consul has `verify_incoming` set to true.<br/><br/>Can also be provided through the `CONSUL_CLIENT_KEY` environment variable. | none |
| `server_name` | Optional | string | The server name to use as the Server Name Indication (SNI) for Consul when connecting via TLS.<br/><br/>Can also be provided through the `CONSUL_TLS_SERVER_NAME` environment variable. | none |
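For example, a sketch of a TLS configuration for connecting to a Consul agent that has `verify_incoming` enabled. All file paths are hypothetical:
```hcl
consul {
  tls {
    enabled = true
    # Placeholder certificate paths for illustration only.
    ca_cert = "/etc/cts/ca.pem"
    cert    = "/etc/cts/client-cert.pem"
    key     = "/etc/cts/client-key.pem"
  }
}
```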
#### Transport
### Transport
Configures the low-level network connection details to Consul.
To achieve the shortest latency between a Consul service update and task execution, configure `max_idle_conns_per_host` equal to or greater than the number of services in automation across all tasks. This value should be lower than the configured [`http_max_conns_per_client`](/docs/agent/config/config-files#http_max_conns_per_client) for the Consul agent.
@ -182,15 +183,15 @@ If `max_idle_conns_per_host` and the number of services in automation is greater
| `tls_handshake_timeout` | Optional | string | The amount of time to wait to complete the TLS handshake. | `10s`|
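For example, if your tasks monitor roughly 100 services, a sketch like the following keeps an idle connection available for each blocking query. The value is hypothetical and should be tuned to your environment:
```hcl
consul {
  transport {
    max_idle_conns_per_host = 100
  }
}
```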
#### Service Registration
### Service registration
CTS automatically registers itself with Consul as a service with a health check, using the [`id`](/docs/nia/configuration#id) configuration as the service ID. CTS deregisters itself with Consul when CTS stops gracefully. If CTS is unable to register with Consul, then it will log the error and continue without exiting.
Service registration requires that the [Consul token](/docs/nia/configuration#consul) has an ACL policy of `service:write` for the CTS service.
| Parameter | Required | Type | Description | Default |
| --------- | -------- | ---- | ----------- | ------- |
| `enabled` | Optional | boolean | Enables CTS to register itself as a service with Consul.| `true` |
| `service_name` | Optional | string | The service name for CTS. | `Consul-Terraform-Sync` |
| `enabled` | Optional | boolean | Enables CTS to register itself as a service with Consul. When service registration is enabled for a [CTS instance configured for high availability](/docs/nia/usage/run-ha), the instance also registers itself with a new tag using the `cts-cluster:<cluster-name>` format. | `true` |
| `service_name` | Optional | string | The service name for CTS. We recommend specifying the same name used for the [`high_availability.cluster.name`](#high-availability-cluster) value if [CTS is configured for high availability](/docs/nia/usage/run-ha). | `consul-terraform-sync` |
| `address` | Optional | string | The IP address or hostname for CTS. | IP address of the Consul agent node |
| `namespace` | Optional | string | <EnterpriseAlert inline /> The namespace to register CTS in. | In order of precedence: <br/> 1. Inferred from the CTS ACL token <br/> 2. The `default` namespace. |
| `default_check.enabled` | Optional | boolean | Enables CTS to create the default health check. | `true` |
@ -213,9 +214,38 @@ The default health check is an [HTTP check](/docs/discovery/checks#http-interval
| Timeout | `2s` |
| TLSSkipVerify | `false` |
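The following sketch shows service registration settings nested in the `consul` block; the nested `default_check` block form is assumed from the dotted parameter names in the table above:
```hcl
consul {
  service_registration {
    enabled      = true
    service_name = "consul-terraform-sync"
    default_check {
      enabled = true
    }
  }
}
```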
## High availability
Add a `high_availability` block to your configuration to enable CTS to run in high availability mode. Refer to [Run Consul-Terraform-Sync with High Availability](/docs/nia/usage/run-ha) for additional information. The `high_availability` block contains the following configuration items.
### High availability cluster
The `cluster` parameter contains configurations for the cluster you want to operate with high availability enabled. You can configure the following options:
| Parameter | Description | Required | Type |
| --------- | ---------- | -------- | ------|
| `name` | Specifies the name of the cluster operating with high availability enabled. | Required | String |
| `storage` | Configures how CTS stores state information. Refer to [State storage and persistence](/docs/nia/architecture#state-storage-and-persistence) for additional information. You can define storage for the `"consul"` resource. Refer to [High availability cluster storage](#high-availability-cluster-storage) for additional information. | Optional | Object |
#### High availability cluster storage
The `high_availability.cluster.storage` object contains the following configurations.
| Parameter | Description | Required | Type |
| --------- | ---------- | -------- | ------|
| `parent_path` | Defines a parent path in the Consul KV for CTS to store state information. Default is `consul-terraform-sync/`. CTS automatically appends the cluster name to the parent path, so the effective default directory for state information is `consul-terraform-sync/<cluster-name>`. | Optional | String |
| `namespace` | Specifies the namespace to use when storing state in the Consul KV. Default is inferred from the CTS ACL token. The fallback default is `default`. | Optional | String |
| `session_ttl` | Specifies the session time-to-live for leader elections. You must specify a value greater than the `session_ttl_min` configured for Consul. A longer `session_ttl` results in a longer leader election after a failover. Default is `15s`. | Optional | String |
### High availability instance
The `instance` parameter is an object that contains configurations unique to the CTS instance. You specify the following configurations:
- `address`: (Optional) String value that specifies the IP address of the CTS instance to advertise to other instances. This parameter does not have a default value.
## Service
~> Deprecated in CTS 0.5.0 and will be removed in a future major release. `service` blocks are used to define the `task` block's `services` fields, which were also deprecated and replaced with [Services Condition](/docs/nia/configuration#services-condition) and [Services Module Input](/docs/nia/configuration#services-module-input). `service` block configuration can be replaced by configuring the equivalent fields of the corresponding Services Condition and Services Module Input. See [0.5.0 release notes](/docs/nia/release-notes/0-5-0#deprecate-service-block) for examples.
~> **Note:** Deprecated in CTS 0.5.0 and will be removed in a future major release. `service` blocks are used to define the `task` block's `services` fields, which were also deprecated and replaced with [Services Condition](/docs/nia/configuration#services-condition) and [Services Module Input](/docs/nia/configuration#services-module-input). `service` block configuration can be replaced by configuring the equivalent fields of the corresponding Services Condition and Services Module Input. Refer to [0.5.0 release notes](/docs/nia/release-notes/0-5-0#deprecate-service-block) for examples.
A `service` block is an optional block to explicitly define the services configured in the `task` block's `services` field (deprecated). `service` blocks do not define services configured in the `task` block's `condition "services"` or `module_input "services"` blocks.
@ -241,7 +271,7 @@ service {
## Task
A `task` block configures which task to execute in automation. When the task should execute can be determined by the `service` block (deprecated) and `condition` block. The `task` block may be specified multiple times to configure multiple tasks.
A `task` block configures which task to execute in automation. Use the `condition` block to specify when the task executes. You can specify the `task` block multiple times to configure multiple tasks, or you can omit it entirely. If task blocks are not specified in your initial configuration, you can add them to a running CTS instance by using the [`/tasks` API endpoint](/docs/nia/api/tasks#tasks) or the [CLI's `task` command](/docs/nia/cli/task#task).
```hcl
task {
@ -624,6 +654,7 @@ driver "terraform" {
- Supported backend options: [azurerm](https://www.terraform.io/docs/backends/types/azurerm.html), [consul](https://www.terraform.io/docs/backends/types/consul.html), [cos](https://www.terraform.io/docs/backends/types/cos.html), [gcs](https://www.terraform.io/docs/backends/types/gcs.html), [kubernetes](https://www.terraform.io/docs/backends/types/kubernetes.html), [local](https://www.terraform.io/docs/backends/types/local.html), [manta](https://www.terraform.io/docs/backends/types/manta.html), [pg](https://www.terraform.io/docs/backends/types/pg.html) (Terraform v0.14+), [s3](https://www.terraform.io/docs/backends/types/s3.html). Visit the Terraform documentation links for details on backend configuration options.
- If omitted, CTS will generate default values and use configurations from the [`consul` block](#consul) to configure [Consul as the backend](https://www.terraform.io/docs/backends/types/consul.html), which stores Terraform statefiles in the Consul KV. The [ACL token provided for Consul authentication](#consul) is used to read and write to the KV store and requires [Consul KV privileges](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS#configure-acl-privileges-for-consul-terraform-sync). The Consul KV path is the base path to store state files for tasks. The full path of each state file will have the task identifier appended to the end of the path, e.g. `consul-terraform-sync/terraform-env:task-name`.
- The remote enhanced backend is not supported with the Terraform driver to run operations in Terraform Cloud. Use the [Terraform Cloud driver](#terraform-cloud-driver) to integrate CTS with Terraform Cloud for remote workspaces and remote operations.
- The `local` backend type is not supported with CTS instances configured for high availability. If high availability is configured and the Terraform backend type is `local`, CTS logs an error and exits.
- `log` - (bool) Enable all Terraform output (stderr and stdout) to be included in the CTS log. This is useful for debugging and development purposes. It may be difficult to work with log aggregators that expect uniform log format.
- `path` - (string) The file path to install Terraform or discover an existing Terraform binary. If omitted, Terraform will be installed in the same directory as the CTS daemon. To resolve an incompatible Terraform version or to change versions, remove the existing binary or change the path.
- `persist_log` - (bool) Enable trace logging for each Terraform client to disk per task. This is equivalent to setting `TF_LOG_PATH=<work_dir>/terraform.log`. Trace log level results in verbose logging and may be useful for debugging and development purposes. We do not recommend enabling this for production. There is no log rotation, and the log file may quickly grow large.
@ -1,129 +0,0 @@
---
layout: docs
page_title: Requirements
description: >-
Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module, and a running Consul cluster outside of the `consul-terraform-sync` daemon.
---
# Prerequisites Needed to Run Consul-Terraform-Sync
The following components are required to run Consul-Terraform-Sync (CTS):
* A Terraform Provider
* A Terraform Module
* A Consul cluster running outside of the `consul-terraform-sync` daemon
Practitioners can add support for their network infrastructure through Terraform providers. Once network infrastructure support exists, practitioners can add network integrations in the form of Terraform modules.
The following guidance is for running CTS using the Terraform driver. The Terraform Cloud driver<EnterpriseAlert inline />has [additional prerequisites](/docs/nia/network-drivers/terraform-cloud#setting-up-terraform-cloud-driver).
## Run a Consul Cluster
Below are several steps towards a minimum Consul setup required for running CTS.
### Install Consul
CTS is a daemon that runs alongside Consul, similar to other Consul ecosystem tools like Consul Template. CTS is not included with the Consul binary and needs to be installed separately.
To install a local Consul agent, refer to the [Getting Started: Install Consul Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-install).
For information on compatible Consul versions, refer to the [Consul compatibility matrix](/docs/nia/compatibility#consul).
### Run an Agent
The Consul agent must be running in order to dynamically update network devices. To run the local Consul agent, you can run Consul in development mode which can be started with `consul agent -dev` for simplicity. For more details on running Consul agent, refer to the [Getting Started: Run the Consul Agent Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-agent?in=consul/getting-started).
When running a Consul agent with CTS in production, we suggest to keep a few considerations in mind. CTS uses [blocking queries](/api-docs/features/blocking) to monitor task dependencies, like changes to registered services. This results in multiple long running TCP connections between CTS and the agent to poll changes for each dependency. Monitoring a high number of services may quickly hit the default Consul agent connection limits.
There are 2 ways to fix this issue. The first and recommended fix is to use HTTP/2 (requires HTTPS) to communicate between Consul-Terraform-Sync and the Consul agent. When using HTTP/2 only a single connection is made and reused for all communications. See the [Consul Configuration section](/docs/nia/configuration#consul) for more. The other option is to configure [`limits.http_max_conns_per_client`](/docs/agent/config/config-files#http_max_conns_per_client) for the agent to a reasonable value proportional to the number of services monitored by Consul-Terraform-Sync.
### Register Services
CTS monitors Consul catalog for service changes which lead to downstream changes to your network devices. Without services, your CTS daemon will be operational but idle. You can register services with your Consul agent either by loading a service definition or by HTTP API request.
If you are running Consul in development mode, below is an example of registering a service by HTTP API request:
```shell-session
$ echo '{
"ID": "web",
"Name": "web",
"Address": "10.10.10.10",
"Port": 8000
}' > payload.json
$ curl --request PUT --data @payload.json http://localhost:8500/v1/agent/service/register
```
The above example registers a service named "web" with your Consul agent. This represents a non-existent web service running at 10.10.10.10:8000. Your web service is now available for CTS to consume. You can have CTS monitor the web service to execute a task and update network device(s) by configuring "web" in [`condition "services"`](/docs/nia/configuration#services-condition) of a task block. If the web service has any non-default values, it can also be configured in `condition "services"`.
For more details on registering a service by HTTP API request, refer to the [register service API docs](/api-docs/agent/service#register-service).
For more details on registering a service by loading a service definition, refer to the [Getting Started: Register a Service with Consul Service Discovery Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-service-discovery?in=consul/getting-started).
### Run a Cluster
The previous steps of installing and running a single Consul agent then registering a single service is sufficient to meaningfully start running CTS.
If you would like to run a Consul cluster rather than a single agent, refer to [Getting Started: Create a Local Consul Datacenter](https://learn.hashicorp.com/tutorials/consul/get-started-create-datacenter?in=consul/getting-started). This will walk you through the steps of running multiple Consul agents and then joining them together into a cluster.
## Network Infrastructure (using a Terraform Provider)
CTS integrations for the Terraform driver utilizes Terraform providers as plugins to interface with specific network infrastructure platforms. The Terraform driver of CTS inherits the expansive collection of Terraform providers to integrate with, and with release of [Terraform 0.13](https://www.hashicorp.com/blog/announcing-hashicorp-terraform-0-13/), this extends to include providers written by the community too by using [provider source](https://www.hashicorp.com/blog/adding-provider-source-to-hashicorp-terraform/).
### Finding Terraform Providers
To find providers for the infrastructure platforms you use, browse the providers section of the [Terraform Registry](https://registry.terraform.io/browse/providers).
### How to Create a Provider
If there is no existing Terraform provider, a new Terraform provider can be [created](https://learn.hashicorp.com/tutorials/terraform/provider-setup) and [published](https://www.terraform.io/docs/registry/providers/publishing.html). The provider can then be used within a network integration task by authoring a compatible Terraform module.
## Network Integration (using a Terraform Module)
The Terraform module for a task in CTS is the core component of the integration. It declares which resources and how your infrastructure is dynamically updated. The module along with how it is configured within a task determines the condition under which your infrastructure is updated.
Working with a Terraform provider, you can write an integration task for CTS by [creating a Terraform module](/docs/nia/terraform-modules) that is compatible with the Terraform driver or use a module built by partners below.
Continue to the next page to [get started with configuring CTS and how to use Terraform providers and modules for tasks.](/docs/nia/installation/configure)
### Partner Terraform Modules
The modules listed below are available to use and are compatible with CTS.
#### A10 Networks
- Dynamic Load Balancing with Group Member Updates: [Terraform Registry](https://registry.terraform.io/modules/a10networks/service-group-sync-nia/thunder/latest) / [GitHub](https://github.com/a10networks/terraform-thunder-service-group-sync-nia)
#### Avi Networks
- Scale Up and Scale Down Pool and Pool Members (Servers): [GitHub](https://github.com/vmware/terraform-provider-avi/tree/20.1.5/modules/nia/pool)
#### AWS Application Load Balancer (ALB)
- Create Listener Rule and Target Group for an AWS ALB, Forward Traffic to Consul Ingress Gateway: [Terraform Registry](https://registry.terraform.io/modules/aws-quickstart/cts-alb_listener-nia/hashicorp/latest) / [GitHub](https://github.com/aws-quickstart/terraform-hashicorp-cts-alb_listener-nia)
#### Checkpoint
- Dynamic Firewalling with Address Object Updates: [Terraform Registry](https://registry.terraform.io/modules/CheckPointSW/dynobj-nia/checkpoint/latest) / [GitHub](https://github.com/CheckPointSW/terraform-checkpoint-dynobj-nia)
#### Cisco ACI
- Policy Based Redirection: [Terraform Registry](https://registry.terraform.io/modules/CiscoDevNet/autoscaling-nia/aci/latest) / [GitHub](https://github.com/CiscoDevNet/terraform-aci-autoscaling-nia)
- Create and Update Cisco ACI Endpoint Security Groups: [Terraform Registry](https://registry.terraform.io/modules/CiscoDevNet/esg-nia/aci/latest) / [GitHub](https://github.com/CiscoDevNet/terraform-aci-esg-nia)
#### Citrix ADC
- Create, Update and Delete Service Groups in Citrix ADC: [Terraform Registry](https://registry.terraform.io/modules/citrix/servicegroup-consul-sync-nia/citrixadc/latest) / [GitHub](https://github.com/citrix/terraform-citrixadc-servicegroup-consul-sync-nia)
#### F5
- Dynamic Load Balancing with Pool Member Updates: [Terraform Registry](https://registry.terraform.io/modules/f5devcentral/app-consul-sync-nia/bigip/latest) / [GitHub](https://github.com/f5devcentral/terraform-bigip-app-consul-sync-nia)
#### NS1
- Create, Delete and Update DNS Records and Zones: [Terraform Registry](https://registry.terraform.io/modules/ns1-terraform/record-sync-nia/ns1/latest) / [GitHub](https://github.com/ns1-terraform/terraform-ns1-record-sync-nia)
#### Palo Alto Networks
- Dynamic Address Group (DAG) Tags: [Terraform Registry](https://registry.terraform.io/modules/PaloAltoNetworks/dag-nia/panos/latest) / [GitHub](https://github.com/PaloAltoNetworks/terraform-panos-dag-nia)
- Address Group and Dynamic Address Group (DAG) Tags: [Terraform Registry](https://registry.terraform.io/modules/PaloAltoNetworks/ag-dag-nia/panos/latest) / [GitHub](https://github.com/PaloAltoNetworks/terraform-panos-ag-dag-nia)
@ -1,34 +0,0 @@
---
layout: docs
page_title: Run Consul-Terraform-Sync
description: >-
Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module and a running Consul Cluster outside of the `consul-terraform-sync` daemon.
---
# Run Consul-Terraform-Sync
1. Move the `consul-terraform-sync` binary to a location available on your `PATH`.
```shell-session
$ mv ~/Downloads/consul-terraform-sync /usr/local/bin/consul-terraform-sync
```
2. Create the config.hcl file, all the options are available [here](/docs/nia/configuration).
3. Run Consul-Terraform-Sync (CTS).
```shell-session
$ consul-terraform-sync start -config-file <config.hcl>
```
4. Check status of tasks. Replace port number if configured in Step 2. See additional API endpoints [here](/docs/nia/api)
```shell-session
$ curl localhost:8558/status/tasks
```
## Other Run modes
CTS allows you to inspect your configuration before applying any change and to run in once mode, meaning that you can verify the changes are correctly applied in a test run before running it in unsupervised daemon mode.
To learn more on these options check the [Consul-Terraform-Sync Run Modes and Status Inspection](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-run-and-inspect?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial.
@ -0,0 +1,143 @@
---
layout: docs
page_title: Error Messages
description: >-
Look up Consul-Terraform-Sync error messages to learn how to resolve potential issues using CTS.
---
# Error Messages
This topic explains error messages you may encounter when using Consul-Terraform-Sync (CTS).
## Example error log messages
If you configured the CTS cluster to run in [high availability mode](/docs/nia/usage/run-ha) and the local module is missing, then the following message appears in the log:
```shell-session
[ERROR] ha.compat: error="compatibility check failure: stat ./example-module: no such file or directory"
```
The resolution is to add the missing local module on the incompatible CTS instance. Refer to the [`module` documentation](/docs/nia/configuration#module) in the CTS configuration reference for additional information.
## Example API and CLI error messages
**Error**:
```json
{
"error": {
"message": "redirect requests to leader 'cts-01' at cts-01.example.com:8558"
}
}
```
**Conditions**:
- CTS can determine the leader.
- `high_availability.instance.address` is configured for the leader.
- The CTS instance you sent the request to is not the leader.
**Resolution**:
Redirect the request to the leader instance, for example:
```shell-session
$ curl --request GET cts-01.example.com:8558/v1/tasks
```
---
**Error**:
```json
{
"error": {
"message": "redirect requests to leader 'cts-01'"
}
}
```
**Conditions**:
* CTS can determine the leader.
* The CTS instance you sent the request to is not the leader.
* `high_availability.instance.address` is not configured.
**Resolution**:
Identify the leader instance address and redirect the request to the leader. You can identify the leader by calling the [`status/cluster` API endpoint](/docs/nia/api/status#cluster-status) or by checking the logs for the following entry:
```shell-session
[INFO] ha: acquired leadership lock: id=<ID-OF-CTS-INSTANCE>
```
We recommend deploying a cluster that has three instances.
---
**Error**:
```json
{
"error": {
"message": "redirect requests to leader"
}
}
```
**Conditions**:
* The CTS instance you sent the request to is not the leader.
* CTS is unable to determine the leader.
* Note that these conditions are rare.
**Resolution**:
Identify and send the request to the leader CTS instance. You can identify the leader by calling the [`status/cluster` API endpoint](/docs/nia/api/status#cluster-status) or by checking the logs for the following entry:
```shell-session
[INFO] ha: acquired leadership lock: id=<ID-OF-CTS-INSTANCE>
```
---
**Error**:
```json
{
"error": {
"message": "this endpoint is only available with high availability configured"
}
}
```
**Conditions**:
- You called the [`status/cluster` API endpoint](/docs/nia/api/status#cluster-status) without configuring CTS for [high availability](/docs/nia/usage/run-ha).
**Resolution**:
Configure CTS to run in [high availability mode](/docs/nia/usage/run-ha).
---
**Error**:
```json
{
"error": {
"message": "example error message: unsupported status parameter value"
}
}
```
**Conditions**:
- You sent a request to the `status` API endpoint.
- The request included an unsupported parameter value.
**Resolution**:
Send a new request and verify that all of the parameter values are correct.
@ -0,0 +1,134 @@
---
layout: docs
page_title: Requirements
description: >-
Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module, and a running Consul cluster outside of the `consul-terraform-sync` daemon.
---
# Requirements
The following components are required to run Consul-Terraform-Sync (CTS):
- A Terraform provider
- A Terraform module
- A Consul cluster running outside of the `consul-terraform-sync` daemon
You can add support for your network infrastructure through Terraform providers so that you can apply Terraform modules to implement network integrations.
The following guidance is for running CTS using the Terraform driver. The Terraform Cloud driver<EnterpriseAlert inline /> has [additional prerequisites](/docs/nia/network-drivers/terraform-cloud#setting-up-terraform-cloud-driver).
## Run a Consul cluster
The following steps describe the minimum Consul setup required to run CTS.
### Install Consul
CTS is a daemon that runs alongside Consul, similar to other Consul ecosystem tools like Consul Template. CTS is not included with the Consul binary and needs to be installed separately.
To install a local Consul agent, refer to the [Getting Started: Install Consul Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-install).
For information on compatible Consul versions, refer to the [Consul compatibility matrix](/docs/nia/compatibility#consul).
### Run an agent
The Consul agent must be running in order to dynamically update network devices. Refer to the [Consul agent documentation](/docs/agent/index) for information about configuring and starting a Consul agent. For hands-on instructions about running Consul agents, refer to the [Getting Started: Run the Consul Agent Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-agent?in=consul/getting-started).
When running a Consul agent with CTS in production, consider that CTS uses [blocking queries](/api-docs/features/blocking) to monitor task dependencies, such as changes to registered services. This results in multiple long-running TCP connections between CTS and the agent to poll changes for each dependency. Consul may quickly reach the agent connection limits if CTS is monitoring a high number of services.
To avoid reaching the limit prematurely, we recommend using HTTP/2 (requires HTTPS) to communicate between CTS and the Consul agent. When using HTTP/2, CTS establishes a single connection and reuses it for all communication. Refer to the [Consul Configuration section](/docs/nia/configuration#consul) for details.
Alternatively, you can configure the [`limits.http_max_conns_per_client`](/docs/agent/config/config-files#http_max_conns_per_client) option to set a maximum number of connections that meets your needs.
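For example, a sketch of the corresponding Consul agent setting. The value is hypothetical and should be sized proportionally to the number of services CTS monitors:
```hcl
limits {
  http_max_conns_per_client = 400
}
```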
### Register services
CTS monitors the Consul catalog for service changes that lead to downstream changes to your network devices. Without services, your CTS daemon is operational but idle. You can register services with your Consul agent either by loading a service definition or by sending an HTTP API request.
The following HTTP API request example registers a service named `web` with your Consul agent:
```shell-session
$ echo '{
"ID": "web",
"Name": "web",
"Address": "10.10.10.10",
"Port": 8000
}' > payload.json
$ curl --request PUT --data @payload.json http://localhost:8500/v1/agent/service/register
```
The example represents a non-existent web service running at `10.10.10.10:8000` that is now available for CTS to consume.
You can configure CTS to monitor the web service, execute a task, and update network device(s) by configuring `web` in the [`condition "services"`](/docs/nia/configuration#services-condition) task block. If the web service has any non-default values, it can also be configured in `condition "services"`.
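For example, a sketch of a task that watches the `web` service. The task name and module source are hypothetical:
```hcl
task {
  name   = "web-task"
  module = "path/to/module"
  condition "services" {
    # Trigger the task when instances of the "web" service change.
    names = ["web"]
  }
}
```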
For more details on registering a service using the HTTP API endpoint, refer to the [register service API docs](/api-docs/agent/service#register-service).
For hands-on instructions on registering a service by loading a service definition, refer to the [Getting Started: Register a Service with Consul Service Discovery Tutorial](https://learn.hashicorp.com/tutorials/consul/get-started-service-discovery?in=consul/getting-started).
### Run a cluster
For production environments, we recommend operating a Consul cluster rather than a single agent. Refer to [Getting Started: Create a Local Consul Datacenter](https://learn.hashicorp.com/tutorials/consul/get-started-create-datacenter?in=consul/getting-started) for instructions on starting multiple Consul agents and joining them into a cluster.
## Network infrastructure using a Terraform provider
CTS integrations for the Terraform driver use Terraform providers as plugins to interface with specific network infrastructure platforms. The Terraform driver for CTS inherits the expansive collection of Terraform providers to integrate with. You can also specify a provider `source` in the [`required_providers` configuration](https://www.terraform.io/language/providers/requirements#requiring-providers) to use providers written by the community (requires Terraform 0.13 or later).
### Finding Terraform providers
To find providers for the infrastructure platforms you use, browse the providers section of the [Terraform Registry](https://registry.terraform.io/browse/providers).
### How to create a provider
If a Terraform provider does not exist for your environment, you can create a new Terraform provider and publish it to the registry so that you can use it within a network integration task or create a compatible Terraform module. Refer to the following Terraform tutorial and documentation for additional information on creating and publishing providers:
- [Setup and Implement Read](https://learn.hashicorp.com/tutorials/terraform/provider-setup)
- [Publishing Providers](https://www.terraform.io/docs/registry/providers/publishing.html).
## Network integration using a Terraform module
The Terraform module for a task in CTS is the core component of the integration. It declares which resources to use and how your infrastructure is dynamically updated. The module, along with how it is configured within a task, determines the conditions under which your infrastructure is updated.
Working with a Terraform provider, you can write an integration task for CTS by [creating a Terraform module](/docs/nia/terraform-modules) that is compatible with the Terraform driver. You can also use a [module built by partners](#partner-terraform-modules).
Refer to [Configuration](/docs/nia/configuration) for information about configuring CTS and how to use Terraform providers and modules for tasks.
### Partner Terraform Modules
The modules listed below are available to use and are compatible with CTS.
#### A10 Networks
- Dynamic Load Balancing with Group Member Updates: [Terraform Registry](https://registry.terraform.io/modules/a10networks/service-group-sync-nia/thunder/latest) / [GitHub](https://github.com/a10networks/terraform-thunder-service-group-sync-nia)
#### Avi Networks
- Scale Up and Scale Down Pool and Pool Members (Servers): [GitHub](https://github.com/vmware/terraform-provider-avi/tree/20.1.5/modules/nia/pool)
#### AWS Application Load Balancer (ALB)
- Create Listener Rule and Target Group for an AWS ALB, Forward Traffic to Consul Ingress Gateway: [Terraform Registry](https://registry.terraform.io/modules/aws-quickstart/cts-alb_listener-nia/hashicorp/latest) / [GitHub](https://github.com/aws-quickstart/terraform-hashicorp-cts-alb_listener-nia)
#### Checkpoint
- Dynamic Firewalling with Address Object Updates: [Terraform Registry](https://registry.terraform.io/modules/CheckPointSW/dynobj-nia/checkpoint/latest) / [GitHub](https://github.com/CheckPointSW/terraform-checkpoint-dynobj-nia)
#### Cisco ACI
- Policy Based Redirection: [Terraform Registry](https://registry.terraform.io/modules/CiscoDevNet/autoscaling-nia/aci/latest) / [GitHub](https://github.com/CiscoDevNet/terraform-aci-autoscaling-nia)
- Create and Update Cisco ACI Endpoint Security Groups: [Terraform Registry](https://registry.terraform.io/modules/CiscoDevNet/esg-nia/aci/latest) / [GitHub](https://github.com/CiscoDevNet/terraform-aci-esg-nia)
#### Citrix ADC
- Create, Update, and Delete Service Groups in Citrix ADC: [Terraform Registry](https://registry.terraform.io/modules/citrix/servicegroup-consul-sync-nia/citrixadc/latest) / [GitHub](https://github.com/citrix/terraform-citrixadc-servicegroup-consul-sync-nia)
#### F5
- Dynamic Load Balancing with Pool Member Updates: [Terraform Registry](https://registry.terraform.io/modules/f5devcentral/app-consul-sync-nia/bigip/latest) / [GitHub](https://github.com/f5devcentral/terraform-bigip-app-consul-sync-nia)
#### NS1
- Create, Delete, and Update DNS Records and Zones: [Terraform Registry](https://registry.terraform.io/modules/ns1-terraform/record-sync-nia/ns1/latest) / [GitHub](https://github.com/ns1-terraform/terraform-ns1-record-sync-nia)
#### Palo Alto Networks
- Dynamic Address Group (DAG) Tags: [Terraform Registry](https://registry.terraform.io/modules/PaloAltoNetworks/dag-nia/panos/latest) / [GitHub](https://github.com/PaloAltoNetworks/terraform-panos-dag-nia)
- Address Group and Dynamic Address Group (DAG) Tags: [Terraform Registry](https://registry.terraform.io/modules/PaloAltoNetworks/ag-dag-nia/panos/latest) / [GitHub](https://github.com/PaloAltoNetworks/terraform-panos-ag-dag-nia)
@ -0,0 +1,181 @@
---
layout: docs
page_title: Run Consul-Terraform-Sync with High Availability
description: >-
Improve network automation resiliency by enabling high availability for Consul-Terraform-Sync. HA enables persistent task and event data so that CTS functions as expected during a failover event.
---
# Run Consul-Terraform-Sync with High Availability
<EnterpriseAlert>
Licenses are only required for Consul-Terraform-Sync (CTS) Enterprise
</EnterpriseAlert>
This topic describes how to run Consul-Terraform-Sync (CTS) configured for high availability. High availability is an enterprise capability that ensures that all changes to Consul that occur during a failover transition are processed and that CTS continues to operate as expected.
## Introduction
A CTS cluster always has exactly one instance that is the designated leader. The leader is responsible for monitoring and running tasks. If the leader fails, CTS triggers the following process when it is configured for high availability:
1. The CTS cluster promotes a new leader from the pool of followers in the network.
1. The new leader begins running all existing tasks in `once-mode` in order to process changes that occurred during the failover transition period. In this mode, CTS runs all existing tasks one time.
1. The new leader logs any errors that occur during `once-mode` operation and continues to monitor Consul for changes.
In a standard configuration, CTS exits if errors occur when the CTS instance runs tasks in `once-mode`. In a high availability configuration, CTS logs the errors and continues to operate without interruption.
The following diagram shows the operating state when high availability is enabled. CTS Instance A is the current leader and is responsible for monitoring and running tasks:
![Consul-Terraform-Sync architecture configured for high availability before a shutdown event](/img/nia/cts-ha-before.svg)
The following diagram shows the CTS cluster state after the leader stops. CTS Instance B becomes the leader responsible for monitoring and running tasks.
![Consul-Terraform-Sync architecture configured for high availability after a shutdown event](/img/nia/cts-ha-after.svg)
### Failover details
- The time it takes for a new leader to be elected is determined by the `high_availability.cluster.storage.session_ttl` configuration. The minimum failover time is equal to the `session_ttl` value. The maximum failover time is double the `session_ttl` value.
- If failover occurs during task execution, a new leader is elected. The new leader will attempt to run all tasks once before continuing to monitor for changes.
- If using the [Terraform Cloud (TFC) driver](/docs/nia/network-drivers/terraform-cloud), the in-flight task finishes, and the new leader attempts to queue a run for each task in TFC in once-mode.
- If using the [Terraform driver](/docs/nia/network-drivers/terraform), the task may or may not complete, depending on the cause of the failover. The new leader starts and attempts to run each task in [once-mode](/docs/nia/cli/start#modes). Depending on the module and provider, the task may require manual intervention to fix inconsistencies between the infrastructure and the Terraform state.
- If failover occurs when no task is executing, CTS elects a new leader that attempts to run all tasks in once-mode.
Note that driver behavior is consistent whether or not CTS is running in high availability mode.
## Requirements
Verify that you have met the [basic requirements](/docs/nia/usage/requirements) for running CTS.
* CTS Enterprise 0.7 or later
* Terraform CLI 0.13 or later
* All instances in a cluster must be in the same datacenter.
You must configure appropriate ACL permissions for your cluster. Refer to [ACL permissions](#acl-permissions) for details.
We recommend specifying the [TFC driver](/docs/nia/network-drivers/terraform-cloud) in your CTS configuration if you want to run in high availability mode.
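If you use the TFC driver, the driver block might look like the following sketch. The hostname, organization, and token values are placeholder assumptions, not values to rely on:

```hcl
# Sketch of a TFC driver block; all values below are placeholders.
driver "terraform-cloud" {
  hostname     = "https://app.terraform.io"
  organization = "example-org"
  token        = "<TFC_API_TOKEN>"
}
```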
## Configuration
Add the `high_availability` block in your CTS configuration and configure the required settings to enable high availability. Refer to the [Configuration reference](/docs/nia/configuration#high-availability) for details about the configuration fields for the `high_availability` block.
The following example configures high availability functionality for a cluster named `cts-cluster`:
<CodeBlockConfig filename="cts-config.hcl">
```hcl
high_availability {
  cluster {
    name = "cts-cluster"
    storage "consul" {
      parent_path = "cts"
      namespace   = "ns"
      session_ttl = "30s"
    }
  }

  instance {
    address = "cts-01.example.com"
  }
}
```
</CodeBlockConfig>
### ACL permissions
The ACL token that CTS uses must have `write` permissions for the `session` and `key` resources in your Consul environment. Refer to the [ACL documentation](/docs/security/acl) for details on how to define ACL policies.
If the `high_availability.cluster.storage.namespace` field is configured, then your ACL policy must also enable `write` permissions for the `namespace` resource.
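The following is a minimal policy sketch, assuming the `parent_path` of `cts` from the earlier example; adjust the key prefix to match your storage configuration, and scope the token to your namespace if you set one:

```hcl
# Minimal sketch; the "cts" prefix is an assumption that must match
# high_availability.cluster.storage.parent_path.
session_prefix "" {
  policy = "write"
}

key_prefix "cts" {
  policy = "write"
}
```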
## Start a new CTS cluster
We recommend deploying a cluster of three CTS instances so that the cluster has one leader and two followers.
1. Create an HCL configuration file with the settings you want, including the `high_availability` block. Refer to [Configuration Options for Consul-Terraform-Sync](/docs/nia/configuration) for all configuration options.
1. Issue the startup command and pass the configuration file. Refer to the [`start` command reference](/docs/nia/cli/start#modes) for additional information about CTS startup modes.
```shell-session
$ consul-terraform-sync start -config-file ha-config.hcl
```
1. You can call the `/status` API endpoint to verify the status of tasks CTS is configured to monitor. Only the leader of the cluster will return a successful response. Refer to the [`/status` API reference documentation](/docs/nia/api/status) for information about usage and responses.
```shell-session
$ curl localhost:<port>/status/tasks
```
Repeat the procedure to start the remaining instances for your cluster. We recommend using near-identical configurations for all instances in your cluster. You may not be able to use identical configurations in all cases, but starting instances with the same configuration improves consistency and reduces confusion if you need to troubleshoot errors.
## Modify an instance configuration
You can implement a rolling update to update a non-task configuration for a CTS instance, such as the Consul connection settings. If you need to update a task in the instance configuration, refer to [Modify tasks](#modify-tasks).
1. Identify the leader CTS instance by making a call to the [`status/cluster` API endpoint](/docs/nia/api/status#cluster-status) (see the example query after this procedure) or by checking the logs for the following entry:
```shell-session
[INFO] ha: acquired leadership lock: id=<ID-OF-CTS-INSTANCE>
```
1. Stop one of the follower CTS instances and apply the new configuration.
1. Restart the follower instance.
1. Repeat steps 2 and 3 for other follower instances in your cluster.
1. Stop the leader instance. One of the follower instances becomes the leader.
1. Apply the new configuration to the former leader instance and restart it.
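The following is a hypothetical query for step 1, assuming the CTS API listens on the default port `8558` and the versioned path documented in the [`status/cluster` API reference](/docs/nia/api/status#cluster-status):

```shell-session
$ curl localhost:8558/v1/status/cluster
```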
## Modify tasks
When high availability is enabled, CTS persists task and event data. Refer to [State storage and persistence](/docs/nia/architecture#state-storage-and-persistence) for additional information.
You can use the following methods for modifying tasks when high availability is enabled. We recommend choosing a single method to make all task configuration changes because inconsistencies between the state and the configuration can occur when mixing methods.
### Delete and recreate the task (recommended)
Use the CTS API to identify the CTS leader instance as well as delete and replace a task.
1. Identify the leader CTS instance by either making a call to the [`status/cluster` API endpoint](/docs/nia/api/status#cluster-status) or by checking the logs for the following entry:
```shell-session
[INFO] ha: acquired leadership lock: id=<ID-OF-CTS-INSTANCE>
```
1. Send a `DELETE` call to the [`/tasks/<task-name>` endpoint](/docs/nia/api/tasks#delete-task) to delete the task. In the following example, the leader instance is at `localhost:8558`:
```shell-session
$ curl --request DELETE localhost:8558/v1/tasks/task_a
```
You can also use the [`task delete` command](/docs/nia/cli/task#task-delete) to complete this step.
1. Send a `POST` call to the `/tasks` endpoint and include the updated task definition in your payload (a sketch of a payload file follows this procedure).
```shell-session
$ curl --header "Content-Type: application/json" \
--request POST \
--data @payload.json \
localhost:8558/v1/tasks
```
You can also use the [`task-create` command](/docs/nia/cli/task#task-create) to complete this step.
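The following is a hypothetical `payload.json` for the `POST` call above. The task name, module source, and service names are placeholders; refer to the [tasks API documentation](/docs/nia/api/tasks) for the full schema:

<CodeBlockConfig filename="payload.json">

```json
{
  "task": {
    "name": "task_a",
    "module": "path/to/module",
    "condition": {
      "services": {
        "names": ["api"]
      }
    }
  }
}
```

</CodeBlockConfig>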
### Discard data with the `-reset-storage` flag
You can restart the CTS cluster using the [`-reset-storage` flag](/docs/nia/cli/start#options) to discard persisted data if you need to update a task.
1. Stop a follower instance.
1. Update the instance's task configuration.
1. Restart the instance and include the `-reset-storage` flag (see the example after this procedure).
1. Stop all other instances so that the updated instance becomes the leader.
1. Start all other instances again.
1. Restart the instance you restarted in step 3 without the `-reset-storage` flag so that it starts up with the current state. If you continue to run an instance with the `-reset-storage` flag enabled, then CTS will reset the state data whenever the instance becomes the leader.
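For example, the restart in step 3 might look like the following, assuming the configuration file is named `cts-config.hcl`:

```shell-session
$ consul-terraform-sync start -config-file cts-config.hcl -reset-storage
```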
## Troubleshooting
Use the following troubleshooting procedure if a previous leader ran a task successfully but the new leader logs an error after a failover:
1. Check the logs printed to the console for errors. Refer to the [`syslog` configuration](/docs/nia/configuration#syslog) for information on how to locate the logs. In the following example output, CTS reported a `401: Bad credentials` error:
```shell-session
2022-08-23T09:25:09.501-0700 [ERROR] tasksmanager: error applying task: task_name=config-task
error=
| error tf-apply for 'config-task': exit status 1
|
| Error: GET https://api.github.com/user: 401 Bad credentials []
|
| with module.config-task.provider["registry.terraform.io/integrations/github"],
| on .terraform/modules/config-task/main.tf line 11, in provider "github":
| 11: provider "github" {
|
```
1. Check for differences between the previous leader and new leader, such as differences in configurations, environment variables, and local resources.
1. Start a new instance with the fix that resolves the issue.
1. Tear down the leader instance that has the issue and any other instances that may have the same issue.
1. Restart the affected instances to implement the fix.

View File

@ -0,0 +1,41 @@
---
layout: docs
page_title: Run Consul-Terraform-Sync
description: >-
Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module, and a running Consul Cluster outside of the `consul-terraform-sync` daemon.
---
# Run Consul-Terraform-Sync
This topic describes the basic procedure for running Consul-Terraform-Sync (CTS). Verify that you have met the [basic requirements](/docs/nia/usage/requirements) before attempting to run CTS.
1. Move the `consul-terraform-sync` binary to a location available on your `PATH`.
```shell-session
$ mv ~/Downloads/consul-terraform-sync /usr/local/bin/consul-terraform-sync
```
2. Create the `config.hcl` file and configure the options for your use case. Refer to the [configuration reference](/docs/nia/configuration) for details about all CTS configurations. A minimal sketch of this file follows the procedure.
3. Run Consul-Terraform-Sync (CTS).
```shell-session
$ consul-terraform-sync start -config-file <config.hcl>
```
4. Check the status of tasks. Replace the port number if you configured a non-default port in step 2. Refer to [Consul-Terraform-Sync API](/docs/nia/api) for additional information.
```shell-session
$ curl localhost:8558/status/tasks
```
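The following is a minimal sketch of a `config.hcl` for step 2. The Consul address, task name, module source, and service names are placeholder assumptions:

<CodeBlockConfig filename="config.hcl">

```hcl
# Minimal sketch; all values below are placeholders.
consul {
  address = "localhost:8500"
}

driver "terraform" {}

task {
  name   = "example-task"
  module = "path/to/module"

  condition "services" {
    names = ["web"]
  }
}
```

</CodeBlockConfig>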
## Other Run modes
You can [configure CTS for high availability](/docs/nia/usage/run-ha), which is an enterprise capability that ensures that all changes to Consul that occur during a failover transition are processed and that CTS continues to operate as expected.
You can start CTS in [inspect mode](/docs/nia/cli/start#modes) to review and test your configuration before applying any changes. Inspect mode allows you to verify that the changes work as expected before running them in an unsupervised daemon mode.
For hands-on instructions on using inspect mode, refer to the [Consul-Terraform-Sync Run Modes and Status Inspection](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-run-and-inspect?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial.

View File

@ -794,16 +794,29 @@
"path": "nia/installation/install"
},
{
"title": "Requirements",
"path": "nia/installation/requirements"
},
{
"title": "Configure",
"title": "Configuration",
"path": "nia/installation/configure"
}
]
},
{
"title": "Usage",
"routes": [
{
"title": "Requirements",
"path": "nia/usage/requirements"
},
{
"title": "Run Consul-Terraform-Sync",
"path": "nia/installation/run"
"path": "nia/usage/run"
},
{
"title": "Run Consul-Terraform-Sync with High Availability",
"path": "nia/usage/run-ha"
},
{
"title": "Error Messages",
"path": "nia/usage/errors-ref"
}
]
},

BIN
website/public/img/nia/cts-ha-after.png (Stored with Git LFS) Normal file

Binary file not shown.

Some files were not shown because too many files have changed in this diff.