Merge pull request #11566 from hashicorp/ap/ingress
OSS Backport: Allow ingress gateways to target other partitions
commit f4cbde4086
@@ -0,0 +1,3 @@
+```release-note:improvement
+connect: **(Enterprise only)** Allow ingress gateways to target services in another partition
+```
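The hunks below are easier to follow with the end state in mind. Here is a minimal sketch of what the backport enables through the Go API client: an ingress gateway config entry whose listener targets a service in another admin partition via the new `Partition` field. The agent address, the gateway name "ingress", and the service names are illustrative, not taken from this commit.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to a local agent with default settings (illustrative).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entry := &api.IngressGatewayConfigEntry{
		Kind: api.IngressGateway,
		Name: "ingress", // hypothetical gateway name
		Listeners: []api.IngressListener{{
			Port:     8080,
			Protocol: "tcp",
			Services: []api.IngressService{{
				Name:      "web", // hypothetical destination service
				Namespace: "default",
				Partition: "ap1", // new in this change: target another partition
			}},
		}},
	}

	ok, _, err := client.ConfigEntries().Set(entry, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ingress config entry written:", ok)
}
```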
@@ -155,7 +155,7 @@ func makeUpstream(g *structs.GatewayService) structs.Upstream {
 	upstream := structs.Upstream{
 		DestinationName:      g.Service.Name,
 		DestinationNamespace: g.Service.NamespaceOrDefault(),
-		DestinationPartition: g.Gateway.PartitionOrDefault(),
+		DestinationPartition: g.Service.PartitionOrDefault(),
 		LocalBindPort:        g.Port,
 		IngressHosts:         g.Hosts,
 		// Pass the protocol that was configured on the ingress listener in order
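This one-line change is the heart of the backport: the upstream's destination partition now follows the target service instead of the gateway that exposes it. A standalone sketch of the before/after behavior, using hypothetical stand-in types rather than the real structs package:

```go
// Hypothetical stand-ins for the gateway/service references held by
// structs.GatewayService; only PartitionOrDefault matters here.
package main

import "fmt"

type ref struct{ Partition string }

func (r ref) PartitionOrDefault() string {
	if r.Partition == "" {
		return "default"
	}
	return r.Partition
}

func main() {
	gateway := ref{}                 // gateway registered in the default partition
	service := ref{Partition: "ap1"} // target service lives in partition ap1

	fmt.Println("old behavior:", gateway.PartitionOrDefault()) // default
	fmt.Println("new behavior:", service.PartitionOrDefault()) // ap1
}
```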
@@ -232,6 +232,7 @@ func (s *handlerIngressGateway) generateIngressDNSSANs(snap *ConfigSnapshot) []string {
 		}
 	}
 
+	// TODO(partitions): How should these be updated for partitions?
 	for ns := range namespaces {
 		// The default namespace is special cased in DNS resolution, so special
 		// case it here.
@@ -266,10 +266,7 @@ func (e *IngressGatewayConfigEntry) Validate() error {
 
 		declaredHosts := make(map[string]bool)
 		serviceNames := make(map[ServiceID]struct{})
-		for i, s := range listener.Services {
-			if err := validateInnerEnterpriseMeta(&s.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
-				return fmt.Errorf("services[%d]: %w", i, err)
-			}
+		for _, s := range listener.Services {
 			sn := NewServiceName(s.Name, &s.EnterpriseMeta)
 			if err := s.RequestHeaders.Validate(listener.Protocol); err != nil {
 				return fmt.Errorf("request headers %s (service %q on listener on port %d)", err, sn.String(), listener.Port)
@@ -103,12 +103,14 @@ type IngressService struct {
 	// using a "tcp" listener.
 	Hosts []string
 
-	// Referencing other partitions is not supported.
-
 	// Namespace is the namespace where the service is located.
 	// Namespacing is a Consul Enterprise feature.
 	Namespace string `json:",omitempty"`
 
+	// Partition is the partition where the service is located.
+	// Partitioning is a Consul Enterprise feature.
+	Partition string `json:",omitempty"`
+
 	// TLS allows specifying some TLS configuration per listener.
 	TLS *GatewayServiceTLSConfig `json:",omitempty"`
 
@@ -157,8 +157,9 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
 
 			require.Len(t, readIngress.Listeners, 1)
 			require.Len(t, readIngress.Listeners[0].Services, 1)
-			// Set namespace to blank so that OSS and ent can utilize the same tests
+			// Set namespace and partition to blank so that OSS and ent can utilize the same tests
 			readIngress.Listeners[0].Services[0].Namespace = ""
+			readIngress.Listeners[0].Services[0].Partition = ""
 
 			require.Equal(t, ingress1.Listeners, readIngress.Listeners)
 		case "bar":
@@ -168,8 +169,9 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) {
 			require.Equal(t, ingress2.Name, readIngress.Name)
 			require.Len(t, readIngress.Listeners, 1)
 			require.Len(t, readIngress.Listeners[0].Services, 1)
-			// Set namespace to blank so that OSS and ent can utilize the same tests
+			// Set namespace and partition to blank so that OSS and ent can utilize the same tests
 			readIngress.Listeners[0].Services[0].Namespace = ""
+			readIngress.Listeners[0].Services[0].Partition = ""
 
 			require.Equal(t, ingress2.Listeners, readIngress.Listeners)
 		}
@@ -964,7 +964,8 @@ func TestDecodeConfigEntry(t *testing.T) {
 				"Services": [
 					{
 						"Name": "web",
-						"Namespace": "foo"
+						"Namespace": "foo",
+						"Partition": "bar"
 					},
 					{
 						"Name": "db"
@@ -1001,6 +1002,7 @@ func TestDecodeConfigEntry(t *testing.T) {
 					{
 						Name:      "web",
 						Namespace: "foo",
+						Partition: "bar",
 					},
 					{
 						Name: "db",
@@ -30,6 +30,11 @@ type Intention struct {
 	SourceNS, SourceName           string
 	DestinationNS, DestinationName string
 
+	// SourcePartition and DestinationPartition cannot be wildcards "*" and
+	// are not compatible with legacy intentions.
+	SourcePartition      string
+	DestinationPartition string
+
 	// SourceType is the type of the value for the source.
 	SourceType IntentionSourceType
@@ -363,8 +368,8 @@ func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
 func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
 	r := c.c.newRequest("PUT", "/v1/connect/intentions/exact")
 	r.setWriteOptions(q)
-	r.params.Set("source", maybePrefixNamespace(ixn.SourceNS, ixn.SourceName))
-	r.params.Set("destination", maybePrefixNamespace(ixn.DestinationNS, ixn.DestinationName))
+	r.params.Set("source", maybePrefixNamespaceAndPartition(ixn.SourcePartition, ixn.SourceNS, ixn.SourceName))
+	r.params.Set("destination", maybePrefixNamespaceAndPartition(ixn.DestinationPartition, ixn.DestinationNS, ixn.DestinationName))
 	r.obj = ixn
 	rtt, resp, err := c.c.doRequest(r)
 	if err != nil {
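With the two new fields, a cross-partition intention can be upserted through the API client. A sketch assuming a reachable local agent; the service and partition names are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ixn := &api.Intention{
		SourceName:           "ingress", // illustrative names throughout
		SourceNS:             "default",
		SourcePartition:      "default",
		DestinationName:      "web",
		DestinationNS:        "default",
		DestinationPartition: "ap1", // cross-partition destination
		SourceType:           api.IntentionSourceConsul,
		Action:               api.IntentionActionAllow,
	}

	// Serialized by the client as source=default/default/ingress and
	// destination=ap1/default/web (see maybePrefixNamespaceAndPartition below).
	if _, err := client.Connect().IntentionUpsert(ixn, nil); err != nil {
		log.Fatal(err)
	}
}
```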
@@ -380,11 +385,17 @@ func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
 	return wm, nil
 }
 
-func maybePrefixNamespace(ns, name string) string {
-	if ns == "" {
+func maybePrefixNamespaceAndPartition(part, ns, name string) string {
+	switch {
+	case part == "" && ns == "":
 		return name
+	case part == "" && ns != "":
+		return ns + "/" + name
+	case part != "" && ns == "":
+		return part + "/" + IntentionDefaultNamespace + "/" + name
+	default:
+		return part + "/" + ns + "/" + name
 	}
-	return ns + "/" + name
 }
 
 // IntentionCreate will create a new intention. The ID in the given
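Since maybePrefixNamespaceAndPartition is unexported, here is a standalone transcription of it with the four cases worked through; the IntentionDefaultNamespace constant mirrors the api package's value:

```go
package main

import "fmt"

const IntentionDefaultNamespace = "default" // mirrors api.IntentionDefaultNamespace

func maybePrefixNamespaceAndPartition(part, ns, name string) string {
	switch {
	case part == "" && ns == "":
		return name
	case part == "" && ns != "":
		return ns + "/" + name
	case part != "" && ns == "":
		return part + "/" + IntentionDefaultNamespace + "/" + name
	default:
		return part + "/" + ns + "/" + name
	}
}

func main() {
	fmt.Println(maybePrefixNamespaceAndPartition("", "", "web"))       // web
	fmt.Println(maybePrefixNamespaceAndPartition("", "ns1", "web"))    // ns1/web
	fmt.Println(maybePrefixNamespaceAndPartition("ap1", "", "web"))    // ap1/default/web
	fmt.Println(maybePrefixNamespaceAndPartition("ap1", "ns1", "web")) // ap1/ns1/web
}
```

Note the asymmetry in the third case: a partition without a namespace gets the default namespace filled in, so a serialized target never has a partition without the middle namespace segment.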
@@ -33,6 +33,8 @@ func TestAPI_ConnectIntentionCreateListGetUpdateDelete(t *testing.T) {
 	ixn.UpdatedAt = actual.UpdatedAt
 	ixn.CreateIndex = actual.CreateIndex
 	ixn.ModifyIndex = actual.ModifyIndex
+	ixn.SourcePartition = actual.SourcePartition
+	ixn.DestinationPartition = actual.DestinationPartition
 	ixn.Hash = actual.Hash
 	require.Equal(t, ixn, actual)
 
@@ -26,6 +26,9 @@ type AdminPartition struct {
 	ModifyIndex uint64 `json:"ModifyIndex,omitempty"`
 }
 
+// PartitionDefaultName is the default partition value.
+const PartitionDefaultName = "default"
+
 type AdminPartitions struct {
 	Partitions []*AdminPartition
 }
@@ -5,12 +5,12 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/mitchellh/cli"
+
 	"github.com/hashicorp/consul/agent"
-	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/command/flags"
 	"github.com/hashicorp/consul/command/intention"
-	"github.com/mitchellh/cli"
 )
 
 func New(ui cli.Ui) *cmd {
@@ -36,12 +36,12 @@ type cmd struct {
 func (c *cmd) init() {
 	c.flags = flag.NewFlagSet("", flag.ContinueOnError)
 	c.flags.StringVar(&c.ingressGateway, "ingress-gateway", "",
-		"(Required) The name of the ingress gateway service to use. A namespace "+
-			"can optionally be specified as a prefix via the 'namespace/service' format.")
+		"(Required) The name of the ingress gateway service to use. Namespace and partition "+
+			"can optionally be specified as a prefix via the 'partition/namespace/service' format.")
 
 	c.flags.StringVar(&c.service, "service", "",
-		"(Required) The name of destination service to expose. A namespace "+
-			"can optionally be specified as a prefix via the 'namespace/service' format.")
+		"(Required) The name of destination service to expose. Namespace and partition "+
+			"can optionally be specified as a prefix via the 'partition/namespace/service' format.")
 
 	c.flags.IntVar(&c.port, "port", 0,
 		"(Required) The listener port to use for the service on the Ingress gateway.")
@@ -79,7 +79,7 @@ func (c *cmd) Run(args []string) int {
 		c.UI.Error("A service name must be given via the -service flag.")
 		return 1
 	}
-	svc, svcNamespace, err := intention.ParseIntentionTarget(c.service)
+	svc, svcNS, svcPart, err := intention.ParseIntentionTarget(c.service)
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Invalid service name: %s", err))
 		return 1
@@ -89,7 +89,7 @@ func (c *cmd) Run(args []string) int {
 		c.UI.Error("An ingress gateway service must be given via the -ingress-gateway flag.")
 		return 1
 	}
-	gateway, gatewayNamespace, err := intention.ParseIntentionTarget(c.ingressGateway)
+	gateway, gatewayNS, gatewayPart, err := intention.ParseIntentionTarget(c.ingressGateway)
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Invalid ingress gateway name: %s", err))
 		return 1
@@ -102,7 +102,9 @@ func (c *cmd) Run(args []string) int {
 
 	// First get the config entry for the ingress gateway, if it exists. Don't error if it's a 404 as that
 	// just means we'll need to create a new config entry.
-	conf, _, err := client.ConfigEntries().Get(api.IngressGateway, gateway, nil)
+	conf, _, err := client.ConfigEntries().Get(
+		api.IngressGateway, gateway, &api.QueryOptions{Partition: gatewayPart, Namespace: gatewayNS},
+	)
 	if err != nil && !strings.Contains(err.Error(), agent.ConfigEntryNotFoundErr) {
 		c.UI.Error(fmt.Sprintf("Error fetching existing ingress gateway configuration: %s", err))
 		return 1
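The CLI now threads the parsed partition and namespace through api.QueryOptions (and, in a later hunk, api.WriteOptions) so the config entry is read and written in the gateway's scope. The same read outside the CLI, as a sketch assuming a local agent and illustrative names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Scope the lookup to the gateway's partition and namespace (illustrative values).
	entry, _, err := client.ConfigEntries().Get(
		api.IngressGateway, "ingress",
		&api.QueryOptions{Partition: "ap1", Namespace: "default"},
	)
	if err != nil {
		log.Fatal(err) // a missing entry surfaces as a "config entry not found" error
	}
	fmt.Println("current modify index:", entry.GetModifyIndex())
}
```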
@@ -111,7 +113,8 @@ func (c *cmd) Run(args []string) int {
 		conf = &api.IngressGatewayConfigEntry{
 			Kind:      api.IngressGateway,
 			Name:      gateway,
-			Namespace: gatewayNamespace,
+			Namespace: gatewayNS,
+			Partition: gatewayPart,
 		}
 	}
 
@@ -127,7 +130,8 @@ func (c *cmd) Run(args []string) int {
 	serviceIdx := -1
 	newService := api.IngressService{
 		Name:      svc,
-		Namespace: svcNamespace,
+		Namespace: svcNS,
+		Partition: svcPart,
 		Hosts:     c.hosts,
 	}
 	for i, listener := range ingressConf.Listeners {
@@ -145,7 +149,7 @@ func (c *cmd) Run(args []string) int {
 
 		// Make sure the service isn't already exposed in this gateway
 		for j, service := range listener.Services {
-			if service.Name == svc && namespaceMatch(service.Namespace, svcNamespace) {
+			if service.Name == svc && entMetaMatch(service.Namespace, service.Partition, svcNS, svcPart) {
 				serviceIdx = j
 				c.UI.Output(fmt.Sprintf("Updating service definition for %q on listener with port %d", c.service, listener.Port))
 				break
@@ -170,7 +174,7 @@ func (c *cmd) Run(args []string) int {
 
 	// Write the updated config entry using a check-and-set, so it fails if the entry
 	// has been changed since we looked it up.
-	succeeded, _, err := client.ConfigEntries().CAS(ingressConf, ingressConf.GetModifyIndex(), nil)
+	succeeded, _, err := client.ConfigEntries().CAS(ingressConf, ingressConf.GetModifyIndex(), &api.WriteOptions{Partition: gatewayPart, Namespace: gatewayNS})
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Error writing ingress config entry: %v", err))
 		return 1
@@ -194,12 +198,14 @@ func (c *cmd) Run(args []string) int {
 
 	// Add the intention between the gateway service and the destination.
 	ixn := &api.Intention{
-		SourceName:      gateway,
-		SourceNS:        gatewayNamespace,
-		DestinationName: svc,
-		DestinationNS:   svcNamespace,
-		SourceType:      api.IntentionSourceConsul,
-		Action:          api.IntentionActionAllow,
+		SourceName:           gateway,
+		SourceNS:             gatewayNS,
+		SourcePartition:      gatewayPart,
+		DestinationName:      svc,
+		DestinationNS:        svcNS,
+		DestinationPartition: svcPart,
+		SourceType:           api.IntentionSourceConsul,
+		Action:               api.IntentionActionAllow,
 	}
 	if _, err = client.Connect().IntentionUpsert(ixn, nil); err != nil {
 		c.UI.Error(fmt.Sprintf("Error upserting intention: %s", err))
@@ -210,17 +216,21 @@ func (c *cmd) Run(args []string) int {
 	return 0
 }
 
-func namespaceMatch(a, b string) bool {
-	namespaceA := a
-	namespaceB := b
-	if namespaceA == "" {
-		namespaceA = structs.IntentionDefaultNamespace
+func entMetaMatch(nsA, partitionA, nsB, partitionB string) bool {
+	if nsA == "" {
+		nsA = api.IntentionDefaultNamespace
 	}
-	if namespaceB == "" {
-		namespaceB = structs.IntentionDefaultNamespace
+	if partitionA == "" {
+		partitionA = api.PartitionDefaultName
 	}
+	if nsB == "" {
+		nsB = api.IntentionDefaultNamespace
+	}
+	if partitionB == "" {
+		partitionB = api.PartitionDefaultName
+	}
 
-	return namespaceA == namespaceB
+	return strings.EqualFold(partitionA, partitionB) && strings.EqualFold(nsA, nsB)
 }
 
 func (c *cmd) Synopsis() string {
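A standalone copy of the new matcher, showing that empty values are normalized to the defaults before a case-insensitive comparison; the constants mirror api.IntentionDefaultNamespace and api.PartitionDefaultName:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	intentionDefaultNamespace = "default" // mirrors api.IntentionDefaultNamespace
	partitionDefaultName      = "default" // mirrors api.PartitionDefaultName
)

func entMetaMatch(nsA, partitionA, nsB, partitionB string) bool {
	if nsA == "" {
		nsA = intentionDefaultNamespace
	}
	if partitionA == "" {
		partitionA = partitionDefaultName
	}
	if nsB == "" {
		nsB = intentionDefaultNamespace
	}
	if partitionB == "" {
		partitionB = partitionDefaultName
	}
	return strings.EqualFold(partitionA, partitionB) && strings.EqualFold(nsA, nsB)
}

func main() {
	fmt.Println(entMetaMatch("", "", "default", "default")) // true: empties are defaulted
	fmt.Println(entMetaMatch("ns1", "AP1", "ns1", "ap1"))   // true: case-insensitive
	fmt.Println(entMetaMatch("ns1", "ap1", "ns1", ""))      // false: ap1 != default
}
```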
@@ -43,6 +43,7 @@ func TestConnectExpose(t *testing.T) {
 		entry, _, err := client.ConfigEntries().Get(api.IngressGateway, "ingress", nil)
 		require.NoError(err)
 		ns := entry.(*api.IngressGatewayConfigEntry).Namespace
+		ap := entry.(*api.IngressGatewayConfigEntry).Partition
 		expected := &api.IngressGatewayConfigEntry{
 			Kind: api.IngressGateway,
 			Name: "ingress",
@@ -55,6 +56,7 @@ func TestConnectExpose(t *testing.T) {
 					{
 						Name:      "foo",
 						Namespace: ns,
+						Partition: ap,
 					},
 				},
 			},
@@ -95,6 +97,7 @@ func TestConnectExpose(t *testing.T) {
 					{
 						Name:      "foo",
 						Namespace: ns,
+						Partition: ap,
 					},
 				},
 			})
@@ -283,6 +286,7 @@ func TestConnectExpose_existingConfig(t *testing.T) {
 	ingressConf.Namespace = entryConf.Namespace
 	for i, listener := range ingressConf.Listeners {
 		listener.Services[0].Namespace = entryConf.Listeners[i].Services[0].Namespace
+		listener.Services[0].Partition = entryConf.Listeners[i].Services[0].Partition
 	}
 	ingressConf.CreateIndex = entry.GetCreateIndex()
 	ingressConf.ModifyIndex = entry.GetModifyIndex()
@@ -317,6 +321,7 @@ func TestConnectExpose_existingConfig(t *testing.T) {
 	ingressConf.Listeners[1].Services = append(ingressConf.Listeners[1].Services, api.IngressService{
 		Name:      "zoo",
 		Namespace: entryConf.Listeners[1].Services[1].Namespace,
+		Partition: entryConf.Listeners[1].Services[1].Partition,
 		Hosts:     []string{"foo.com", "foo.net"},
 	})
 	ingressConf.CreateIndex = entry.GetCreateIndex()
@@ -153,24 +153,26 @@ func (c *cmd) ixnsFromArgs(args []string) ([]*api.Intention, error) {
 		return nil, fmt.Errorf("Must specify two arguments: source and destination")
 	}
 
-	srcName, srcNamespace, err := intention.ParseIntentionTarget(args[0])
+	srcName, srcNS, srcPart, err := intention.ParseIntentionTarget(args[0])
 	if err != nil {
 		return nil, fmt.Errorf("Invalid intention source: %v", err)
 	}
 
-	dstName, dstNamespace, err := intention.ParseIntentionTarget(args[1])
+	dstName, dstNS, dstPart, err := intention.ParseIntentionTarget(args[1])
 	if err != nil {
 		return nil, fmt.Errorf("Invalid intention destination: %v", err)
 	}
 
 	return []*api.Intention{{
-		SourceNS:        srcNamespace,
-		SourceName:      srcName,
-		DestinationNS:   dstNamespace,
-		DestinationName: dstName,
-		SourceType:      api.IntentionSourceConsul,
-		Action:          c.ixnAction(),
-		Meta:            c.flagMeta,
+		SourcePartition:      srcPart,
+		SourceNS:             srcNS,
+		SourceName:           srcName,
+		DestinationPartition: dstPart,
+		DestinationNS:        dstNS,
+		DestinationName:      dstName,
+		SourceType:           api.IntentionSourceConsul,
+		Action:               c.ixnAction(),
+		Meta:                 c.flagMeta,
 	}}, nil
 }
 
@@ -7,25 +7,28 @@ import (
 	"github.com/hashicorp/consul/api"
 )
 
-// ParseIntentionTarget parses a target of the form <namespace>/<name> and returns
-// the two distinct parts. In some cases the namespace may be elided and this function
-// will return the empty string for the namespace then.
-func ParseIntentionTarget(input string) (name string, namespace string, err error) {
-	// Get the index to the '/'. If it doesn't exist, we have just a name
-	// so just set that and return.
-	idx := strings.IndexByte(input, '/')
-	if idx == -1 {
-		// let the agent do token based defaulting of the namespace
-		return input, "", nil
+// ParseIntentionTarget parses a target of the form <partition>/<namespace>/<name> and returns
+// the distinct parts. In some cases the partition and namespace may be elided and this function
+// will return the empty string for them then.
+// If two parts are present, it is assumed they are namespace/name and not partition/name.
+func ParseIntentionTarget(input string) (name string, ns string, partition string, err error) {
+	ss := strings.Split(input, "/")
+	switch len(ss) {
+	case 1: // Name only
+		name = ss[0]
+		return
+	case 2: // namespace/name
+		ns = ss[0]
+		name = ss[1]
+		return
+	case 3: // partition/namespace/name
+		partition = ss[0]
+		ns = ss[1]
+		name = ss[2]
+		return
+	default:
+		return "", "", "", fmt.Errorf("input can contain at most two '/'")
 	}
-
-	namespace = input[:idx]
-	name = input[idx+1:]
-	if strings.IndexByte(name, '/') != -1 {
-		return "", "", fmt.Errorf("target can contain at most one '/'")
-	}
-
-	return name, namespace, nil
 }
 
 func GetFromArgs(client *api.Client, args []string) (*api.Intention, error) {
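A standalone transcription of the new parser with representative inputs; note that a two-part target is always read as namespace/name, never partition/name:

```go
package main

import (
	"fmt"
	"strings"
)

func ParseIntentionTarget(input string) (name string, ns string, partition string, err error) {
	ss := strings.Split(input, "/")
	switch len(ss) {
	case 1: // Name only
		name = ss[0]
		return
	case 2: // namespace/name
		ns = ss[0]
		name = ss[1]
		return
	case 3: // partition/namespace/name
		partition = ss[0]
		ns = ss[1]
		name = ss[2]
		return
	default:
		return "", "", "", fmt.Errorf("input can contain at most two '/'")
	}
}

func main() {
	for _, in := range []string{"web", "ns1/web", "ap1/ns1/web", "a/b/c/d"} {
		name, ns, part, err := ParseIntentionTarget(in)
		fmt.Printf("%-14q -> name=%q ns=%q partition=%q err=%v\n", in, name, ns, part, err)
	}
}
```

Empty results are left for the agent to default based on the request's token, which is why elided parts come back as empty strings rather than "default".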
@@ -2,4 +2,5 @@
 
 export DEFAULT_REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
 export REQUIRED_SERVICES="${DEFAULT_REQUIRED_SERVICES}"
-export REQUIRE_SECONDARY=0
+export REQUIRE_SECONDARY=0
+export REQUIRE_PARTITIONS=0
@@ -115,14 +115,19 @@ function assert_proxy_presents_cert_uri {
   local SERVICENAME=$2
   local DC=${3:-primary}
   local NS=${4:-default}
+  local PARTITION=${5:default}
 
   CERT=$(retry_default get_cert $HOSTPORT)
 
-  echo "WANT SERVICE: ${NS}/${SERVICENAME}"
+  echo "WANT SERVICE: ${PARTITION}/${NS}/${SERVICENAME}"
   echo "GOT CERT:"
   echo "$CERT"
 
-  echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/${NS}/dc/${DC}/svc/$SERVICENAME"
+  if [[ -z $PARTITION ]] || [[ $PARTITION = "default" ]]; then
+    echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/${NS}/dc/${DC}/svc/$SERVICENAME"
+  else
+    echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ap/${PARTITION}/ns/${NS}/dc/${DC}/svc/$SERVICENAME"
+  fi
 }
 
 function assert_dnssan_in_cert {
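The branch added above encodes the SPIFFE ID layout for partitions: services in the default partition keep the pre-partition URI shape, while other partitions gain an /ap/&lt;partition&gt; segment. The same rule as a small Go sketch; the trust-domain value is illustrative:

```go
package main

import "fmt"

// expectedSpiffeID mirrors the branch in assert_proxy_presents_cert_uri:
// the default partition is elided from the URI for compatibility.
func expectedSpiffeID(trustDomain, partition, ns, dc, svc string) string {
	if partition == "" || partition == "default" {
		return fmt.Sprintf("spiffe://%s.consul/ns/%s/dc/%s/svc/%s", trustDomain, ns, dc, svc)
	}
	return fmt.Sprintf("spiffe://%s.consul/ap/%s/ns/%s/dc/%s/svc/%s", trustDomain, partition, ns, dc, svc)
}

func main() {
	fmt.Println(expectedSpiffeID("11111111", "default", "default", "primary", "s1"))
	// spiffe://11111111.consul/ns/default/dc/primary/svc/s1
	fmt.Println(expectedSpiffeID("11111111", "ap1", "default", "primary", "s1"))
	// spiffe://11111111.consul/ap/ap1/ns/default/dc/primary/svc/s1
}
```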
@@ -40,39 +40,39 @@ function network_snippet {
 }
 
 function init_workdir {
-  local DC="$1"
+  local CLUSTER="$1"
 
-  if test -z "$DC"
+  if test -z "$CLUSTER"
   then
-    DC=primary
+    CLUSTER=primary
   fi
 
   # Note, we use explicit set of dirs so we don't delete .gitignore. Also,
   # don't wipe logs between runs as they are already split and we need them to
   # upload as artifacts later.
-  rm -rf workdir/${DC}
-  mkdir -p workdir/${DC}/{consul,register,envoy,bats,statsd,data}
+  rm -rf workdir/${CLUSTER}
+  mkdir -p workdir/${CLUSTER}/{consul,register,envoy,bats,statsd,data}
 
   # Reload consul config from defaults
-  cp consul-base-cfg/*.hcl workdir/${DC}/consul/
+  cp consul-base-cfg/*.hcl workdir/${CLUSTER}/consul/
 
   # Add any overrides if there are any (no op if not)
-  find ${CASE_DIR} -maxdepth 1 -name '*.hcl' -type f -exec cp -f {} workdir/${DC}/consul \;
+  find ${CASE_DIR} -maxdepth 1 -name '*.hcl' -type f -exec cp -f {} workdir/${CLUSTER}/consul \;
 
   # Copy all the test files
-  find ${CASE_DIR} -maxdepth 1 -name '*.bats' -type f -exec cp -f {} workdir/${DC}/bats \;
-  # Copy DC specific bats
-  cp helpers.bash workdir/${DC}/bats
+  find ${CASE_DIR} -maxdepth 1 -name '*.bats' -type f -exec cp -f {} workdir/${CLUSTER}/bats \;
+  # Copy CLUSTER specific bats
+  cp helpers.bash workdir/${CLUSTER}/bats
 
-  # Add any DC overrides
-  if test -d "${CASE_DIR}/${DC}"
+  # Add any CLUSTER overrides
+  if test -d "${CASE_DIR}/${CLUSTER}"
   then
-    find ${CASE_DIR}/${DC} -type f -name '*.hcl' -exec cp -f {} workdir/${DC}/consul \;
-    find ${CASE_DIR}/${DC} -type f -name '*.bats' -exec cp -f {} workdir/${DC}/bats \;
+    find ${CASE_DIR}/${CLUSTER} -type f -name '*.hcl' -exec cp -f {} workdir/${CLUSTER}/consul \;
+    find ${CASE_DIR}/${CLUSTER} -type f -name '*.bats' -exec cp -f {} workdir/${CLUSTER}/bats \;
   fi
 
   # move all of the registration files OUT of the consul config dir now
-  find workdir/${DC}/consul -type f -name 'service_*.hcl' -exec mv -f {} workdir/${DC}/register \;
+  find workdir/${CLUSTER}/consul -type f -name 'service_*.hcl' -exec mv -f {} workdir/${CLUSTER}/register \;
 
   # copy the ca-certs for SDS so we can verify the right ones are served
   mkdir -p workdir/test-sds-server/certs
@@ -80,7 +80,7 @@ function init_workdir {
 
   if test -d "${CASE_DIR}/data"
   then
-    cp -r ${CASE_DIR}/data/* workdir/${DC}/data
+    cp -r ${CASE_DIR}/data/* workdir/${CLUSTER}/data
   fi
 
   return 0
@@ -157,13 +157,48 @@ function start_consul {
     -client "0.0.0.0" >/dev/null
 }
 
+function start_partitioned_client {
+  local PARTITION=${1:-ap1}
+
+  # Start consul now as setup script needs it up
+  docker_kill_rm consul-${PARTITION}
+
+  license="${CONSUL_LICENSE:-}"
+  # load the consul license so we can pass it into the consul
+  # containers as an env var in the case that this is a consul
+  # enterprise test
+  if test -z "$license" -a -n "${CONSUL_LICENSE_PATH:-}"
+  then
+    license=$(cat $CONSUL_LICENSE_PATH)
+  fi
+
+  sh -c "rm -rf /workdir/${PARTITION}/data"
+
+  # Run consul and expose some ports to the host to make debugging locally a
+  # bit easier.
+  #
+  docker run -d --name envoy_consul-${PARTITION}_1 \
+    --net=envoy-tests \
+    $WORKDIR_SNIPPET \
+    --hostname "consul-${PARTITION}" \
+    --network-alias "consul-${PARTITION}" \
+    -e "CONSUL_LICENSE=$license" \
+    consul-dev agent \
+    -datacenter "primary" \
+    -retry-join "consul-primary" \
+    -grpc-port 8502 \
+    -data-dir "/tmp/consul" \
+    -config-dir "/workdir/${PARTITION}/consul" \
+    -client "0.0.0.0" >/dev/null
+}
+
 function pre_service_setup {
-  local DC=${1:-primary}
+  local CLUSTER=${1:-primary}
 
   # Run test case setup (e.g. generating Envoy bootstrap, starting containers)
-  if [ -f "${CASE_DIR}/${DC}/setup.sh" ]
+  if [ -f "${CASE_DIR}/${CLUSTER}/setup.sh" ]
   then
-    source ${CASE_DIR}/${DC}/setup.sh
+    source ${CASE_DIR}/${CLUSTER}/setup.sh
   else
     source ${CASE_DIR}/setup.sh
   fi
@@ -184,29 +219,29 @@ function start_services {
 }
 
 function verify {
-  local DC=$1
-  if test -z "$DC"; then
-    DC=primary
+  local CLUSTER="$1"
+  if test -z "$CLUSTER"; then
+    CLUSTER="primary"
   fi
 
   # Execute tests
   res=0
 
   # Nuke any previous case's verify container.
-  docker_kill_rm verify-${DC}
+  docker_kill_rm verify-${CLUSTER}
 
-  echo "Running ${DC} verification step for ${CASE_DIR}..."
+  echo "Running ${CLUSTER} verification step for ${CASE_DIR}..."
 
   # need to tell the PID 1 inside of the container that it won't be actual PID
   # 1 because we're using --pid=host so we use TINI_SUBREAPER
-  if docker run --name envoy_verify-${DC}_1 -t \
+  if docker run --name envoy_verify-${CLUSTER}_1 -t \
     -e TINI_SUBREAPER=1 \
     -e ENVOY_VERSION \
    $WORKDIR_SNIPPET \
     --pid=host \
-    $(network_snippet $DC) \
+    $(network_snippet $CLUSTER) \
     bats-verify \
-    --pretty /workdir/${DC}/bats ; then
+    --pretty /workdir/${CLUSTER}/bats ; then
     echogreen "✓ PASS"
   else
     echored "⨯ FAIL"
@@ -228,6 +263,11 @@ function capture_logs {
   then
     services="$services consul-secondary"
   fi
+  if is_set $REQUIRE_PARTITIONS
+  then
+    services="$services consul-ap1"
+  fi
+
 
   if [ -f "${CASE_DIR}/capture.sh" ]
   then
@@ -247,7 +287,7 @@ function stop_services {
   # Teardown
   docker_kill_rm $REQUIRED_SERVICES
 
-  docker_kill_rm consul-primary consul-secondary
+  docker_kill_rm consul-primary consul-secondary consul-ap1
 }
 
 function init_vars {
@@ -286,6 +326,10 @@ function run_tests {
   then
     init_workdir secondary
   fi
+  if is_set $REQUIRE_PARTITIONS
+  then
+    init_workdir ap1
+  fi
 
   global_setup
 
@@ -307,6 +351,10 @@ function run_tests {
   if is_set $REQUIRE_SECONDARY; then
     start_consul secondary
   fi
+  if is_set $REQUIRE_PARTITIONS; then
+    docker_consul "primary" admin-partition create -name ap1 > /dev/null
+    start_partitioned_client ap1
+  fi
 
   echo "Setting up the primary datacenter"
   pre_service_setup primary
@@ -315,14 +363,20 @@ function run_tests {
     echo "Setting up the secondary datacenter"
     pre_service_setup secondary
   fi
+  if is_set $REQUIRE_PARTITIONS; then
+    echo "Setting up the non-default partition"
+    pre_service_setup ap1
+  fi
 
   echo "Starting services"
   start_services
 
   # Run the verify container and report on the output
   echo "Verifying the primary datacenter"
   verify primary
 
   if is_set $REQUIRE_SECONDARY; then
     echo "Verifying the secondary datacenter"
     verify secondary
   fi
 }
@@ -378,7 +432,7 @@ function suite_teardown {
   docker_kill_rm $(grep "^function run_container_" $self_name | \
     sed 's/^function run_container_\(.*\) {/\1/g')
 
-  docker_kill_rm consul-primary consul-secondary
+  docker_kill_rm consul-primary consul-secondary consul-ap1
 
   if docker network inspect envoy-tests &>/dev/null ; then
     echo -n "Deleting network 'envoy-tests'..."
@@ -402,13 +456,13 @@ function run_container {
 
 function common_run_container_service {
   local service="$1"
-  local DC="$2"
+  local CLUSTER="$2"
   local httpPort="$3"
   local grpcPort="$4"
 
   docker run -d --name $(container_name_prev) \
     -e "FORTIO_NAME=${service}" \
-    $(network_snippet $DC) \
+    $(network_snippet $CLUSTER) \
     "${HASHICORP_DOCKER_PROXY}/fortio/fortio" \
     server \
     -http-port ":$httpPort" \
@@ -420,6 +474,10 @@ function run_container_s1 {
   common_run_container_service s1 primary 8080 8079
 }
 
+function run_container_s1-ap1 {
+  common_run_container_service s1 ap1 8080 8079
+}
+
 function run_container_s2 {
   common_run_container_service s2 primary 8181 8179
 }
@@ -457,7 +515,7 @@ function run_container_s2-secondary {
 
 function common_run_container_sidecar_proxy {
   local service="$1"
-  local DC="$2"
+  local CLUSTER="$2"
 
   # Hot restart breaks since both envoys seem to interact with each other
   # despite separate containers that don't share IPC namespace. Not quite
@@ -465,10 +523,10 @@ function common_run_container_sidecar_proxy {
   # location?
   docker run -d --name $(container_name_prev) \
     $WORKDIR_SNIPPET \
-    $(network_snippet $DC) \
+    $(network_snippet $CLUSTER) \
     "${HASHICORP_DOCKER_PROXY}/envoyproxy/envoy:v${ENVOY_VERSION}" \
     envoy \
-    -c /workdir/${DC}/envoy/${service}-bootstrap.json \
+    -c /workdir/${CLUSTER}/envoy/${service}-bootstrap.json \
     -l debug \
     --disable-hot-restart \
     --drain-time-s 1 >/dev/null
@@ -477,6 +535,11 @@ function common_run_container_sidecar_proxy {
 function run_container_s1-sidecar-proxy {
   common_run_container_sidecar_proxy s1 primary
 }
 
+function run_container_s1-ap1-sidecar-proxy {
+  common_run_container_sidecar_proxy s1 ap1
+}
+
 function run_container_s1-sidecar-proxy-consul-exec {
   docker run -d --name $(container_name) \
     $(network_snippet primary) \