Merge pull request #9849 from hashicorp/b-cc-ig-id

consul/connect: Enable running multiple ingress gateways per Nomad agent
This commit is contained in:
Seth Hoenig 2021-01-20 10:08:14 -06:00 committed by GitHub
commit 08e323b753
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 227 additions and 22 deletions

View file

@ -3,6 +3,7 @@
BUG FIXES:
* consul/connect: Fixed a bug where the gateway proxy connection default timeout was not set [[GH-9851](https://github.com/hashicorp/nomad/pull/9851)]
* consul/connect: Fixed a bug preventing more than one connect gateway per Nomad client [[GH-9849](https://github.com/hashicorp/nomad/pull/9849)]
* scheduler: Fixed a bug where shared ports were not persisted during inplace updates for service jobs. [[GH-9830](https://github.com/hashicorp/nomad/issues/9830)]
## 1.0.2 (January 14, 2021)

View file

@ -336,28 +336,34 @@ func (h *envoyBootstrapHook) grpcAddress(env map[string]string) string {
}
}
// proxyServiceID returns the ID under which the Consul agent registers the
// connect proxy (sidecar or gateway) for the given group-level service of
// this allocation.
func (h *envoyBootstrapHook) proxyServiceID(group string, service *structs.Service) string {
	taskGroupName := "group-" + group
	return agentconsul.MakeAllocServiceID(h.alloc.ID, taskGroupName, service)
}
func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
tgName string,
service *structs.Service,
group string, service *structs.Service,
grpcAddr, envoyAdminBind, siToken, filepath string,
) envoyBootstrapArgs {
var (
sidecarForID string // sidecar only
gateway string // gateway only
proxyID string // gateway only
)
if service.Connect.HasSidecar() {
sidecarForID = agentconsul.MakeAllocServiceID(h.alloc.ID, "group-"+tgName, service)
sidecarForID = h.proxyServiceID(group, service)
}
if service.Connect.IsGateway() {
gateway = "ingress" // more types in the future
proxyID = h.proxyServiceID(group, service)
}
h.logger.Debug("bootstrapping envoy",
"sidecar_for", service.Name, "bootstrap_file", filepath,
"sidecar_for_id", sidecarForID, "grpc_addr", grpcAddr,
"admin_bind", envoyAdminBind, "gateway", gateway,
"proxy_id", proxyID,
)
return envoyBootstrapArgs{
@ -367,6 +373,7 @@ func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
envoyAdminBind: envoyAdminBind,
siToken: siToken,
gateway: gateway,
proxyID: proxyID,
}
}
@ -380,6 +387,7 @@ type envoyBootstrapArgs struct {
envoyAdminBind string
siToken string
gateway string // gateways only
proxyID string // gateways only
}
// args returns the CLI arguments consul needs in the correct order, with the
@ -395,11 +403,15 @@ func (e envoyBootstrapArgs) args() []string {
}
if v := e.sidecarFor; v != "" {
arguments = append(arguments, "-sidecar-for", e.sidecarFor)
arguments = append(arguments, "-sidecar-for", v)
}
if v := e.gateway; v != "" {
arguments = append(arguments, "-gateway", e.gateway)
arguments = append(arguments, "-gateway", v)
}
if v := e.proxyID; v != "" {
arguments = append(arguments, "-proxy-id", v)
}
if v := e.siToken; v != "" {

View file

@ -7,6 +7,7 @@ package taskrunner
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -173,6 +174,7 @@ func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
grpcAddr: "1.1.1.1",
envoyAdminBind: "localhost:3333",
gateway: "my-ingress-gateway",
proxyID: "_nomad-task-803cb569-881c-b0d8-9222-360bcc33157e-group-ig-ig-8080",
}
result := ebArgs.args()
require.Equal(t, []string{"connect", "envoy",
@ -181,6 +183,7 @@ func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
"-admin-bind", "localhost:3333",
"-bootstrap",
"-gateway", "my-ingress-gateway",
"-proxy-id", "_nomad-task-803cb569-881c-b0d8-9222-360bcc33157e-group-ig-ig-8080",
}, result)
})
}
@ -229,6 +232,7 @@ type envoyConfig struct {
} `json:"admin"`
Node struct {
Cluster string `json:"cluster"`
ID string `json:"id"`
Metadata struct {
Namespace string `json:"namespace"`
Version string `json:"envoy_version"`
@ -514,9 +518,12 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
var out envoyConfig
require.NoError(t, json.NewDecoder(f).Decode(&out))
// the only interesting thing on bootstrap is the presence of the cluster,
// everything is configured at runtime through xDS
require.Equal(t, "my-ingress-service", out.Node.Cluster)
// The only interesting thing on bootstrap is the presence of the cluster,
// and its associated ID that Nomad sets. Everything is configured at runtime
// through xDS.
expID := fmt.Sprintf("_nomad-task-%s-group-web-my-ingress-service-9999", alloc.ID)
require.Equal(t, expID, out.Node.ID)
require.Equal(t, "ingress-gateway", out.Node.Cluster)
}
// TestTaskRunner_EnvoyBootstrapHook_Noop asserts that the Envoy bootstrap hook

View file

@ -16,20 +16,6 @@ import (
"github.com/stretchr/testify/require"
)
const (
// envConsulToken is the consul http token environment variable
envConsulToken = "CONSUL_HTTP_TOKEN"
// demoConnectJob is the example connect enabled job useful for testing
demoConnectJob = "connect/input/demo.nomad"
// demoConnectNativeJob is the example connect native enabled job useful for testing
demoConnectNativeJob = "connect/input/native-demo.nomad"
// demoConnectIngressGateway is the example ingress gateway job useful for testing
demoConnectIngressGateway = "connect/input/ingress-gateway.nomad"
)
type ConnectACLsE2ETest struct {
framework.TC

View file

@ -8,6 +8,23 @@ import (
"github.com/hashicorp/nomad/helper/uuid"
)
const (
// envConsulToken is the environment variable in which Consul CLI and
// client libraries expect to find an ACL token for HTTP API calls.
envConsulToken = "CONSUL_HTTP_TOKEN"
// demoConnectJob is the example connect enabled job useful for testing
demoConnectJob = "connect/input/demo.nomad"
// demoConnectNativeJob is the example connect native enabled job useful for testing
demoConnectNativeJob = "connect/input/native-demo.nomad"
// demoConnectIngressGateway is the example ingress gateway job useful for testing
demoConnectIngressGateway = "connect/input/ingress-gateway.nomad"
// demoConnectMultiIngressGateway is the example job running multiple
// ingress gateways in one group, useful for testing
demoConnectMultiIngressGateway = "connect/input/multi-ingress.nomad"
)
type ConnectE2ETest struct {
framework.TC
jobIds []string
@ -92,3 +109,14 @@ func (tc *ConnectE2ETest) TestConnectIngressGatewayDemo(f *framework.F) {
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
}
// TestConnectMultiIngressGatewayDemo registers the example job that runs
// several Connect ingress gateways in a single task group and waits for
// every allocation of the job to reach the running state.
func (tc *ConnectE2ETest) TestConnectMultiIngressGatewayDemo(f *framework.F) {
	t := f.T()

	// Record the job ID so the test framework cleans the job up afterwards.
	jobID := connectJobID()
	tc.jobIds = append(tc.jobIds, jobID)

	stubs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectMultiIngressGateway, jobID, "")
	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), e2eutil.AllocIDsFromAllocationListStubs(stubs))
}

View file

@ -0,0 +1,171 @@
# Example job demonstrating multiple Connect ingress gateways running in a
# single task group (and therefore on a single Nomad client), each fronting
# its own Connect-native upstream API service.
job "multi-ingress" {
datacenters = ["dc1"]
# The uuid-api demo image below is linux-only.
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
# One group hosting three ingress gateway services, each with its own
# static inbound port in bridge networking mode.
group "gateways" {
network {
mode = "bridge"
port "inbound1" {
static = 8081
to = 8081
}
port "inbound2" {
static = 8082
to = 8082
}
port "inbound3" {
static = 8083
to = 8083
}
}
# Ingress gateway "ig1": TCP listener on 8081 routing to service "api1".
service {
name = "ig1"
port = "8081"
connect {
gateway {
ingress {
listener {
port = 8081
protocol = "tcp"
service {
name = "api1"
}
}
}
}
}
}
# Ingress gateway "ig2": TCP listener on 8082 routing to service "api2".
service {
name = "ig2"
port = "8082"
connect {
gateway {
ingress {
listener {
port = 8082
protocol = "tcp"
service {
name = "api2"
}
}
}
}
}
}
# Ingress gateway "ig3": TCP listener on 8083 routing to service "api3".
service {
name = "ig3"
port = "8083"
connect {
gateway {
ingress {
listener {
port = 8083
protocol = "tcp"
service {
name = "api3"
}
}
}
}
}
}
}
# Upstream Connect-native service "api1" on a dynamically assigned host port.
group "api1" {
network {
mode = "host"
port "api" {}
}
service {
name = "api1"
port = "api"
connect {
native = true
}
}
task "api1" {
driver = "docker"
config {
image = "hashicorpnomad/uuid-api:v5"
network_mode = "host"
}
env {
BIND = "0.0.0.0"
PORT = "${NOMAD_PORT_api}"
}
}
}
# Upstream Connect-native service "api2" on a dynamically assigned host port.
group "api2" {
network {
mode = "host"
port "api" {}
}
service {
name = "api2"
port = "api"
connect {
native = true
}
}
task "api2" {
driver = "docker"
config {
image = "hashicorpnomad/uuid-api:v5"
network_mode = "host"
}
env {
BIND = "0.0.0.0"
PORT = "${NOMAD_PORT_api}"
}
}
}
# Upstream Connect-native service "api3" on a dynamically assigned host port.
group "api3" {
network {
mode = "host"
port "api" {}
}
service {
name = "api3"
port = "api"
connect {
native = true
}
}
task "api3" {
driver = "docker"
config {
image = "hashicorpnomad/uuid-api:v5"
network_mode = "host"
}
env {
BIND = "0.0.0.0"
PORT = "${NOMAD_PORT_api}"
}
}
}
}