consul/connect: Enable running multiple ingress gateways per Nomad agent
Connect ingress gateway services were being registered into Consul without an explicit, deterministic service ID. Consul would generate one automatically, but Nomad then had no way to register a second gateway on the same agent, because it could not supply a 'proxy-id' during Envoy bootstrap. Set the ServiceID for gateways, and supply 'proxy-id' when bootstrapping Envoy. Fixes #9834
parent 1bebf475c6
commit 991884e715
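The gist of the change, condensed into a runnable sketch: derive one deterministic service ID per gateway and hand it to `consul connect envoy` via -proxy-id, so a second gateway on the same agent bootstraps against its own registration. This is illustrative only; the gatewayServiceID helper, the ID layout, and the addresses below are assumptions, while in Nomad the real ID comes from agentconsul.MakeAllocServiceID inside the envoy bootstrap hook (see the diff below).

// Illustrative sketch only, not Nomad's actual implementation.
package main

import (
	"fmt"
	"os/exec"
)

// gatewayServiceID builds an ID that is stable for a given allocation, task
// group, and service, instead of letting Consul auto-generate one.
// The layout is an assumption, loosely mirroring the
// "_nomad-task-<alloc>-group-<group>-<service>-..." IDs seen in the tests below.
func gatewayServiceID(allocID, group, serviceName string) string {
	return fmt.Sprintf("_nomad-task-%s-group-%s-%s", allocID, group, serviceName)
}

func main() {
	proxyID := gatewayServiceID("803cb569-881c-b0d8-9222-360bcc33157e", "ig", "ig")

	// With a deterministic ID, each gateway bootstraps its own Envoy by pointing
	// -proxy-id at its own Consul registration (addresses here are placeholders).
	cmd := exec.Command("consul", "connect", "envoy",
		"-grpc-addr", "127.0.0.1:8502",
		"-admin-bind", "localhost:19000",
		"-bootstrap",
		"-gateway", "ingress",
		"-proxy-id", proxyID,
	)
	fmt.Println(cmd.String()) // print the command instead of running it
}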
@@ -3,6 +3,7 @@
 BUG FIXES:
 
 * consul/connect: Fixed a bug where the gateway proxy connection default timeout was not set [[GH-9851](https://github.com/hashicorp/nomad/pull/9851)]
+* consul/connect: Fixed a bug preventing more than one connect gateway per Nomad client [[GH-9849](https://github.com/hashicorp/nomad/pull/9849)]
 * scheduler: Fixed a bug where shared ports were not persisted during inplace updates for service jobs. [[GH-9830](https://github.com/hashicorp/nomad/issues/9830)]
 
 ## 1.0.2 (January 14, 2021)
@@ -336,28 +336,34 @@ func (h *envoyBootstrapHook) grpcAddress(env map[string]string) string {
 	}
 }
 
+func (h *envoyBootstrapHook) proxyServiceID(group string, service *structs.Service) string {
+	return agentconsul.MakeAllocServiceID(h.alloc.ID, "group-"+group, service)
+}
+
 func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
-	tgName string,
-	service *structs.Service,
+	group string, service *structs.Service,
 	grpcAddr, envoyAdminBind, siToken, filepath string,
 ) envoyBootstrapArgs {
 	var (
 		sidecarForID string // sidecar only
 		gateway      string // gateway only
+		proxyID      string // gateway only
 	)
 
 	if service.Connect.HasSidecar() {
-		sidecarForID = agentconsul.MakeAllocServiceID(h.alloc.ID, "group-"+tgName, service)
+		sidecarForID = h.proxyServiceID(group, service)
 	}
 
 	if service.Connect.IsGateway() {
 		gateway = "ingress" // more types in the future
+		proxyID = h.proxyServiceID(group, service)
 	}
 
 	h.logger.Debug("bootstrapping envoy",
 		"sidecar_for", service.Name, "bootstrap_file", filepath,
 		"sidecar_for_id", sidecarForID, "grpc_addr", grpcAddr,
 		"admin_bind", envoyAdminBind, "gateway", gateway,
 		"proxy_id", proxyID,
 	)
 
 	return envoyBootstrapArgs{
@@ -367,6 +373,7 @@ func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
 		envoyAdminBind: envoyAdminBind,
 		siToken:        siToken,
 		gateway:        gateway,
+		proxyID:        proxyID,
 	}
 }
 
@@ -380,6 +387,7 @@ type envoyBootstrapArgs struct {
 	envoyAdminBind string
 	siToken        string
 	gateway        string // gateways only
+	proxyID        string // gateways only
 }
 
 // args returns the CLI arguments consul needs in the correct order, with the
@@ -402,6 +410,10 @@ func (e envoyBootstrapArgs) args() []string {
 		arguments = append(arguments, "-gateway", e.gateway)
 	}
 
+	if v := e.proxyID; v != "" {
+		arguments = append(arguments, "-proxy-id", e.proxyID)
+	}
+
 	if v := e.siToken; v != "" {
 		arguments = append(arguments, "-token", v)
 	}
 
@@ -173,6 +173,7 @@ func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
 			grpcAddr:       "1.1.1.1",
 			envoyAdminBind: "localhost:3333",
 			gateway:        "my-ingress-gateway",
+			proxyID:        "_nomad-task-803cb569-881c-b0d8-9222-360bcc33157e-group-ig-ig-8080",
 		}
 		result := ebArgs.args()
 		require.Equal(t, []string{"connect", "envoy",
@@ -181,6 +182,7 @@ func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
 			"-admin-bind", "localhost:3333",
 			"-bootstrap",
 			"-gateway", "my-ingress-gateway",
+			"-proxy-id", "_nomad-task-803cb569-881c-b0d8-9222-360bcc33157e-group-ig-ig-8080",
 		}, result)
 	})
 }
@@ -516,7 +518,7 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
 
 	// the only interesting thing on bootstrap is the presence of the cluster,
 	// everything is configured at runtime through xDS
-	require.Equal(t, "my-ingress-service", out.Node.Cluster)
+	require.Equal(t, "ingress-gateway", out.Node.Cluster)
 }
 
 // TestTaskRunner_EnvoyBootstrapHook_Noop asserts that the Envoy bootstrap hook
@@ -16,20 +16,6 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-const (
-	// envConsulToken is the consul http token environment variable
-	envConsulToken = "CONSUL_HTTP_TOKEN"
-
-	// demoConnectJob is the example connect enabled job useful for testing
-	demoConnectJob = "connect/input/demo.nomad"
-
-	// demoConnectNativeJob is the example connect native enabled job useful for testing
-	demoConnectNativeJob = "connect/input/native-demo.nomad"
-
-	// demoConnectIngressGateway is the example ingress gateway job useful for testing
-	demoConnectIngressGateway = "connect/input/ingress-gateway.nomad"
-)
-
 type ConnectACLsE2ETest struct {
 	framework.TC
 
@@ -8,6 +8,23 @@ import (
 	"github.com/hashicorp/nomad/helper/uuid"
 )
 
+const (
+	// envConsulToken is the consul http token environment variable
+	envConsulToken = "CONSUL_HTTP_TOKEN"
+
+	// demoConnectJob is the example connect enabled job useful for testing
+	demoConnectJob = "connect/input/demo.nomad"
+
+	// demoConnectNativeJob is the example connect native enabled job useful for testing
+	demoConnectNativeJob = "connect/input/native-demo.nomad"
+
+	// demoConnectIngressGateway is the example ingress gateway job useful for testing
+	demoConnectIngressGateway = "connect/input/ingress-gateway.nomad"
+
+	// demoConnectMultiIngressGateway is the example multi ingress gateway job useful for testing
+	demoConnectMultiIngressGateway = "connect/input/multi-ingress.nomad"
+)
+
 type ConnectE2ETest struct {
 	framework.TC
 	jobIds []string
@@ -92,3 +109,14 @@ func (tc *ConnectE2ETest) TestConnectIngressGatewayDemo(f *framework.F) {
 	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
 	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
 }
+
+func (tc *ConnectE2ETest) TestConnectMultiIngressGatewayDemo(f *framework.F) {
+	t := f.T()
+
+	jobID := connectJobID()
+	tc.jobIds = append(tc.jobIds, jobID)
+
+	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectMultiIngressGateway, jobID, "")
+	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
+	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
+}
@@ -0,0 +1,171 @@
+job "multi-ingress" {
+
+  datacenters = ["dc1"]
+
+  constraint {
+    attribute = "${attr.kernel.name}"
+    value     = "linux"
+  }
+
+  group "gateways" {
+    network {
+      mode = "bridge"
+      port "inbound1" {
+        static = 8081
+        to     = 8081
+      }
+      port "inbound2" {
+        static = 8082
+        to     = 8082
+      }
+      port "inbound3" {
+        static = 8083
+        to     = 8083
+      }
+    }
+
+    service {
+      name = "ig1"
+      port = "8081"
+      connect {
+        gateway {
+          ingress {
+            listener {
+              port     = 8081
+              protocol = "tcp"
+              service {
+                name = "api1"
+              }
+            }
+          }
+        }
+      }
+    }
+
+    service {
+      name = "ig2"
+      port = "8082"
+      connect {
+        gateway {
+          ingress {
+            listener {
+              port     = 8082
+              protocol = "tcp"
+              service {
+                name = "api2"
+              }
+            }
+          }
+        }
+      }
+    }
+
+    service {
+      name = "ig3"
+      port = "8083"
+      connect {
+        gateway {
+          ingress {
+            listener {
+              port     = 8083
+              protocol = "tcp"
+              service {
+                name = "api3"
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  group "api1" {
+    network {
+      mode = "host"
+      port "api" {}
+    }
+
+    service {
+      name = "api1"
+      port = "api"
+
+      connect {
+        native = true
+      }
+    }
+
+    task "api1" {
+      driver = "docker"
+
+      config {
+        image        = "hashicorpnomad/uuid-api:v5"
+        network_mode = "host"
+      }
+
+      env {
+        BIND = "0.0.0.0"
+        PORT = "${NOMAD_PORT_api}"
+      }
+    }
+  }
+
+  group "api2" {
+    network {
+      mode = "host"
+      port "api" {}
+    }
+
+    service {
+      name = "api2"
+      port = "api"
+
+      connect {
+        native = true
+      }
+    }
+
+    task "api2" {
+      driver = "docker"
+
+      config {
+        image        = "hashicorpnomad/uuid-api:v5"
+        network_mode = "host"
+      }
+
+      env {
+        BIND = "0.0.0.0"
+        PORT = "${NOMAD_PORT_api}"
+      }
+    }
+  }
+
+  group "api3" {
+    network {
+      mode = "host"
+      port "api" {}
+    }
+
+    service {
+      name = "api3"
+      port = "api"
+
+      connect {
+        native = true
+      }
+    }
+
+    task "api3" {
+      driver = "docker"
+
+      config {
+        image        = "hashicorpnomad/uuid-api:v5"
+        network_mode = "host"
+      }
+
+      env {
+        BIND = "0.0.0.0"
+        PORT = "${NOMAD_PORT_api}"
+      }
+    }
+  }
+}