Merge pull request #10387 from hashicorp/e2e-consul-namespaces-oss

e2e: add tests for consul namespaces from nomad oss

commit 650b94f89f
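
The hunks below thread a Consul namespace argument through the shared e2eutil assertion helpers (RequireConsulStatus, RequireConsulRegistered, RequireConsulDeregistered). The helper bodies are not part of this diff; the following is a minimal sketch of what the updated RequireConsulStatus presumably looks like, with the signature inferred from the call sites and the body simplified (the real helper most likely polls until the checks converge):

package e2eutil

import (
	capi "github.com/hashicorp/consul/api"
	"github.com/stretchr/testify/require"
)

// RequireConsulStatus asserts that every Consul health check for service,
// scoped to the given Consul namespace, reports the expected status.
// Sketch only: signature inferred from the call sites in this diff.
func RequireConsulStatus(require *require.Assertions, client *capi.Client, namespace, service, expected string) {
	// Namespace on QueryOptions scopes the lookup (Consul Enterprise).
	checks, _, err := client.Health().Checks(service, &capi.QueryOptions{Namespace: namespace})
	require.NoError(err)
	require.NotEmpty(checks, "no health checks for %q", service)
	for _, check := range checks {
		require.Equal(expected, check.Status)
	}
}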
@@ -9,9 +9,13 @@ import (
 	"github.com/hashicorp/nomad/helper/uuid"
 )
 
+const (
+	consulNamespace = "default"
+)
+
 type ConnectClientStateE2ETest struct {
 	framework.TC
-	jobIds []string
+	jobIDs []string
 }
 
 func (tc *ConnectClientStateE2ETest) BeforeAll(f *framework.F) {
@@ -24,10 +28,10 @@ func (tc *ConnectClientStateE2ETest) AfterEach(f *framework.F) {
 		return
 	}
 
-	for _, id := range tc.jobIds {
+	for _, id := range tc.jobIDs {
 		tc.Nomad().Jobs().Deregister(id, true, nil)
 	}
-	tc.jobIds = []string{}
+	tc.jobIDs = []string{}
 	tc.Nomad().System().GarbageCollect()
 }
 
@@ -35,7 +39,7 @@ func (tc *ConnectClientStateE2ETest) TestClientRestart(f *framework.F) {
 	t := f.T()
 
 	jobID := "connect" + uuid.Generate()[0:8]
-	tc.jobIds = append(tc.jobIds, jobID)
+	tc.jobIDs = append(tc.jobIDs, jobID)
 	client := tc.Nomad()
 	consulClient := tc.Consul()
 
@@ -43,18 +47,16 @@ func (tc *ConnectClientStateE2ETest) TestClientRestart(f *framework.F) {
 		"connect/input/demo.nomad", jobID, "")
 	f.Equal(2, len(allocs))
 
-	e2eutil.RequireConsulStatus(f.Assertions, consulClient,
-		"count-api-sidecar-proxy", capi.HealthPassing)
+	e2eutil.RequireConsulStatus(f.Assertions, consulClient, consulNamespace, "count-api-sidecar-proxy", capi.HealthPassing)
 	nodeID := allocs[0].NodeID
 
 	restartID, err := e2eutil.AgentRestart(client, nodeID)
 	if restartID != "" {
-		tc.jobIds = append(tc.jobIds, restartID)
+		tc.jobIDs = append(tc.jobIDs, restartID)
 	}
 	if err != nil {
 		t.Skip("node cannot be restarted", err)
 	}
 
-	e2eutil.RequireConsulStatus(f.Assertions, consulClient,
-		"count-api-sidecar-proxy", capi.HealthPassing)
+	e2eutil.RequireConsulStatus(f.Assertions, consulClient, consulNamespace, "count-api-sidecar-proxy", capi.HealthPassing)
 }
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/hashicorp/nomad/api"
+	api "github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/e2e/e2eutil"
 	"github.com/hashicorp/nomad/e2e/framework"
 	"github.com/hashicorp/nomad/helper"
@@ -22,6 +22,11 @@ const (
 	consulJobRegisterOnUpdatePart2 = "consul/input/services_present.nomad"
 )
 
+const (
+	// unless otherwise set, tests should just use the default consul namespace
+	consulNamespace = "default"
+)
+
 type ConsulE2ETest struct {
 	framework.TC
 	jobIds []string
@@ -37,6 +42,7 @@ func init() {
 			new(ScriptChecksE2ETest),
 			new(CheckRestartE2ETest),
 			new(OnUpdateChecksTest),
+			new(ConsulNamespacesE2ETest),
 		},
 	})
 }
@@ -79,7 +85,7 @@ func (tc *ConsulE2ETest) TestConsulRegistration(f *framework.F) {
 	}
 
 	// Assert services get registered
-	e2eutil.RequireConsulRegistered(r, tc.Consul(), "consul-example", 3)
+	e2eutil.RequireConsulRegistered(r, tc.Consul(), consulNamespace, "consul-example", 3)
 	services, _, err := tc.Consul().Catalog().Service("consul-example", "", nil)
 	require.NoError(t, err)
 	for _, s := range services {
@@ -91,7 +97,7 @@ func (tc *ConsulE2ETest) TestConsulRegistration(f *framework.F) {
 	e2eutil.WaitForJobStopped(t, nomadClient, jobId)
 
 	// Verify that services were de-registered in Consul
-	e2eutil.RequireConsulDeregistered(r, tc.Consul(), "consul-example")
+	e2eutil.RequireConsulDeregistered(r, tc.Consul(), consulNamespace, "consul-example")
 }
 
 func (tc *ConsulE2ETest) TestConsulRegisterOnUpdate(f *framework.F) {
@@ -121,7 +127,7 @@ func (tc *ConsulE2ETest) TestConsulRegisterOnUpdate(f *framework.F) {
 	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
 
 	// Assert service is now registered.
-	e2eutil.RequireConsulRegistered(r, tc.Consul(), "nc-service", 1)
+	e2eutil.RequireConsulRegistered(r, tc.Consul(), consulNamespace, "nc-service", 1)
 }
 
 // TestCanaryInplaceUpgrades verifies setting and unsetting canary tags
e2e/consul/input/namespaces/connect_ingress.nomad (new file)
@@ -0,0 +1,143 @@
job "connect_ingress" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "ingress-group" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "bridge"
      port "inbound" {
        static = 8080
        to     = 8080
      }
    }

    service {
      name = "my-ingress-service"
      port = "8080"

      connect {
        gateway {
          ingress {
            listener {
              port     = 8080
              protocol = "tcp"
              service {
                name = "uuid-api"
              }
            }
          }
        }
      }
    }
  }

  group "ingress-group-z" {

    # consul namespace not set

    network {
      mode = "bridge"
      port "inbound" {
        static = 8081
        to     = 8080
      }
    }

    service {
      name = "my-ingress-service-z"
      port = "8081"

      connect {
        gateway {
          ingress {
            listener {
              port     = 8080
              protocol = "tcp"
              service {
                name = "uuid-api-z"
              }
            }
          }
        }
      }
    }
  }

  group "generator" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "host"
      port "api" {}
    }

    service {
      name = "uuid-api"
      port = "${NOMAD_PORT_api}"

      connect {
        native = true
      }
    }

    task "generate" {
      driver = "docker"

      config {
        image        = "hashicorpnomad/uuid-api:v3"
        network_mode = "host"
      }

      env {
        BIND = "0.0.0.0"
        PORT = "${NOMAD_PORT_api}"
      }
    }
  }

  group "generator-z" {

    # consul namespace not set

    network {
      mode = "host"
      port "api" {}
    }

    service {
      name = "uuid-api-z"
      port = "${NOMAD_PORT_api}"

      connect {
        native = true
      }
    }

    task "generate-z" {
      driver = "docker"

      config {
        image        = "hashicorpnomad/uuid-api:v3"
        network_mode = "host"
      }

      env {
        BIND = "0.0.0.0"
        PORT = "${NOMAD_PORT_api}"
      }
    }
  }
}
e2e/consul/input/namespaces/connect_sidecars.nomad (new file)
@@ -0,0 +1,167 @@
job "connect_sidecars" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "api" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "bridge"
    }

    service {
      name = "count-api"
      port = "9001"

      connect {
        sidecar_service {}
      }

      check {
        expose   = true
        name     = "api-health"
        type     = "http"
        path     = "/health"
        interval = "5s"
        timeout  = "3s"
      }
    }

    task "web" {
      driver = "docker"

      config {
        image = "hashicorpnomad/counter-api:v3"
      }
    }
  }

  group "api-z" {

    # consul namespace not set

    network {
      mode = "bridge"
    }

    service {
      name = "count-api-z"
      port = "9001"

      connect {
        sidecar_service {}
      }

      check {
        expose   = true
        name     = "api-health"
        type     = "http"
        path     = "/health"
        interval = "5s"
        timeout  = "3s"
      }
    }

    task "web-z" {
      driver = "docker"

      config {
        image = "hashicorpnomad/counter-api:v3"
      }
    }
  }

  group "dashboard" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "bridge"

      port "http" {
        static = 9002
        to     = 9002
      }
    }

    service {
      name = "count-dashboard"
      port = "9002"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "count-api"
              local_bind_port  = 8080
            }
          }
        }
      }
    }

    task "dashboard" {
      driver = "docker"

      env {
        COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
      }

      config {
        image = "hashicorpnomad/counter-dashboard:v3"
      }
    }
  }

  group "dashboard-z" {

    # consul namespace not set

    network {
      mode = "bridge"

      port "http" {
        static = 9003
        to     = 9002
      }
    }

    service {
      name = "count-dashboard-z"
      port = "9003"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "count-api-z"
              local_bind_port  = 8080
            }
          }
        }
      }
    }

    task "dashboard" {
      driver = "docker"

      env {
        COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api_z}"
      }

      config {
        image = "hashicorpnomad/counter-dashboard:v3"
      }
    }
  }
}
e2e/consul/input/namespaces/connect_terminating.nomad (new file)
@@ -0,0 +1,205 @@
job "connect_terminating" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "api" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "host"
      port "port" {
        static = "9001"
      }
    }

    service {
      name = "count-api"
      port = "port"
    }

    task "api" {
      driver = "docker"

      config {
        image        = "hashicorpnomad/counter-api:v3"
        network_mode = "host"
      }

      env {
        PORT = "9001"
      }
    }
  }

  group "api-z" {

    # consul namespace not set

    network {
      mode = "host"
      port "port" {
        static = "9011"
      }
    }

    service {
      name = "count-api-z"
      port = "port"
    }

    task "api" {
      driver = "docker"

      config {
        image        = "hashicorpnomad/counter-api:v3"
        network_mode = "host"
      }

      env {
        PORT = "9011"
      }
    }
  }

  group "gateway" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "bridge"
    }

    service {
      name = "api-gateway"

      connect {
        gateway {
          terminating {
            service {
              name = "count-api"
            }
          }
        }
      }
    }
  }

  group "gateway-z" {

    # consul namespace not set

    network {
      mode = "bridge"
    }

    service {
      name = "api-gateway-z"

      connect {
        gateway {
          terminating {
            service {
              name = "count-api-z"
            }
          }
        }
      }
    }
  }

  group "dashboard" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "bridge"

      port "http" {
        static = 9002
        to     = 9002
      }
    }

    service {
      name = "count-dashboard"
      port = "9002"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "count-api"
              local_bind_port  = 8080
            }
          }
        }
      }
    }

    task "dashboard" {
      driver = "docker"

      env {
        COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
      }

      config {
        image = "hashicorpnomad/counter-dashboard:v3"
      }
    }
  }

  group "dashboard-z" {

    # consul namespace not set

    network {
      mode = "bridge"

      port "http" {
        static = 9012
        to     = 9002
      }
    }

    service {
      name = "count-dashboard-z"
      port = "9012"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "count-api-z"
              local_bind_port  = 8080
            }
          }
        }
      }
    }

    task "dashboard" {
      driver = "docker"

      env {
        COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api_z}"
      }

      config {
        image = "hashicorpnomad/counter-dashboard:v3"
      }
    }
  }
}
e2e/consul/input/namespaces/script_checks_group.nomad (new file)
@@ -0,0 +1,167 @@
job "script_checks_group" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "group-a" {

    consul {
      namespace = "apple"
    }

    network {
      mode = "bridge"
    }

    service {
      name = "service-1a"
      port = "9001"

      check {
        name     = "alive-1"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "2s"
        command  = "echo"
        args     = ["alive-1"]
      }
    }

    service {
      name = "service-2a"
      port = "9002"

      check {
        name     = "alive-2a"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "2s"
        command  = "echo"
        args     = ["alive-2a"]
      }

      # the file expected by this check will not exist when started,
      # so the check will error-out and be in a warning state until
      # it's been created
      check {
        name     = "alive-2ab"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "2s"
        command  = "cat"
        args     = ["/tmp/${NOMAD_ALLOC_ID}-alive-2ab"]
      }
    }

    service {
      name = "service-3a"
      port = "9003"

      # this check should always time out and so the service
      # should not be marked healthy
      check {
        name     = "always-dead"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "1s"
        command  = "sleep"
        args     = ["10"]
      }
    }

    task "test" {
      driver = "raw_exec"

      config {
        command = "bash"
        args    = ["-c", "sleep 15000"]
      }
    }
  }

  group "group-z" {

    # no consul namespace set

    network {
      mode = "bridge"
    }

    service {
      name = "service-1z"
      port = "9001"

      check {
        name     = "alive-1z"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "2s"
        command  = "echo"
        args     = ["alive-1"]
      }
    }

    service {
      name = "service-2z"
      port = "9002"

      check {
        name     = "alive-2z"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "2s"
        command  = "echo"
        args     = ["alive-2z"]
      }

      # the file expected by this check will not exist when started,
      # so the check will error-out and be in a warning state until
      # it's been created
      check {
        name     = "alive-2zb"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "2s"
        command  = "cat"
        args     = ["/tmp/${NOMAD_ALLOC_ID}-alive-2zb"]
      }
    }

    service {
      name = "service-3z"
      port = "9003"

      # this check should always time out and so the service
      # should not be marked healthy
      check {
        name     = "always-dead"
        type     = "script"
        task     = "test"
        interval = "2s"
        timeout  = "1s"
        command  = "sleep"
        args     = ["10"]
      }
    }

    task "test" {
      driver = "raw_exec"

      config {
        command = "bash"
        args    = ["-c", "sleep 15000"]
      }
    }
  }
}
e2e/consul/input/namespaces/script_checks_task.nomad (new file)
@@ -0,0 +1,145 @@
job "script_checks_task" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "group-a" {

    consul {
      namespace = "apple"
    }

    task "test" {
      service {
        name = "service-1a"

        check {
          name     = "alive-1"
          type     = "script"
          interval = "2s"
          timeout  = "2s"
          command  = "echo"
          args     = ["alive-1"]
        }
      }

      service {
        name = "service-2a"

        check {
          name     = "alive-2a"
          type     = "script"
          interval = "2s"
          timeout  = "2s"
          command  = "echo"
          args     = ["alive-2a"]
        }

        # the file expected by this check will not exist when started,
        # so the check will error-out and be in a warning state until
        # it's been created
        check {
          name     = "alive-2ab"
          type     = "script"
          interval = "2s"
          timeout  = "2s"
          command  = "cat"
          args     = ["${NOMAD_TASK_DIR}/alive-2ab"]
        }
      }

      service {
        name = "service-3a"

        # this check should always time out and so the service
        # should not be marked healthy
        check {
          name     = "always-dead"
          type     = "script"
          interval = "2s"
          timeout  = "1s"
          command  = "sleep"
          args     = ["10"]
        }
      }

      driver = "raw_exec"

      config {
        command = "bash"
        args    = ["-c", "sleep 15000"]
      }
    }
  }

  group "group-z" {

    # consul namespace not set

    task "test" {
      service {
        name = "service-1z"

        check {
          name     = "alive-1"
          type     = "script"
          interval = "2s"
          timeout  = "2s"
          command  = "echo"
          args     = ["alive-1z"]
        }
      }

      service {
        name = "service-2z"

        check {
          name     = "alive-2z"
          type     = "script"
          interval = "2s"
          timeout  = "2s"
          command  = "echo"
          args     = ["alive-2z"]
        }

        # the file expected by this check will not exist when started,
        # so the check will error-out and be in a warning state until
        # it's been created
        check {
          name     = "alive-2zb"
          type     = "script"
          interval = "2s"
          timeout  = "2s"
          command  = "cat"
          args     = ["${NOMAD_TASK_DIR}/alive-2zb"]
        }
      }

      service {
        name = "service-3z"

        # this check should always time out and so the service
        # should not be marked healthy
        check {
          name     = "always-dead"
          type     = "script"
          interval = "2s"
          timeout  = "1s"
          command  = "sleep"
          args     = ["10"]
        }
      }

      driver = "raw_exec"

      config {
        command = "bash"
        args    = ["-c", "sleep 15000"]
      }
    }
  }
}
e2e/consul/input/namespaces/services_group.nomad (new file)
@@ -0,0 +1,151 @@
job "services_group" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "group-b" {

    consul {
      namespace = "banana"
    }

    network {
      mode = "bridge"
      port "port-b" {
        to = 1234
      }
    }

    service {
      name = "b1"
      port = "port-b"

      check {
        name     = "ping-b1"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name = "b2"
      port = "port-b"

      check {
        name     = "ping-b2"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "task-b" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "nc"
        args    = ["-ll", "-p", "1234", "-e", "/bin/cat"]
      }
    }
  }

  group "group-c" {

    consul {
      namespace = "cherry"
    }

    network {
      mode = "bridge"
      port "port-c" {
        to = 1234
      }
    }

    service {
      name = "c1"
      port = "port-c"

      check {
        name     = "ping-c1"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name = "c2"
      port = "port-c"

      check {
        name     = "ping-c2"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "task-c" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "nc"
        args    = ["-ll", "-p", "1234", "-e", "/bin/cat"]
      }
    }
  }

  group "group-z" {

    # consul namespace not set

    network {
      mode = "bridge"
      port "port-z" {
        to = 1234
      }
    }

    service {
      name = "z1"
      port = "port-z"

      check {
        name     = "ping-z1"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name = "z2"
      port = "port-z"

      check {
        name     = "ping-z2"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "task-z" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "nc"
        args    = ["-ll", "-p", "1234", "-e", "/bin/cat"]
      }
    }
  }
}
e2e/consul/input/namespaces/services_task.nomad (new file)
@@ -0,0 +1,151 @@
job "services_task" {
  datacenters = ["dc1"]
  type        = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "group-b" {

    consul {
      namespace = "banana"
    }

    network {
      mode = "bridge"
      port "port-b" {
        to = 1234
      }
    }

    task "task-b" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "nc"
        args    = ["-ll", "-p", "1234", "-e", "/bin/cat"]
      }

      service {
        name = "b1"
        port = "port-b"

        check {
          name     = "ping-b1"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      service {
        name = "b2"
        port = "port-b"

        check {
          name     = "ping-b2"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }

  group "group-c" {

    consul {
      namespace = "cherry"
    }

    network {
      mode = "bridge"
      port "port-c" {
        to = 1234
      }
    }

    task "task-c" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "nc"
        args    = ["-ll", "-p", "1234", "-e", "/bin/cat"]
      }

      service {
        name = "c1"
        port = "port-c"

        check {
          name     = "ping-c1"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      service {
        name = "c2"
        port = "port-c"

        check {
          name     = "ping-c2"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }

  group "group-z" {

    # consul namespace not set

    network {
      mode = "bridge"
      port "port-z" {
        to = 1234
      }
    }

    task "task-z" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "nc"
        args    = ["-ll", "-p", "1234", "-e", "/bin/cat"]
      }

      service {
        name = "z1"
        port = "port-z"

        check {
          name     = "ping-z1"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      service {
        name = "z2"
        port = "port-z"

        check {
          name     = "ping-z2"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
e2e/consul/input/namespaces/template_kv.nomad (new file)
@@ -0,0 +1,51 @@
job "template_kv" {
  datacenters = ["dc1"]
  type        = "batch"

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "group-b" {

    consul {
      namespace = "banana"
    }

    task "task-b" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "cat"
        args    = ["local/a.txt"]
      }

      template {
        data        = "value: {{ key \"ns-kv-example\" }}"
        destination = "local/a.txt"
      }
    }
  }

  group "group-z" {

    # no consul namespace set

    task "task-z" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "cat"
        args    = ["local/a.txt"]
      }

      template {
        data        = "value: {{ key \"ns-kv-example\" }}"
        destination = "local/a.txt"
      }
    }
  }
}
e2e/consul/namespaces.go (new file)
@@ -0,0 +1,67 @@
package consul

import (
	"fmt"

	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper"
	"github.com/stretchr/testify/require"
)

// Job files used to test Consul Namespaces. Each job should run on Nomad OSS
// and Nomad ENT with expectations set accordingly.
//
// All tests require Consul Enterprise.
const (
	cnsJobGroupServices      = "consul/input/namespaces/services_group.nomad"
	cnsJobTaskServices       = "consul/input/namespaces/services_task.nomad"
	cnsJobTemplateKV         = "consul/input/namespaces/template_kv.nomad"
	cnsJobConnectSidecars    = "consul/input/namespaces/connect_sidecars.nomad"
	cnsJobConnectIngress     = "consul/input/namespaces/connect_ingress.nomad"
	cnsJobConnectTerminating = "consul/input/namespaces/connect_terminating.nomad"
	cnsJobScriptChecksTask   = "consul/input/namespaces/script_checks_task.nomad"
	cnsJobScriptChecksGroup  = "consul/input/namespaces/script_checks_group.nomad"
)

var (
	// consulNamespaces represents the custom consul namespaces we create and
	// can make use of in tests, but usefully so only in Nomad Enterprise
	consulNamespaces = []string{"apple", "banana", "cherry"}

	// allConsulNamespaces represents all namespaces we expect in consul after
	// creating consulNamespaces, which then includes "default", which is the
	// only namespace accessed by Nomad OSS (outside of agent configuration)
	allConsulNamespaces = append(consulNamespaces, "default")
)

type ConsulNamespacesE2ETest struct {
	framework.TC
	jobIDs []string
}

func (tc *ConsulNamespacesE2ETest) BeforeAll(f *framework.F) {
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)

	// create a set of consul namespaces in which to register services
	e2eutil.CreateConsulNamespaces(f.T(), tc.Consul(), consulNamespaces)

	// insert a key of the same name into KV for each namespace, where the value
	// contains the namespace name making it easy to determine which namespace
	// consul template actually accessed
	for _, namespace := range allConsulNamespaces {
		value := fmt.Sprintf("ns_%s", namespace)
		e2eutil.PutConsulKey(f.T(), tc.Consul(), namespace, "ns-kv-example", value)
	}
}

func (tc *ConsulNamespacesE2ETest) AfterAll(f *framework.F) {
	e2eutil.DeleteConsulNamespaces(f.T(), tc.Consul(), consulNamespaces)
}

func (tc *ConsulNamespacesE2ETest) TestNamespacesExist(f *framework.F) {
	// make sure our namespaces exist + default
	namespaces := e2eutil.ListConsulNamespaces(f.T(), tc.Consul())
	require.True(f.T(), helper.CompareSliceSetString(namespaces, append(consulNamespaces, "default")))
}
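
The BeforeAll hook above leans on e2eutil helpers whose bodies are not shown in this diff (CreateConsulNamespaces, PutConsulKey, DeleteConsulNamespaces, ListConsulNamespaces). A rough sketch of the first two, under the assumption that they wrap the Consul Enterprise client API directly:

package e2eutil

import (
	"testing"

	capi "github.com/hashicorp/consul/api"
	"github.com/stretchr/testify/require"
)

// CreateConsulNamespaces registers each named namespace via the Consul
// Enterprise namespaces API. Sketch only; the real helper may differ.
func CreateConsulNamespaces(t *testing.T, client *capi.Client, namespaces []string) {
	for _, namespace := range namespaces {
		_, _, err := client.Namespaces().Create(&capi.Namespace{Name: namespace}, nil)
		require.NoError(t, err)
	}
}

// PutConsulKey writes a KV pair scoped to the given Consul namespace.
// Sketch only; the real helper may differ.
func PutConsulKey(t *testing.T, client *capi.Client, namespace, key, value string) {
	opts := &capi.WriteOptions{Namespace: namespace}
	_, err := client.KV().Put(&capi.KVPair{Key: key, Value: []byte(value)}, opts)
	require.NoError(t, err)
}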
e2e/consul/namespaces_oss.go (new file)
@@ -0,0 +1,386 @@
// +build !ent

// Nomad OSS ignores Consul Namespace configuration in jobs, so these e2e tests
// verify everything still works and is registered into the "default" namespace,
// since e2e always uses Consul Enterprise. With Consul OSS there are no
// namespaces, and these tests will not work.

package consul

import (
	"sort"

	capi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/stretchr/testify/require"
)

func (tc *ConsulNamespacesE2ETest) TestConsulRegisterGroupServices(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-group-services"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobGroupServices, jobID, "")
	require.Len(f.T(), allocations, 3)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	// Verify our services were registered into "default"
	e2eutil.RequireConsulRegistered(r, c, namespace, "b1", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "b2", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "c1", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "c2", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "z1", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "z2", 1)

	// Verify our services are all healthy
	e2eutil.RequireConsulStatus(r, c, namespace, "b1", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "b2", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "c1", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "c2", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "z1", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "z2", "passing")

	// Verify our services were NOT registered into specified consul namespaces
	e2eutil.RequireConsulRegistered(r, c, "banana", "b1", 0)
	e2eutil.RequireConsulRegistered(r, c, "banana", "b2", 0)
	e2eutil.RequireConsulRegistered(r, c, "cherry", "c1", 0)
	e2eutil.RequireConsulRegistered(r, c, "cherry", "c2", 0)

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)

	// Verify that services were de-registered in Consul
	e2eutil.RequireConsulDeregistered(r, c, namespace, "b1")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "b2")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "c1")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "c2")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "z1")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "z2")
}

func (tc *ConsulNamespacesE2ETest) TestConsulRegisterTaskServices(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-task-services"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobTaskServices, jobID, "")
	require.Len(f.T(), allocations, 3)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	// Verify our services were registered into "default"
	e2eutil.RequireConsulRegistered(r, c, namespace, "b1", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "b2", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "c1", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "c2", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "z1", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "z2", 1)

	// Verify our services are all healthy
	e2eutil.RequireConsulStatus(r, c, namespace, "b1", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "b2", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "c1", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "c2", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "z1", "passing")
	e2eutil.RequireConsulStatus(r, c, namespace, "z2", "passing")

	// Verify our services were NOT registered into specified consul namespaces
	e2eutil.RequireConsulRegistered(r, c, "banana", "b1", 0)
	e2eutil.RequireConsulRegistered(r, c, "banana", "b2", 0)
	e2eutil.RequireConsulRegistered(r, c, "cherry", "c1", 0)
	e2eutil.RequireConsulRegistered(r, c, "cherry", "c2", 0)
	e2eutil.RequireConsulRegistered(r, c, "cherry", "z1", 0)
	e2eutil.RequireConsulRegistered(r, c, "cherry", "z2", 0)

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)

	// Verify that services were de-registered from Consul
	e2eutil.RequireConsulDeregistered(r, c, namespace, "b1")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "b2")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "c1")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "c2")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "z1")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "z2")
}

func (tc *ConsulNamespacesE2ETest) TestConsulTemplateKV(f *framework.F) {
	t := f.T()
	nomadClient := tc.Nomad()
	jobID := "cns-template-kv"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs to complete
	allocations := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, cnsJobTemplateKV, jobID, "")
	require.Len(t, allocations, 2)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsStopped(f.T(), tc.Nomad(), allocIDs)

	// Sort allocs by name
	sort.Sort(e2eutil.AllocsByName(allocations))

	// Check template read from default namespace even if namespace set
	textB, err := e2eutil.AllocTaskLogs(allocations[0].ID, "task-b", e2eutil.LogsStdOut)
	require.NoError(t, err)
	require.Equal(t, "value: ns_default", textB)

	// Check template read from default namespace if no namespace set
	textZ, err := e2eutil.AllocTaskLogs(allocations[1].ID, "task-z", e2eutil.LogsStdOut)
	require.NoError(t, err)
	require.Equal(t, "value: ns_default", textZ)

	// Stop the job
	e2eutil.WaitForJobStopped(t, nomadClient, jobID)
}

func (tc *ConsulNamespacesE2ETest) TestConsulConnectSidecars(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-connect-sidecars"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobConnectSidecars, jobID, "")
	require.Len(f.T(), allocations, 4)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	// Verify services with cns set were registered into "default"
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-api", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-api-sidecar-proxy", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-dashboard", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-dashboard-sidecar-proxy", 1)

	// Verify services without cns set were registered into "default"
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-api-z", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-api-z-sidecar-proxy", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-dashboard-z", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-dashboard-z-sidecar-proxy", 1)

	// Verify our services were NOT registered into specified consul namespaces
	e2eutil.RequireConsulRegistered(r, c, "apple", "count-api", 0)
	e2eutil.RequireConsulRegistered(r, c, "apple", "count-api-sidecar-proxy", 0)
	e2eutil.RequireConsulRegistered(r, c, "apple", "count-dashboard", 0)
	e2eutil.RequireConsulRegistered(r, c, "apple", "count-dashboard-sidecar-proxy", 0)

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)

	// Verify that services were de-registered from Consul
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-api")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-api-sidecar-proxy")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-dashboard")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-dashboard-sidecar-proxy")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-api-z")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-api-z-sidecar-proxy")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-dashboard-z")
	e2eutil.RequireConsulDeregistered(r, c, namespace, "count-dashboard-z-sidecar-proxy")
}

func (tc *ConsulNamespacesE2ETest) TestConsulConnectIngressGateway(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-connect-ingress"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobConnectIngress, jobID, "")
	require.Len(f.T(), allocations, 4) // 2 x (1 service + 1 gateway)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	// Verify services with cns set were registered into "default"
	e2eutil.RequireConsulRegistered(r, c, namespace, "my-ingress-service", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "uuid-api", 1)

	// Verify services without cns set were registered into "default"
	e2eutil.RequireConsulRegistered(r, c, namespace, "my-ingress-service-z", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "uuid-api-z", 1)

	// Verify services with cns set were NOT registered into specified consul namespaces
	e2eutil.RequireConsulRegistered(r, c, "apple", "my-ingress-service", 0)
	e2eutil.RequireConsulRegistered(r, c, "apple", "uuid-api", 0)

	// Read the config entry of gateway with cns set, checking it exists in "default" namespace
	ce := e2eutil.ReadConsulConfigEntry(f.T(), c, namespace, "ingress-gateway", "my-ingress-service")
	require.Equal(f.T(), namespace, ce.GetNamespace())

	// Read the config entry of gateway without cns set, checking it exists in "default" namespace
	ceZ := e2eutil.ReadConsulConfigEntry(f.T(), c, namespace, "ingress-gateway", "my-ingress-service-z")
	require.Equal(f.T(), namespace, ceZ.GetNamespace())

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)

	// Remove the config entries
	e2eutil.DeleteConsulConfigEntry(f.T(), c, namespace, "ingress-gateway", "my-ingress-service")
	e2eutil.DeleteConsulConfigEntry(f.T(), c, namespace, "ingress-gateway", "my-ingress-service-z")
}

func (tc *ConsulNamespacesE2ETest) TestConsulConnectTerminatingGateway(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-connect-terminating"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobConnectTerminating, jobID, "")
	require.Len(f.T(), allocations, 6) // 2 x (2 services + 1 gateway)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	// Verify services with cns set were registered into "default" Consul namespace
	e2eutil.RequireConsulRegistered(r, c, namespace, "api-gateway", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-api", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-dashboard", 1)

	// Verify services without cns set were registered into "default" Consul namespace
	e2eutil.RequireConsulRegistered(r, c, namespace, "api-gateway-z", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-api-z", 1)
	e2eutil.RequireConsulRegistered(r, c, namespace, "count-dashboard-z", 1)

	// Verify services with cns set were NOT registered into specified consul namespaces
	e2eutil.RequireConsulRegistered(r, c, "apple", "api-gateway", 0)
	e2eutil.RequireConsulRegistered(r, c, "apple", "count-api", 0)
	e2eutil.RequireConsulRegistered(r, c, "apple", "count-dashboard", 0)

	// Read the config entry of gateway with cns set, checking it exists in "default" namespace
	ce := e2eutil.ReadConsulConfigEntry(f.T(), c, namespace, "terminating-gateway", "api-gateway")
	require.Equal(f.T(), namespace, ce.GetNamespace())

	// Read the config entry of gateway without cns set, checking it exists in "default" namespace
	ceZ := e2eutil.ReadConsulConfigEntry(f.T(), c, namespace, "terminating-gateway", "api-gateway-z")
	require.Equal(f.T(), namespace, ceZ.GetNamespace())

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)

	// Remove the config entries
	e2eutil.DeleteConsulConfigEntry(f.T(), c, namespace, "terminating-gateway", "api-gateway")
	e2eutil.DeleteConsulConfigEntry(f.T(), c, namespace, "terminating-gateway", "api-gateway-z")
}

func (tc *ConsulNamespacesE2ETest) TestConsulScriptChecksTask(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-script-checks-task"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobScriptChecksTask, jobID, "")
	require.Len(f.T(), allocations, 2)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	sort.Sort(e2eutil.AllocsByName(allocations))
	allocsWithSetNamespace := allocations[0:1]
	allocsWithNoNamespace := allocations[1:2]

	// Verify checks were registered into "default" Consul namespace
	e2eutil.RequireConsulStatus(r, c, namespace, "service-1a", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2a", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-3a", capi.HealthCritical)

	// Check in warning state becomes healthy after check passes for the service
	// with specified Consul namespace
	//
	// (ensures UpdateTTL is respecting namespace)
	_, _, err := exec(nomadClient, allocsWithSetNamespace,
		[]string{"/bin/sh", "-c", "touch ${NOMAD_TASK_DIR}/alive-2ab"})
	r.NoError(err)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2a", capi.HealthPassing)

	// Verify checks were registered into "default" Consul namespace when no
	// namespace was specified.
	e2eutil.RequireConsulStatus(r, c, namespace, "service-1z", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2z", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-3z", capi.HealthCritical)

	// Check in warning state becomes healthy after check passes for the service
	// with no specified Consul namespace
	//
	// (ensures UpdateTTL is respecting namespace)
	_, _, errZ := exec(nomadClient, allocsWithNoNamespace,
		[]string{"/bin/sh", "-c", "touch ${NOMAD_TASK_DIR}/alive-2zb"})
	r.NoError(errZ)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2z", capi.HealthPassing)

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)
}

func (tc *ConsulNamespacesE2ETest) TestConsulScriptChecksGroup(f *framework.F) {
	nomadClient := tc.Nomad()
	jobID := "cns-script-checks-group"
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Run job and wait for allocs
	allocations := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, cnsJobScriptChecksGroup, jobID, "")
	require.Len(f.T(), allocations, 2)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocations)
	e2eutil.WaitForAllocsRunning(f.T(), tc.Nomad(), allocIDs)

	r := f.Assertions
	c := tc.Consul()
	namespace := consulNamespace

	sort.Sort(e2eutil.AllocsByName(allocations))
	allocsWithSetNamespace := allocations[0:1]
	allocsWithNoNamespace := allocations[1:2]

	// Verify checks were registered into "default" Consul namespace
	e2eutil.RequireConsulStatus(r, c, namespace, "service-1a", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2a", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-3a", capi.HealthCritical)

	// Check in warning state becomes healthy after check passes for the service
	// with specified Consul namespace
	//
	// (ensures UpdateTTL is respecting namespace)
	_, _, err := exec(nomadClient, allocsWithSetNamespace,
		[]string{"/bin/sh", "-c", "touch /tmp/${NOMAD_ALLOC_ID}-alive-2ab"})
	r.NoError(err)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2a", capi.HealthPassing)

	// Verify checks were registered into "default" Consul namespace when no
	// namespace was specified.
	e2eutil.RequireConsulStatus(r, c, namespace, "service-1z", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2z", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-3z", capi.HealthCritical)

	// Check in warning state becomes healthy after check passes for the service
	// with no specified Consul namespace
	//
	// (ensures UpdateTTL is respecting namespace)
	_, _, errZ := exec(nomadClient, allocsWithNoNamespace,
		[]string{"/bin/sh", "-c", "touch /tmp/${NOMAD_ALLOC_ID}-alive-2zb"})
	r.NoError(errZ)
	e2eutil.RequireConsulStatus(r, c, namespace, "service-2z", capi.HealthPassing)

	// Stop the job
	e2eutil.WaitForJobStopped(f.T(), nomadClient, jobID)
}
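The tests above order allocations with sort.Sort(e2eutil.AllocsByName(allocations)) so the group-a alloc reliably precedes the group-z alloc. AllocsByName is not shown in this diff; it is presumably a plain sort.Interface over allocation list stubs, along these lines (sketch inferred from usage):

package e2eutil

import api "github.com/hashicorp/nomad/api"

// AllocsByName orders allocation list stubs by allocation name.
// Sketch only; the real helper may differ.
type AllocsByName []*api.AllocationListStub

func (a AllocsByName) Len() int           { return len(a) }
func (a AllocsByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a AllocsByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
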
@ -9,7 +9,7 @@ import (
|
|||
"time"
|
||||
|
||||
capi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/api"
|
||||
napi "github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
|
@ -44,23 +44,23 @@ func (tc *ScriptChecksE2ETest) TestGroupScriptCheck(f *framework.F) {
|
|||
allocs := e2eutil.RegisterAndWaitForAllocs(f.T(),
|
||||
nomadClient, "consul/input/checks_group.nomad", jobId, "")
|
||||
r.Equal(1, len(allocs))
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-2", capi.HealthWarning)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-3", capi.HealthCritical)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-2", capi.HealthWarning)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-3", capi.HealthCritical)
|
||||
|
||||
// Check in warning state becomes healthy after check passes
|
||||
_, _, err := exec(nomadClient, allocs,
|
||||
[]string{"/bin/sh", "-c", "touch /tmp/${NOMAD_ALLOC_ID}-alive-2b"})
|
||||
r.NoError(err)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-2", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-2", capi.HealthPassing)
|
||||
|
||||
// Job update: verify checks are re-registered in Consul
|
||||
allocs = e2eutil.RegisterAndWaitForAllocs(f.T(),
|
||||
nomadClient, "consul/input/checks_group_update.nomad", jobId, "")
|
||||
r.Equal(1, len(allocs))
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-2", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-3", capi.HealthCritical)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-2", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-3", capi.HealthCritical)
|
||||
|
||||
// Verify we don't have any linger script checks running on the client
|
||||
out, _, err := exec(nomadClient, allocs, []string{"pgrep", "sleep"})
|
||||
|
@ -71,26 +71,26 @@ func (tc *ScriptChecksE2ETest) TestGroupScriptCheck(f *framework.F) {
|
|||
// Clean job stop: verify that checks were deregistered in Consul
|
||||
_, _, err = nomadClient.Jobs().Deregister(jobId, false, nil) // nomad job stop
|
||||
r.NoError(err)
|
||||
e2eutil.RequireConsulDeregistered(r, consulClient, "group-service-1")
|
||||
e2eutil.RequireConsulDeregistered(r, consulClient, "group-service-2")
|
||||
e2eutil.RequireConsulDeregistered(r, consulClient, "group-service-3")
|
||||
e2eutil.RequireConsulDeregistered(r, consulClient, consulNamespace, "group-service-1")
|
||||
e2eutil.RequireConsulDeregistered(r, consulClient, consulNamespace, "group-service-2")
|
||||
e2eutil.RequireConsulDeregistered(r, consulClient, consulNamespace, "group-service-3")
|
||||
|
||||
// Restore for next test
|
||||
allocs = e2eutil.RegisterAndWaitForAllocs(f.T(),
|
||||
nomadClient, "consul/input/checks_group.nomad", jobId, "")
|
||||
r.Equal(2, len(allocs))
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-2", capi.HealthWarning)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-3", capi.HealthCritical)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-2", capi.HealthWarning)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-3", capi.HealthCritical)
|
||||
|
||||
// Crash a task: verify that checks become healthy again
|
||||
_, _, err = exec(nomadClient, allocs, []string{"pkill", "sleep"})
|
||||
if err != nil && err.Error() != "plugin is shut down" {
|
||||
r.FailNow("unexpected error: %v", err)
|
||||
}
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-2", capi.HealthWarning)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, "group-service-3", capi.HealthCritical)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-1", capi.HealthPassing)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-2", capi.HealthWarning)
|
||||
e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "group-service-3", capi.HealthCritical)
|
||||
|
||||
// TODO(tgross) ...
|
||||
// Restart client: verify that checks are re-registered
|
||||
|
@ -112,23 +112,23 @@ func (tc *ScriptChecksE2ETest) TestTaskScriptCheck(f *framework.F) {
|
|||
	allocs := e2eutil.RegisterAndWaitForAllocs(f.T(),
		nomadClient, "consul/input/checks_task.nomad", jobId, "")
	r.Equal(1, len(allocs))
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-2", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-3", capi.HealthCritical)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-2", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-3", capi.HealthCritical)

	// Check in warning state becomes healthy after check passes
	_, _, err := exec(nomadClient, allocs,
		[]string{"/bin/sh", "-c", "touch ${NOMAD_TASK_DIR}/alive-2b"})
	r.NoError(err)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-2", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-2", capi.HealthPassing)

	// Job update: verify checks are re-registered in Consul
	allocs = e2eutil.RegisterAndWaitForAllocs(f.T(),
		nomadClient, "consul/input/checks_task_update.nomad", jobId, "")
	r.Equal(1, len(allocs))
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-2", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-3", capi.HealthCritical)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-2", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-3", capi.HealthCritical)

	// Verify we don't have any lingering script checks running on the client
	out, _, err := exec(nomadClient, allocs, []string{"pgrep", "sleep"})
@@ -139,26 +139,26 @@ func (tc *ScriptChecksE2ETest) TestTaskScriptCheck(f *framework.F) {
	// Clean job stop: verify that checks were deregistered in Consul
	_, _, err = nomadClient.Jobs().Deregister(jobId, false, nil) // nomad job stop
	r.NoError(err)
	e2eutil.RequireConsulDeregistered(r, consulClient, "task-service-1")
	e2eutil.RequireConsulDeregistered(r, consulClient, "task-service-2")
	e2eutil.RequireConsulDeregistered(r, consulClient, "task-service-3")
	e2eutil.RequireConsulDeregistered(r, consulClient, consulNamespace, "task-service-1")
	e2eutil.RequireConsulDeregistered(r, consulClient, consulNamespace, "task-service-2")
	e2eutil.RequireConsulDeregistered(r, consulClient, consulNamespace, "task-service-3")

	// Restore for next test
	allocs = e2eutil.RegisterAndWaitForAllocs(f.T(),
		nomadClient, "consul/input/checks_task.nomad", jobId, "")
	r.Equal(2, len(allocs))
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-2", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-3", capi.HealthCritical)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-2", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-3", capi.HealthCritical)

	// Crash a task: verify that checks become healthy again
	_, _, err = exec(nomadClient, allocs, []string{"pkill", "sleep"})
	if err != nil && err.Error() != "plugin is shut down" {
		r.FailNow("unexpected error: %v", err)
	}
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-2", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, consulClient, "task-service-3", capi.HealthCritical)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-1", capi.HealthPassing)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-2", capi.HealthWarning)
	e2eutil.RequireConsulStatus(r, consulClient, consulNamespace, "task-service-3", capi.HealthCritical)

	// TODO(tgross) ...
	// Restart client: verify that checks are re-registered
@@ -178,17 +178,17 @@ func (tc *ScriptChecksE2ETest) AfterEach(f *framework.F) {
	r.NoError(nomadClient.System().GarbageCollect())
}

func exec(client *api.Client, allocs []*api.AllocationListStub, command []string) (bytes.Buffer, bytes.Buffer, error) {
func exec(client *napi.Client, allocs []*napi.AllocationListStub, command []string) (bytes.Buffer, bytes.Buffer, error) {
	ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelFn()

	// we're getting a list of allocs from the registration call here but
	// one of them might be stopped or stopping, which will return
	// an error if we try to exec into it.
	var alloc *api.Allocation
	var alloc *napi.Allocation
	for _, stub := range allocs {
		if stub.DesiredStatus == "run" {
			alloc = &api.Allocation{
			alloc = &napi.Allocation{
				ID:        stub.ID,
				Namespace: stub.Namespace,
				NodeID:    stub.NodeID,
@@ -203,6 +203,6 @@ func exec(client *api.Client, allocs []*api.AllocationListStub, command []string
alloc, "test", false,
|
||||
command,
|
||||
os.Stdin, &stdout, &stderr,
|
||||
make(chan api.TerminalSize), nil)
|
||||
make(chan napi.TerminalSize), nil)
|
||||
return stdout, stderr, err
|
||||
}
|
||||
|
|
|
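For orientation, a minimal sketch of how the exec helper above is called (assuming a *testing.T named t plus the nomadClient and allocs already in scope in these tests):

	stdout, stderr, err := exec(nomadClient, allocs, []string{"pgrep", "sleep"})
	if err != nil {
		t.Fatalf("exec failed: %v (stderr: %q)", err, stderr.String())
	}
	t.Logf("sleep pids: %s", stdout.String())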
@@ -7,7 +7,7 @@ import (
"time"
|
||||
|
||||
capi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/api"
|
||||
api "github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
e2e "github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
|
|
|
@@ -7,11 +7,26 @@ import (
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/api"
|
||||
api "github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
"github.com/kr/pretty"
|
||||
)
|
||||
|
||||
// AllocsByName sorts allocs by Name
|
||||
type AllocsByName []*api.AllocationListStub
|
||||
|
||||
func (a AllocsByName) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
|
||||
func (a AllocsByName) Less(i, j int) bool {
|
||||
return a[i].Name < a[j].Name
|
||||
}
|
||||
|
||||
func (a AllocsByName) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
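AllocsByName exists so callers can sort.Sort allocation stubs; a quick usage sketch (hypothetical caller, assuming allocs is a []*api.AllocationListStub):

	sort.Sort(AllocsByName(allocs))
	for _, alloc := range allocs {
		fmt.Println(alloc.Name, alloc.ClientStatus)
	}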

// WaitForAllocStatusExpected polls 'nomad job status' and exactly compares
// the status of all allocations (including any previous versions) against the
// expected list.
@@ -2,30 +2,32 @@ package e2eutil

import (
	"fmt"
	"testing"
	"time"

	capi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/testutil"
	"github.com/kr/pretty"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// RequireConsulStatus asserts the aggregate health of the service converges to the expected status.
func RequireConsulStatus(require *require.Assertions, client *capi.Client, serviceName, expectedStatus string) {
func RequireConsulStatus(require *require.Assertions, client *capi.Client, namespace, service, expectedStatus string) {
	testutil.WaitForResultRetries(30, func() (bool, error) {
		defer time.Sleep(time.Second) // needs a long time for killing tasks/clients

		_, status := serviceStatus(require, client, serviceName)
		return status == expectedStatus, fmt.Errorf("service %v: expected %v but found %v", serviceName, expectedStatus, status)
		_, status := serviceStatus(require, client, namespace, service)
		return status == expectedStatus, fmt.Errorf("service %s/%s: expected %s but found %s", namespace, service, expectedStatus, status)
	}, func(err error) {
		require.NoError(err, "timed out waiting for consul status")
	})
}
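A sketch of calling the widened helper from a test body, reusing the names already in scope in the script-check tests (the service name here is hypothetical; the OSS tests pass the "default" namespace):

	e2eutil.RequireConsulStatus(r, consulClient, "default", "my-service", capi.HealthPassing)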

// serviceStatus gets the aggregate health of the service and returns the []ServiceEntry for further checking.
func serviceStatus(require *require.Assertions, client *capi.Client, serviceName string) ([]*capi.ServiceEntry, string) {
	services, _, err := client.Health().Service(serviceName, "", false, nil)
	require.NoError(err, "expected no error for %q, got %v", serviceName, err)
func serviceStatus(require *require.Assertions, client *capi.Client, namespace, service string) ([]*capi.ServiceEntry, string) {
	services, _, err := client.Health().Service(service, "", false, &capi.QueryOptions{Namespace: namespace})
	require.NoError(err, "expected no error for %s/%s, got %s", namespace, service, err)
	if len(services) > 0 {
		return services, services[0].Checks.AggregatedStatus()
	}
@@ -33,11 +35,11 @@ func serviceStatus(require *require.Assertions, client *capi.Client, serviceName
}

// RequireConsulDeregistered asserts that the service eventually is de-registered from Consul.
func RequireConsulDeregistered(require *require.Assertions, client *capi.Client, service string) {
func RequireConsulDeregistered(require *require.Assertions, client *capi.Client, namespace, service string) {
	testutil.WaitForResultRetries(5, func() (bool, error) {
		defer time.Sleep(time.Second)

		services, _, err := client.Health().Service(service, "", false, nil)
		services, _, err := client.Health().Service(service, "", false, &capi.QueryOptions{Namespace: namespace})
		require.NoError(err)
		if len(services) != 0 {
			return false, fmt.Errorf("service %v: expected empty services but found %v %v", service, len(services), pretty.Sprint(services))
@@ -49,11 +51,11 @@ func RequireConsulDeregistered(require *require.Assertions, client *capi.Client,
}

// RequireConsulRegistered asserts that the service is registered in Consul.
func RequireConsulRegistered(require *require.Assertions, client *capi.Client, service string, count int) {
func RequireConsulRegistered(require *require.Assertions, client *capi.Client, namespace, service string, count int) {
	testutil.WaitForResultRetries(5, func() (bool, error) {
		defer time.Sleep(time.Second)

		services, _, err := client.Catalog().Service(service, "", nil)
		services, _, err := client.Catalog().Service(service, "", &capi.QueryOptions{Namespace: namespace})
		require.NoError(err)
		if len(services) != count {
			return false, fmt.Errorf("service %v: expected %v services but found %v %v", service, count, len(services), pretty.Sprint(services))
@@ -63,3 +65,82 @@ func RequireConsulRegistered(require *require.Assertions, client *capi.Client, s
		require.NoError(err)
	})
}

// CreateConsulNamespaces will create each namespace in Consul, with a description
// containing the namespace name.
//
// Requires Consul Enterprise.
func CreateConsulNamespaces(t *testing.T, client *capi.Client, namespaces []string) {
	nsClient := client.Namespaces()
	for _, namespace := range namespaces {
		_, _, err := nsClient.Create(&capi.Namespace{
			Name:        namespace,
			Description: fmt.Sprintf("An e2e namespace called %q", namespace),
		}, nil)
		require.NoError(t, err)
	}
}
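In a namespace-aware test these helpers would typically bracket the run; a sketch (namespace names hypothetical; requires Consul Enterprise):

	namespaces := []string{"apple", "banana"}
	CreateConsulNamespaces(t, consulClient, namespaces)
	defer DeleteConsulNamespaces(t, consulClient, namespaces)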

// DeleteConsulNamespaces will delete each namespace from Consul.
//
// Requires Consul Enterprise.
func DeleteConsulNamespaces(t *testing.T, client *capi.Client, namespaces []string) {
	nsClient := client.Namespaces()
	for _, namespace := range namespaces {
		_, err := nsClient.Delete(namespace, nil)
		assert.NoError(t, err) // be lenient; used in cleanup
	}
}

// ListConsulNamespaces will list the namespaces in Consul.
//
// Requires Consul Enterprise.
func ListConsulNamespaces(t *testing.T, client *capi.Client) []string {
	nsClient := client.Namespaces()
	namespaces, _, err := nsClient.List(nil)
	require.NoError(t, err)
	result := make([]string, 0, len(namespaces))
	for _, namespace := range namespaces {
		result = append(result, namespace.Name)
	}
	return result
}

// PutConsulKey sets key:value in the Consul KV store under given namespace.
//
// Requires Consul Enterprise.
func PutConsulKey(t *testing.T, client *capi.Client, namespace, key, value string) {
	kvClient := client.KV()
	_, err := kvClient.Put(&capi.KVPair{Key: key, Value: []byte(value)}, &capi.WriteOptions{Namespace: namespace})
	require.NoError(t, err)
}
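Reading a value back out of a namespaced KV store is the mirror image; a sketch (no ReadConsulKey helper is added here, and the key, value, and namespace below are hypothetical):

	pair, _, err := client.KV().Get("ekey", &capi.QueryOptions{Namespace: "banana"})
	require.NoError(t, err)
	require.NotNil(t, pair)
	require.Equal(t, "evalue", string(pair.Value))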

// DeleteConsulKey deletes the key from the Consul KV store in the given namespace.
//
// Requires Consul Enterprise.
func DeleteConsulKey(t *testing.T, client *capi.Client, namespace, key string) {
	kvClient := client.KV()
	_, err := kvClient.Delete(key, &capi.WriteOptions{Namespace: namespace})
	require.NoError(t, err)
}

// ReadConsulConfigEntry retrieves the ConfigEntry of the given namespace, kind,
// and name.
//
// Requires Consul Enterprise.
func ReadConsulConfigEntry(t *testing.T, client *capi.Client, namespace, kind, name string) capi.ConfigEntry {
	ceClient := client.ConfigEntries()
	ce, _, err := ceClient.Get(kind, name, &capi.QueryOptions{Namespace: namespace})
	require.NoError(t, err)
	return ce
}

// DeleteConsulConfigEntry deletes the ConfigEntry of the given namespace, kind,
// and name.
//
// Requires Consul Enterprise.
func DeleteConsulConfigEntry(t *testing.T, client *capi.Client, namespace, kind, name string) {
	ceClient := client.ConfigEntries()
	_, err := ceClient.Delete(kind, name, &capi.WriteOptions{Namespace: namespace})
	require.NoError(t, err)
}
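A sketch of exercising the config-entry helpers together (namespace and entry name hypothetical; capi.ServiceDefaults is the consul/api constant for the service-defaults kind):

	entry := ReadConsulConfigEntry(t, consulClient, "banana", capi.ServiceDefaults, "my-service")
	require.Equal(t, "my-service", entry.GetName())
	DeleteConsulConfigEntry(t, consulClient, "banana", capi.ServiceDefaults, "my-service")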
@@ -12,7 +12,7 @@ import (
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/api"
|
||||
api "github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/discover"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
|
|
|
@@ -3,12 +3,10 @@ package e2eutil
import (
	"fmt"
	"os"
	"strings"
	"testing"
	"time"

	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/api"
	api "github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/jobspec2"
	"github.com/hashicorp/nomad/nomad/structs"
@@ -209,6 +207,12 @@ func WaitForJobStopped(t *testing.T, nomadClient *api.Client, job string) {
	}
}

func WaitForAllocsStopped(t *testing.T, nomadClient *api.Client, allocIDs []string) {
	for _, allocID := range allocIDs {
		WaitForAllocStopped(t, nomadClient, allocID)
	}
}
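The intended call pattern, sketched: stop a job, then block until every allocation it owned has stopped (jobID and allocs assumed from an earlier registration):

	var allocIDs []string
	for _, alloc := range allocs {
		allocIDs = append(allocIDs, alloc.ID)
	}
	_, _, err := nomadClient.Jobs().Deregister(jobID, true, nil)
	require.NoError(t, err)
	WaitForAllocsStopped(t, nomadClient, allocIDs)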

func WaitForAllocStopped(t *testing.T, nomadClient *api.Client, allocID string) {
	testutil.WaitForResultRetries(retries, func() (bool, error) {
		time.Sleep(time.Millisecond * 100)
@@ -276,50 +280,3 @@ func WaitForDeployment(t *testing.T, nomadClient *api.Client, deployID string, s
require.NoError(t, err, "failed to wait on deployment")
|
||||
})
|
||||
}
|
||||
|
||||
// CheckServicesPassing scans for passing agent checks via the given agent API
|
||||
// client.
|
||||
//
|
||||
// Deprecated: not useful in e2e, where more than one node exists and Nomad jobs
|
||||
// are placed non-deterministically. The Consul agentAPI only knows about what
|
||||
// is registered on its node, and cannot be used to query for cluster wide state.
|
||||
func CheckServicesPassing(t *testing.T, agentAPI *consulapi.Agent, allocIDs []string) {
|
||||
failing := map[string]*consulapi.AgentCheck{}
|
||||
for i := 0; i < 60; i++ {
|
||||
checks, err := agentAPI.Checks()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Filter out checks for other services
|
||||
for cid, check := range checks {
|
||||
found := false
|
||||
for _, allocID := range allocIDs {
|
||||
if strings.Contains(check.ServiceID, allocID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
delete(checks, cid)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure checks are all passing
|
||||
failing = map[string]*consulapi.AgentCheck{}
|
||||
for _, check := range checks {
|
||||
if check.Status != "passing" {
|
||||
failing[check.CheckID] = check
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(failing) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
t.Logf("still %d checks not passing", len(failing))
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
require.Len(t, failing, 0, pretty.Sprint(failing))
|
||||
}
|
||||
|
|