Add basic Consul registration e2e test
This commit is contained in:
parent
2d135e96b7
commit
b4a722e08f
|
@ -0,0 +1,97 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ConsulE2ETest exercises Nomad's Consul integration end-to-end: it runs
// jobs whose services register in the Consul catalog and verifies both
// registration and deregistration.
type ConsulE2ETest struct {
	framework.TC
	// jobIds records the IDs of jobs registered by test cases so they can
	// be deregistered during cleanup.
	jobIds []string
}
|
||||
|
||||
// init registers the Consul test suite with the e2e framework. The suite
// can run against a local dev cluster and requires a Consul cluster to be
// available (Consul: true).
func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "Consul",
		CanRunLocal: true,
		Consul:      true,
		Cases: []framework.TestCase{
			new(ConsulE2ETest),
		},
	})
}
|
||||
|
||||
// BeforeAll ensures the target cluster is ready before any test case in the
// suite runs: a Nomad leader has been elected and at least one client node
// has reached the ready state.
func (tc *ConsulE2ETest) BeforeAll(f *framework.F) {
	// Ensure cluster has leader before running tests
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	// Ensure that we have at least one client node in ready state
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}
|
||||
|
||||
// This test runs a job that registers in Consul with specific tags
|
||||
func (tc *ConsulE2ETest) TestConsulRegistration(f *framework.F) {
|
||||
nomadClient := tc.Nomad()
|
||||
uuid := uuid.Generate()
|
||||
jobId := "consul" + uuid[0:8]
|
||||
tc.jobIds = append(tc.jobIds, jobId)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, "consul/input/consul_example.nomad", jobId)
|
||||
consulClient := tc.Consul()
|
||||
require := require.New(f.T())
|
||||
require.Equal(3, len(allocs))
|
||||
|
||||
// Query consul catalog for service
|
||||
catalog := consulClient.Catalog()
|
||||
g := NewGomegaWithT(f.T())
|
||||
|
||||
type serviceNameTagPair struct {
|
||||
serviceName string
|
||||
tags map[string]struct{}
|
||||
}
|
||||
|
||||
expectedTags := map[string]struct{}{}
|
||||
expectedTags["global"] = struct{}{}
|
||||
expectedTags["cache"] = struct{}{}
|
||||
|
||||
g.Eventually(func() []serviceNameTagPair {
|
||||
consulService, _, err := catalog.Service("redis-cache", "", nil)
|
||||
require.Nil(err)
|
||||
var serviceInfo []serviceNameTagPair
|
||||
for _, serviceInstance := range consulService {
|
||||
tags := map[string]struct{}{}
|
||||
for _, tag := range serviceInstance.ServiceTags {
|
||||
tags[tag] = struct{}{}
|
||||
}
|
||||
serviceInfo = append(serviceInfo, serviceNameTagPair{serviceInstance.ServiceName, tags})
|
||||
}
|
||||
return serviceInfo
|
||||
}, 5*time.Second, time.Second).Should(ConsistOf([]serviceNameTagPair{
|
||||
{"redis-cache", expectedTags},
|
||||
{"redis-cache", expectedTags},
|
||||
{"redis-cache", expectedTags},
|
||||
}))
|
||||
|
||||
jobs := nomadClient.Jobs()
|
||||
// Stop all jobs in test
|
||||
for _, id := range tc.jobIds {
|
||||
jobs.Deregister(id, true, nil)
|
||||
}
|
||||
// Garbage collect
|
||||
nomadClient.System().GarbageCollect()
|
||||
|
||||
// Verify that services were deregistered in Consul
|
||||
g.Eventually(func() []string {
|
||||
consulService, _, err := catalog.Service("redis-cache", "", nil)
|
||||
require.Nil(err)
|
||||
var serviceIDs []string
|
||||
for _, serviceInstance := range consulService {
|
||||
serviceIDs = append(serviceIDs, serviceInstance.ServiceID)
|
||||
}
|
||||
return serviceIDs
|
||||
}, 5*time.Second, time.Second).Should(BeEmpty())
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
# consul-example runs a 3-instance redis service that registers in Consul as
# "redis-cache" with the tags "global" and "cache". It is the fixture job for
# the Consul registration e2e tests.
job "consul-example" {
  datacenters = ["dc1"]
  type        = "service"

  # Roll updates one allocation at a time, requiring 10s of health before
  # an allocation counts as healthy.
  update {
    max_parallel      = 1
    min_healthy_time  = "10s"
    healthy_deadline  = "3m"
    progress_deadline = "10m"
    auto_revert       = false
    canary            = 0
  }

  migrate {
    max_parallel     = 1
    health_check     = "checks"
    min_healthy_time = "10s"
    healthy_deadline = "5m"
  }

  group "cache" {
    # The e2e test asserts exactly 3 allocations / service instances.
    count = 3

    restart {
      attempts = 2
      interval = "30m"
      delay    = "15s"
      mode     = "fail"
    }

    ephemeral_disk {
      size = 300
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:3.2"

        port_map {
          db = 6379
        }
      }

      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256MB

        network {
          mbits = 10

          port "db" {}
        }
      }

      # Registers the "redis-cache" service in Consul; the e2e test queries
      # this service and its tags from the Consul catalog.
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"

        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
|
|
@ -4,6 +4,7 @@ import (
|
|||
"testing"
|
||||
|
||||
_ "github.com/hashicorp/nomad/e2e/affinities"
|
||||
_ "github.com/hashicorp/nomad/e2e/consul"
|
||||
_ "github.com/hashicorp/nomad/e2e/consultemplate"
|
||||
_ "github.com/hashicorp/nomad/e2e/example"
|
||||
_ "github.com/hashicorp/nomad/e2e/nomad09upgrade"
|
||||
|
|
|
@ -53,15 +53,12 @@ func (p *singleClusterProvisioner) ProvisionCluster(opts ProvisionerOptions) (*C
|
|||
}
|
||||
info.NomadClient = nomadClient
|
||||
|
||||
if len(os.Getenv(capi.HTTPAddrEnvName)) != 0 {
|
||||
if opts.ExpectConsul {
|
||||
consulClient, err := capi.NewClient(capi.DefaultConfig())
|
||||
if err != nil && opts.ExpectConsul {
|
||||
return nil, err
|
||||
}
|
||||
info.ConsulClient = consulClient
|
||||
} else if opts.ExpectConsul {
|
||||
return nil, fmt.Errorf("consul client expected but environment variable %s not set",
|
||||
capi.HTTPAddrEnvName)
|
||||
}
|
||||
|
||||
if len(os.Getenv(vapi.EnvVaultAddress)) != 0 {
|
||||
|
|
Loading…
Reference in New Issue