Merge pull request #6342 from hashicorp/f-host-volume-e2e

Add Host Volumes E2E test
Danielle 2019-09-18 12:59:32 -07:00 committed by GitHub
commit 940bbcc639
6 changed files with 190 additions and 10 deletions

@@ -11,6 +11,7 @@ import (
_ "github.com/hashicorp/nomad/e2e/consultemplate"
_ "github.com/hashicorp/nomad/e2e/deployment"
_ "github.com/hashicorp/nomad/e2e/example"
_ "github.com/hashicorp/nomad/e2e/hostvolumes"
_ "github.com/hashicorp/nomad/e2e/nomad09upgrade"
_ "github.com/hashicorp/nomad/e2e/nomadexec"
_ "github.com/hashicorp/nomad/e2e/spread"

@@ -0,0 +1,132 @@
package hostvolumes

import (
"time"
"github.com/hashicorp/nomad/e2e/e2eutil"
"github.com/hashicorp/nomad/e2e/framework"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)

type BasicHostVolumeTest struct {
framework.TC
jobIds []string
}

func init() {
framework.AddSuites(&framework.TestSuite{
Component: "Host Volumes",
CanRunLocal: true,
Cases: []framework.TestCase{
new(BasicHostVolumeTest),
},
})
}

func (tc *BasicHostVolumeTest) BeforeAll(f *framework.F) {
// Ensure cluster has leader before running tests
e2eutil.WaitForLeader(f.T(), tc.Nomad())
// Ensure that we have at least 1 client node in the ready state
e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}

func (tc *BasicHostVolumeTest) TestSingleHostVolume(f *framework.F) {
require := require.New(f.T())
nomadClient := tc.Nomad()
uuid := uuid.Generate()
jobID := "hostvol" + uuid[0:8]
tc.jobIds = append(tc.jobIds, jobID)
allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, "hostvolumes/input/single_mount.nomad", jobID)
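// waitForTaskState polls until the job's single allocation reports the
// "test" task in the desired state, or times out after 30 seconds.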
waitForTaskState := func(desiredState string) {
require.Eventually(func() bool {
allocs, _, _ := nomadClient.Jobs().Allocations(jobID, false, nil)
if len(allocs) != 1 {
return false
}
first := allocs[0]
taskState := first.TaskStates["test"]
if taskState == nil {
return false
}
return taskState.State == desiredState
}, 30*time.Second, 1*time.Second)
}
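// waitForClientAllocStatus polls until the single allocation reports the
// desired client status (e.g. running).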
waitForClientAllocStatus := func(desiredStatus string) {
require.Eventually(func() bool {
allocSummaries, _, _ := nomadClient.Jobs().Allocations(jobID, false, nil)
if len(allocSummaries) != 1 {
return false
}
alloc, _, _ := nomadClient.Allocations().Info(allocSummaries[0].ID, nil)
if alloc == nil {
return false
}
return alloc.ClientStatus == desiredStatus
}, 30*time.Second, 1*time.Second)
}
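// waitForRestartCount polls until the "test" task reports the desired
// number of restarts.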
waitForRestartCount := func(desiredCount uint64) {
require.Eventually(func() bool {
allocs, _, _ := nomadClient.Jobs().Allocations(jobID, false, nil)
if len(allocs) != 1 {
return false
}
first := allocs[0]
return first.TaskStates["test"].Restarts == desiredCount
}, 30*time.Second, 1*time.Second)
}
// Verify scheduling
for _, allocStub := range allocs {
node, _, err := nomadClient.Nodes().Info(allocStub.NodeID, nil)
require.Nil(err)
_, ok := node.HostVolumes["shared_data"]
require.True(ok, "Node does not have the requested volume")
}
// Wrap in retry to wait until running
waitForTaskState(structs.TaskStateRunning)
// Client should be running
waitForClientAllocStatus(structs.AllocClientStatusRunning)
// Should not be restarted
waitForRestartCount(0)
// Ensure allocs can be restarted
for _, allocStub := range allocs {
alloc, _, err := nomadClient.Allocations().Info(allocStub.ID, nil)
require.Nil(err)
err = nomadClient.Allocations().Restart(alloc, "", nil)
require.Nil(err)
}
// Should be restarted once
waitForRestartCount(1)
// Wrap in retry to wait until running again
waitForTaskState(structs.TaskStateRunning)
// Client should be running again
waitForClientAllocStatus(structs.AllocClientStatusRunning)
}

func (tc *BasicHostVolumeTest) AfterEach(f *framework.F) {
nomadClient := tc.Nomad()
jobs := nomadClient.Jobs()
// Stop all jobs in test
for _, id := range tc.jobIds {
jobs.Deregister(id, true, nil)
}
// Garbage collect
nomadClient.System().GarbageCollect()
}
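The three wait helpers in TestSingleHostVolume share the same poll-until-true shape. A possible consolidation, sketched here as a hypothetical refactor rather than part of this commit, hoists the polling into a single predicate-driven helper; it assumes the github.com/hashicorp/nomad/api types that the closures above already receive from the client:

// Sketch only: one helper covering waitForTaskState,
// waitForClientAllocStatus, and waitForRestartCount.
func waitForSingleAlloc(t *testing.T, client *api.Client, jobID string, check func(*api.AllocationListStub) bool) {
	require.Eventually(t, func() bool {
		allocs, _, err := client.Jobs().Allocations(jobID, false, nil)
		if err != nil || len(allocs) != 1 {
			return false
		}
		return check(allocs[0])
	}, 30*time.Second, 1*time.Second)
}

The restart-count check, for example, would become a call with a predicate that returns true once TaskStates["test"].Restarts reaches the expected value.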

@@ -0,0 +1,29 @@
job "test1" {
datacenters = ["dc1", "dc2"]
type = "service"
group "test1" {
count = 1
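# Request the "shared_data" host volume registered in the client config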
volume "data" {
type = "host"
source = "shared_data"
}
task "test" {
driver = "docker"
volume_mount {
volume = "data"
destination = "/tmp/foo"
}
config {
image = "bash:latest"
command = "bash"
args = ["-c", "sleep 15000"]
}
}
}
}

@@ -63,7 +63,7 @@ resource "aws_instance" "server" {
"sudo chmod 0755 /usr/local/bin/nomad",
"sudo chown root:root /usr/local/bin/nomad",
"sudo systemctl enable nomad.service",
"sudo systemctl start nomad.service"
"sudo systemctl start nomad.service",
]
connection {
@@ -89,11 +89,11 @@ resource "aws_instance" "client" {
User = "${data.aws_caller_identity.current.arn}"
}
ebs_block_device = {
device_name = "/dev/xvdd"
volume_type = "gp2"
volume_size = "50"
delete_on_termination = "true"
}
user_data = "${element(data.template_file.user_data_client.*.rendered, count.index)}"
@@ -117,13 +117,23 @@ resource "aws_instance" "client" {
"sudo cp /tmp/client.hcl /etc/nomad.d/nomad.hcl",
"sudo chmod 0755 /usr/local/bin/nomad",
"sudo chown root:root /usr/local/bin/nomad",
"sudo systemctl enable nomad.service",
# Setup Host Volumes
"sudo mkdir /tmp/data",
# Run Nomad Service
"sudo systemctl enable nomad.service",
"sudo systemctl start nomad.service",
# Install CNI plugins
"sudo mkdir -p /opt/cni/bin",
"wget -q -O - https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | sudo tar -C /opt/cni/bin -xz",
# Install CNI plugins
"sudo mkdir -p /opt/cni/bin",
"wget -q -O - https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | sudo tar -C /opt/cni/bin -xz",
]
# Setup host volumes
connection {
user = "ubuntu"
private_key = "${module.keys.private_key_pem}"

@@ -20,6 +20,10 @@ client {
# Allow privileged docker jobs
"docker.privileged.enabled" = "true"
}
host_volume "shared_data" {
path = "/tmp/data"
}
}
consul {

@@ -18,6 +18,10 @@ client {
meta {
"rack" = "r1"
}
host_volume "shared_data" {
path = "/tmp/data"
}
}
consul {