open-nomad/e2e/terraform/compute.tf
Tim Gross 2edbdfc8be
e2e: update framework to allow deploying Nomad (#6969)
The e2e framework instantiates clients for Nomad/Consul but the
provisioning of the actual Nomad cluster is left to Terraform. The
Terraform provisioning process uses `remote-exec` to deploy specific
versions of Nomad so that we don't have to bake an AMI every time we
want to test a new version. But Terraform treats the resulting
instances as immutable, so we can't use the same tooling to update the
version of Nomad in place. Updating in place is a prerequisite for
upgrade testing.

This changeset extends the e2e framework with the option of deploying
specific versions of Nomad (and, in the future, Consul/Vault) to
already-running infrastructure. This initial implementation focuses on
deploying to a single cluster via `ssh` (because that's our current
need), but provides interfaces to hook the test run at the start of
the run, the start of each suite, or the start of a given test case.

Terraform work includes:
* provides Terraform output, written to JSON, that the framework uses
  to configure provisioning via `terraform output provisioning`
* provides Terraform output that test operators can use to configure
  their shell via `$(terraform output environment)` (a rough sketch of
  both outputs follows below)
* drops the `remote-exec` provisioning steps from Terraform
* changes the deployment scripts so that they can be run multiple
  times with different versions against the same host
2020-01-22 08:48:52 -05:00
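
The `provisioning` and `environment` outputs referenced above are
defined outside this file. As a rough sketch of their shape (the JSON
fields and the port wiring here are illustrative assumptions, not taken
from this commit):

    output "provisioning" {
      # JSON consumed by the e2e framework; hypothetical field names.
      value = jsonencode({
        servers = aws_instance.server[*].public_ip
        clients = aws_instance.client_linux[*].public_ip
      })
    }

    output "environment" {
      # Shell-sourceable exports for test operators; 4646 is Nomad's
      # default HTTP port.
      value = "export NOMAD_ADDR=http://${aws_instance.server[0].public_ip}:4646"
    }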

resource "aws_instance" "server" {
ami = data.aws_ami.linux.image_id
instance_type = var.instance_type
key_name = module.keys.key_name
vpc_security_group_ids = [aws_security_group.primary.id]
count = var.server_count
# Instance tags
tags = {
Name = "${local.random_name}-server-${count.index}"
ConsulAutoJoin = "auto-join"
SHA = var.nomad_sha
User = data.aws_caller_identity.current.arn
}
iam_instance_profile = aws_iam_instance_profile.instance_profile.name
}
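
# The ConsulAutoJoin tag above is consumed by Consul's cloud auto-join.
# As a sketch of the discovering side (the agent configuration is an
# assumption and is not part of this file):
#
#   retry_join = ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"]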
resource "aws_instance" "client_linux" {
ami = data.aws_ami.linux.image_id
instance_type = var.instance_type
key_name = module.keys.key_name
vpc_security_group_ids = [aws_security_group.primary.id]
count = var.client_count
depends_on = [aws_instance.server]
# Instance tags
tags = {
Name = "${local.random_name}-client-${count.index}"
ConsulAutoJoin = "auto-join"
SHA = var.nomad_sha
User = data.aws_caller_identity.current.arn
}
ebs_block_device {
device_name = "/dev/xvdd"
volume_type = "gp2"
volume_size = "50"
delete_on_termination = "true"
}
iam_instance_profile = aws_iam_instance_profile.instance_profile.name
}
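
# Both Linux resources reference data.aws_ami.linux, which is defined
# elsewhere in this configuration. A minimal sketch of such a data
# source (the owner and name filter are assumptions, not the
# repository's actual values):
#
#   data "aws_ami" "linux" {
#     most_recent = true
#     owners      = ["self"]
#
#     filter {
#       name   = "name"
#       values = ["nomad-e2e-*"]
#     }
#   }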
resource "aws_instance" "client_windows" {
ami = data.aws_ami.windows.image_id
instance_type = var.instance_type
key_name = module.keys.key_name
vpc_security_group_ids = [aws_security_group.primary.id]
count = var.windows_client_count
depends_on = [aws_instance.server]
iam_instance_profile = aws_iam_instance_profile.instance_profile.name
# Instance tags
tags = {
Name = "${local.random_name}-client-windows-${count.index}"
ConsulAutoJoin = "auto-join"
SHA = var.nomad_sha
User = data.aws_caller_identity.current.arn
}
ebs_block_device {
device_name = "xvdd"
volume_type = "gp2"
volume_size = "50"
delete_on_termination = "true"
}
# We need this userdata script because Windows machines don't
# configure ssh with cloud-init by default.
user_data = file("${path.root}/shared/config/userdata-windows.ps1")
}
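
The input variables referenced above (`instance_type`, `server_count`,
`client_count`, `windows_client_count`, `nomad_sha`) are declared
elsewhere in this configuration. A plausible sketch, with defaults that
are assumptions rather than the repository's actual values:

    variable "instance_type" {
      description = "EC2 instance type for servers and clients"
      default     = "t2.medium"
    }

    variable "server_count" {
      description = "Number of Nomad server instances"
      default     = 3
    }

    variable "client_count" {
      description = "Number of Linux Nomad client instances"
      default     = 4
    }

    variable "windows_client_count" {
      description = "Number of Windows Nomad client instances"
      default     = 0
    }

    variable "nomad_sha" {
      description = "Git SHA of the Nomad build to deploy"
    }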