From 04eed0b14ce6439d15b8a1a5ff69136bac319d33 Mon Sep 17 00:00:00 2001 From: hc-github-team-secure-vault-core <82990506+hc-github-team-secure-vault-core@users.noreply.github.com> Date: Thu, 20 Jul 2023 16:51:07 -0400 Subject: [PATCH] backport of commit 6b21994d76b18c91397247dfd69bb01e46c5de25 (#21981) Co-authored-by: Ryan Cragun --- .../workflows/{enos-fmt.yml => enos-lint.yml} | 13 +- .../test-run-enos-scenario-matrix.yml | 2 +- enos/Makefile | 7 + enos/enos-modules.hcl | 7 + enos/enos-scenario-agent.hcl | 4 +- enos/enos-scenario-autopilot.hcl | 2 +- enos/enos-scenario-proxy.hcl | 210 ++++++++++++++++++ enos/enos-scenario-replication.hcl | 2 +- enos/enos-scenario-smoke.hcl | 2 +- enos/enos-scenario-ui.hcl | 2 +- enos/enos-scenario-upgrade.hcl | 2 +- enos/enos-variables.hcl | 86 +++---- enos/enos.vars.hcl | 93 +++++++- enos/modules/target_ec2_fleet/main.tf | 14 +- enos/modules/target_ec2_instances/main.tf | 14 +- enos/modules/target_ec2_spot_fleet/main.tf | 14 +- enos/modules/vault_proxy/main.tf | 85 +++++++ .../templates/set-up-approle-and-proxy.sh | 87 ++++++++ .../vault_proxy/templates/use-proxy.sh | 32 +++ 19 files changed, 584 insertions(+), 94 deletions(-) rename .github/workflows/{enos-fmt.yml => enos-lint.yml} (67%) create mode 100644 enos/enos-scenario-proxy.hcl create mode 100644 enos/modules/vault_proxy/main.tf create mode 100644 enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh create mode 100644 enos/modules/vault_proxy/templates/use-proxy.sh diff --git a/.github/workflows/enos-fmt.yml b/.github/workflows/enos-lint.yml similarity index 67% rename from .github/workflows/enos-fmt.yml rename to .github/workflows/enos-lint.yml index de1fa0f86..3f44084a4 100644 --- a/.github/workflows/enos-fmt.yml +++ b/.github/workflows/enos-lint.yml @@ -1,5 +1,5 @@ --- -name: enos_fmt +name: lint-enos on: pull_request: @@ -7,21 +7,26 @@ on: - enos/** jobs: - fmt_check: + lint: # Only run this workflow on pull requests from hashicorp/vault branches # as we need secrets to install enos. if: "! 
github.event.pull_request.head.repo.fork" runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - id: get-version + run: echo "version=$(make ci-get-version)" >> "$GITHUB_OUTPUT" - uses: hashicorp/setup-terraform@v2 with: terraform_wrapper: false - uses: hashicorp/action-setup-enos@v1 with: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: check formatting + - name: lint working-directory: ./enos - run: make check-fmt + env: + ENOS_VAR_vault_product_version: ${{ steps.get-version.outputs.version }} + run: make lint diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml index 372af9afd..6a6b4df84 100644 --- a/.github/workflows/test-run-enos-scenario-matrix.yml +++ b/.github/workflows/test-run-enos-scenario-matrix.yml @@ -99,10 +99,10 @@ jobs: ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }} ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN }} ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_artifact_path: ./support/downloads/${{ inputs.build-artifact-name }} ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} - ENOS_VAR_vault_bundle_path: ./support/downloads/${{ inputs.build-artifact-name }} ENOS_VAR_vault_license_path: ./support/vault.hclic ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data steps: diff --git a/enos/Makefile b/enos/Makefile index ad27fb0ff..8155bfcae 100644 --- a/enos/Makefile +++ b/enos/Makefile @@ -22,3 +22,10 @@ check-fmt-modules: .PHONY: fmt-modules fmt-modules: terraform fmt -diff -recursive ./modules + +.PHONY: validate-enos +validate-enos: + enos scenario validate + +.PHONY: lint +lint: check-fmt validate-enos diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index e808f2fe9..9ec3b60e1 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -104,6 +104,13 @@ module "vault_agent" { vault_instance_count = var.vault_instance_count } +module "vault_proxy" { + source = "./modules/vault_proxy" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + module "vault_verify_agent_output" { source = "./modules/vault_verify_agent_output" diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index eec1f800f..c469344c6 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -25,7 +25,7 @@ scenario "agent" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null distro_version = { "rhel" = var.rhel_distro_version "ubuntu" = var.ubuntu_distro_version @@ -124,6 +124,7 @@ scenario "agent" { artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null awskms_unseal_key_arn = step.create_vpc.kms_key_arn cluster_name = step.create_vault_cluster_targets.cluster_name + enable_file_audit_device = var.vault_enable_file_audit_device install_dir = var.vault_install_dir license = matrix.edition != "oss" ? 
step.read_license.license : null local_artifact_path = local.bundle_path @@ -131,7 +132,6 @@ scenario "agent" { storage_backend = "raft" target_hosts = step.create_vault_cluster_targets.hosts unseal_method = "shamir" - enable_file_audit_device = var.vault_enable_file_audit_device } } diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index 91d24d02e..1d901a332 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -38,7 +38,7 @@ scenario "autopilot" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null distro_version = { "rhel" = var.rhel_distro_version "ubuntu" = var.ubuntu_distro_version diff --git a/enos/enos-scenario-proxy.hcl b/enos/enos-scenario-proxy.hcl new file mode 100644 index 000000000..ac6fb4800 --- /dev/null +++ b/enos/enos-scenario-proxy.hcl @@ -0,0 +1,210 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +scenario "proxy" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + distro = ["ubuntu", "rhel"] + edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + backend_tag_key = "VaultStorage" + build_tags = { + "oss" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] + } + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + distro_version = { + "rhel" = var.rhel_distro_version + "ubuntu" = var.ubuntu_distro_version + } + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + install_artifactory_artifact = local.bundle_path == null + packages = ["jq"] + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] + bundle_path = local.bundle_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? 
matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = local.tags + } + } + + step "read_license" { + skip_step = matrix.edition == "oss" + module = module.read_license + + variables { + file_name = local.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_tag_key = local.vault_tag_key + common_tags = local.tags + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + enable_file_audit_device = var.vault_enable_file_audit_device + install_dir = var.vault_install_dir + license = matrix.edition != "oss" ? step.read_license.license : null + local_artifact_path = local.bundle_path + packages = local.packages + storage_backend = "raft" + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = "shamir" + } + } + + step "start_vault_proxy" { + module = "vault_proxy" + depends_on = [ + step.build_vault, + step.create_vault_cluster, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + output "awkms_unseal_key_arn" { + description = "The Vault cluster KMS key arn" + value = step.create_vpc.kms_key_arn + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = 
step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl index 0d9cd81d0..385c391e0 100644 --- a/enos/enos-scenario-replication.hcl +++ b/enos/enos-scenario-replication.hcl @@ -50,7 +50,7 @@ scenario "replication" { "rhel" = var.rhel_distro_version "ubuntu" = var.ubuntu_distro_version } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 1dfa52bac..5dfc8c93f 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -42,7 +42,7 @@ scenario "smoke" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null distro_version = { "rhel" = var.rhel_distro_version "ubuntu" = var.ubuntu_distro_version diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl index db5aff3ad..f30afa01f 100644 --- a/enos/enos-scenario-ui.hcl +++ b/enos/enos-scenario-ui.hcl @@ -22,7 +22,7 @@ scenario "ui" { "oss" = ["ui"] "ent" = ["ui", "enterprise", "ent"] } - bundle_path = abspath(var.vault_bundle_path) + bundle_path = abspath(var.vault_artifact_path) distro = "ubuntu" consul_version = "1.14.2" seal = "awskms" diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index b61828110..cb83e45f7 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -36,7 +36,7 @@ scenario "upgrade" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null distro_version = { "rhel" = var.rhel_distro_version "ubuntu" = var.ubuntu_distro_version diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index 951888ce9..80acbc26e 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -1,22 +1,16 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: MPL-2.0 -variable "artifact_path" { - type = string - description = "The local path for dev artifact to test" - default = null -} - variable "artifactory_username" { type = string - description = "The username to use when connecting to artifactory" + description = "The username to use when testing an artifact from artifactory" default = null sensitive = true } variable "artifactory_token" { type = string - description = "The token to use when connecting to artifactory" + description = "The token to use when authenticating to artifactory" default = null sensitive = true } @@ -36,7 +30,7 @@ variable "artifactory_repo" { variable "aws_region" { description = "The AWS region where we'll create infrastructure" type = string - default = "us-west-1" + default = "us-east-1" } variable "aws_ssh_keypair_name" { @@ -75,44 +69,12 @@ variable "backend_log_level" { default = "trace" } -variable "operator_instance" { - type = string - description = "The ip address of the operator (Voter) node" -} - variable "project_name" { description = "The description of the project" type = string default = "vault-enos-integration" } -variable "remove_vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The old vault nodes to be removed" -} - - -variable "ui_test_filter" { - type = string - description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"\"'" - default = null -} - -variable "ui_run_tests" { - type = bool - description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" - default = true -} - -variable "vault_enable_file_audit_device" { - description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log" - type = bool - default = true -} - variable "rhel_distro_version" { description = "The version of RHEL to use" type = string @@ -132,7 +94,7 @@ variable "terraform_plugin_cache_dir" { } variable "tfc_api_token" { - description = "The Terraform Cloud QTI Organization API token." + description = "The Terraform Cloud QTI Organization API token. This is used to download the enos Terraform provider." type = string sensitive = true } @@ -143,8 +105,20 @@ variable "ubuntu_distro_version" { default = "22.04" // or "20.04", "18.04" } +variable "ui_test_filter" { + type = string + description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"\"'" + default = null +} + +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" + default = true +} + variable "vault_artifact_type" { - description = "The Vault artifact type package or bundle" + description = "The type of Vault artifact to use when installing Vault from artifactory. 
It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" default = "bundle" } @@ -156,24 +130,30 @@ variable "vault_autopilot_initial_release" { } } -variable "vault_bundle_path" { +variable "vault_artifact_path" { description = "Path to CRT generated or local vault.zip bundle" type = string default = "/tmp/vault.zip" } +variable "vault_build_date" { + description = "The build date for Vault artifact" + type = string + default = "" +} + +variable "vault_enable_file_audit_device" { + description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log" + type = bool + default = true +} + variable "vault_install_dir" { type = string description = "The directory where the Vault binary will be installed" default = "/opt/vault/bin" } -variable "vault_instance_type" { - description = "The instance type to use for the Vault backend" - type = string - default = null -} - variable "vault_instance_count" { description = "How many instances to create for the Vault cluster" type = number @@ -198,12 +178,6 @@ variable "vault_log_level" { default = "trace" } -variable "vault_build_date" { - description = "The build date for Vault artifact" - type = string - default = "" -} - variable "vault_product_version" { description = "The version of Vault we are testing" type = string diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl index d63af4452..fd6928f7c 100644 --- a/enos/enos.vars.hcl +++ b/enos/enos.vars.hcl @@ -1,9 +1,21 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 +# artifactory_username is the username to use when testing an artifact stored in artfactory. +# artifactory_username = "yourname@hashicorp.com" + +# artifactory_token is the token to use when authenticating to artifactory. +# artifactory_token = "yourtoken" + +# artifactory_host is the artifactory host to search for vault artifacts. +# artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" + +# artifactory_repo is the artifactory repo to search for vault artifacts. +# artifactory_repo = "hashicorp-crt-stable-local*" + # aws_region is the AWS region where we'll create infrastructure # for the smoke scenario -# aws_region = "us-west-1" +# aws_region = "us-east-1" # aws_ssh_keypair_name is the AWS keypair to use for SSH # aws_ssh_keypair_name = "enos-ci-ssh-key" @@ -11,8 +23,25 @@ # aws_ssh_private_key_path is the path to the AWS keypair private key # aws_ssh_private_key_path = "./support/private_key.pem" -# backend_instance_type is the instance type to use for the Vault backend -# backend_instance_type = "t3.small" +# backend_edition is the backend (consul) release edition if applicable to the scenario. +# backend_edition = "oss" + +# backend_license_path is the license for the backend if applicable (Consul Enterprise)". +# backend_license_path = "./support/consul.hclic" + +# backend_log_level is the server log level for the backend. Supported values include 'trace', +# 'debug', 'info', 'warn', 'error'" +# backend_log_level = "trace" + +# backend_instance_type is the instance type to use for the Vault backend. Must support arm64 +# backend_instance_type = "t4g.small" + +# project_name is the description of the project. It will often be used to tag infrastructure +# resources. +# project_name = "vault-enos-integration" + +# rhel_distro_version is the version of RHEL to use for "distro:rhel" variants. +# rhel_distro_version = "9.1" // or "8.8" # tags are a map of tags that will be applied to infrastructure resources that # support tagging. 
@@ -26,10 +55,41 @@ # to download the enos Terraform provider and the enos Terraform modules. # tfc_api_token = "XXXXX.atlasv1.XXXXX..." -# vault_bundle_path is the path to CRT generated or local vault.zip bundle. When +# ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will +# be appended to the ember test command as '-f=\"\"'. +# ui_test_filter = "sometest" + +# ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a +# cluster will be created but no tests will be run. +# ui_run_tests = true + +# ubuntu_distro_version is the version of ubuntu to use for "distro:ubuntu" variants +# ubuntu_distro_version = "22.04" // or "20.04", "18.04" + +# vault_artifact_path is the path to CRT generated or local vault.zip bundle. When # using the "builder:local" variant a bundle will be built from the current branch. # In CI it will use the output of the build workflow. -# vault_bundle_path = "./dist/vault.zip" +# vault_artifact_path = "./dist/vault.zip" + +# vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. +# It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" +# vault_artifact_type = "bundle" + +# vault_autopilot_initial_release is the version of Vault to deploy before doing an autopilot upgrade +# to the test artifact. +# vault_autopilot_initial_release = { +# edition = "ent" +# version = "1.11.0" +# } +# } + +# vault_build_date is the build date for Vault artifact. Some validations will require the binary build +# date to match" +# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example + +# vault_enable_file_audit_device sets whether or not to enable the 'file' audit device. It true it +# will be enabled at the path /var/log/vault_audit.log +# vault_enable_file_audit_device = true # vault_install_dir is the directory where the vault binary will be installed on # the remote machines. @@ -48,4 +108,27 @@ # This is only required for non-oss editions" # vault_license_path = "./support/vault.hclic" +# vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. +# vault_local_build_tags = ["ui", "ent"] + +# vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are +# trace, debug, info, warn, and err." +# vault_log_level = "trace" + +# vault_product_version is the version of Vault we are testing. Some validations will expect the vault +# binary and cluster to report this version. +# vault_product_version = "1.15.0" + # vault_upgrade_initial_release is the Vault release to deploy before upgrading. + +# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault +# binary and cluster to report this revision. +# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" + +# vault_upgrade_initial_release is the Vault release to deploy before doing an in-place upgrade. +# vault_upgrade_initial_release = { +# edition = "oss" +# // Vault 1.10.5 has a known issue with retry_join. 
+# version = "1.10.4" +# } +# } diff --git a/enos/modules/target_ec2_fleet/main.tf b/enos/modules/target_ec2_fleet/main.tf index 1dac69456..2ec00b233 100644 --- a/enos/modules/target_ec2_fleet/main.tf +++ b/enos/modules/target_ec2_fleet/main.tf @@ -125,7 +125,7 @@ resource "aws_security_group" "target" { to_port = 22 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -136,7 +136,7 @@ resource "aws_security_group" "target" { to_port = 8201 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), formatlist("%s/32", var.ssh_allow_ips) ]) @@ -148,7 +148,7 @@ resource "aws_security_group" "target" { to_port = 8302 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -158,7 +158,7 @@ resource "aws_security_group" "target" { to_port = 8302 protocol = "udp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -168,7 +168,7 @@ resource "aws_security_group" "target" { to_port = 8503 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -178,7 +178,7 @@ resource "aws_security_group" "target" { to_port = 8600 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -188,7 +188,7 @@ resource "aws_security_group" "target" { to_port = 8600 protocol = "udp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf index 7f14b7a04..b4e6364be 100644 --- a/enos/modules/target_ec2_instances/main.tf +++ b/enos/modules/target_ec2_instances/main.tf @@ -144,7 +144,7 @@ resource "aws_security_group" "target" { to_port = 22 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -155,7 +155,7 @@ resource "aws_security_group" "target" { to_port = 8201 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", 
data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), formatlist("%s/32", var.ssh_allow_ips) ]) @@ -167,7 +167,7 @@ resource "aws_security_group" "target" { to_port = 8302 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -177,7 +177,7 @@ resource "aws_security_group" "target" { to_port = 8302 protocol = "udp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -187,7 +187,7 @@ resource "aws_security_group" "target" { to_port = 8503 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -197,7 +197,7 @@ resource "aws_security_group" "target" { to_port = 8600 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -207,7 +207,7 @@ resource "aws_security_group" "target" { to_port = 8600 protocol = "udp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf index 2a5123064..aa67ef830 100644 --- a/enos/modules/target_ec2_spot_fleet/main.tf +++ b/enos/modules/target_ec2_spot_fleet/main.tf @@ -219,7 +219,7 @@ resource "aws_security_group" "target" { to_port = 22 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -230,7 +230,7 @@ resource "aws_security_group" "target" { to_port = 8201 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), formatlist("%s/32", var.ssh_allow_ips) ]) @@ -242,7 +242,7 @@ resource "aws_security_group" "target" { to_port = 8302 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -252,7 +252,7 @@ resource "aws_security_group" "target" { to_port = 8302 protocol = "udp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -262,7 +262,7 @@ resource "aws_security_group" "target" { to_port = 8503 protocol = "tcp" cidr_blocks = 
flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -272,7 +272,7 @@ resource "aws_security_group" "target" { to_port = 8600 protocol = "tcp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } @@ -282,7 +282,7 @@ resource "aws_security_group" "target" { to_port = 8600 protocol = "udp" cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf new file mode 100644 index 000000000..8dead4e86 --- /dev/null +++ b/enos/modules/vault_proxy/main.tf @@ -0,0 +1,85 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_proxy_pidfile" { + type = string + description = "The filepath where the Vault Proxy pid file is kept" + default = "/tmp/pidfile" +} + +locals { + vault_instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } + vault_proxy_address = "127.0.0.1:8100" +} + +resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { + content = templatefile("${path.module}/templates/set-up-approle-and-proxy.sh", { + vault_install_dir = var.vault_install_dir + vault_token = var.vault_root_token + vault_proxy_pidfile = var.vault_proxy_pidfile + vault_proxy_address = local.vault_proxy_address + }) + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } +} + +resource "enos_remote_exec" "use_proxy" { + content = templatefile("${path.module}/templates/use-proxy.sh", { + vault_install_dir = var.vault_install_dir + vault_proxy_pidfile = var.vault_proxy_pidfile + vault_proxy_address = local.vault_proxy_address + }) + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } + + depends_on = [ + enos_remote_exec.set_up_approle_auth_and_proxy + ] +} diff --git a/enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh new file mode 100644 index 000000000..dd46db53b --- /dev/null +++ b/enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${vault_install_dir}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +export VAULT_TOKEN='${vault_token}' + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +approle_create_status=$($binpath auth enable approle) + +approle_status=$($binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) + +ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo $ROLEID > /tmp/role-id +echo $SECRETID > /tmp/secret-id + +# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl +# The Proxy references the fixed Vault server address of http://127.0.0.1:8200 +# The Proxy itself listens at the address http://127.0.0.1:8100 +cat > /tmp/vault-proxy.hcl <<- EOM +pid_file = "${vault_proxy_pidfile}" + +vault { + address = "http://127.0.0.1:8200" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +api_proxy { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "${vault_proxy_address}" + tls_disable = true +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Proxy is still running from a previous run, kill it +pkill -F "${vault_proxy_pidfile}" || true + +# Run proxy in the background +$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 & diff --git a/enos/modules/vault_proxy/templates/use-proxy.sh b/enos/modules/vault_proxy/templates/use-proxy.sh new file mode 100644 index 000000000..d26a46da0 --- /dev/null +++ b/enos/modules/vault_proxy/templates/use-proxy.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${vault_install_dir}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Will cause the Vault CLI to communicate with the Vault Proxy, since it +# is listening at port 8100. +export VAULT_ADDR='http://${vault_proxy_address}' + +# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token +# is used. +unset VAULT_TOKEN + +# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env +# var) to lookup the details of the Proxy's token and make sure that the +# .data.path field contains 'auth/approle/login', thus confirming that the Proxy +# automatically authenticated itself. +$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login' + +# Now that we're done, kill the proxy +pkill -F "${vault_proxy_pidfile}" || true
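
A quick way to exercise these changes locally (an illustrative sketch only; the enos CLI invocations and matrix filters below are inferred from the enos-lint.yml workflow and the proxy scenario matrix in this patch, not prescribed by it):

# Mirror the new lint job: terraform fmt check plus `enos scenario validate`.
export ENOS_VAR_tfc_api_token="<TFC API token>"               # used to install the enos Terraform provider
export ENOS_VAR_vault_product_version="$(make ci-get-version)" # run from the repo root, as in the workflow
make -C enos lint

# Launch the new proxy scenario against a locally built artifact, inspect its
# outputs, then tear it down. Filter values come from the scenario's matrix.
cd enos
enos scenario launch proxy arch:amd64 artifact_source:local distro:ubuntu edition:oss
enos scenario output proxy arch:amd64 artifact_source:local distro:ubuntu edition:oss
enos scenario destroy proxy arch:amd64 artifact_source:local distro:ubuntu edition:oss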