backport of commit 6b21994d76b18c91397247dfd69bb01e46c5de25 (#21981)
Co-authored-by: Ryan Cragun <me@ryan.ec>
This commit is contained in:
parent cf6c2937b0
commit 04eed0b14c
@@ -1,5 +1,5 @@
 ---
-name: enos_fmt
+name: lint-enos

 on:
   pull_request:
@@ -7,21 +7,26 @@ on:
       - enos/**

 jobs:
-  fmt_check:
+  lint:
     # Only run this workflow on pull requests from hashicorp/vault branches
     # as we need secrets to install enos.
     if: "! github.event.pull_request.head.repo.fork"
     runs-on: ubuntu-latest
     env:
       GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
+      ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
     steps:
       - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+      - id: get-version
+        run: echo "version=$(make ci-get-version)" >> "$GITHUB_OUTPUT"
       - uses: hashicorp/setup-terraform@v2
         with:
           terraform_wrapper: false
       - uses: hashicorp/action-setup-enos@v1
         with:
           github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
-      - name: check formatting
+      - name: lint
         working-directory: ./enos
-        run: make check-fmt
+        env:
+          ENOS_VAR_vault_product_version: ${{ steps.get-version.outputs.version }}
+        run: make lint
@@ -99,10 +99,10 @@ jobs:
       ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }}
       ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN }}
       ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache
+      ENOS_VAR_vault_artifact_path: ./support/downloads/${{ inputs.build-artifact-name }}
       ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }}
       ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }}
       ENOS_VAR_vault_revision: ${{ inputs.vault-revision }}
-      ENOS_VAR_vault_bundle_path: ./support/downloads/${{ inputs.build-artifact-name }}
       ENOS_VAR_vault_license_path: ./support/vault.hclic
       ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data
     steps:
@@ -22,3 +22,10 @@ check-fmt-modules:
 .PHONY: fmt-modules
 fmt-modules:
 	terraform fmt -diff -recursive ./modules
+
+.PHONY: validate-enos
+validate-enos:
+	enos scenario validate
+
+.PHONY: lint
+lint: check-fmt validate-enos
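The lint-enos workflow above now drives these targets instead of the format check alone. As a rough local equivalent (an editorial sketch, not part of the change; it assumes the enos CLI and Terraform are installed and uses the repository root's ci-get-version target that the workflow calls):

    # From the repository root: mirror what the lint job does in CI.
    export ENOS_VAR_vault_product_version="$(make ci-get-version)"
    # Scenario validation may also need ENOS_VAR_tfc_api_token set, as in the workflow env.
    make -C enos lint    # runs check-fmt and validate-enos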
@ -104,6 +104,13 @@ module "vault_agent" {
|
||||||
vault_instance_count = var.vault_instance_count
|
vault_instance_count = var.vault_instance_count
|
||||||
}
|
}
|
||||||
|
|
||||||
|
module "vault_proxy" {
|
||||||
|
source = "./modules/vault_proxy"
|
||||||
|
|
||||||
|
vault_install_dir = var.vault_install_dir
|
||||||
|
vault_instance_count = var.vault_instance_count
|
||||||
|
}
|
||||||
|
|
||||||
module "vault_verify_agent_output" {
|
module "vault_verify_agent_output" {
|
||||||
source = "./modules/vault_verify_agent_output"
|
source = "./modules/vault_verify_agent_output"
|
||||||
|
|
||||||
|
|
|
@ -25,7 +25,7 @@ scenario "agent" {
|
||||||
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
||||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||||
}
|
}
|
||||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
|
||||||
distro_version = {
|
distro_version = {
|
||||||
"rhel" = var.rhel_distro_version
|
"rhel" = var.rhel_distro_version
|
||||||
"ubuntu" = var.ubuntu_distro_version
|
"ubuntu" = var.ubuntu_distro_version
|
||||||
|
@ -124,6 +124,7 @@ scenario "agent" {
|
||||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||||
|
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||||
install_dir = var.vault_install_dir
|
install_dir = var.vault_install_dir
|
||||||
license = matrix.edition != "oss" ? step.read_license.license : null
|
license = matrix.edition != "oss" ? step.read_license.license : null
|
||||||
local_artifact_path = local.bundle_path
|
local_artifact_path = local.bundle_path
|
||||||
|
@ -131,7 +132,6 @@ scenario "agent" {
|
||||||
storage_backend = "raft"
|
storage_backend = "raft"
|
||||||
target_hosts = step.create_vault_cluster_targets.hosts
|
target_hosts = step.create_vault_cluster_targets.hosts
|
||||||
unseal_method = "shamir"
|
unseal_method = "shamir"
|
||||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ scenario "autopilot" {
|
||||||
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
||||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||||
}
|
}
|
||||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
|
||||||
distro_version = {
|
distro_version = {
|
||||||
"rhel" = var.rhel_distro_version
|
"rhel" = var.rhel_distro_version
|
||||||
"ubuntu" = var.ubuntu_distro_version
|
"ubuntu" = var.ubuntu_distro_version
|
||||||
|
|
|
@@ -0,0 +1,210 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+scenario "proxy" {
+  matrix {
+    arch            = ["amd64", "arm64"]
+    artifact_source = ["local", "crt", "artifactory"]
+    distro          = ["ubuntu", "rhel"]
+    edition         = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+  }
+
+  terraform_cli = terraform_cli.default
+  terraform     = terraform.default
+  providers = [
+    provider.aws.default,
+    provider.enos.ubuntu,
+    provider.enos.rhel
+  ]
+
+  locals {
+    backend_tag_key = "VaultStorage"
+    build_tags = {
+      "oss"              = ["ui"]
+      "ent"              = ["ui", "enterprise", "ent"]
+      "ent.fips1402"     = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"]
+      "ent.hsm"          = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
+      "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
+    }
+    bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
+    distro_version = {
+      "rhel"   = var.rhel_distro_version
+      "ubuntu" = var.ubuntu_distro_version
+    }
+    enos_provider = {
+      rhel   = provider.enos.rhel
+      ubuntu = provider.enos.ubuntu
+    }
+    install_artifactory_artifact = local.bundle_path == null
+    packages                     = ["jq"]
+    tags = merge({
+      "Project Name" : var.project_name
+      "Project" : "Enos",
+      "Environment" : "ci"
+    }, var.tags)
+    vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
+    vault_tag_key      = "Type" // enos_vault_start expects Type as the tag key
+  }
+
+  step "get_local_metadata" {
+    skip_step = matrix.artifact_source != "local"
+    module    = module.get_local_metadata
+  }
+
+  step "build_vault" {
+    module = "build_${matrix.artifact_source}"
+
+    variables {
+      build_tags           = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition]
+      bundle_path          = local.bundle_path
+      goarch               = matrix.arch
+      goos                 = "linux"
+      artifactory_host     = matrix.artifact_source == "artifactory" ? var.artifactory_host : null
+      artifactory_repo     = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null
+      artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null
+      artifactory_token    = matrix.artifact_source == "artifactory" ? var.artifactory_token : null
+      arch                 = matrix.artifact_source == "artifactory" ? matrix.arch : null
+      product_version      = var.vault_product_version
+      artifact_type        = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null
+      distro               = matrix.artifact_source == "artifactory" ? matrix.distro : null
+      edition              = matrix.artifact_source == "artifactory" ? matrix.edition : null
+      revision             = var.vault_revision
+    }
+  }
+
+  step "ec2_info" {
+    module = module.ec2_info
+  }
+
+  step "create_vpc" {
+    module = module.create_vpc
+
+    variables {
+      common_tags = local.tags
+    }
+  }
+
+  step "read_license" {
+    skip_step = matrix.edition == "oss"
+    module    = module.read_license
+
+    variables {
+      file_name = local.vault_license_path
+    }
+  }
+
+  step "create_vault_cluster_targets" {
+    module     = module.target_ec2_instances
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id                = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
+      awskms_unseal_key_arn = step.create_vpc.kms_key_arn
+      cluster_tag_key       = local.vault_tag_key
+      common_tags           = local.tags
+      vpc_id                = step.create_vpc.vpc_id
+    }
+  }
+
+  step "create_vault_cluster" {
+    module = module.vault_cluster
+    depends_on = [
+      step.build_vault,
+      step.create_vault_cluster_targets
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      artifactory_release      = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
+      awskms_unseal_key_arn    = step.create_vpc.kms_key_arn
+      cluster_name             = step.create_vault_cluster_targets.cluster_name
+      enable_file_audit_device = var.vault_enable_file_audit_device
+      install_dir              = var.vault_install_dir
+      license                  = matrix.edition != "oss" ? step.read_license.license : null
+      local_artifact_path      = local.bundle_path
+      packages                 = local.packages
+      storage_backend          = "raft"
+      target_hosts             = step.create_vault_cluster_targets.hosts
+      unseal_method            = "shamir"
+    }
+  }
+
+  step "start_vault_proxy" {
+    module = "vault_proxy"
+    depends_on = [
+      step.build_vault,
+      step.create_vault_cluster,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      vault_instances  = step.create_vault_cluster_targets.hosts
+      vault_root_token = step.create_vault_cluster.root_token
+    }
+  }
+
+  output "awkms_unseal_key_arn" {
+    description = "The Vault cluster KMS key arn"
+    value       = step.create_vpc.kms_key_arn
+  }
+
+  output "cluster_name" {
+    description = "The Vault cluster name"
+    value       = step.create_vault_cluster.cluster_name
+  }
+
+  output "hosts" {
+    description = "The Vault cluster target hosts"
+    value       = step.create_vault_cluster.target_hosts
+  }
+
+  output "private_ips" {
+    description = "The Vault cluster private IPs"
+    value       = step.create_vault_cluster.private_ips
+  }
+
+  output "public_ips" {
+    description = "The Vault cluster public IPs"
+    value       = step.create_vault_cluster.public_ips
+  }
+
+  output "root_token" {
+    description = "The Vault cluster root token"
+    value       = step.create_vault_cluster.root_token
+  }
+
+  output "recovery_key_shares" {
+    description = "The Vault cluster recovery key shares"
+    value       = step.create_vault_cluster.recovery_key_shares
+  }
+
+  output "recovery_keys_b64" {
+    description = "The Vault cluster recovery keys b64"
+    value       = step.create_vault_cluster.recovery_keys_b64
+  }
+
+  output "recovery_keys_hex" {
+    description = "The Vault cluster recovery keys hex"
+    value       = step.create_vault_cluster.recovery_keys_hex
+  }
+
+  output "unseal_keys_b64" {
+    description = "The Vault cluster unseal keys"
+    value       = step.create_vault_cluster.unseal_keys_b64
+  }
+
+  output "unseal_keys_hex" {
+    description = "The Vault cluster unseal keys hex"
+    value       = step.create_vault_cluster.unseal_keys_hex
+  }
+}
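For orientation only (not part of the commit): a scenario like this is normally exercised with the enos CLI from the enos directory, selecting one matrix variant at a time. A minimal sketch, assuming AWS credentials and the ENOS_VAR_* inputs described in the variables files are configured:

    # Launch, inspect, and tear down a single variant of the new proxy scenario.
    cd enos
    enos scenario launch proxy arch:amd64 artifact_source:local distro:ubuntu edition:oss
    enos scenario output proxy arch:amd64 artifact_source:local distro:ubuntu edition:oss
    enos scenario destroy proxy arch:amd64 artifact_source:local distro:ubuntu edition:oss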
@ -50,7 +50,7 @@ scenario "replication" {
|
||||||
"rhel" = var.rhel_distro_version
|
"rhel" = var.rhel_distro_version
|
||||||
"ubuntu" = var.ubuntu_distro_version
|
"ubuntu" = var.ubuntu_distro_version
|
||||||
}
|
}
|
||||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
|
||||||
enos_provider = {
|
enos_provider = {
|
||||||
rhel = provider.enos.rhel
|
rhel = provider.enos.rhel
|
||||||
ubuntu = provider.enos.ubuntu
|
ubuntu = provider.enos.ubuntu
|
||||||
|
|
|
@ -42,7 +42,7 @@ scenario "smoke" {
|
||||||
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
||||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||||
}
|
}
|
||||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
|
||||||
distro_version = {
|
distro_version = {
|
||||||
"rhel" = var.rhel_distro_version
|
"rhel" = var.rhel_distro_version
|
||||||
"ubuntu" = var.ubuntu_distro_version
|
"ubuntu" = var.ubuntu_distro_version
|
||||||
|
|
|
@ -22,7 +22,7 @@ scenario "ui" {
|
||||||
"oss" = ["ui"]
|
"oss" = ["ui"]
|
||||||
"ent" = ["ui", "enterprise", "ent"]
|
"ent" = ["ui", "enterprise", "ent"]
|
||||||
}
|
}
|
||||||
bundle_path = abspath(var.vault_bundle_path)
|
bundle_path = abspath(var.vault_artifact_path)
|
||||||
distro = "ubuntu"
|
distro = "ubuntu"
|
||||||
consul_version = "1.14.2"
|
consul_version = "1.14.2"
|
||||||
seal = "awskms"
|
seal = "awskms"
|
||||||
|
|
|
@ -36,7 +36,7 @@ scenario "upgrade" {
|
||||||
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
||||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||||
}
|
}
|
||||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
|
||||||
distro_version = {
|
distro_version = {
|
||||||
"rhel" = var.rhel_distro_version
|
"rhel" = var.rhel_distro_version
|
||||||
"ubuntu" = var.ubuntu_distro_version
|
"ubuntu" = var.ubuntu_distro_version
|
||||||
|
|
|
@@ -1,22 +1,16 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: MPL-2.0

-variable "artifact_path" {
-  type        = string
-  description = "The local path for dev artifact to test"
-  default     = null
-}
-
 variable "artifactory_username" {
   type        = string
-  description = "The username to use when connecting to artifactory"
+  description = "The username to use when testing an artifact from artifactory"
   default     = null
   sensitive   = true
 }

 variable "artifactory_token" {
   type        = string
-  description = "The token to use when connecting to artifactory"
+  description = "The token to use when authenticating to artifactory"
   default     = null
   sensitive   = true
 }
@@ -36,7 +30,7 @@ variable "artifactory_repo" {
 variable "aws_region" {
   description = "The AWS region where we'll create infrastructure"
   type        = string
-  default     = "us-west-1"
+  default     = "us-east-1"
 }

 variable "aws_ssh_keypair_name" {
@@ -75,44 +69,12 @@ variable "backend_log_level" {
   default     = "trace"
 }

-variable "operator_instance" {
-  type        = string
-  description = "The ip address of the operator (Voter) node"
-}
-
 variable "project_name" {
   description = "The description of the project"
   type        = string
   default     = "vault-enos-integration"
 }

-variable "remove_vault_instances" {
-  type = map(object({
-    private_ip = string
-    public_ip  = string
-  }))
-  description = "The old vault nodes to be removed"
-}
-
-variable "ui_test_filter" {
-  type        = string
-  description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"<filter>\"'"
-  default     = null
-}
-
-variable "ui_run_tests" {
-  type        = bool
-  description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run"
-  default     = true
-}
-
-variable "vault_enable_file_audit_device" {
-  description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log"
-  type        = bool
-  default     = true
-}
-
 variable "rhel_distro_version" {
   description = "The version of RHEL to use"
   type        = string
@@ -132,7 +94,7 @@ variable "terraform_plugin_cache_dir" {
 }

 variable "tfc_api_token" {
-  description = "The Terraform Cloud QTI Organization API token."
+  description = "The Terraform Cloud QTI Organization API token. This is used to download the enos Terraform provider."
   type        = string
   sensitive   = true
 }
@@ -143,8 +105,20 @@ variable "ubuntu_distro_version" {
   default     = "22.04" // or "20.04", "18.04"
 }

+variable "ui_test_filter" {
+  type        = string
+  description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"<filter>\"'"
+  default     = null
+}
+
+variable "ui_run_tests" {
+  type        = bool
+  description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run"
+  default     = true
+}
+
 variable "vault_artifact_type" {
-  description = "The Vault artifact type package or bundle"
+  description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
   default     = "bundle"
 }

@@ -156,24 +130,30 @@ variable "vault_autopilot_initial_release" {
   }
 }

-variable "vault_bundle_path" {
+variable "vault_artifact_path" {
   description = "Path to CRT generated or local vault.zip bundle"
   type        = string
   default     = "/tmp/vault.zip"
 }

+variable "vault_build_date" {
+  description = "The build date for Vault artifact"
+  type        = string
+  default     = ""
+}
+
+variable "vault_enable_file_audit_device" {
+  description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log"
+  type        = bool
+  default     = true
+}
+
 variable "vault_install_dir" {
   type        = string
   description = "The directory where the Vault binary will be installed"
   default     = "/opt/vault/bin"
 }

-variable "vault_instance_type" {
-  description = "The instance type to use for the Vault backend"
-  type        = string
-  default     = null
-}
-
 variable "vault_instance_count" {
   description = "How many instances to create for the Vault cluster"
   type        = number
@@ -198,12 +178,6 @@ variable "vault_log_level" {
   default     = "trace"
 }

-variable "vault_build_date" {
-  description = "The build date for Vault artifact"
-  type        = string
-  default     = ""
-}
-
 variable "vault_product_version" {
   description = "The version of Vault we are testing"
   type        = string
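Because the CI workflows pass these inputs as ENOS_VAR_-prefixed environment variables (see the workflow hunks above), the renamed variable can be supplied the same way when running enos by hand instead of editing enos.vars.hcl; a small sketch with an illustrative, hypothetical version number:

    # Provide the renamed artifact path (and a product version) via the environment.
    export ENOS_VAR_vault_artifact_path=./dist/vault.zip
    export ENOS_VAR_vault_product_version=1.14.2   # hypothetical value for illustration
    enos scenario validate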
@@ -1,9 +1,21 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: MPL-2.0

+# artifactory_username is the username to use when testing an artifact stored in artfactory.
+# artifactory_username = "yourname@hashicorp.com"
+
+# artifactory_token is the token to use when authenticating to artifactory.
+# artifactory_token = "yourtoken"
+
+# artifactory_host is the artifactory host to search for vault artifacts.
+# artifactory_host = "https://artifactory.hashicorp.engineering/artifactory"
+
+# artifactory_repo is the artifactory repo to search for vault artifacts.
+# artifactory_repo = "hashicorp-crt-stable-local*"
+
 # aws_region is the AWS region where we'll create infrastructure
 # for the smoke scenario
-# aws_region = "us-west-1"
+# aws_region = "us-east-1"

 # aws_ssh_keypair_name is the AWS keypair to use for SSH
 # aws_ssh_keypair_name = "enos-ci-ssh-key"
@@ -11,8 +23,25 @@
 # aws_ssh_private_key_path is the path to the AWS keypair private key
 # aws_ssh_private_key_path = "./support/private_key.pem"

-# backend_instance_type is the instance type to use for the Vault backend
-# backend_instance_type = "t3.small"
+# backend_edition is the backend (consul) release edition if applicable to the scenario.
+# backend_edition = "oss"
+
+# backend_license_path is the license for the backend if applicable (Consul Enterprise)".
+# backend_license_path = "./support/consul.hclic"
+
+# backend_log_level is the server log level for the backend. Supported values include 'trace',
+# 'debug', 'info', 'warn', 'error'"
+# backend_log_level = "trace"
+
+# backend_instance_type is the instance type to use for the Vault backend. Must support arm64
+# backend_instance_type = "t4g.small"
+
+# project_name is the description of the project. It will often be used to tag infrastructure
+# resources.
+# project_name = "vault-enos-integration"
+
+# rhel_distro_version is the version of RHEL to use for "distro:rhel" variants.
+# rhel_distro_version = "9.1" // or "8.8"

 # tags are a map of tags that will be applied to infrastructure resources that
 # support tagging.
@@ -26,10 +55,41 @@
 # to download the enos Terraform provider and the enos Terraform modules.
 # tfc_api_token = "XXXXX.atlasv1.XXXXX..."

-# vault_bundle_path is the path to CRT generated or local vault.zip bundle. When
+# ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will
+# be appended to the ember test command as '-f=\"<filter>\"'.
+# ui_test_filter = "sometest"
+
+# ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a
+# cluster will be created but no tests will be run.
+# ui_run_tests = true
+
+# ubuntu_distro_version is the version of ubuntu to use for "distro:ubuntu" variants
+# ubuntu_distro_version = "22.04" // or "20.04", "18.04"
+
+# vault_artifact_path is the path to CRT generated or local vault.zip bundle. When
 # using the "builder:local" variant a bundle will be built from the current branch.
 # In CI it will use the output of the build workflow.
-# vault_bundle_path = "./dist/vault.zip"
+# vault_artifact_path = "./dist/vault.zip"
+
+# vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory.
+# It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
+# vault_artifact_type = "bundle"
+
+# vault_autopilot_initial_release is the version of Vault to deploy before doing an autopilot upgrade
+# to the test artifact.
+# vault_autopilot_initial_release = {
+#   edition = "ent"
+#   version = "1.11.0"
+# }
+# }
+
+# vault_build_date is the build date for Vault artifact. Some validations will require the binary build
+# date to match"
+# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example
+
+# vault_enable_file_audit_device sets whether or not to enable the 'file' audit device. It true it
+# will be enabled at the path /var/log/vault_audit.log
+# vault_enable_file_audit_device = true

 # vault_install_dir is the directory where the vault binary will be installed on
 # the remote machines.
@@ -48,4 +108,27 @@
 # This is only required for non-oss editions"
 # vault_license_path = "./support/vault.hclic"

+# vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants.
+# vault_local_build_tags = ["ui", "ent"]
+
+# vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are
+# trace, debug, info, warn, and err."
+# vault_log_level = "trace"
+
+# vault_product_version is the version of Vault we are testing. Some validations will expect the vault
+# binary and cluster to report this version.
+# vault_product_version = "1.15.0"
+
 # vault_upgrade_initial_release is the Vault release to deploy before upgrading.
+
+# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault
+# binary and cluster to report this revision.
+# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
+
+# vault_upgrade_initial_release is the Vault release to deploy before doing an in-place upgrade.
+# vault_upgrade_initial_release = {
+#   edition = "oss"
+#   // Vault 1.10.5 has a known issue with retry_join.
+#   version = "1.10.4"
+# }
+# }
@ -125,7 +125,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 22
|
to_port = 22
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -136,7 +136,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8201
|
to_port = 8201
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
formatlist("%s/32", var.ssh_allow_ips)
|
formatlist("%s/32", var.ssh_allow_ips)
|
||||||
])
|
])
|
||||||
|
@ -148,7 +148,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8302
|
to_port = 8302
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -158,7 +158,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8302
|
to_port = 8302
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -168,7 +168,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8503
|
to_port = 8503
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -178,7 +178,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8600
|
to_port = 8600
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -188,7 +188,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8600
|
to_port = 8600
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
|
|
@ -144,7 +144,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 22
|
to_port = 22
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -155,7 +155,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8201
|
to_port = 8201
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
formatlist("%s/32", var.ssh_allow_ips)
|
formatlist("%s/32", var.ssh_allow_ips)
|
||||||
])
|
])
|
||||||
|
@ -167,7 +167,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8302
|
to_port = 8302
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -177,7 +177,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8302
|
to_port = 8302
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -187,7 +187,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8503
|
to_port = 8503
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -197,7 +197,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8600
|
to_port = 8600
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -207,7 +207,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8600
|
to_port = 8600
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
|
|
@ -219,7 +219,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 22
|
to_port = 22
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -230,7 +230,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8201
|
to_port = 8201
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
formatlist("%s/32", var.ssh_allow_ips)
|
formatlist("%s/32", var.ssh_allow_ips)
|
||||||
])
|
])
|
||||||
|
@ -242,7 +242,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8302
|
to_port = 8302
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -252,7 +252,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8302
|
to_port = 8302
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -262,7 +262,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8503
|
to_port = 8503
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -272,7 +272,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8600
|
to_port = 8600
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
@ -282,7 +282,7 @@ resource "aws_security_group" "target" {
|
||||||
to_port = 8600
|
to_port = 8600
|
||||||
protocol = "udp"
|
protocol = "udp"
|
||||||
cidr_blocks = flatten([
|
cidr_blocks = flatten([
|
||||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
|
||||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
|
|
@@ -0,0 +1,85 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+    }
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The Vault root token"
+}
+
+variable "vault_instances" {
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The Vault cluster instances that were created"
+}
+
+variable "vault_instance_count" {
+  type        = number
+  description = "How many vault instances are in the cluster"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_proxy_pidfile" {
+  type        = string
+  description = "The filepath where the Vault Proxy pid file is kept"
+  default     = "/tmp/pidfile"
+}
+
+locals {
+  vault_instances = {
+    for idx in range(var.vault_instance_count) : idx => {
+      public_ip  = values(var.vault_instances)[idx].public_ip
+      private_ip = values(var.vault_instances)[idx].private_ip
+    }
+  }
+  vault_proxy_address = "127.0.0.1:8100"
+}
+
+resource "enos_remote_exec" "set_up_approle_auth_and_proxy" {
+  content = templatefile("${path.module}/templates/set-up-approle-and-proxy.sh", {
+    vault_install_dir   = var.vault_install_dir
+    vault_token         = var.vault_root_token
+    vault_proxy_pidfile = var.vault_proxy_pidfile
+    vault_proxy_address = local.vault_proxy_address
+  })
+
+  transport = {
+    ssh = {
+      host = local.vault_instances[0].public_ip
+    }
+  }
+}
+
+resource "enos_remote_exec" "use_proxy" {
+  content = templatefile("${path.module}/templates/use-proxy.sh", {
+    vault_install_dir   = var.vault_install_dir
+    vault_proxy_pidfile = var.vault_proxy_pidfile
+    vault_proxy_address = local.vault_proxy_address
+  })
+
+  transport = {
+    ssh = {
+      host = local.vault_instances[0].public_ip
+    }
+  }
+
+  depends_on = [
+    enos_remote_exec.set_up_approle_auth_and_proxy
+  ]
+}
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+
+binpath=${vault_install_dir}/vault
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+export VAULT_ADDR='http://127.0.0.1:8200'
+export VAULT_TOKEN='${vault_token}'
+
+# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
+$binpath auth disable approle || true
+
+approle_create_status=$($binpath auth enable approle)
+
+approle_status=$($binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000)
+
+ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id')
+
+if [[ "$ROLEID" == '' ]]; then
+  fail "expected ROLEID to be nonempty, but it is empty"
+fi
+
+SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id')
+
+if [[ "$SECRETID" == '' ]]; then
+  fail "expected SECRETID to be nonempty, but it is empty"
+fi
+
+echo $ROLEID > /tmp/role-id
+echo $SECRETID > /tmp/secret-id
+
+# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl
+# The Proxy references the fixed Vault server address of http://127.0.0.1:8200
+# The Proxy itself listens at the address http://127.0.0.1:8100
+cat > /tmp/vault-proxy.hcl <<- EOM
+pid_file = "${vault_proxy_pidfile}"
+
+vault {
+  address = "http://127.0.0.1:8200"
+  tls_skip_verify = true
+  retry {
+    num_retries = 10
+  }
+}
+
+api_proxy {
+  enforce_consistency = "always"
+  use_auto_auth_token = true
+}
+
+listener "tcp" {
+  address = "${vault_proxy_address}"
+  tls_disable = true
+}
+
+auto_auth {
+  method {
+    type = "approle"
+    config = {
+      role_id_file_path = "/tmp/role-id"
+      secret_id_file_path = "/tmp/secret-id"
+    }
+  }
+  sink {
+    type = "file"
+    config = {
+      path = "/tmp/token"
+    }
+  }
+}
+EOM
+
+# If Proxy is still running from a previous run, kill it
+pkill -F "${vault_proxy_pidfile}" || true
+
+# Run proxy in the background
+$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 &
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+
+binpath=${vault_install_dir}/vault
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+# Will cause the Vault CLI to communicate with the Vault Proxy, since it
+# is listening at port 8100.
+export VAULT_ADDR='http://${vault_proxy_address}'
+
+# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token
+# is used.
+unset VAULT_TOKEN
+
+# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env
+# var) to lookup the details of the Proxy's token and make sure that the
+# .data.path field contains 'auth/approle/login', thus confirming that the Proxy
+# automatically authenticated itself.
+$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login'
+
+# Now that we're done, kill the proxy
+pkill -F "${vault_proxy_pidfile}" || true
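A quick manual spot-check of the same behavior (an editorial sketch, not part of the change): with the proxy from set-up-approle-and-proxy.sh listening on 127.0.0.1:8100 and use_auto_auth_token = true, a request sent through the proxy without any client token should be answered using the proxy's auto-auth AppRole token:

    # On the target host; no VAULT_TOKEN is set, the proxy injects its own token.
    curl -s http://127.0.0.1:8100/v1/auth/token/lookup-self | jq -r '.data.path'   # expect auth/approle/login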