Backport [QT-602] Run `proxy` and `agent` test scenarios (#23176) into release/1.14.x (#23302)

* [QT-602] Run `proxy` and `agent` test scenarios (#23176)

Update our `proxy` and `agent` scenarios to support new variants and
perform baseline verification and their scenario specific verification.
We integrate these updated scenarios into the pipeline by adding them
to artifact samples.

We've also improved the reliability of the `autopilot` and `replication`
scenarios by refactoring our IP address gathering. Previously, we'd ask
vault for the primary IP address and use some Terraform logic to determine
followers. The leader IP address gathering script was also implicitly
responsible for ensuring that a found leader was within a given group of
hosts, and thus waiting for a given cluster to have a leader, and also for
doing some arithmetic and outputting `replication` specific output data.
We've broken these responsibilities into individual modules, improved their
error messages, and fixed various races and bugs, including:
* Fix a race between creating the file audit device and installing and starting
  vault in the `replication` scenario.
* Fix how we determine our leader and follower IP addresses. We now query
  vault instead of a prior implementation that inferred the followers and sometimes
  did not allow all nodes to be an expected leader.
* Fix a bug where we'd always fail on the first wrong condition
  in the `vault_verify_performance_replication` module.

We also performed some maintenance tasks on Enos scenarios by updating our
references from `oss` to `ce` to handle the naming and license changes. We
also enabled `shellcheck` linting for enos module scripts.

* Rename `oss` to `ce` for license and naming changes.
* Convert template enos scripts to scripts that take environment
  variables.
* Add `shellcheck` linting for enos module scripts.
* Add additional `backend` and `seal` support to `proxy` and `agent`
  scenarios.
* Update scenarios to include all baseline verification.
* Add `proxy` and `agent` scenarios to artifact samples.
* Remove IP address verification from the `vault_get_cluster_ips`
  modules and implement a new `vault_wait_for_leader` module.
* Determine follower IP addresses by querying vault in the
  `vault_get_cluster_ips` module.
* Move replication specific behavior out of the `vault_get_cluster_ips`
  module and into its own `replication_data` module.
* Extend initial version support for the `upgrade` and `autopilot`
  scenarios.

We also discovered an issue with undo_logs that has been described in
VAULT-20259. As such, we've disabled the undo_logs check until
it has been fixed.

* actions: fix actionlint error and linting logic (#23305)

Signed-off-by: Ryan Cragun <me@ryan.ec>
This commit is contained in:
Ryan Cragun 2023-09-27 10:53:12 -06:00 committed by GitHub
parent be4f05ed25
commit d2db7fbcdd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
84 changed files with 2392 additions and 1028 deletions

View File

@ -118,7 +118,7 @@ jobs:
- goos: windows
goarch: arm
fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml
uses: ./.github/workflows/build-vault-ce.yml
with:
create-packages: false
goarch: ${{ matrix.goarch }}
@ -139,7 +139,7 @@ jobs:
goos: [linux]
goarch: [arm, arm64, 386, amd64]
fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml
uses: ./.github/workflows/build-vault-ce.yml
with:
goarch: ${{ matrix.goarch }}
goos: ${{ matrix.goos }}
@ -159,7 +159,7 @@ jobs:
goos: [darwin]
goarch: [amd64, arm64]
fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml
uses: ./.github/workflows/build-vault-ce.yml
with:
create-packages: false
goarch: ${{ matrix.goarch }}
@ -236,17 +236,17 @@ jobs:
fail-fast: false
matrix:
include:
- sample-name: build_oss_linux_amd64_deb
- sample-name: build_ce_linux_amd64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
- sample-name: build_oss_linux_arm64_deb
- sample-name: build_ce_linux_arm64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
- sample-name: build_oss_linux_amd64_rpm
- sample-name: build_ce_linux_amd64_rpm
build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
- sample-name: build_oss_linux_arm64_rpm
- sample-name: build_ce_linux_arm64_rpm
build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
- sample-name: build_oss_linux_amd64_zip
- sample-name: build_ce_linux_amd64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
- sample-name: build_oss_linux_arm64_zip
- sample-name: build_ce_linux_arm64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
with:
build-artifact-name: ${{ matrix.build-artifact-name }}
@ -325,8 +325,8 @@ jobs:
steps:
- run: |
tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)'
notify-completed-successfully-failures-oss:
notify-completed-successfully-failures-ce:
if: ${{ always() && github.repository == 'hashicorp/vault' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }}
runs-on: ubuntu-latest
permissions:
@ -346,7 +346,7 @@ jobs:
with:
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official
payload: |
{"text":"OSS build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: OSS build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]}
{"text":"CE build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: CE build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]}
notify-completed-successfully-failures-ent:
if: ${{ always() && github.repository == 'hashicorp/vault-enterprise' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }}

View File

@ -7,21 +7,37 @@ on:
- enos/**
jobs:
lint:
metadata:
# Only run this workflow on pull requests from hashicorp/vault branches
# as we need secrets to install enos.
if: "! github.event.pull_request.head.repo.fork"
name: metadata
runs-on: ubuntu-latest
outputs:
runs-on: ${{ steps.metadata.outputs.runs-on }}
version: ${{ steps.metadata.outputs.version }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: metadata
run: |
echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
github_repository="${{ github.repository }}"
if [ "${github_repository##*/}" == "vault-enterprise" ] ; then
echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT"
else
echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT"
fi
lint:
needs: metadata
runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }}
env:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- name: Set Product version
id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: get-version
run: echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
- uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
@ -31,5 +47,5 @@ jobs:
- name: lint
working-directory: ./enos
env:
ENOS_VAR_vault_product_version: ${{ steps.get-version.outputs.version }}
ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }}
run: make lint

View File

@ -43,17 +43,17 @@ jobs:
fail-fast: false
matrix:
include:
- sample-name: release_oss_linux_amd64_deb
- sample-name: release_ce_linux_amd64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
- sample-name: release_oss_linux_arm64_deb
- sample-name: release_ce_linux_arm64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
- sample-name: release_oss_linux_amd64_rpm
- sample-name: release_ce_linux_amd64_rpm
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
- sample-name: release_oss_linux_arm64_rpm
- sample-name: release_ce_linux_arm64_rpm
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
- sample-name: release_oss_linux_amd64_zip
- sample-name: release_ce_linux_amd64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
- sample-name: release_oss_linux_arm64_zip
- sample-name: release_ce_linux_arm64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
with:
build-artifact-name: ${{ matrix.build-artifact-name }}

View File

@ -60,8 +60,8 @@ jobs:
echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV"
echo "image repo set to 'hashicorp/vault-enterprise'"
else
echo "edition=oss" >> "$GITHUB_ENV"
echo "edition set to 'oss'"
echo "edition=ce" >> "$GITHUB_ENV"
echo "edition set to 'ce'"
echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV"
echo "image repo set to 'hashicorp/vault'"
fi

View File

@ -91,7 +91,7 @@ jobs:
echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem
chmod 600 ./enos/support/private_key.pem
- name: Set Up Vault Enterprise License
if: contains(${{ github.event.repository.name }}, 'ent')
if: contains(github.event.repository.name, 'ent')
run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true
- name: Check Chrome Installed
id: chrome-check

View File

@ -1,5 +1,5 @@
.PHONY: default
default: check-fmt
default: check-fmt shellcheck
.PHONY: check-fmt
check-fmt: check-fmt-enos check-fmt-modules
@ -25,7 +25,11 @@ fmt-modules:
.PHONY: validate-enos
validate-enos:
enos scenario validate
enos scenario validate --timeout 30m0s
.PHONY: lint
lint: check-fmt validate-enos
lint: check-fmt shellcheck validate-enos
.PHONY: shellcheck
shellcheck:
find ./modules/ -type f -name '*.sh' | xargs shellcheck

View File

@ -4,7 +4,7 @@
globals {
backend_tag_key = "VaultStorage"
build_tags = {
"oss" = ["ui"]
"ce" = ["ui"]
"ent" = ["ui", "enterprise", "ent"]
"ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"]
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
@ -20,9 +20,7 @@ globals {
rhel = ["nc"]
}
sample_attributes = {
# aws_region = ["us-east-1", "us-west-2"]
# NOTE(9/18/23): use more expensive regions temporarily until AWS network outage is resolved.
aws_region = ["us-east-2", "us-west-1"]
aws_region = ["us-east-1", "us-west-2"]
}
tags = merge({
"Project Name" : var.project_name

View File

@ -53,6 +53,10 @@ module "read_license" {
source = "./modules/read_license"
}
module "replication_data" {
source = "./modules/replication_data"
}
module "shutdown_node" {
source = "./modules/shutdown_node"
}
@ -128,9 +132,27 @@ module "vault_cluster" {
module "vault_get_cluster_ips" {
source = "./modules/vault_get_cluster_ips"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
module "vault_raft_remove_peer" {
source = "./modules/vault_raft_remove_peer"
vault_install_dir = var.vault_install_dir
}
module "vault_setup_perf_secondary" {
source = "./modules/vault_setup_perf_secondary"
vault_install_dir = var.vault_install_dir
}
module "vault_test_ui" {
source = "./modules/vault_test_ui"
ui_run_tests = var.ui_run_tests
}
module "vault_unseal_nodes" {
source = "./modules/vault_unseal_nodes"
@ -145,6 +167,7 @@ module "vault_upgrade" {
vault_instance_count = var.vault_instance_count
}
module "vault_verify_autopilot" {
source = "./modules/vault_verify_autopilot"
@ -177,7 +200,6 @@ module "vault_verify_replication" {
module "vault_verify_ui" {
source = "./modules/vault_verify_ui"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
@ -194,12 +216,6 @@ module "vault_setup_perf_primary" {
vault_install_dir = var.vault_install_dir
}
module "vault_setup_perf_secondary" {
source = "./modules/vault_setup_perf_secondary"
vault_install_dir = var.vault_install_dir
}
module "vault_verify_read_data" {
source = "./modules/vault_verify_read_data"
@ -227,13 +243,9 @@ module "vault_verify_write_data" {
vault_instance_count = var.vault_instance_count
}
module "vault_raft_remove_peer" {
source = "./modules/vault_raft_remove_peer"
vault_install_dir = var.vault_install_dir
}
module "vault_wait_for_leader" {
source = "./modules/vault_wait_for_leader"
module "vault_test_ui" {
source = "./modules/vault_test_ui"
ui_run_tests = var.ui_run_tests
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}

View File

@ -0,0 +1,294 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "build_ce_linux_amd64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_arm64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_amd64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_arm64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}

View File

@ -0,0 +1,294 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "release_ce_linux_amd64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_arm64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_amd64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_arm64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}

View File

@ -1,142 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "build_oss_linux_amd64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_arm64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_amd64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_arm64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
}

View File

@ -1,142 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "release_oss_linux_amd64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_arm64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_amd64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_arm64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
}

View File

@ -5,8 +5,12 @@ scenario "agent" {
matrix {
arch = ["amd64", "arm64"]
artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
backend = ["consul", "raft"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles
exclude {
@ -30,12 +34,18 @@ scenario "agent" {
]
locals {
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
enos_provider = {
rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu
}
install_artifactory_artifact = local.bundle_path == null
manage_service = matrix.artifact_type == "bundle"
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
}
step "get_local_metadata" {
skip_step = matrix.artifact_source != "local"
module = module.get_local_metadata
}
step "build_vault" {
@ -43,7 +53,7 @@ scenario "agent" {
variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
bundle_path = local.bundle_path
artifact_path = local.artifact_path
goarch = matrix.arch
goos = "linux"
artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null
@ -52,7 +62,7 @@ scenario "agent" {
artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null
arch = matrix.artifact_source == "artifactory" ? matrix.arch : null
product_version = var.vault_product_version
artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null
artifact_type = matrix.artifact_type
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
revision = var.vault_revision
@ -71,8 +81,19 @@ scenario "agent" {
}
}
step "read_license" {
skip_step = matrix.edition == "oss"
// This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
file_name = global.backend_license_path
}
}
step "read_vault_license" {
skip_step = matrix.edition == "ce"
module = module.read_license
variables {
@ -97,9 +118,49 @@ scenario "agent" {
}
}
step "create_vault_cluster_backend_targets" {
module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
depends_on = [step.create_vpc]
providers = {
enos = provider.enos.ubuntu
}
variables {
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_tag_key = global.backend_tag_key
common_tags = global.tags
vpc_id = step.create_vpc.vpc_id
}
}
step "create_backend_cluster" {
module = "backend_${matrix.backend}"
depends_on = [
step.create_vault_cluster_backend_targets
]
providers = {
enos = provider.enos.ubuntu
}
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = matrix.consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}
step "create_vault_cluster" {
module = module.vault_cluster
depends_on = [
step.create_backend_cluster,
step.build_vault,
step.create_vault_cluster_targets
]
@ -109,17 +170,42 @@ scenario "agent" {
}
variables {
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_name = step.create_vault_cluster_targets.cluster_name
enable_audit_devices = var.vault_enable_audit_devices
install_dir = var.vault_install_dir
license = matrix.edition != "oss" ? step.read_license.license : null
local_artifact_path = local.bundle_path
packages = concat(global.packages, global.distro_packages[matrix.distro])
storage_backend = "raft"
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = "shamir"
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
backend_cluster_tag_key = global.backend_tag_key
cluster_name = step.create_vault_cluster_targets.cluster_name
consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
consul_release = matrix.backend == "consul" ? {
edition = var.backend_edition
version = matrix.consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
}
}
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
@ -128,6 +214,7 @@ scenario "agent" {
depends_on = [
step.build_vault,
step.create_vault_cluster,
step.wait_for_leader,
]
providers = {
@ -135,6 +222,7 @@ scenario "agent" {
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
vault_agent_template_destination = "/tmp/agent_output.txt"
@ -147,6 +235,7 @@ scenario "agent" {
depends_on = [
step.create_vault_cluster,
step.start_vault_agent,
step.wait_for_leader,
]
providers = {
@ -160,7 +249,147 @@ scenario "agent" {
}
}
output "awkms_unseal_key_arn" {
step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_write_test_data" {
module = module.vault_verify_write_data
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_read_test_data" {
module = module.vault_verify_read_data
depends_on = [
step.verify_write_test_data,
step.verify_replication
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
vault_install_dir = local.vault_install_dir
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
}
}
output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
}
output "awskms_unseal_key_arn" {
description = "The Vault cluster KMS key arn"
value = step.create_vpc.kms_key_arn
}
@ -214,9 +443,4 @@ scenario "agent" {
description = "The Vault cluster unseal keys hex"
value = step.create_vault_cluster.unseal_keys_hex
}
output "vault_audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
}
}

View File

@ -7,7 +7,10 @@ scenario "autopilot" {
artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
distro = ["ubuntu", "rhel"]
edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
// NOTE: when backporting, make sure that our initial versions are less than that
// release branch's version.
initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles
@ -114,12 +117,15 @@ scenario "autopilot" {
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_name = step.create_vault_cluster_targets.cluster_name
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_license.license : null
license = matrix.edition != "ce" ? step.read_license.license : null
packages = concat(global.packages, global.distro_packages[matrix.distro])
release = var.vault_autopilot_initial_release
storage_backend = "raft"
release = {
edition = matrix.edition
version = matrix.initial_version
}
storage_backend = "raft"
storage_backend_addl_config = {
autopilot_upgrade_version = var.vault_autopilot_initial_release.version
autopilot_upgrade_version = matrix.initial_version
}
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
@ -141,7 +147,7 @@ scenario "autopilot" {
}
variables {
vault_instances = step.create_vault_cluster.target_hosts
vault_hosts = step.create_vault_cluster.target_hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
@ -213,7 +219,7 @@ scenario "autopilot" {
force_unseal = matrix.seal == "shamir"
initialize_cluster = false
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_license.license : null
license = matrix.edition != "ce" ? step.read_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
@ -285,8 +291,8 @@ scenario "autopilot" {
}
}
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
step "wait_for_leader_in_upgrade_targets" {
module = module.vault_wait_for_leader
depends_on = [
step.create_vault_cluster,
step.create_vault_cluster_upgrade_targets,
@ -299,11 +305,30 @@ scenario "autopilot" {
}
variables {
vault_instances = step.create_vault_cluster.target_hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
node_public_ip = step.get_vault_cluster_ips.leader_public_ip
added_vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.create_vault_cluster,
step.create_vault_cluster_upgrade_targets,
step.get_vault_cluster_ips,
step.upgrade_vault_cluster_with_autopilot,
step.wait_for_leader_in_upgrade_targets,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
@ -388,9 +413,73 @@ scenario "autopilot" {
}
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.upgrade_vault_cluster_with_autopilot,
step.verify_raft_auto_join_voter,
step.remove_old_nodes
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.upgrade_vault_cluster_with_autopilot,
step.verify_raft_auto_join_voter,
step.remove_old_nodes
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.upgrade_vault_cluster_with_autopilot,
step.verify_raft_auto_join_voter,
step.remove_old_nodes
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}
step "verify_undo_logs_status" {
skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0")
module = module.vault_verify_undo_logs
skip_step = true
# NOTE: temporarily disable undo logs checking until it is fixed. See VAULT-20259
# skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0")
module = module.vault_verify_undo_logs
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.remove_old_nodes,

View File

@ -5,8 +5,24 @@ scenario "proxy" {
matrix {
arch = ["amd64", "arm64"]
artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
backend = ["consul", "raft"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}
# HSM and FIPS 140-2 are only supported on amd64
exclude {
arch = ["arm64"]
edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
}
}
terraform_cli = terraform_cli.default
@ -18,11 +34,13 @@ scenario "proxy" {
]
locals {
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
enos_provider = {
rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu
}
manage_service = matrix.artifact_type == "bundle"
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
}
step "get_local_metadata" {
@ -35,7 +53,7 @@ scenario "proxy" {
variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
bundle_path = local.bundle_path
artifact_path = local.artifact_path
goarch = matrix.arch
goos = "linux"
artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null
@ -44,7 +62,7 @@ scenario "proxy" {
artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null
arch = matrix.artifact_source == "artifactory" ? matrix.arch : null
product_version = var.vault_product_version
artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null
artifact_type = matrix.artifact_type
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
revision = var.vault_revision
@ -63,8 +81,19 @@ scenario "proxy" {
}
}
step "read_license" {
skip_step = matrix.edition == "oss"
// This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
file_name = global.backend_license_path
}
}
step "read_vault_license" {
skip_step = matrix.edition == "ce"
module = module.read_license
variables {
@ -89,9 +118,49 @@ scenario "proxy" {
}
}
step "create_vault_cluster_backend_targets" {
module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
depends_on = [step.create_vpc]
providers = {
enos = provider.enos.ubuntu
}
variables {
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_tag_key = global.backend_tag_key
common_tags = global.tags
vpc_id = step.create_vpc.vpc_id
}
}
step "create_backend_cluster" {
module = "backend_${matrix.backend}"
depends_on = [
step.create_vault_cluster_backend_targets
]
providers = {
enos = provider.enos.ubuntu
}
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = matrix.consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}
step "create_vault_cluster" {
module = module.vault_cluster
depends_on = [
step.create_backend_cluster,
step.build_vault,
step.create_vault_cluster_targets
]
@ -101,17 +170,42 @@ scenario "proxy" {
}
variables {
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_name = step.create_vault_cluster_targets.cluster_name
enable_audit_devices = var.vault_enable_audit_devices
install_dir = var.vault_install_dir
license = matrix.edition != "oss" ? step.read_license.license : null
local_artifact_path = local.bundle_path
packages = concat(global.packages, global.distro_packages[matrix.distro])
storage_backend = "raft"
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = "shamir"
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
backend_cluster_tag_key = global.backend_tag_key
cluster_name = step.create_vault_cluster_targets.cluster_name
consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
consul_release = matrix.backend == "consul" ? {
edition = var.backend_edition
version = matrix.consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
}
}
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
@ -127,12 +221,147 @@ scenario "proxy" {
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
output "awkms_unseal_key_arn" {
step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_write_test_data" {
module = module.vault_verify_write_data
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_read_test_data" {
module = module.vault_verify_read_data
depends_on = [
step.verify_write_test_data,
step.verify_replication
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
vault_install_dir = local.vault_install_dir
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
}
}
output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
}
output "awskms_unseal_key_arn" {
description = "The Vault cluster KMS key arn"
value = step.create_vpc.kms_key_arn
}

View File

@ -9,7 +9,7 @@ scenario "replication" {
arch = ["amd64", "arm64"]
artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
consul_version = ["1.14.2", "1.13.4", "1.12.7"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"]
edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
primary_backend = ["raft", "consul"]
@ -48,6 +48,11 @@ scenario "replication" {
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
}
step "get_local_metadata" {
skip_step = matrix.artifact_source != "local"
module = module.get_local_metadata
}
step "build_vault" {
module = "build_${matrix.artifact_source}"
@ -84,7 +89,7 @@ scenario "replication" {
// This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent".
step "read_backend_license" {
skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "oss"
skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "ce"
module = module.read_license
variables {
@ -241,7 +246,7 @@ scenario "replication" {
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
@ -298,7 +303,7 @@ scenario "replication" {
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
@ -340,6 +345,42 @@ scenario "replication" {
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [
step.create_primary_cluster
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_primary_cluster_targets.hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_primary_cluster.root_token
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [
step.create_primary_cluster
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_primary_cluster_targets.hosts
}
}
step "get_primary_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.verify_that_vault_primary_cluster_is_unsealed]
@ -349,12 +390,21 @@ scenario "replication" {
}
variables {
vault_instances = step.create_primary_cluster_targets.hosts
vault_hosts = step.create_primary_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
}
}
step "get_primary_cluster_replication_data" {
module = module.replication_data
depends_on = [step.get_primary_cluster_ips]
variables {
follower_hosts = step.get_primary_cluster_ips.follower_hosts
}
}
step "get_secondary_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed]
@ -364,7 +414,7 @@ scenario "replication" {
}
variables {
vault_instances = step.create_secondary_cluster_targets.hosts
vault_hosts = step.create_secondary_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_secondary_cluster.root_token
}
@ -542,7 +592,7 @@ scenario "replication" {
force_unseal = matrix.primary_seal == "shamir"
initialize_cluster = false
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
@ -555,7 +605,7 @@ scenario "replication" {
}
}
step "verify_addtional_primary_nodes_are_unsealed" {
step "verify_additional_primary_nodes_are_unsealed" {
module = module.vault_verify_unsealed
depends_on = [step.add_additional_nodes_to_primary_cluster]
@ -575,7 +625,7 @@ scenario "replication" {
depends_on = [
step.add_additional_nodes_to_primary_cluster,
step.create_primary_cluster,
step.verify_addtional_primary_nodes_are_unsealed
step.verify_additional_primary_nodes_are_unsealed
]
providers = {
@ -592,8 +642,8 @@ scenario "replication" {
step "remove_primary_follower_1" {
module = module.shutdown_node
depends_on = [
step.get_primary_cluster_ips,
step.verify_addtional_primary_nodes_are_unsealed
step.get_primary_cluster_replication_data,
step.verify_additional_primary_nodes_are_unsealed
]
providers = {
@ -601,7 +651,7 @@ scenario "replication" {
}
variables {
node_public_ip = step.get_primary_cluster_ips.follower_public_ip_1
node_public_ip = step.get_primary_cluster_replication_data.follower_public_ip_1
}
}
@ -621,12 +671,31 @@ scenario "replication" {
}
}
step "get_updated_primary_cluster_ips" {
module = module.vault_get_cluster_ips
// After we've removed two nodes from the cluster we need to get an updated set of vault hosts
// to work with.
step "get_remaining_hosts_replication_data" {
module = module.replication_data
depends_on = [
step.add_additional_nodes_to_primary_cluster,
step.remove_primary_follower_1,
step.remove_primary_leader
step.get_primary_cluster_ips,
step.remove_primary_leader,
]
variables {
added_hosts = step.create_primary_cluster_additional_targets.hosts
added_hosts_count = var.vault_instance_count
initial_hosts = step.create_primary_cluster_targets.hosts
initial_hosts_count = var.vault_instance_count
removed_follower_host = step.get_primary_cluster_replication_data.follower_host_1
removed_primary_host = step.get_primary_cluster_ips.leader_host
}
}
// Wait for the remaining hosts in our cluster to elect a new leader.
step "wait_for_leader_in_remaining_hosts" {
module = module.vault_wait_for_leader
depends_on = [
step.remove_primary_leader,
step.get_remaining_hosts_replication_data,
]
providers = {
@ -634,17 +703,41 @@ scenario "replication" {
}
variables {
vault_instances = step.create_primary_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
added_vault_instances = step.create_primary_cluster_additional_targets.hosts
vault_root_token = step.create_primary_cluster.root_token
node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2
timeout = 120 # seconds
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts
}
}
// Get our new leader and follower IP addresses.
step "get_updated_primary_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.get_remaining_hosts_replication_data,
step.wait_for_leader_in_remaining_hosts,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts
vault_install_dir = local.vault_install_dir
vault_instance_count = step.get_remaining_hosts_replication_data.remaining_hosts_count
vault_root_token = step.create_primary_cluster.root_token
}
}
// Make sure the cluster has the correct performance replication state after the new leader election.
step "verify_updated_performance_replication" {
module = module.vault_verify_performance_replication
depends_on = [step.get_updated_primary_cluster_ips]
module = module.vault_verify_performance_replication
depends_on = [
step.get_remaining_hosts_replication_data,
step.wait_for_leader_in_remaining_hosts,
step.get_updated_primary_cluster_ips,
]
providers = {
enos = local.enos_provider[matrix.distro]
@ -709,6 +802,11 @@ scenario "replication" {
value = step.create_secondary_cluster_targets.hosts
}
output "remaining_hosts" {
description = "The Vault cluster primary hosts after removing the leader and follower"
value = step.get_remaining_hosts_replication_data.remaining_hosts
}
output "initial_primary_replication_status" {
description = "The Vault primary cluster performance replication status"
value = step.verify_performance_replication.primary_replication_status

View File

@ -4,12 +4,12 @@
scenario "smoke" {
matrix {
arch = ["amd64", "arm64"]
backend = ["consul", "raft"]
artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
consul_version = ["1.14.2", "1.13.4", "1.12.7"]
backend = ["consul", "raft"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles
@ -84,7 +84,7 @@ scenario "smoke" {
// This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "oss"
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
@ -93,7 +93,7 @@ scenario "smoke" {
}
step "read_vault_license" {
skip_step = matrix.edition == "oss"
skip_step = matrix.edition == "ce"
module = module.read_license
variables {
@ -182,7 +182,7 @@ scenario "smoke" {
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
@ -192,8 +192,9 @@ scenario "smoke" {
}
}
step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
@ -201,7 +202,23 @@ scenario "smoke" {
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
@ -228,7 +245,7 @@ scenario "smoke" {
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [step.create_vault_cluster]
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
@ -261,9 +278,12 @@ scenario "smoke" {
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [step.create_vault_cluster]
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
@ -277,8 +297,11 @@ scenario "smoke" {
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [step.create_vault_cluster]
module = module.vault_verify_replication
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
@ -309,16 +332,18 @@ scenario "smoke" {
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [step.create_vault_cluster]
module = module.vault_verify_ui
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}

View File

@ -3,7 +3,7 @@
scenario "ui" {
matrix {
edition = ["oss", "ent"]
edition = ["ce", "ent"]
backend = ["consul", "raft"]
}
@ -20,12 +20,12 @@ scenario "ui" {
backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic"))
backend_tag_key = "VaultStorage"
build_tags = {
"oss" = ["ui"]
"ce" = ["ui"]
"ent" = ["ui", "enterprise", "ent"]
}
bundle_path = abspath(var.vault_artifact_path)
distro = "ubuntu"
consul_version = "1.14.2"
consul_version = "1.16.1"
seal = "awskms"
tags = merge({
"Project Name" : var.project_name
@ -39,7 +39,7 @@ scenario "ui" {
vault_install_dir = var.vault_install_dir
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "oss") ? "!enterprise" : null
ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "ce") ? "!enterprise" : null
}
step "build_vault" {
@ -71,7 +71,7 @@ scenario "ui" {
// This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "oss"
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
@ -80,7 +80,7 @@ scenario "ui" {
}
step "read_vault_license" {
skip_step = matrix.edition == "oss"
skip_step = matrix.edition == "ce"
module = module.read_license
variables {
@ -168,7 +168,7 @@ scenario "ui" {
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.bundle_path
packages = global.distro_packages["ubuntu"]
storage_backend = matrix.backend
@ -177,8 +177,26 @@ scenario "ui" {
}
}
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
enos = provider.enos.ubuntu
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "test_ui" {
module = module.vault_test_ui
module = module.vault_test_ui
depends_on = [step.wait_for_leader]
variables {
vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip

View File

@ -4,12 +4,17 @@
scenario "upgrade" {
matrix {
arch = ["amd64", "arm64"]
backend = ["consul", "raft"]
artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
consul_version = ["1.14.2", "1.13.4", "1.12.7"]
backend = ["consul", "raft"]
consul_version = ["1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
// NOTE: when backporting the initial version make sure we don't include initial versions that
// are a higher minor version that our release candidate. Also, prior to 1.11.x the
// /v1/sys/seal-status API has known issues that could cause this scenario to fail when using
// those earlier versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11", "1.11.12", "1.12.11", "1.13.6", "1.14.2"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles
@ -23,6 +28,12 @@ scenario "upgrade" {
arch = ["arm64"]
edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
}
# FIPS 140-2 editions began at 1.10
exclude {
edition = ["ent.fips1402", "ent.hsm.fips1402"]
initial_version = ["1.8.12", "1.9.10"]
}
}
terraform_cli = terraform_cli.default
@ -85,7 +96,7 @@ scenario "upgrade" {
// This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "oss"
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
@ -94,7 +105,7 @@ scenario "upgrade" {
}
step "read_vault_license" {
skip_step = matrix.edition == "oss"
skip_step = matrix.edition == "ce"
module = module.read_license
variables {
@ -182,12 +193,15 @@ scenario "upgrade" {
} : null
enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null
license = matrix.edition != "ce" ? step.read_vault_license.license : null
packages = concat(global.packages, global.distro_packages[matrix.distro])
release = var.vault_upgrade_initial_release
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
release = {
edition = matrix.edition
version = matrix.initial_version
}
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
}
}
@ -200,7 +214,7 @@ scenario "upgrade" {
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
@ -210,7 +224,7 @@ scenario "upgrade" {
module = module.vault_verify_write_data
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
step.get_vault_cluster_ips,
]
providers = {
@ -232,6 +246,7 @@ scenario "upgrade" {
module = module.vault_upgrade
depends_on = [
step.create_vault_cluster,
step.verify_write_test_data,
]
providers = {
@ -249,11 +264,49 @@ scenario "upgrade" {
}
}
// Wait for our upgraded cluster to elect a leader
step "wait_for_leader_after_upgrade" {
module = module.vault_wait_for_leader
depends_on = [
step.create_vault_cluster,
step.upgrade_vault,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.create_vault_cluster,
step.upgrade_vault,
step.wait_for_leader_after_upgrade,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [
step.create_backend_cluster,
step.upgrade_vault,
step.get_updated_vault_cluster_ips,
]
providers = {
@ -271,30 +324,10 @@ scenario "upgrade" {
}
}
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.create_vault_cluster,
step.upgrade_vault
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [
step.create_vault_cluster,
step.get_updated_vault_cluster_ips,
step.upgrade_vault,
]
providers = {
@ -329,8 +362,7 @@ scenario "upgrade" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_backend_cluster,
step.upgrade_vault,
step.get_updated_vault_cluster_ips,
]
providers = {
@ -344,6 +376,38 @@ scenario "upgrade" {
}
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [
step.get_updated_vault_cluster_ips,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [
step.get_updated_vault_cluster_ips,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
}
}
output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path

View File

@ -11,7 +11,7 @@ terraform_cli "default" {
/*
provider_installation {
dev_overrides = {
"app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider")
"app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider/dist")
}
direct {}
}

View File

@ -48,7 +48,7 @@ variable "aws_ssh_private_key_path" {
variable "backend_edition" {
description = "The backend release edition if applicable"
type = string
default = "oss" // or "ent"
default = "ce" // or "ent"
}
variable "backend_instance_type" {
@ -122,14 +122,6 @@ variable "vault_artifact_type" {
default = "bundle"
}
variable "vault_autopilot_initial_release" {
description = "The Vault release to deploy before upgrading with autopilot"
default = {
edition = "ent"
version = "1.11.0"
}
}
variable "vault_artifact_path" {
description = "Path to CRT generated or local vault.zip bundle"
type = string
@ -161,7 +153,7 @@ variable "vault_instance_count" {
}
variable "vault_license_path" {
description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions"
description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions"
type = string
default = null
}
@ -193,7 +185,7 @@ variable "vault_revision" {
variable "vault_upgrade_initial_release" {
description = "The Vault release to deploy before upgrading"
default = {
edition = "oss"
edition = "ce"
// Vault 1.10.5 has a known issue with retry_join.
version = "1.10.4"
}

View File

@ -24,7 +24,7 @@
# aws_ssh_private_key_path = "./support/private_key.pem"
# backend_edition is the backend (consul) release edition if applicable to the scenario.
# backend_edition = "oss"
# backend_edition = "ce"
# backend_license_path is the license for the backend if applicable (Consul Enterprise)".
# backend_license_path = "./support/consul.hclic"
@ -75,14 +75,6 @@
# It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
# vault_artifact_type = "bundle"
# vault_autopilot_initial_release is the version of Vault to deploy before doing an autopilot upgrade
# to the test artifact.
# vault_autopilot_initial_release = {
# edition = "ent"
# version = "1.11.0"
# }
# }
# vault_build_date is the build date for Vault artifact. Some validations will require the binary build
# date to match"
# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example
@ -108,7 +100,7 @@
# vault_instance_count = 3
# vault_license_path is the path to a valid Vault enterprise edition license.
# This is only required for non-oss editions"
# This is only required for non-ce editions"
# vault_license_path = "./support/vault.hclic"
# vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants.
@ -122,16 +114,6 @@
# binary and cluster to report this version.
# vault_product_version = "1.15.0"
# vault_upgrade_initial_release is the Vault release to deploy before upgrading.
# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault
# binary and cluster to report this revision.
# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
# vault_upgrade_initial_release is the Vault release to deploy before doing an in-place upgrade.
# vault_upgrade_initial_release = {
# edition = "oss"
# // Vault 1.10.5 has a known issue with retry_join.
# version = "1.10.4"
# }
# }

View File

@ -3,7 +3,7 @@
scenario "k8s" {
matrix {
edition = ["oss", "ent"]
edition = ["ce", "ent"]
}
terraform_cli = terraform_cli.default
@ -17,7 +17,7 @@ scenario "k8s" {
locals {
image_path = abspath(var.vault_docker_image_archive)
image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "oss" ? "hashicorp/vault" : "hashicorp/vault-enterprise"
image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "ce" ? "hashicorp/vault" : "hashicorp/vault-enterprise"
image_tag = replace(var.vault_product_version, "+ent", "-ent")
// The additional '-0' is required in the constraint since without it, the semver function will
@ -27,7 +27,7 @@ scenario "k8s" {
}
step "read_license" {
skip_step = matrix.edition == "oss"
skip_step = matrix.edition == "ce"
module = module.read_license
variables {
@ -66,7 +66,7 @@ scenario "k8s" {
kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64
vault_edition = matrix.edition
vault_log_level = var.vault_log_level
ent_license = matrix.edition != "oss" ? step.read_license.license : null
ent_license = matrix.edition != "ce" ? step.read_license.license : null
}
depends_on = [step.load_docker_image, step.create_kind_cluster]
@ -101,7 +101,7 @@ scenario "k8s" {
step "verify_ui" {
module = module.k8s_verify_ui
skip_step = matrix.edition == "oss"
skip_step = matrix.edition == "ce"
variables {
vault_pods = step.deploy_vault.vault_pods

View File

@ -7,7 +7,7 @@ terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
version = ">= 0.4.0"
version = ">= 0.4.4"
}
}
}

View File

@ -63,7 +63,7 @@ variable "release" {
description = "Consul release version and edition to install from releases.hashicorp.com"
default = {
version = "1.15.3"
edition = "oss"
edition = "ce"
}
}

View File

@ -1,4 +1,4 @@
#!/bin/env bash
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

View File

@ -1,4 +1,4 @@
#!/bin/env bash
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

View File

@ -7,14 +7,6 @@ listener "tcp" {
storage "raft" {
path = "/vault/data"
autopilot {
cleanup_dead_servers = "true"
last_contact_threshold = "200ms"
last_contact_failure_threshold = "10m"
max_trailing_logs = 250000
min_quorum = 5
server_stabilization_time = "10s"
}
}
service_registration "kubernetes" {}

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
@ -9,14 +9,14 @@
set -e
fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
# Replication STATUS endpoint should have data.mode disabled for OSS release
if [ "$VAULT_EDITION" == "oss" ]; then
# Replication STATUS endpoint should have data.mode disabled for CE release
if [ "$VAULT_EDITION" == "ce" ]; then
if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then
fail "replication data mode is not disabled for OSS release!"
fail "replication data mode is not disabled for CE release!"
fi
else
if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then

View File

@ -6,8 +6,8 @@
set -e
fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then

View File

@ -12,7 +12,7 @@ terraform {
locals {
instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)])
expected_version = var.vault_edition == "oss" ? var.vault_product_version : "${var.vault_product_version}-ent"
expected_version = var.vault_edition == "ce" ? var.vault_product_version : "${var.vault_product_version}-ent"
}
resource "enos_remote_exec" "release_info" {
@ -38,13 +38,13 @@ resource "enos_local_exec" "smoke-verify-version" {
for_each = enos_remote_exec.release_info
environment = {
VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status)
ACTUAL_VERSION = jsondecode(each.value.stdout).version
BUILD_DATE = var.vault_build_date
CHECK_BUILD_DATE = var.check_build_date
EXPECTED_VERSION = var.vault_product_version,
VAULT_EDITION = var.vault_edition,
VAULT_REVISION = var.vault_product_revision,
CHECK_BUILD_DATE = var.check_build_date
BUILD_DATE = var.vault_build_date
VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status)
}
scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")]

View File

@ -8,38 +8,39 @@
set -e
fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then
expected_build_date=""
else
build_date="${BUILD_DATE}"
if [[ "${build_date}" == "" ]]; then
build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date)
cfg_build_date="${BUILD_DATE}"
if [[ "${cfg_build_date}" == "" ]]; then
cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date)
fi
expected_build_date=", built $build_date"
expected_build_date=", built $cfg_build_date"
fi
vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})"
case "${VAULT_EDITION}" in
oss) version_expected="${vault_expected_version}${expected_build_date}";;
ent) version_expected="${vault_expected_version}${expected_build_date}";;
ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";;
ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;;
ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;;
ce) version_expected="${vault_expected_version}${expected_build_date}";;
ent) version_expected="${vault_expected_version}${expected_build_date}";;
ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";;
ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;;
ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;;
*) fail "(${VAULT_EDITION}) does not match any known Vault editions"
esac
version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//')
if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then
echo "Version verification succeeded!"
echo "Version verification succeeded!"
else
echo "CHECK_BUILD_DATE: ${CHECK_BUILD_DATE}"
echo "BUILD_DATE: ${BUILD_DATE}"
echo "build_date: ${build_date}"
fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}"
echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2
echo "Given build date: ${BUILD_DATE}" 1>&2
echo "Interpreted build date: ${cfg_build_date}" 1>&2
fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}"
fi

View File

@ -0,0 +1,104 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// An arithmetic module for calculating inputs and outputs for various replication steps.
// It has no resources: it only derives follower addresses and the set of hosts that
// remain in the primary cluster after the leader and one follower are removed.
// Get the first follower out of the hosts set
// NOTE(review): the map appears to be keyed by stringified indices ("0", "1", ...) as
// produced by the target-host modules; the numeric subscripts below rely on Terraform
// converting the number to a string key — confirm against the callers.
variable "follower_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
default = {}
}
// The full host object ({private_ip, public_ip}) of the first follower, or null if
// there is no follower at key "0".
output "follower_host_1" {
value = try(var.follower_hosts[0], null)
}
// Public IP of the first follower, or null when absent (try() swallows the lookup error).
output "follower_public_ip_1" {
value = try(var.follower_hosts[0].public_ip, null)
}
// Private IP of the first follower, or null when absent.
output "follower_private_ip_1" {
value = try(var.follower_hosts[0].private_ip, null)
}
// The full host object of the second follower, or null if there is no follower at key "1".
output "follower_host_2" {
value = try(var.follower_hosts[1], null)
}
// Public IP of the second follower, or null when absent.
output "follower_public_ip_2" {
value = try(var.follower_hosts[1].public_ip, null)
}
// Private IP of the second follower, or null when absent.
output "follower_private_ip_2" {
value = try(var.follower_hosts[1].private_ip, null)
}
// Calculate our remainder hosts after we've added and removed leader
// The hosts the cluster started with, before any hosts were added or removed.
variable "initial_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
default = {}
}
// Count of initial hosts. Passed separately rather than derived with length() so the
// count is known at plan time even when the host map is not.
variable "initial_hosts_count" {
type = number
default = 0
}
// Hosts that were added to the cluster during the scenario (e.g. autopilot upgrades).
variable "added_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
default = {}
}
// Count of added hosts; see initial_hosts_count for why this is a separate input.
variable "added_hosts_count" {
type = number
default = 0
}
// The former leader host that was removed from the cluster, or null if none was removed.
variable "removed_primary_host" {
type = object({
private_ip = string
public_ip = string
})
default = null
}
// The follower host that was removed from the cluster, or null if none was removed.
variable "removed_follower_host" {
type = object({
private_ip = string
public_ip = string
})
default = null
}
locals {
// Expected remaining size: everything we started with plus what we added, minus the
// removed leader and follower. max(..., 0) guards against going negative when the
// counts are smaller than 2.
remaining_hosts_count = max((var.initial_hosts_count + var.added_hosts_count - 2), 0)
// Fresh zero-based keys for the remaining hosts map built below.
indices = [for idx in range(local.remaining_hosts_count) : idx]
// Initial hosts minus the removed leader/follower, compared by whole host object.
remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host])
// Union of added hosts and surviving initial hosts, as an (unordered) list.
remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial))
// Re-key the remaining hosts with contiguous indices so downstream modules can
// consume the same map(object) shape as the original host maps.
remaining_hosts = zipmap(local.indices, local.remaining_hosts_list)
}
// Number of initial hosts that survived removal (length of the computed set, which may
// differ from remaining_hosts_count if a removed host was not present in initial_hosts).
output "remaining_initial_count" {
value = length(local.remaining_initial)
}
// The surviving initial hosts (set of host objects).
output "remaining_initial_hosts" {
value = local.remaining_initial
}
// The expected count of all remaining hosts after add/remove arithmetic.
output "remaining_hosts_count" {
value = local.remaining_hosts_count
}
// All remaining hosts, re-keyed from zero, in the standard map(object) host shape.
output "remaining_hosts" {
value = local.remaining_hosts
}

View File

@ -55,12 +55,14 @@ locals {
}
resource "enos_remote_exec" "set_up_approle_auth_and_agent" {
content = templatefile("${path.module}/templates/set-up-approle-and-agent.sh", {
vault_install_dir = var.vault_install_dir
vault_token = var.vault_root_token
vault_agent_template_destination = var.vault_agent_template_destination
vault_agent_template_contents = var.vault_agent_template_contents
})
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_TOKEN = var.vault_root_token,
VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination,
VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents,
}
scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")]
transport = {
ssh = {

View File

@ -5,7 +5,7 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 1>&2
@ -15,14 +15,14 @@ fail() {
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}'
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
$binpath auth disable approle || true
approle_create_status=$($binpath auth enable approle)
$binpath auth enable approle
approle_status=$($binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000)
$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000
ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id')
@ -36,8 +36,8 @@ if [[ "$SECRETID" == '' ]]; then
fail "expected SECRETID to be nonempty, but it is empty"
fi
echo $ROLEID > /tmp/role-id
echo $SECRETID > /tmp/secret-id
echo "$ROLEID" > /tmp/role-id
echo "$SECRETID" > /tmp/secret-id
cat > /tmp/vault-agent.hcl <<- EOM
pid_file = "/tmp/pidfile"
@ -51,18 +51,18 @@ vault {
}
cache {
enforce_consistency = "always"
use_auto_auth_token = true
enforce_consistency = "always"
use_auto_auth_token = true
}
listener "tcp" {
address = "127.0.0.1:8100"
tls_disable = true
address = "127.0.0.1:8100"
tls_disable = true
}
template {
destination = "${vault_agent_template_destination}"
contents = "${vault_agent_template_contents}"
destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}"
contents = "${VAULT_AGENT_TEMPLATE_CONTENTS}"
exec {
command = "pkill -F /tmp/pidfile"
}
@ -72,7 +72,7 @@ auto_auth {
method {
type = "approle"
config = {
role_id_file_path = "/tmp/role-id"
role_id_file_path = "/tmp/role-id"
secret_id_file_path = "/tmp/secret-id"
}
}
@ -89,7 +89,7 @@ EOM
pkill -F /tmp/pidfile || true
# If the template file already exists, remove it
rm ${vault_agent_template_destination} || true
rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true
# Run agent (it will kill itself when it finishes rendering the template)
$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1

View File

@ -21,14 +21,14 @@ locals {
// file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle)
artifact_package_release_names = {
ubuntu = {
"oss" = "vault_"
"ce" = "vault_"
"ent" = "vault-enterprise_",
"ent.fips1402" = "vault-enterprise-fips1402_",
"ent.hsm" = "vault-enterprise-hsm_",
"ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_",
},
rhel = {
"oss" = "vault-"
"ce" = "vault-"
"ent" = "vault-enterprise-",
"ent.fips1402" = "vault-enterprise-fips1402-",
"ent.hsm" = "vault-enterprise-hsm-",
@ -38,7 +38,7 @@ locals {
// edition --> artifact name edition
artifact_name_edition = {
"oss" = ""
"ce" = ""
"ent" = ""
"ent.hsm" = ".hsm"
"ent.fips1402" = ".fips1402"

View File

@ -16,10 +16,10 @@ data "enos_artifactory_item" "vault" {
name = local.artifact_name
host = var.artifactory_host
repo = var.artifactory_repo
path = var.edition == "oss" ? "vault/*" : "vault-enterprise/*"
path = var.edition == "ce" ? "vault/*" : "vault-enterprise/*"
properties = tomap({
"commit" = var.revision
"product-name" = var.edition == "oss" ? "vault" : "vault-enterprise"
"product-name" = var.edition == "ce" ? "vault" : "vault-enterprise"
"product-version" = local.artifact_version
})
}

View File

@ -109,9 +109,11 @@ resource "enos_remote_exec" "install_packages" {
if length(var.packages) > 0
}
content = templatefile("${path.module}/templates/install-packages.sh", {
packages = join(" ", var.packages)
})
environment = {
PACKAGES = join(" ", var.packages)
}
scripts = [abspath("${path.module}/scripts/install-packages.sh")]
transport = {
ssh = {
@ -271,59 +273,6 @@ resource "enos_vault_unseal" "leader" {
}
}
# We need to ensure that the directory used for audit logs is present and accessible to the vault
# user on all nodes, since logging will only happen on the leader.
resource "enos_remote_exec" "create_audit_log_dir" {
depends_on = [
enos_bundle_install.vault,
enos_vault_unseal.leader,
]
for_each = toset([
for idx, host in toset(local.instances) : idx
if var.enable_audit_devices
])
environment = {
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")]
transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
}
}
}
resource "enos_remote_exec" "enable_audit_devices" {
depends_on = [
enos_remote_exec.create_audit_log_dir,
enos_vault_unseal.leader,
]
for_each = toset([
for idx in local.leader : idx
if local.enable_audit_devices
])
environment = {
VAULT_TOKEN = enos_vault_init.leader[each.key].root_token
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_BIN_PATH = local.bin_path
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")]
transport = {
ssh = {
host = var.target_hosts[each.key].public_ip
}
}
}
resource "enos_vault_unseal" "followers" {
depends_on = [
enos_vault_init.leader,
@ -387,11 +336,13 @@ resource "enos_remote_exec" "vault_write_license" {
enos_vault_unseal.maybe_force_unseal,
]
content = templatefile("${path.module}/templates/vault-write-license.sh", {
bin_path = local.bin_path,
root_token = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none")
license = coalesce(var.license, "none")
})
environment = {
BIN_PATH = local.bin_path,
LICENSE = coalesce(var.license, "none")
VAULT_TOKEN = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none")
}
scripts = [abspath("${path.module}/scripts/vault-write-license.sh")]
transport = {
ssh = {
@ -400,6 +351,61 @@ resource "enos_remote_exec" "vault_write_license" {
}
}
# We need to ensure that the directory used for audit logs is present and accessible to the vault
# user on all nodes, since logging will only happen on the leader.
resource "enos_remote_exec" "create_audit_log_dir" {
depends_on = [
enos_vault_start.leader,
enos_vault_start.followers,
enos_vault_unseal.leader,
enos_vault_unseal.followers,
enos_vault_unseal.maybe_force_unseal,
]
for_each = toset([
for idx, host in toset(local.instances) : idx
if var.enable_audit_devices
])
environment = {
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")]
transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
}
}
}
resource "enos_remote_exec" "enable_audit_devices" {
depends_on = [
enos_remote_exec.create_audit_log_dir,
]
for_each = toset([
for idx in local.leader : idx
if local.enable_audit_devices
])
environment = {
VAULT_TOKEN = enos_vault_init.leader[each.key].root_token
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_BIN_PATH = local.bin_path
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")]
transport = {
ssh = {
host = var.target_hosts[each.key].public_ip
}
}
}
resource "enos_local_exec" "wait_for_install_packages" {
depends_on = [
enos_remote_exec.install_packages,

View File

@ -5,9 +5,7 @@
set -ex -o pipefail
packages="${packages}"
if [ "$packages" == "" ]
if [ "$PACKAGES" == "" ]
then
echo "No dependencies to install."
exit 0
@ -25,14 +23,14 @@ function retry {
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
return "$exit"
exit "$exit"
fi
done
return 0
}
echo "Installing Dependencies: $packages"
echo "Installing Dependencies: $PACKAGES"
if [ -f /etc/debian_version ]; then
# Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we
# see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case
@ -41,8 +39,10 @@ if [ -f /etc/debian_version ]; then
cd /tmp
retry 5 sudo apt update
retry 5 sudo apt install -y $${packages[@]}
# shellcheck disable=2068
retry 5 sudo apt install -y ${PACKAGES[@]}
else
cd /tmp
retry 7 sudo yum -y install $${packages[@]}
# shellcheck disable=2068
retry 7 sudo yum -y install ${PACKAGES[@]}
fi

View File

@ -3,8 +3,7 @@
# SPDX-License-Identifier: BUSL-1.1
license='${license}'
if test $license = "none"; then
if test "$LICENSE" = "none"; then
exit 0
fi
@ -29,13 +28,13 @@ function retry {
}
export VAULT_ADDR=http://localhost:8200
export VAULT_TOKEN='${root_token}'
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# Temporary hack until we can make the unseal resource handle legacy license
# setting. If we're running 1.8 and above then we shouldn't try to set a license.
ver=$(${bin_path} version)
ver=$(${BIN_PATH} version)
if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' '{print $2}')" -ge 8 ]]; then
exit 0
fi
retry 5 ${bin_path} write /sys/license text="$license"
retry 5 "${BIN_PATH}" write /sys/license text="$LICENSE"

View File

@ -92,7 +92,7 @@ variable "consul_release" {
description = "Consul release version and edition to install from releases.hashicorp.com"
default = {
version = "1.15.1"
edition = "oss"
edition = "ce"
}
}

View File

@ -19,124 +19,97 @@ variable "vault_root_token" {
description = "The vault root token"
}
variable "node_public_ip" {
type = string
description = "The primary node public ip"
default = ""
variable "vault_instance_count" {
type = number
description = "The number of instances in the vault cluster"
}
variable "vault_instances" {
variable "vault_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were created"
}
variable "added_vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were added"
default = {}
description = "The vault cluster hosts. These are required to map private ip addresses to public addresses."
}
locals {
leftover_primary_instances = var.node_public_ip != "" ? {
for k, v in var.vault_instances : k => v if contains(values(v), trimspace(var.node_public_ip))
} : null
all_instances = var.node_public_ip != "" ? merge(var.added_vault_instances, local.leftover_primary_instances) : var.vault_instances
updated_instance_count = length(local.all_instances)
updated_instances = {
for idx in range(local.updated_instance_count) : idx => {
public_ip = values(local.all_instances)[idx].public_ip
private_ip = values(local.all_instances)[idx].private_ip
follower_hosts_list = [for idx in range(var.vault_instance_count - 1) : {
private_ip = local.follower_private_ips[idx]
public_ip = local.follower_public_ips[idx]
}
]
follower_hosts = {
for idx in range(var.vault_instance_count - 1) : idx => try(local.follower_hosts_list[idx], null)
}
node_ip = var.node_public_ip != "" ? var.node_public_ip : local.updated_instances[0].public_ip
instance_private_ips = [
for k, v in values(tomap(local.updated_instances)) :
tostring(v["private_ip"])
]
follower_public_ips = [
for k, v in values(tomap(local.updated_instances)) :
tostring(v["public_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout)
]
follower_private_ips = [
for k, v in values(tomap(local.updated_instances)) :
tostring(v["private_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout)
follower_private_ips = jsondecode(enos_remote_exec.get_follower_private_ips.stdout)
follower_public_ips = [for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if contains(
local.follower_private_ips, var.vault_hosts[idx].private_ip)
]
leader_host = {
private_ip = local.leader_private_ip
public_ip = local.leader_public_ip
}
leader_private_ip = trimspace(enos_remote_exec.get_leader_private_ip.stdout)
leader_public_ip = element([
for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if var.vault_hosts[idx].private_ip == local.leader_private_ip
], 0)
private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
}
resource "enos_remote_exec" "get_leader_private_ip" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.instance_private_ips)
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
VAULT_INSTALL_DIR = var.vault_install_dir
}
scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")]
transport = {
ssh = {
host = local.node_ip
host = var.vault_hosts[0].public_ip
}
}
}
output "leftover_primary_instances" {
value = local.leftover_primary_instances
resource "enos_remote_exec" "get_follower_private_ips" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
VAULT_LEADER_PRIVATE_IP = local.leader_private_ip
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
VAULT_INSTALL_DIR = var.vault_install_dir
}
scripts = [abspath("${path.module}/scripts/get-follower-private-ips.sh")]
transport = {
ssh = {
host = var.vault_hosts[0].public_ip
}
}
}
output "all_instances" {
value = local.all_instances
}
output "updated_instance_count" {
value = local.updated_instance_count
}
output "updated_instances" {
value = local.updated_instances
}
output "leader_private_ip" {
value = trimspace(enos_remote_exec.get_leader_private_ip.stdout)
}
output "leader_public_ip" {
value = element([
for k, v in values(tomap(local.all_instances)) :
tostring(v["public_ip"]) if v["private_ip"] == trimspace(enos_remote_exec.get_leader_private_ip.stdout)
], 0)
}
output "vault_instance_private_ips" {
value = jsonencode(local.instance_private_ips)
}
output "follower_public_ips" {
value = local.follower_public_ips
}
output "follower_public_ip_1" {
value = element(local.follower_public_ips, 0)
}
output "follower_public_ip_2" {
value = element(local.follower_public_ips, 1)
output "follower_hosts" {
value = local.follower_hosts
}
output "follower_private_ips" {
value = local.follower_private_ips
}
output "follower_private_ip_1" {
value = element(local.follower_private_ips, 0)
output "follower_public_ips" {
value = local.follower_public_ips
}
output "follower_private_ip_2" {
value = element(local.follower_private_ips, 1)
output "leader_host" {
value = local.leader_host
}
output "leader_private_ip" {
value = local.leader_private_ip
}
output "leader_public_ip" {
value = local.leader_public_ip
}

View File

@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Print a JSON array of the private IP addresses of the cluster's follower
# nodes to stdout, retrying with exponential backoff until the follower set
# is complete or we time out.
#
# Required environment variables:
#   VAULT_ADDR                 - address of the local vault listener
#   VAULT_INSTALL_DIR          - directory that contains the vault binary
#   VAULT_INSTANCE_PRIVATE_IPS - JSON array of all cluster node private IPs
#   VAULT_LEADER_PRIVATE_IP    - private IP of the current cluster leader
#   VAULT_TOKEN                - token used to query vault

set -e

# Print an error to stderr and exit non-zero.
function fail() {
echo "$1" 1>&2
exit 1
}

[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"

count=0
retries=5
while :; do
# Vault >= 1.10.x has the operator members. If we have that then we'll use it.
if $binpath operator -h 2>&1 | grep members &> /dev/null; then
# Intersect the non-active member api_address IPs with our expected private
# ips ($expected - ($expected - $followers)) so we only report known hosts.
if followers=$($binpath operator members -format json | jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then
# Make sure that we got all the followers: every expected node minus the leader.
if jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
echo "$followers"
exit 0
fi
fi
else
# We're using an old version of vault so we'll just return ips that don't match the leader.
# Get the private ip addresses of the followers.
if followers=$(jq --arg ip "$VAULT_LEADER_PRIVATE_IP" -c '. | map(select(.!=$ip))' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then
if [[ -n "$followers" ]]; then
echo "$followers"
exit 0
fi
fi
fi
# Exponential backoff between attempts.
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster followers"
fi
done

View File

@ -5,31 +5,42 @@
set -e
binpath=${VAULT_INSTALL_DIR}/vault
instance_ips=${VAULT_INSTANCE_PRIVATE_IPS}
function fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"
count=0
retries=5
while :; do
# Find the leader private IP address
leader_private_ip=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")')
match_ip=$(echo "$instance_ips" |jq -r --argjson ip "$leader_private_ip" 'map(select(. == $ip))')
if [[ "$leader_private_ip" != 'null' ]] && [[ "$match_ip" != '[]' ]]; then
echo "$leader_private_ip" | sed 's/\"//g'
exit 0
# Find the leader private IP address
if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
if [[ -n "$ip" ]]; then
echo "$ip"
exit 0
fi
fi
wait=$((5 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "leader IP address $leader_private_ip was not found in $instance_ips"
# Some older versions of vault don't support reading sys/leader. Try falling back to the cli status.
if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
if [[ -n "$ip" ]]; then
echo "$ip"
exit 0
fi
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster leader"
fi
done

View File

@ -52,12 +52,14 @@ locals {
}
resource "enos_remote_exec" "set_up_approle_auth_and_proxy" {
content = templatefile("${path.module}/templates/set-up-approle-and-proxy.sh", {
vault_install_dir = var.vault_install_dir
vault_token = var.vault_root_token
vault_proxy_pidfile = var.vault_proxy_pidfile
vault_proxy_address = local.vault_proxy_address
})
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_TOKEN = var.vault_root_token
VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile
VAULT_PROXY_ADDRESS = local.vault_proxy_address
}
scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")]
transport = {
ssh = {
@ -67,11 +69,13 @@ resource "enos_remote_exec" "set_up_approle_auth_and_proxy" {
}
resource "enos_remote_exec" "use_proxy" {
content = templatefile("${path.module}/templates/use-proxy.sh", {
vault_install_dir = var.vault_install_dir
vault_proxy_pidfile = var.vault_proxy_pidfile
vault_proxy_address = local.vault_proxy_address
})
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile
VAULT_PROXY_ADDRESS = local.vault_proxy_address
}
scripts = [abspath("${path.module}/scripts/use-proxy.sh")]
transport = {
ssh = {

View File

@ -5,7 +5,7 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 1>&2
@ -15,14 +15,14 @@ fail() {
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}'
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
$binpath auth disable approle || true
approle_create_status=$($binpath auth enable approle)
$binpath auth enable approle
approle_status=$($binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000)
$binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000
ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id')
@ -36,14 +36,14 @@ if [[ "$SECRETID" == '' ]]; then
fail "expected SECRETID to be nonempty, but it is empty"
fi
echo $ROLEID > /tmp/role-id
echo $SECRETID > /tmp/secret-id
echo "$ROLEID" > /tmp/role-id
echo "$SECRETID" > /tmp/secret-id
# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl
# The Proxy references the fixed Vault server address of http://127.0.0.1:8200
# The Proxy itself listens at the address http://127.0.0.1:8100
cat > /tmp/vault-proxy.hcl <<- EOM
pid_file = "${vault_proxy_pidfile}"
pid_file = "${VAULT_PROXY_PIDFILE}"
vault {
address = "http://127.0.0.1:8200"
@ -59,7 +59,7 @@ api_proxy {
}
listener "tcp" {
address = "${vault_proxy_address}"
address = "${VAULT_PROXY_ADDRESS}"
tls_disable = true
}
@ -81,7 +81,7 @@ auto_auth {
EOM
# If Proxy is still running from a previous run, kill it
pkill -F "${vault_proxy_pidfile}" || true
pkill -F "${VAULT_PROXY_PIDFILE}" || true
# Run proxy in the background
$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 &

View File

@ -5,7 +5,7 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 1>&2
@ -16,7 +16,7 @@ test -x "$binpath" || fail "unable to locate vault binary at $binpath"
# Will cause the Vault CLI to communicate with the Vault Proxy, since it
# is listening at port 8100.
export VAULT_ADDR='http://${vault_proxy_address}'
export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}"
# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token
# is used.
@ -29,4 +29,4 @@ unset VAULT_TOKEN
$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login'
# Now that we're done, kill the proxy
pkill -F "${vault_proxy_pidfile}" || true
pkill -F "${VAULT_PROXY_PIDFILE}" || true

View File

@ -56,15 +56,13 @@ resource "enos_remote_exec" "vault_raft_remove_peer" {
for_each = local.instances
environment = {
VAULT_TOKEN = var.vault_root_token
VAULT_ADDR = "http://localhost:8200"
REMOVE_VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
VAULT_TOKEN = var.vault_root_token
VAULT_ADDR = "http://localhost:8200"
VAULT_INSTALL_DIR = var.vault_install_dir
}
content = templatefile("${path.module}/templates/raft-remove-peer.sh", {
remove_vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault"
})
scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")]
transport = {
ssh = {

View File

@ -5,15 +5,16 @@
set -e
binpath=${vault_install_dir}/vault
node_addr=${remove_vault_cluster_addr}
binpath=${VAULT_INSTALL_DIR}/vault
node_addr=${REMOVE_VAULT_CLUSTER_ADDR}
fail() {
echo "$1" 2>&1
return 1
}
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
retry() {
local retries=$1
shift
@ -35,8 +36,7 @@ retry() {
}
remove_peer() {
node_id=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id')
if [ "$?" != "0" ];then
if ! node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then
fail "failed to get node id of a non-voter node"
fi

View File

@ -39,7 +39,7 @@ resource "enos_remote_exec" "configure_pr_primary" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
vault_install_dir = var.vault_install_dir
VAULT_INSTALL_DIR = var.vault_install_dir
}
scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")]

View File

@ -5,7 +5,7 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 1>&2

View File

@ -5,23 +5,24 @@
binpath=${VAULT_INSTALL_DIR}/vault
IFS="," read -a keys <<< ${UNSEAL_KEYS}
IFS="," read -r -a keys <<< "${UNSEAL_KEYS}"
function fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
count=0
retries=5
while :; do
for key in ${keys[@]}; do
for key in "${keys[@]}"; do
# Check the Vault seal status
seal_status=$($binpath status -format json | jq '.sealed')
if [[ "$seal_status" == "true" ]]; then
echo "running unseal with $key count $count with retry $retry" >> /tmp/unseal_script.out
$binpath operator unseal $key > /dev/null 2>&1
echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out
"$binpath" operator unseal "$key" > /dev/null 2>&1
else
exit 0
fi

View File

@ -6,8 +6,8 @@
binpath=${VAULT_INSTALL_DIR}/vault
function fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
count=0

View File

@ -92,10 +92,12 @@ resource "enos_bundle_install" "upgrade_vault_binary" {
resource "enos_remote_exec" "get_leader_public_ip" {
depends_on = [enos_bundle_install.upgrade_vault_binary]
content = templatefile("${path.module}/templates/get-leader-public-ip.sh", {
vault_install_dir = var.vault_install_dir,
vault_instances = jsonencode(local.instances)
})
scripts = [abspath("${path.module}/scripts/get-leader-public-ip.sh")]
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_INSTANCES = jsonencode(local.instances)
}
transport = {
ssh = {
@ -107,10 +109,12 @@ resource "enos_remote_exec" "get_leader_public_ip" {
resource "enos_remote_exec" "get_follower_public_ips" {
depends_on = [enos_bundle_install.upgrade_vault_binary]
content = templatefile("${path.module}/templates/get-follower-public-ips.sh", {
vault_install_dir = var.vault_install_dir,
vault_instances = jsonencode(local.instances)
})
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_INSTANCES = jsonencode(local.instances)
}
scripts = [abspath("${path.module}/scripts/get-follower-public-ips.sh")]
transport = {
ssh = {
@ -123,7 +127,7 @@ resource "enos_remote_exec" "restart_followers" {
for_each = local.followers
depends_on = [enos_remote_exec.get_follower_public_ips]
content = file("${path.module}/templates/restart-vault.sh")
scripts = [abspath("${path.module}/scripts/restart-vault.sh")]
transport = {
ssh = {
@ -153,7 +157,7 @@ resource "enos_vault_unseal" "followers" {
resource "enos_remote_exec" "restart_leader" {
depends_on = [enos_vault_unseal.followers]
content = file("${path.module}/templates/restart-vault.sh")
scripts = [abspath("${path.module}/scripts/restart-vault.sh")]
transport = {
ssh = {

View File

@ -5,13 +5,13 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
export VAULT_ADDR="http://localhost:8200"
instances='${vault_instances}'
instances=${VAULT_INSTANCES}
# Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")')
leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
# Get the public ip addresses of the followers
follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances")

View File

@ -5,14 +5,15 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
export VAULT_ADDR="http://localhost:8200"
instances='${vault_instances}'
instances=${VAULT_INSTANCES}
# Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")')
leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
# Get the public ip address of the leader
leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances")
#shellcheck disable=SC2001
echo "$leader_public" | sed 's/\"//g'

View File

@ -42,11 +42,13 @@ locals {
}
resource "enos_remote_exec" "verify_vault_agent_output" {
content = templatefile("${path.module}/templates/verify-vault-agent-output.sh", {
vault_agent_template_destination = var.vault_agent_template_destination
vault_agent_expected_output = var.vault_agent_expected_output
vault_instances = jsonencode(local.vault_instances)
})
environment = {
VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination
VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output
VAULT_INSTANCES = jsonencode(local.vault_instances)
}
scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")]
transport = {
ssh = {

View File

@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Verify that the Vault Agent rendered the expected template output.
#
# Required environment variables:
#   VAULT_AGENT_TEMPLATE_DESTINATION - path of the file the Agent rendered
#   VAULT_AGENT_EXPECTED_OUTPUT      - exact contents we expect in that file

set -e

# Print an error to stderr and terminate. Using exit (not return) so the
# script fails even in contexts where set -e is suppressed.
fail() {
  echo "$1" 1>&2
  exit 1
}

# Validate required inputs up front, matching the other enos module scripts.
[[ -z "$VAULT_AGENT_TEMPLATE_DESTINATION" ]] && fail "VAULT_AGENT_TEMPLATE_DESTINATION env variable has not been set"
[[ -z "$VAULT_AGENT_EXPECTED_OUTPUT" ]] && fail "VAULT_AGENT_EXPECTED_OUTPUT env variable has not been set"

actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}")
if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then
  fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'"
fi

View File

@ -1,16 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
return 1
}
actual_output=$(cat ${vault_agent_template_destination})
if [[ "$actual_output" != "${vault_agent_expected_output}" ]]; then
fail "expected '${vault_agent_expected_output}' to be the Agent output, but got: '$actual_output'"
fi

View File

@ -54,12 +54,14 @@ locals {
resource "enos_remote_exec" "smoke-verify-autopilot" {
for_each = local.public_ips
content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", {
vault_install_dir = var.vault_install_dir
vault_token = var.vault_root_token
vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status,
vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version,
})
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_TOKEN = var.vault_root_token,
VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status,
VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version,
}
scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")]
transport = {
ssh = {

View File

@ -0,0 +1,43 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Poll the raft autopilot state until the upgrade status and target version
# match what the scenario expects, backing off exponentially between reads.
#
# Required environment variables:
#   VAULT_AUTOPILOT_UPGRADE_STATUS  - expected .data.upgrade_info.status
#   VAULT_AUTOPILOT_UPGRADE_VERSION - expected .data.upgrade_info.target_version
#   VAULT_TOKEN                     - token used to query vault
#   VAULT_INSTALL_DIR               - directory that contains the vault binary

# Print an error to stderr and exit non-zero.
fail() {
  echo "$1" 1>&2
  exit 1
}

export VAULT_ADDR="http://localhost:8200"

[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set"
[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

attempt=0
max_attempts=8
while :; do
  state=$($binpath read -format=json sys/storage/raft/autopilot/state)
  status=$(jq -r '.data.upgrade_info.status' <<< "$state")
  target_version=$(jq -r '.data.upgrade_info.target_version' <<< "$state")

  # Success: both the status and the target version match expectations.
  if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then
    exit 0
  fi

  backoff=$((2 ** attempt))
  attempt=$((attempt + 1))
  if [ "$attempt" -ge "$max_attempts" ]; then
    # Out of attempts: dump the full state for debugging before failing.
    echo "$state"
    echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status"
    echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version"
    fail "Autopilot did not get into the correct status"
  fi
  echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status"
  echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version"
  sleep "$backoff"
done

View File

@ -1,37 +0,0 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
token="${vault_token}"
autopilot_version="${vault_autopilot_upgrade_version}"
autopilot_status="${vault_autopilot_upgrade_status}"
export VAULT_ADDR="http://localhost:8200"
export VAULT_TOKEN="$token"
function fail() {
echo "$1" 1>&2
exit 1
}
count=0
retries=7
while :; do
state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state)
status="$(jq -r '.data.upgrade_info.status' <<< "$state")"
target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")"
if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then
exit 0
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
echo "$state"
sleep "$wait"
else
fail "Autopilot did not get into the correct status"
fi
done

View File

@ -9,66 +9,76 @@
set -e
binpath=${VAULT_INSTALL_DIR}/vault
function fail() {
echo "$1" 1>&2
exit 1
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$PRIMARY_LEADER_PRIV_IP" ]] && fail "PRIMARY_LEADER_PRIV_IP env variable has not been set"
[[ -z "$SECONDARY_LEADER_PRIV_IP" ]] && fail "SECONDARY_LEADER_PRIV_IP env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
retry() {
local retries=$1
shift
local count=0
until "$@"; do
exit=$?
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
return "$exit"
fail "$($binpath read -format=json sys/replication/performance/status)"
fi
done
}
test -x "$binpath" || exit 1
check_pr_status() {
pr_status=$($binpath read -format=json sys/replication/performance/status)
cluster_state=$(echo $pr_status | jq -r '.data.state')
connection_mode=$(echo $pr_status | jq -r '.data.mode')
cluster_state=$(echo "$pr_status" | jq -r '.data.state')
connection_mode=$(echo "$pr_status" | jq -r '.data.mode')
if [[ "$cluster_state" == 'idle' ]]; then
fail "replication cluster state is $cluster_state"
echo "replication cluster state is idle" 1>&2
return 1
fi
if [[ "$connection_mode" == "primary" ]]; then
connection_status=$(echo $pr_status | jq -r '.data.secondaries[0].connection_status')
connection_status=$(echo "$pr_status" | jq -r '.data.secondaries[0].connection_status')
if [[ "$connection_status" == 'disconnected' ]]; then
fail "replication connection status of secondaries is $connection_status"
echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2
return 1
fi
secondary_cluster_addr=$(echo $pr_status | jq -r '.data.secondaries[0].cluster_address')
if [[ "$secondary_cluster_addr" != "https://"${SECONDARY_LEADER_PRIV_IP}":8201" ]]; then
fail "Expected secondary cluster address $SECONDARY_LEADER_PRIV_IP got $secondary_cluster_addr "
secondary_cluster_addr=$(echo "$pr_status" | jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_PRIV_IP" ]]; then
echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_PRIV_IP, got: $secondary_cluster_addr" 1>&2
return 1
fi
else
connection_status=$(echo $pr_status | jq -r '.data.primaries[0].connection_status')
connection_status=$(echo "$pr_status" | jq -r '.data.primaries[0].connection_status')
if [[ "$connection_status" == 'disconnected' ]]; then
fail "replication connection status of secondaries is $connection_status"
echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2
return 1
fi
primary_cluster_addr=$(echo $pr_status | jq -r '.data.primaries[0].cluster_address')
if [[ "$primary_cluster_addr" != "https://"${PRIMARY_LEADER_PRIV_IP}":8201" ]]; then
fail "Expected primary cluster address $PRIMARY_LEADER_PRIV_IP got $primary_cluster_addr"
primary_cluster_addr=$(echo "$pr_status" | jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_PRIV_IP" ]]; then
echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_PRIV_IP, got: $primary_cluster_addr" 1>&2
return 1
fi
known_primary_cluster_addrs=$(echo $pr_status | jq -r '.data.known_primary_cluster_addrs')
# IFS="," read -a cluster_addr <<< ${known_primary_cluster_addrs}
if ! $(echo $known_primary_cluster_addrs |grep -q $PRIMARY_LEADER_PRIV_IP); then
fail "Primary leader address $PRIMARY_LEADER_PRIV_IP not found in Known primary cluster addresses $known_primary_cluster_addrs"
known_primary_cluster_addrs=$(echo "$pr_status" | jq -r '.data.known_primary_cluster_addrs')
if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_PRIV_IP"; then
echo "$PRIMARY_LEADER_PRIV_IP is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2
return 1
fi
fi
echo $pr_status
echo "$pr_status"
return 0
}
# Retry a few times because it can take some time for replication to sync

View File

@ -50,12 +50,14 @@ locals {
resource "enos_remote_exec" "verify_raft_auto_join_voter" {
for_each = local.instances
content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", {
vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault"
vault_token = var.vault_root_token
})
environment = {
VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault"
VAULT_TOKEN = var.vault_root_token
}
scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")]
transport = {
ssh = {

View File

@ -5,7 +5,7 @@
set -e
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 2>&1
@ -33,17 +33,17 @@ retry() {
}
check_voter_status() {
voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected')
voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected')
if [[ "$voter_status" != 'true' ]]; then
fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')"
fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')"
fi
}
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}'
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# Retry a few times because it can take some time for things to settle after
# all the nodes are unsealed

View File

@ -24,21 +24,16 @@ function retry {
return 0
}
function fail {
echo "$1" 1>&2
exit 1
}
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 1>&2
return 1
}
binpath="${VAULT_INSTALL_DIR}/vault"
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
# To keep the authentication method and module verification consistent between all
# Enos scenarios we authenticate using testuser created by vault_verify_write_data module
retry 5 $binpath login -method=userpass username=testuser password=passuser1
retry 5 $binpath kv get secret/test
retry 5 "$binpath" login -method=userpass username=testuser password=passuser1
retry 5 "$binpath" kv get secret/test

View File

@ -22,9 +22,11 @@ locals {
resource "enos_remote_exec" "smoke-verify-replication" {
for_each = local.instances
content = templatefile("${path.module}/templates/smoke-verify-replication.sh", {
vault_edition = var.vault_edition
})
environment = {
VAULT_EDITION = var.vault_edition
}
scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")]
transport = {
ssh = {

View File

@ -8,18 +8,16 @@
set -e
edition=${vault_edition}
function fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
# Replication status endpoint should have data.mode disabled for OSS release
# Replication status endpoint should have data.mode disabled for CE release
status=$(curl -s http://localhost:8200/v1/sys/replication/status)
if [ "$edition" == "oss" ]; then
if [ "$VAULT_EDITION" == "ce" ]; then
if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then
fail "replication data mode is not disabled for OSS release!"
fail "replication data mode is not disabled for CE release!"
fi
else
if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then

View File

@ -22,9 +22,11 @@ locals {
resource "enos_remote_exec" "smoke-verify-ui" {
for_each = local.instances
content = templatefile("${path.module}/templates/smoke-verify-ui.sh", {
vault_install_dir = var.vault_install_dir,
})
environment = {
VAULT_ADDR = var.vault_addr,
}
scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")]
transport = {
ssh = {

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Verify that the Vault UI is enabled: the cluster root address must
# redirect to /ui/ and the UI page must not serve the "not available" stub.
#
# Required environment variables:
#   VAULT_ADDR - the vault cluster address, e.g. http://localhost:8200

set -e

fail() {
  # Use %b so backslash escapes (\n) embedded in the message render as real
  # newlines on stderr (a plain `echo "$1"` would print them literally).
  printf '%b\n' "$1" 1>&2
  exit 1
}

[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"

# Follow redirects from the root address and capture the final effective URL.
url_effective=$(curl -w "%{url_effective}" -I -L -s -S "${VAULT_ADDR}" -o /dev/null)
expected="${VAULT_ADDR}/ui/"
if [ "${url_effective}" != "${expected}" ]; then
  fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}"
fi

# Binaries built without UI assets serve a stub page containing this message.
if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then
  fail "Vault UI is not available"
fi

View File

@ -1,17 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# (Removed template variant) Verify the Vault UI: port 8200 must redirect to
# /ui/ and the UI page must not serve the "not available" stub.
# NOTE: this file was rendered via Terraform's templatefile(), which is why
# the curl write-out format uses `%%` — templatefile unescapes it to `%`.
set -e
fail() {
echo "$1" 1>&2
exit 1
}
# redirect_url is the Location curl would have followed; compare it against
# the expected UI address.
if [ "$(curl -s -o /dev/null -w "%%{redirect_url}" http://localhost:8200/)" != "http://localhost:8200/ui/" ]; then
fail "Port 8200 not redirecting to UI"
fi
# Binaries built without UI assets serve a stub page containing this message.
if curl -s http://localhost:8200/ui/ | grep -q 'Vault UI is not available'; then
fail "Vault UI is not available"
fi

View File

@ -1,11 +1,10 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
variable "vault_install_dir" {
variable "vault_addr" {
type = string
description = "The directory where the Vault binary will be installed"
default = null
description = "The vault cluster address"
default = "http://localhost:8200"
}
variable "vault_instance_count" {

View File

@ -45,8 +45,9 @@ resource "enos_remote_exec" "smoke-verify-undo-logs" {
for_each = local.public_ips
environment = {
VAULT_TOKEN = var.vault_root_token
VAULT_ADDR = "http://localhost:8200"
VAULT_ADDR = "http://localhost:8200"
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_TOKEN = var.vault_root_token
}
scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")]

View File

@ -2,29 +2,35 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
function fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
count=0
retries=20
retries=5
while :; do
leader_address=$(curl -H "X-Vault-Request: true" -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/sys/leader" | jq '.leader_address' | sed 's/\"//g')
state=$(curl --header "X-Vault-Token: $VAULT_TOKEN" "$leader_address/v1/sys/metrics" | jq -r '.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")')
target_undo_logs_status="$(jq -r '.Value' <<< "$state")"
state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")')
target_undo_logs_status="$(jq -r '.Value' <<< "$state")"
if [ "$target_undo_logs_status" == "1" ]; then
exit 0
fi
if [ "$target_undo_logs_status" == "1" ]; then
exit 0
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
echo "$state"
sleep "$wait"
else
fail "Undo_logs did not get into the correct status"
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
echo "Waiting for vault.core.replication.write_undo_logs to have Value:1"
echo "$state"
sleep "$wait"
else
fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value:1"
fi
done

View File

@ -45,11 +45,12 @@ locals {
resource "enos_remote_exec" "verify_node_unsealed" {
for_each = local.instances
content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", {
vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault"
})
scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")]
environment = {
VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
VAULT_INSTALL_DIR = var.vault_install_dir
}
transport = {
ssh = {

View File

@ -4,8 +4,7 @@
set -e
# shellcheck disable=SC2154
binpath=${vault_install_dir}/vault
binpath=${VAULT_INSTALL_DIR}/vault
fail() {
echo "$1" 1>&2
@ -14,12 +13,12 @@ fail() {
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_ADDR=http://localhost:8200
count=0
retries=4
while :; do
health_status=$(curl http://127.0.0.1:8200/v1/sys/health |jq '.')
health_status=$(curl -s "${VAULT_CLUSTER_ADDR}/v1/sys/health" |jq '.')
unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected')
if [[ "$unseal_status" == 'true' ]]; then
echo "$health_status"
@ -31,7 +30,6 @@ while :; do
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
# shellcheck disable=SC2154
fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status"
fail "expected ${VAULT_CLUSTER_ADDR} to be unsealed, got unseal status: $unseal_status"
fi
done

View File

@ -69,14 +69,16 @@ locals {
resource "enos_remote_exec" "verify_all_nodes_have_updated_version" {
for_each = local.instances
content = templatefile("${path.module}/templates/verify-cluster-version.sh", {
vault_install_dir = var.vault_install_dir,
vault_build_date = var.vault_build_date,
vault_version = var.vault_product_version,
vault_edition = var.vault_edition,
vault_revision = var.vault_revision,
vault_token = var.vault_root_token,
})
environment = {
VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_BUILD_DATE = var.vault_build_date,
VAULT_VERSION = var.vault_product_version,
VAULT_EDITION = var.vault_edition,
VAULT_REVISION = var.vault_revision,
VAULT_TOKEN = var.vault_root_token,
}
scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")]
transport = {
ssh = {

View File

@ -7,26 +7,27 @@
# revision SHA, and edition metadata.
set -e
binpath=${vault_install_dir}/vault
edition=${vault_edition}
version=${vault_version}
sha=${vault_revision}
build_date=${vault_build_date}
binpath=${VAULT_INSTALL_DIR}/vault
edition=${VAULT_EDITION}
version=${VAULT_VERSION}
sha=${VAULT_REVISION}
build_date=${VAULT_BUILD_DATE}
# VAULT_TOKEN must also be set
fail() {
echo "$1" 1>&2
exit 1
echo "$1" 1>&2
exit 1
}
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}'
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
version_expected="Vault v$version ($sha), built $build_date"
case "$edition" in
*oss) ;;
*ce) ;;
*ent) ;;
*ent.hsm) version_expected="$version_expected (cgo)";;
*ent.fips1402) version_expected="$version_expected (cgo)" ;;

View File

@ -5,7 +5,7 @@
set -e
function retry {
retry() {
local retries=$1
shift
local count=0
@ -24,11 +24,15 @@ function retry {
return 0
}
function fail {
echo "$1" 1>&2
exit 1
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
@ -36,16 +40,16 @@ test -x "$binpath" || fail "unable to locate vault binary at $binpath"
retry 5 "$binpath" status > /dev/null 2>&1
# Create user policy
retry 5 $binpath policy write reguser -<<EOF
retry 5 "$binpath" policy write reguser -<<EOF
path "*" {
capabilities = ["read", "list"]
}
EOF
# Enable the userpass auth method
retry 5 $binpath auth enable userpass > /dev/null 2>&1
retry 5 "$binpath" auth enable userpass > /dev/null 2>&1
# Create new user and attach reguser policy
retry 5 $binpath write auth/userpass/users/testuser password="passuser1" policies="reguser"
retry 5 "$binpath" write auth/userpass/users/testuser password="passuser1" policies="reguser"
retry 5 $binpath secrets enable -path="secret" kv
retry 5 "$binpath" secrets enable -path="secret" kv

View File

@ -5,7 +5,7 @@
set -e
function retry {
retry() {
local retries=$1
shift
local count=0
@ -24,15 +24,19 @@ function retry {
return 0
}
function fail {
echo "$1" 1>&2
exit 1
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$TEST_KEY" ]] && fail "TEST_KEY env variable has not been set"
[[ -z "$TEST_VALUE" ]] && fail "TEST_VALUE env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
testkey=${TEST_KEY}
testvalue=${TEST_VALUE}
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
retry 5 $binpath kv put secret/test $testkey=$testvalue
retry 5 "$binpath" kv put secret/test "$TEST_KEY=$TEST_VALUE"

View File

@ -0,0 +1,68 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Module: wait_for_leader
# Polls the cluster (via scripts/wait-for-leader.sh executed on the first
# host) until one of the given vault hosts has been elected leader, or the
# timeout elapses.
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_root_token" {
type = string
description = "The vault root token"
}
variable "vault_instance_count" {
type = number
description = "The number of instances in the vault cluster"
}
variable "vault_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster hosts that can be expected as a leader"
}
variable "timeout" {
type = number
description = "The max number of seconds to wait before timing out"
default = 60
}
variable "retry_interval" {
type = number
description = "How many seconds to wait between each retry"
default = 2
}
locals {
# The set of private IPs any of which the script will accept as the leader.
private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
}
resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" {
environment = {
RETRY_INTERVAL = var.retry_interval
TIMEOUT_SECONDS = var.timeout
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
# JSON-encoded list consumed by the script's jq membership check.
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
VAULT_INSTALL_DIR = var.vault_install_dir
}
scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")]
transport = {
ssh = {
# Run from the first host; it queries the API, so any host works.
host = var.vault_hosts[0].public_ip
}
}
}

View File

@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Wait until the cluster has elected a leader whose private IP address is one
# of the expected instance IPs, or fail after a timeout.
#
# Required environment variables:
#   RETRY_INTERVAL             - seconds to sleep between attempts
#   TIMEOUT_SECONDS            - max seconds to wait before failing
#   VAULT_ADDR                 - the local vault API address
#   VAULT_INSTALL_DIR          - directory containing the vault binary
#   VAULT_INSTANCE_PRIVATE_IPS - JSON array of acceptable leader private IPs
#   VAULT_TOKEN                - the vault token to authenticate with

set -e

fail() {
  echo "$1" 1>&2
  exit 1
}

[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

# Print the leader's private IP and return 0 iff the cluster currently has a
# leader and that leader is one of $VAULT_INSTANCE_PRIVATE_IPS.
findLeaderInPrivateIPs() {
  # Extract the leader's private IP address from the leader address.
  local leader_private_ip
  if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') ; then
    # Some older versions of vault don't support reading sys/leader. Fall back
    # to the CLI status output. Bug fixes vs. prior revision: the condition
    # was not negated (we returned failure when the fallback *succeeded*) and
    # jq lacked -r (the IP kept its quotes and never matched below).
    if ! leader_private_ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
      return 1
    fi
  fi

  # Only accept the leader if it is a member of the expected host group.
  if isIn=$(jq -r --arg ip "$leader_private_ip" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then
    if [[ "$isIn" == "true" ]]; then
      echo "$leader_private_ip"
      return 0
    fi
  fi

  return 1
}

# Poll until a valid leader appears or we exceed the deadline.
begin_time=$(date +%s)
end_time=$((begin_time + TIMEOUT_SECONDS))
while [ "$(date +%s)" -lt "$end_time" ]; do
  if findLeaderInPrivateIPs; then
    exit 0
  fi

  sleep "$RETRY_INTERVAL"
done

fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader."