open-vault/enos/enos-variables.hcl

# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
variable "artifactory_username" {
type = string
description = "The username to use when testing an artifact from artifactory"
default = null
sensitive = true
}
variable "artifactory_token" {
type = string
description = "The token to use when authenticating to artifactory"
default = null
sensitive = true
}
variable "artifactory_host" {
type = string
description = "The artifactory host to search for vault artifacts"
default = "https://artifactory.hashicorp.engineering/artifactory"
}
variable "artifactory_repo" {
type = string
description = "The artifactory repo to search for vault artifacts"
default = "hashicorp-crt-stable-local*"
}
variable "aws_region" {
description = "The AWS region where we'll create infrastructure"
type = string
default = "us-east-1"
}
variable "aws_ssh_keypair_name" {
description = "The AWS keypair to use for SSH"
type = string
default = "enos-ci-ssh-key"
}
variable "aws_ssh_private_key_path" {
description = "The path to the AWS keypair private key"
type = string
default = "./support/private_key.pem"
}
variable "backend_edition" {
description = "The backend release edition if applicable"
type = string
default = "ce" // or "ent"
}
variable "backend_instance_type" {
description = "The instance type to use for the Vault backend. Must be arm64/nitro compatible"
type = string
default = "t4g.small"
}
variable "backend_license_path" {
description = "The license for the backend if applicable (Consul Enterprise)"
type = string
default = null
}
variable "backend_log_level" {
description = "The server log level for the backend. Supported values include 'trace', 'debug', 'info', 'warn', 'error'"
type = string
default = "trace"
}
variable "project_name" {
description = "The description of the project"
type = string
default = "vault-enos-integration"
}
variable "rhel_distro_version" {
description = "The version of RHEL to use"
type = string
default = "9.1" // or "8.8"
}
variable "tags" {
description = "Tags that will be applied to infrastructure resources that support tagging"
type = map(string)
default = null
}
variable "terraform_plugin_cache_dir" {
description = "The directory to cache Terraform modules and providers"
type = string
default = null
}
variable "tfc_api_token" {
description = "The Terraform Cloud QTI Organization API token. This is used to download the enos Terraform provider."
type = string
sensitive = true
}
variable "ubuntu_distro_version" {
description = "The version of ubuntu to use"
type = string
default = "22.04" // or "20.04", "18.04"
}
variable "ui_test_filter" {
type = string
description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"<filter>\"'"
default = null
}
variable "ui_run_tests" {
type = bool
description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run"
default = true
}
variable "vault_artifact_type" {
description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
default = "bundle"
}
variable "vault_artifact_path" {
description = "Path to CRT generated or local vault.zip bundle"
type = string
default = "/tmp/vault.zip"
}
variable "vault_build_date" {
description = "The build date for Vault artifact"
type = string
default = ""
}
variable "vault_enable_audit_devices" {
description = "If true every audit device will be enabled"
type = bool
default = true
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
default = "/opt/vault/bin"
}
variable "vault_instance_count" {
description = "How many instances to create for the Vault cluster"
type = number
default = 3
}
variable "vault_license_path" {
description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions"
type = string
default = null
}
variable "vault_local_build_tags" {
description = "The build tags to pass to the Go compiler for builder:local variants"
type = list(string)
default = null
}
variable "vault_log_level" {
description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err."
type = string
default = "trace"
}
variable "vault_product_version" {
description = "The version of Vault we are testing"
type = string
default = null
}
variable "vault_revision" {
description = "The git sha of Vault artifact we are testing"
type = string
default = null
}
variable "vault_upgrade_initial_release" {
description = "The Vault release to deploy before upgrading"
default = {
edition = "ce"
// Vault 1.10.5 has a known issue with retry_join.
version = "1.10.4"
}
}
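
# A minimal sketch of how these variables might be supplied when running a
# scenario locally. The file name and every value below are illustrative
# assumptions rather than anything defined in this file; adjust them to match
# your environment. Values can generally also be passed as ENOS_VAR_<name>
# environment variables instead of a vars file.
#
#   // enos-local.vars.hcl (hypothetical)
#   aws_region               = "us-west-2"
#   aws_ssh_keypair_name     = "my-ci-ssh-key"
#   aws_ssh_private_key_path = "./support/my_private_key.pem"
#   vault_artifact_path      = "./dist/vault.zip"
#   vault_license_path       = "./support/vault.hclic"
#   tfc_api_token            = "<redacted>"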