Add load testing framework (#8571)

s-christoff 2020-10-05 20:16:09 -05:00 committed by GitHub
parent f3d991fb8f
commit e89eb9fb42
26 changed files with 1785 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
# Consul AMI
## Quick start
To build the Consul AMI:
1. `git clone` this repo to your computer.
2. Install [Packer](https://www.packer.io/).
3. Configure your AWS credentials using one of the [options supported by the AWS
SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to
set the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_DEFAULT_REGION` environment variables.
4. Update the `variables` section of the `consul.json` Packer template to configure the AWS region, Consul version, and Datadog API key you would like to use. Reference this article to find your [Datadog API key](https://docs.datadoghq.com/account_management/api-app-keys/#api-keys).
5. For additional customization you can add [tags](https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=noncontainerizedenvironments) within the `scripts/datadog.yaml` file. One example of a tag could be `"consul_version" : "consulent_175"`. These tags are searchable through the Datadog dashboard. Another form of customization is changing the datacenter tag within `scripts/telemetry.json`, which defaults to `us-east-2`.
6. Run `packer build consul.json`.
When the build finishes, it will output the ID of the new AMI. Set this AMI ID as the `consul_ami_id` variable in the `vars.tfvars` file.
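For reference, a minimal non-interactive build might look like the sketch below; the Consul version and key values are placeholders, and the template reads `AWS_DEFAULT_REGION` and `DD_API_KEY` from the environment:

```
export AWS_DEFAULT_REGION="us-east-1"        # region the AMI is built in
export DD_API_KEY="<your-datadog-api-key>"   # placeholder

# Variables can also be overridden on the command line instead of
# editing the "variables" section of the template.
packer build -var "consul_version=1.8.4" consul.json
```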

View File

@@ -0,0 +1,67 @@
{
"min_packer_version": "1.5.4",
"variables": {
"aws_region": "{{env `AWS_DEFAULT_REGION`}}",
"consul_version": "1.5.1",
"download_url": "{{env `CONSUL_DOWNLOAD_URL`}}",
"dd_api_key": "{{env `DD_API_KEY`}}"
},
"builders": [{
"name": "ubuntu18-ami",
"ami_name": "consul-ubuntu-{{isotime | clean_resource_name}}-{{uuid}}",
"ami_description": "An Ubuntu 18.04 AMI that has Consul installed.",
"instance_type": "t2.micro",
"region": "{{user `aws_region`}}",
"associate_public_ip_address": true,
"type": "amazon-ebs",
"source_ami_filter": {
"filters": {
"virtualization-type": "hvm",
"architecture": "x86_64",
"name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*",
"block-device-mapping.volume-type": "gp2",
"root-device-type": "ebs"
},
"owners": ["099720109477"],
"most_recent": true
},
"ssh_username": "ubuntu"
}],
"provisioners": [{
"type": "shell",
"inline": ["mkdir -p /home/ubuntu/scripts"]
},{
"type": "file",
"source": "{{template_dir}}/scripts",
"destination": "/home/ubuntu",
"pause_before": "30s"
},{
"type": "shell",
"inline": [
"if test -n \"{{user `download_url`}}\"; then",
"/home/ubuntu/scripts/install-consul --download-url {{user `download_url`}};",
"else",
"/home/ubuntu/scripts/install-consul --version {{user `consul_version`}};",
"fi"
],
"pause_before": "30s"
},{
"type": "shell",
"inline": [
"/home/ubuntu/scripts/setup-systemd-resolved"
],
"pause_before": "30s"
},{
"type": "shell",
"inline": [
"DD_AGENT_MAJOR_VERSION=7 DD_API_KEY={{user `dd_api_key`}} bash -c \"$(curl -L https://raw.githubusercontent.com/DataDog/datadog-agent/master/cmd/agent/install_script.sh)\""
]
},{
"type": "shell",
"execute_command": "sudo sh -c '{{ .Vars }} {{ .Path }}'",
"environment_vars": [
"DD_API_KEY={{user `dd_api_key`}}"
],
"script": "{{template_dir}}/scripts/move-files.sh"
}]
}

View File

@@ -0,0 +1,8 @@
init_config:
instances:
- url: http://localhost:8500
self_leader_check: true
network_latency_checks: true
catalog_checks: true
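Once this file is installed under `/etc/datadog-agent/conf.d/consul.d/` (handled by `move-files.sh` below), the check can be exercised by hand; a sketch, assuming the Agent CLI is on the PATH:

```
# Run the Consul integration once and print the metrics and service checks it collects.
sudo -u dd-agent datadog-agent check consul
```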

View File

@@ -0,0 +1,37 @@
#########################
## Basic Configuration ##
#########################
## @param api_key - string - required
## The Datadog API key to associate your Agent's data with your organization.
## Create a new API key here: https://app.datadoghq.com/account/settings
#
tags:
api_key:
logs_enabled: true
dogstatsd_mapper_profiles:
- name: consul
prefix: "consul."
mappings:
- match: 'consul\.http\.([a-zA-Z]+)\.(.*)'
match_type: "regex"
name: "consul.http.request"
tags:
http_method: "$1"
path: "$2"
- match: 'consul\.raft\.replication\.appendEntries\.logs\.([0-9a-f-]+)'
match_type: "regex"
name: "consul.raft.replication.appendEntries.logs"
tags:
consul_node_id: "$1"
- match: 'consul\.raft\.replication\.appendEntries\.rpc\.([0-9a-f-]+)'
match_type: "regex"
name: "consul.raft.replication.appendEntries.rpc"
tags:
consul_node_id: "$1"
- match: 'consul\.raft\.replication\.heartbeat\.([0-9a-f-]+)'
match_type: "regex"
name: "consul.raft.replication.heartbeat"
tags:
consul_node_id: "$1"
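The mappings above can be spot-checked by hand-feeding a metric to DogStatsD over UDP; a sketch, assuming the Agent is listening on the default `127.0.0.1:8125`. The first mapping should surface this packet as `consul.http.request` tagged with `http_method:GET` and `path:v1.kv.foo`:

```
# Send a fake Consul HTTP timing metric to the local DogStatsD port.
echo -n "consul.http.GET.v1.kv.foo:42|ms" | nc -u -w1 127.0.0.1 8125
```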

View File

@@ -0,0 +1,312 @@
#!/bin/bash
# SOURCE: GRUNTWORKS
# This script can be used to install Consul and its dependencies. This script has been tested with the following
# operating systems:
#
# 1. Ubuntu 16.04
# 1. Ubuntu 18.04
# 1. Amazon Linux 2
set -e
readonly DEFAULT_INSTALL_PATH="/opt/consul"
readonly DEFAULT_CONSUL_USER="consul"
readonly DOWNLOAD_PACKAGE_PATH="/tmp/consul.zip"
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SYSTEM_BIN_DIR="/usr/local/bin"
readonly SCRIPT_NAME="$(basename "$0")"
function print_usage {
echo
echo "Usage: install-consul [OPTIONS]"
echo
echo "This script can be used to install Consul and its dependencies. This script has been tested with Ubuntu 16.04 and Amazon Linux 2."
echo
echo "Options:"
echo
echo -e " --version\t\tThe version of Consul to install. Optional if download-url is provided."
echo -e " --download-url\t\tUrl to exact Consul package to be installed. Optional if version is provided."
echo -e " --path\t\tThe path where Consul should be installed. Optional. Default: $DEFAULT_INSTALL_PATH."
echo -e " --user\t\tThe user who will own the Consul install directories. Optional. Default: $DEFAULT_CONSUL_USER."
echo -e " --ca-file-path\t\tPath to a PEM-encoded certificate authority used to encrypt and verify authenticity of client and server connections. Will be installed under <install-path>/tls/ca."
echo -e " --cert-file-path\t\tPath to a PEM-encoded certificate, which will be provided to clients or servers to verify the agent's authenticity. Will be installed under <install-path>/tls. Must be provided along with --key-file-path."
echo -e " --key-file-path\t\tPath to a PEM-encoded private key, used with the certificate to verify the agent's authenticity. Will be installed under <install-path>/tls. Must be provided along with --cert-file-path"
echo
echo "Example:"
echo
echo " install-consul --version 1.2.2"
}
function log {
local -r level="$1"
local -r message="$2"
local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S")
>&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}"
}
function log_info {
local -r message="$1"
log "INFO" "$message"
}
function log_warn {
local -r message="$1"
log "WARN" "$message"
}
function log_error {
local -r message="$1"
log "ERROR" "$message"
}
function assert_not_empty {
local -r arg_name="$1"
local -r arg_value="$2"
if [[ -z "$arg_value" ]]; then
log_error "The value for '$arg_name' cannot be empty"
print_usage
exit 1
fi
}
function assert_either_or {
local -r arg1_name="$1"
local -r arg1_value="$2"
local -r arg2_name="$3"
local -r arg2_value="$4"
if [[ -z "$arg1_value" && -z "$arg2_value" ]]; then
log_error "Either the value for '$arg1_name' or '$arg2_name' must be passed, both cannot be empty"
print_usage
exit 1
fi
}
# A retry function that attempts to run a command a number of times and returns the output
function retry {
local -r cmd="$1"
local -r description="$2"
for i in $(seq 1 5); do
log_info "$description"
# The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the
# beginning of this script, which exits the script immediately on an error status, while not losing the exit status code
output=$(eval "$cmd") && exit_status=0 || exit_status=$?
log_info "$output"
if [[ $exit_status -eq 0 ]]; then
echo "$output"
return
fi
log_warn "$description failed. Will sleep for 10 seconds and try again."
sleep 10
done;
log_error "$description failed after 5 attempts."
exit $exit_status
}
function has_yum {
[ -n "$(command -v yum)" ]
}
function has_apt_get {
[ -n "$(command -v apt-get)" ]
}
function install_dependencies {
log_info "Installing dependencies"
if $(has_apt_get); then
sudo apt-get update -y
sudo apt-get install -y awscli curl unzip jq
elif $(has_yum); then
sudo yum update -y
sudo yum install -y awscli curl unzip jq
else
log_error "Could not find apt-get or yum. Cannot install dependencies on this OS."
exit 1
fi
}
function user_exists {
local -r username="$1"
id "$username" >/dev/null 2>&1
}
function create_consul_user {
local -r username="$1"
if $(user_exists "$username"); then
echo "User $username already exists. Will not create again."
else
log_info "Creating user named $username"
sudo useradd "$username"
fi
}
function create_consul_install_paths {
local -r path="$1"
local -r username="$2"
log_info "Creating install dirs for Consul at $path"
sudo mkdir -p "$path"
sudo mkdir -p "$path/bin"
sudo mkdir -p "$path/config"
sudo mkdir -p "$path/data"
sudo mkdir -p "$path/tls/ca"
log_info "Changing ownership of $path to $username"
sudo chown -R "$username:$username" "$path"
}
function fetch_binary {
local -r version="$1"
local download_url="$2"
if [[ -z "$download_url" && -n "$version" ]]; then
download_url="https://releases.hashicorp.com/consul/${version}/consul_${version}_linux_amd64.zip"
fi
retry \
"curl -o '$DOWNLOAD_PACKAGE_PATH' '$download_url' --location --silent --fail --show-error" \
"Downloading Consul to $DOWNLOAD_PACKAGE_PATH"
}
function install_binary {
local -r install_path="$1"
local -r username="$2"
local -r bin_dir="$install_path/bin"
local -r consul_dest_path="$bin_dir/consul"
local -r run_consul_dest_path="$bin_dir/run-consul"
unzip -d /tmp "$DOWNLOAD_PACKAGE_PATH"
log_info "Moving Consul binary to $consul_dest_path"
sudo mv "/tmp/consul" "$consul_dest_path"
sudo chown "$username:$username" "$consul_dest_path"
sudo chmod a+x "$consul_dest_path"
local -r symlink_path="$SYSTEM_BIN_DIR/consul"
if [[ -f "$symlink_path" ]]; then
log_info "Symlink $symlink_path already exists. Will not add again."
else
log_info "Adding symlink to $consul_dest_path in $symlink_path"
sudo ln -s "$consul_dest_path" "$symlink_path"
fi
log_info "Copying Consul run script to $run_consul_dest_path"
sudo cp "$SCRIPT_DIR/run-consul" "$run_consul_dest_path"
sudo chown "$username:$username" "$run_consul_dest_path"
sudo chmod a+x "$run_consul_dest_path"
}
function install_tls_certificates {
local -r path="$1"
local -r user="$2"
local -r ca_file_path="$3"
local -r cert_file_path="$4"
local -r key_file_path="$5"
local -r consul_tls_certs_path="$path/tls"
local -r ca_certs_path="$consul_tls_certs_path/ca"
log_info "Moving TLS certs to $consul_tls_certs_path and $ca_certs_path"
sudo mkdir -p "$ca_certs_path"
sudo mv "$ca_file_path" "$ca_certs_path/"
sudo mv "$cert_file_path" "$consul_tls_certs_path/"
sudo mv "$key_file_path" "$consul_tls_certs_path/"
sudo chown -R "$user:$user" "$consul_tls_certs_path/"
sudo find "$consul_tls_certs_path/" -type f -exec chmod u=r,g=,o= {} \;
}
function install {
local version=""
local download_url=""
local path="$DEFAULT_INSTALL_PATH"
local user="$DEFAULT_CONSUL_USER"
local ca_file_path=""
local cert_file_path=""
local key_file_path=""
while [[ $# -gt 0 ]]; do
local key="$1"
case "$key" in
--version)
version="$2"
shift
;;
--download-url)
download_url="$2"
shift
;;
--path)
path="$2"
shift
;;
--user)
user="$2"
shift
;;
--ca-file-path)
assert_not_empty "$key" "$2"
ca_file_path="$2"
shift
;;
--cert-file-path)
assert_not_empty "$key" "$2"
cert_file_path="$2"
shift
;;
--key-file-path)
assert_not_empty "$key" "$2"
key_file_path="$2"
shift
;;
--help)
print_usage
exit
;;
*)
log_error "Unrecognized argument: $key"
print_usage
exit 1
;;
esac
shift
done
assert_either_or "--version" "$version" "--download-url" "$download_url"
assert_not_empty "--path" "$path"
assert_not_empty "--user" "$user"
log_info "Starting Consul install"
install_dependencies
create_consul_user "$user"
create_consul_install_paths "$path" "$user"
fetch_binary "$version" "$download_url"
install_binary "$path" "$user"
if [[ -n "$ca_file_path" || -n "$cert_file_path" || -n "$key_file_path" ]]; then
install_tls_certificates "$path" "$user" "$ca_file_path" "$cert_file_path" "$key_file_path"
fi
if command -v consul; then
log_info "Consul install complete!";
else
log_info "Could not find consul command. Aborting.";
exit 1;
fi
}
install "$@"

View File

@@ -0,0 +1,16 @@
#!/bin/bash -e
## Move Datadog files
mv /home/ubuntu/scripts/conf.yaml /etc/datadog-agent/conf.d/consul.d/
mv /home/ubuntu/scripts/datadog.yaml /etc/datadog-agent/
## Move the Consul config that hooks up to Datadog
mv /home/ubuntu/scripts/telemetry.json /opt/consul/config/
chown consul:consul /opt/consul/config/telemetry.json
## Let everyone own their stuff now
chown dd-agent:dd-agent /etc/datadog-agent/conf.d/consul.d/conf.yaml
chown dd-agent:dd-agent /etc/datadog-agent/datadog.yaml
## Put the key in the datadog.yaml
sed -i "s/api_key:.*/api_key: ${DD_API_KEY}/" /etc/datadog-agent/datadog.yaml

View File

@@ -0,0 +1,654 @@
#!/bin/bash
# This script is used to configure and run Consul on an AWS server.
# SOURCE: GRUNTWORKS
set -e
readonly AWS_ASG_TAG_KEY="aws:autoscaling:groupName"
readonly CONSUL_CONFIG_FILE="default.json"
readonly CONSUL_GOSSIP_ENCRYPTION_CONFIG_FILE="gossip-encryption.json"
readonly CONSUL_RPC_ENCRYPTION_CONFIG_FILE="rpc-encryption.json"
readonly SYSTEMD_CONFIG_PATH="/etc/systemd/system/consul.service"
readonly EC2_INSTANCE_METADATA_URL="http://169.254.169.254/latest/meta-data"
readonly EC2_INSTANCE_DYNAMIC_DATA_URL="http://169.254.169.254/latest/dynamic"
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_NAME="$(basename "$0")"
readonly MAX_RETRIES=30
readonly SLEEP_BETWEEN_RETRIES_SEC=10
readonly DEFAULT_AUTOPILOT_CLEANUP_DEAD_SERVERS="true"
readonly DEFAULT_AUTOPILOT_LAST_CONTACT_THRESHOLD="200ms"
readonly DEFAULT_AUTOPILOT_MAX_TRAILING_LOGS="250"
readonly DEFAULT_AUTOPILOT_SERVER_STABILIZATION_TIME="10s"
readonly DEFAULT_AUTOPILOT_REDUNDANCY_ZONE_TAG="az"
readonly DEFAULT_AUTOPILOT_DISABLE_UPGRADE_MIGRATION="false"
function print_usage {
echo
echo "Usage: run-consul [OPTIONS]"
echo
echo "This script is used to configure and run Consul on an AWS server."
echo
echo "Options:"
echo
echo -e " --server\t\tIf set, run in server mode. Optional. Exactly one of --server or --client must be set."
echo -e " --client\t\tIf set, run in client mode. Optional. Exactly one of --server or --client must be set."
echo -e " --cluster-tag-key\tAutomatically form a cluster with Instances that have this tag key and the tag value in --cluster-tag-value. Optional."
echo -e " --cluster-tag-value\tAutomatically form a cluster with Instances that have the tag key in --cluster-tag-key and this tag value. Optional."
echo -e " --datacenter\t\tThe name of the datacenter Consul is running in. Optional. If not specified, will default to AWS region name."
echo -e " --config-dir\t\tThe path to the Consul config folder. Optional. Default is the absolute path of '../config', relative to this script."
echo -e " --data-dir\t\tThe path to the Consul data folder. Optional. Default is the absolute path of '../data', relative to this script."
echo -e " --systemd-stdout\t\tThe StandardOutput option of the systemd unit. Optional. If not configured, uses systemd's default (journal)."
echo -e " --systemd-stderr\t\tThe StandardError option of the systemd unit. Optional. If not configured, uses systemd's default (inherit)."
echo -e " --bin-dir\t\tThe path to the folder with Consul binary. Optional. Default is the absolute path of the parent folder of this script."
echo -e " --user\t\tThe user to run Consul as. Optional. Default is to use the owner of --config-dir."
echo -e " --enable-gossip-encryption\t\tEnable encryption of gossip traffic between nodes. Optional. Must also specify --gossip-encryption-key."
echo -e " --gossip-encryption-key\t\tThe key to use for encrypting gossip traffic. Optional. Must be specified with --enable-gossip-encryption."
echo -e " --enable-rpc-encryption\t\tEnable encryption of RPC traffic between nodes. Optional. Must also specify --ca-file-path, --cert-file-path and --key-file-path."
echo -e " --ca-path\t\tPath to the directory of CA files used to verify outgoing connections. Optional. Must be specified with --enable-rpc-encryption."
echo -e " --cert-file-path\tPath to the certificate file used to verify incoming connections. Optional. Must be specified with --enable-rpc-encryption and --key-file-path."
echo -e " --key-file-path\tPath to the certificate key used to verify incoming connections. Optional. Must be specified with --enable-rpc-encryption and --cert-file-path."
echo -e " --environment\t\tA single environment variable in the key/value pair form 'KEY=\"val\"' to pass to Consul as environment variable when starting it up. Repeat this option for additional variables. Optional."
echo -e " --skip-consul-config\tIf this flag is set, don't generate a Consul configuration file. Optional. Default is false."
echo -e " --recursor\tThis flag provides address of upstream DNS server that is used to recursively resolve queries if they are not inside the service domain for Consul. Repeat this option for additional variables. Optional."
echo
echo "Options for Consul Autopilot:"
echo
echo -e " --autopilot-cleanup-dead-servers\tSet to true or false to control the automatic removal of dead server nodes periodically and whenever a new server is added to the cluster. Defaults to $DEFAULT_AUTOPILOT_CLEANUP_DEAD_SERVERS. Optional."
echo -e " --autopilot-last-contact-threshold\tControls the maximum amount of time a server can go without contact from the leader before being considered unhealthy. Must be a duration value such as 10s. Defaults to $DEFAULT_AUTOPILOT_LAST_CONTACT_THRESHOLD. Optional."
echo -e " --autopilot-max-trailing-logs\t\tControls the maximum number of log entries that a server can trail the leader by before being considered unhealthy. Defaults to $DEFAULT_AUTOPILOT_MAX_TRAILING_LOGS. Optional."
echo -e " --autopilot-server-stabilization-time\tControls the minimum amount of time a server must be stable in the 'healthy' state before being added to the cluster. Only takes effect if all servers are running Raft protocol version 3 or higher. Must be a duration value such as 30s. Defaults to $DEFAULT_AUTOPILOT_SERVER_STABILIZATION_TIME. Optional."
echo -e " --autopilot-redundancy-zone-tag\t\t(Enterprise-only) This controls the -node-meta key to use when Autopilot is separating servers into zones for redundancy. Only one server in each zone can be a voting member at one time. If left blank, this feature will be disabled. Defaults to $DEFAULT_AUTOPILOT_REDUNDANCY_ZONE_TAG. Optional."
echo -e " --autopilot-disable-upgrade-migration\t(Enterprise-only) If this flag is set, this will disable Autopilot's upgrade migration strategy in Consul Enterprise of waiting until enough newer-versioned servers have been added to the cluster before promoting any of them to voters. Defaults to $DEFAULT_AUTOPILOT_DISABLE_UPGRADE_MIGRATION. Optional."
echo -e " --autopilot-upgrade-version-tag\t\t(Enterprise-only) That tag to be used to override the version information used during a migration. Optional."
echo
echo
echo "Example:"
echo
echo " run-consul --server --config-dir /custom/path/to/consul/config"
}
function log {
local -r level="$1"
local -r message="$2"
local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S")
>&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}"
}
function log_info {
local -r message="$1"
log "INFO" "$message"
}
function log_warn {
local -r message="$1"
log "WARN" "$message"
}
function log_error {
local -r message="$1"
log "ERROR" "$message"
}
# Based on code from: http://stackoverflow.com/a/16623897/483528
function strip_prefix {
local -r str="$1"
local -r prefix="$2"
echo "${str#$prefix}"
}
function assert_not_empty {
local -r arg_name="$1"
local -r arg_value="$2"
if [[ -z "$arg_value" ]]; then
log_error "The value for '$arg_name' cannot be empty"
print_usage
exit 1
fi
}
function lookup_path_in_instance_metadata {
local -r path="$1"
curl --silent --show-error --location "$EC2_INSTANCE_METADATA_URL/$path/"
}
function lookup_path_in_instance_dynamic_data {
local -r path="$1"
curl --silent --show-error --location "$EC2_INSTANCE_DYNAMIC_DATA_URL/$path/"
}
function get_instance_ip_address {
lookup_path_in_instance_metadata "local-ipv4"
}
function get_instance_id {
lookup_path_in_instance_metadata "instance-id"
}
function get_instance_region {
lookup_path_in_instance_dynamic_data "instance-identity/document" | jq -r ".region"
}
function get_instance_tags {
local -r instance_id="$1"
local -r instance_region="$2"
local tags=""
local count_tags=""
log_info "Looking up tags for Instance $instance_id in $instance_region"
for (( i=1; i<="$MAX_RETRIES"; i++ )); do
tags=$(aws ec2 describe-tags \
--region "$instance_region" \
--filters "Name=resource-type,Values=instance" "Name=resource-id,Values=${instance_id}")
count_tags=$(echo $tags | jq -r ".Tags? | length")
if [[ "$count_tags" -gt 0 ]]; then
log_info "This Instance $instance_id in $instance_region has Tags."
echo "$tags"
return
else
log_warn "This Instance $instance_id in $instance_region does not have any Tags."
log_warn "Will sleep for $SLEEP_BETWEEN_RETRIES_SEC seconds and try again."
sleep "$SLEEP_BETWEEN_RETRIES_SEC"
fi
done
log_error "Could not find Instance Tags for $instance_id in $instance_region after $MAX_RETRIES retries."
exit 1
}
function get_asg_size {
local -r asg_name="$1"
local -r aws_region="$2"
local asg_json=""
log_info "Looking up the size of the Auto Scaling Group $asg_name in $aws_region"
asg_json=$(aws autoscaling describe-auto-scaling-groups --region "$aws_region" --auto-scaling-group-names "$asg_name")
echo "$asg_json" | jq -r '.AutoScalingGroups[0].DesiredCapacity'
}
function get_cluster_size {
local -r instance_tags="$1"
local -r aws_region="$2"
local asg_name=""
asg_name=$(get_tag_value "$instance_tags" "$AWS_ASG_TAG_KEY")
if [[ -z "$asg_name" ]]; then
log_warn "This EC2 Instance does not appear to be part of an Auto Scaling Group, so cannot determine cluster size. Setting cluster size to 1."
echo 1
else
get_asg_size "$asg_name" "$aws_region"
fi
}
# Get the value for a specific tag from the tags JSON returned by the AWS describe-tags:
# https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-tags.html
function get_tag_value {
local -r tags="$1"
local -r tag_key="$2"
echo "$tags" | jq -r ".Tags[] | select(.Key == \"$tag_key\") | .Value"
}
function assert_is_installed {
local -r name="$1"
if [[ ! $(command -v ${name}) ]]; then
log_error "The binary '$name' is required by this script but is not installed or in the system's PATH."
exit 1
fi
}
function split_by_lines {
local prefix="$1"
shift
for var in "$@"; do
echo "${prefix}${var}"
done
}
function generate_consul_config {
local -r server="${1}"
local -r config_dir="${2}"
local -r user="${3}"
local -r cluster_tag_key="${4}"
local -r cluster_tag_value="${5}"
local -r datacenter="${6}"
local -r enable_gossip_encryption="${7}"
local -r gossip_encryption_key="${8}"
local -r enable_rpc_encryption="${9}"
local -r ca_path="${10}"
local -r cert_file_path="${11}"
local -r key_file_path="${12}"
local -r cleanup_dead_servers="${13}"
local -r last_contact_threshold="${14}"
local -r max_trailing_logs="${15}"
local -r server_stabilization_time="${16}"
local -r redundancy_zone_tag="${17}"
local -r disable_upgrade_migration="${18}"
local -r upgrade_version_tag=${19}
local -r config_path="$config_dir/$CONSUL_CONFIG_FILE"
shift 19
local -r recursors=("$@")
local instance_id=""
local instance_ip_address=""
local instance_region=""
local ui="false"
instance_id=$(get_instance_id)
instance_ip_address=$(get_instance_ip_address)
instance_region=$(get_instance_region)
local retry_join_json=""
if [[ -z "$cluster_tag_key" || -z "$cluster_tag_value" ]]; then
log_warn "Either the cluster tag key ($cluster_tag_key) or value ($cluster_tag_value) is empty. Will not automatically try to form a cluster based on EC2 tags."
else
retry_join_json=$(cat <<EOF
"retry_join": ["provider=aws region=$instance_region tag_key=$cluster_tag_key tag_value=$cluster_tag_value"],
EOF
)
fi
local recursors_config=""
if (( ${#recursors[@]} != 0 )); then
recursors_config="\"recursors\" : [ "
for recursor in ${recursors[@]}
do
recursors_config="${recursors_config}\"${recursor}\", "
done
recursors_config=$(echo "${recursors_config}"| sed 's/, $//')" ],"
fi
local bootstrap_expect=""
if [[ "$server" == "true" ]]; then
local instance_tags=""
local cluster_size=""
instance_tags=$(get_instance_tags "$instance_id" "$instance_region")
cluster_size=$(get_cluster_size "$instance_tags" "$instance_region")
bootstrap_expect="\"bootstrap_expect\": $cluster_size,"
ui="true"
fi
local autopilot_configuration=$(cat <<EOF
"autopilot": {
"cleanup_dead_servers": $cleanup_dead_servers,
"last_contact_threshold": "$last_contact_threshold",
"max_trailing_logs": $max_trailing_logs,
"server_stabilization_time": "$server_stabilization_time",
"redundancy_zone_tag": "$redundancy_zone_tag",
"disable_upgrade_migration": $disable_upgrade_migration,
"upgrade_version_tag": "$upgrade_version_tag"
},
EOF
)
local gossip_encryption_configuration=""
if [[ "$enable_gossip_encryption" == "true" && ! -z "$gossip_encryption_key" ]]; then
log_info "Creating gossip encryption configuration"
gossip_encryption_configuration="\"encrypt\": \"$gossip_encryption_key\","
fi
local rpc_encryption_configuration=""
if [[ "$enable_rpc_encryption" == "true" && ! -z "$ca_path" && ! -z "$cert_file_path" && ! -z "$key_file_path" ]]; then
log_info "Creating RPC encryption configuration"
rpc_encryption_configuration=$(cat <<EOF
"verify_outgoing": true,
"verify_incoming": true,
"ca_path": "$ca_path",
"cert_file": "$cert_file_path",
"key_file": "$key_file_path",
EOF
)
fi
log_info "Creating default Consul configuration"
local default_config_json=$(cat <<EOF
{
"advertise_addr": "$instance_ip_address",
"bind_addr": "$instance_ip_address",
$bootstrap_expect
"client_addr": "0.0.0.0",
"datacenter": "$datacenter",
"node_name": "$instance_id",
$recursors_config
$retry_join_json
"server": $server,
$gossip_encryption_configuration
$rpc_encryption_configuration
$autopilot_configuration
"ui": $ui
}
EOF
)
log_info "Installing Consul config file in $config_path"
echo "$default_config_json" | jq '.' > "$config_path"
chown "$user:$user" "$config_path"
}
function generate_systemd_config {
local -r systemd_config_path="$1"
local -r consul_config_dir="$2"
local -r consul_data_dir="$3"
local -r consul_systemd_stdout="$4"
local -r consul_systemd_stderr="$5"
local -r consul_bin_dir="$6"
local -r consul_user="$7"
shift 7
local -r environment=("$@")
local -r config_path="$consul_config_dir/$CONSUL_CONFIG_FILE"
log_info "Creating systemd config file to run Consul in $systemd_config_path"
local -r unit_config=$(cat <<EOF
[Unit]
Description="HashiCorp Consul - A service mesh solution"
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=$config_path
EOF
)
local -r service_config=$(cat <<EOF
[Service]
Type=notify
User=$consul_user
Group=$consul_user
ExecStart=$consul_bin_dir/consul agent -config-dir $consul_config_dir -data-dir $consul_data_dir
ExecReload=$consul_bin_dir/consul reload
KillMode=process
Restart=on-failure
TimeoutSec=300s
LimitNOFILE=65536
$(split_by_lines "Environment=" "${environment[@]}")
EOF
)
local log_config=""
if [[ ! -z $consul_systemd_stdout ]]; then
log_config+="StandardOutput=$consul_systemd_stdout\n"
fi
if [[ ! -z $consul_systemd_stderr ]]; then
log_config+="StandardError=$consul_systemd_stderr\n"
fi
local -r install_config=$(cat <<EOF
[Install]
WantedBy=multi-user.target
EOF
)
echo -e "$unit_config" > "$systemd_config_path"
echo -e "$service_config" >> "$systemd_config_path"
echo -e "$log_config" >> "$systemd_config_path"
echo -e "$install_config" >> "$systemd_config_path"
}
function start_consul {
log_info "Reloading systemd config and starting Consul"
sudo systemctl daemon-reload
sudo systemctl enable consul.service
sudo systemctl restart consul.service
}
# Based on: http://unix.stackexchange.com/a/7732/215969
function get_owner_of_path {
local -r path="$1"
ls -ld "$path" | awk '{print $3}'
}
function run {
local server="false"
local client="false"
local config_dir=""
local data_dir=""
local systemd_stdout=""
local systemd_stderr=""
local bin_dir=""
local user=""
local cluster_tag_key=""
local cluster_tag_value=""
local datacenter=""
local upgrade_version_tag=""
local enable_gossip_encryption="false"
local gossip_encryption_key=""
local enable_rpc_encryption="false"
local ca_path=""
local cert_file_path=""
local key_file_path=""
local environment=()
local skip_consul_config="false"
local recursors=()
local all_args=()
local cleanup_dead_servers="$DEFAULT_AUTOPILOT_CLEANUP_DEAD_SERVERS"
local last_contact_threshold="$DEFAULT_AUTOPILOT_LAST_CONTACT_THRESHOLD"
local max_trailing_logs="$DEFAULT_AUTOPILOT_MAX_TRAILING_LOGS"
local server_stabilization_time="$DEFAULT_AUTOPILOT_SERVER_STABILIZATION_TIME"
local redundancy_zone_tag="$DEFAULT_AUTOPILOT_REDUNDANCY_ZONE_TAG"
local disable_upgrade_migration="$DEFAULT_AUTOPILOT_DISABLE_UPGRADE_MIGRATION"
while [[ $# -gt 0 ]]; do
local key="$1"
case "$key" in
--server)
server="true"
;;
--client)
client="true"
;;
--config-dir)
assert_not_empty "$key" "$2"
config_dir="$2"
shift
;;
--data-dir)
assert_not_empty "$key" "$2"
data_dir="$2"
shift
;;
--systemd-stdout)
assert_not_empty "$key" "$2"
systemd_stdout="$2"
shift
;;
--systemd-stderr)
assert_not_empty "$key" "$2"
systemd_stderr="$2"
shift
;;
--bin-dir)
assert_not_empty "$key" "$2"
bin_dir="$2"
shift
;;
--user)
assert_not_empty "$key" "$2"
user="$2"
shift
;;
--cluster-tag-key)
assert_not_empty "$key" "$2"
cluster_tag_key="$2"
shift
;;
--cluster-tag-value)
assert_not_empty "$key" "$2"
cluster_tag_value="$2"
shift
;;
--datacenter)
assert_not_empty "$key" "$2"
datacenter="$2"
shift
;;
--autopilot-cleanup-dead-servers)
assert_not_empty "$key" "$2"
cleanup_dead_servers="$2"
shift
;;
--autopilot-last-contact-threshold)
assert_not_empty "$key" "$2"
last_contact_threshold="$2"
shift
;;
--autopilot-max-trailing-logs)
assert_not_empty "$key" "$2"
max_trailing_logs="$2"
shift
;;
--autopilot-server-stabilization-time)
assert_not_empty "$key" "$2"
server_stabilization_time="$2"
shift
;;
--autopilot-redundancy-zone-tag)
assert_not_empty "$key" "$2"
redundancy_zone_tag="$2"
shift
;;
--autopilot-disable-upgrade-migration)
disable_upgrade_migration="true"
;;
--autopilot-upgrade-version-tag)
assert_not_empty "$key" "$2"
upgrade_version_tag="$2"
shift
;;
--enable-gossip-encryption)
enable_gossip_encryption="true"
;;
--gossip-encryption-key)
assert_not_empty "$key" "$2"
gossip_encryption_key="$2"
shift
;;
--enable-rpc-encryption)
enable_rpc_encryption="true"
;;
--ca-path)
assert_not_empty "$key" "$2"
ca_path="$2"
shift
;;
--cert-file-path)
assert_not_empty "$key" "$2"
cert_file_path="$2"
shift
;;
--key-file-path)
assert_not_empty "$key" "$2"
key_file_path="$2"
shift
;;
--environment)
assert_not_empty "$key" "$2"
environment+=("$2")
shift
;;
--skip-consul-config)
skip_consul_config="true"
;;
--recursor)
assert_not_empty "$key" "$2"
recursors+=("$2")
shift
;;
--help)
print_usage
exit
;;
*)
log_error "Unrecognized argument: $key"
print_usage
exit 1
;;
esac
shift
done
if [[ ("$server" == "true" && "$client" == "true") || ("$server" == "false" && "$client" == "false") ]]; then
log_error "Exactly one of --server or --client must be set."
exit 1
fi
assert_is_installed "systemctl"
assert_is_installed "aws"
assert_is_installed "curl"
assert_is_installed "jq"
if [[ -z "$config_dir" ]]; then
config_dir=$(cd "$SCRIPT_DIR/../config" && pwd)
fi
if [[ -z "$data_dir" ]]; then
data_dir=$(cd "$SCRIPT_DIR/../data" && pwd)
fi
# If $systemd_stdout and/or $systemd_stderr are empty, we leave them empty so that generate_systemd_config will use systemd's defaults (journal and inherit, respectively)
if [[ -z "$bin_dir" ]]; then
bin_dir=$(cd "$SCRIPT_DIR/../bin" && pwd)
fi
if [[ -z "$user" ]]; then
user=$(get_owner_of_path "$config_dir")
fi
if [[ -z "$datacenter" ]]; then
datacenter=$(get_instance_region)
fi
if [[ "$skip_consul_config" == "true" ]]; then
log_info "The --skip-consul-config flag is set, so will not generate a default Consul config file."
else
if [[ "$enable_gossip_encryption" == "true" ]]; then
assert_not_empty "--gossip-encryption-key" "$gossip_encryption_key"
fi
if [[ "$enable_rpc_encryption" == "true" ]]; then
assert_not_empty "--ca-path" "$ca_path"
assert_not_empty "--cert-file-path" "$cert_file_path"
assert_not_empty "--key_file_path" "$key_file_path"
fi
generate_consul_config "$server" \
"$config_dir" \
"$user" \
"$cluster_tag_key" \
"$cluster_tag_value" \
"$datacenter" \
"$enable_gossip_encryption" \
"$gossip_encryption_key" \
"$enable_rpc_encryption" \
"$ca_path" \
"$cert_file_path" \
"$key_file_path" \
"$cleanup_dead_servers" \
"$last_contact_threshold" \
"$max_trailing_logs" \
"$server_stabilization_time" \
"$redundancy_zone_tag" \
"$disable_upgrade_migration" \
"$upgrade_version_tag" \
"${recursors[@]}"
fi
generate_systemd_config "$SYSTEMD_CONFIG_PATH" "$config_dir" "$data_dir" "$systemd_stdout" "$systemd_stderr" "$bin_dir" "$user" "${environment[@]}"
start_consul
}
run "$@"

View File

@@ -0,0 +1,144 @@
#!/bin/bash
# Configure systemd-resolved to forward requests for a specific domain to Consul. This script has been tested
# with the following operating systems:
#
# 1. Ubuntu 18.04
# See https://learn.hashicorp.com/consul/security-networking/forwarding#systemd-resolved-setup for more details
# Github Issue: https://github.com/hashicorp/consul/issues/4155
set -e
readonly DEFAULT_CONSUL_DOMAIN="consul"
readonly DEFAULT_CONSUL_IP="127.0.0.1"
readonly DEFAULT_CONSUL_DNS_PORT=8600
readonly SYSTEMD_RESOLVED_CONFIG_FILE="/etc/systemd/resolved.conf"
readonly SCRIPT_NAME="$(basename "$0")"
function print_usage {
echo
echo "Usage: setup-systemd-resolved [OPTIONS]"
echo
echo "Configure systemd-resolved to forward requests for a specific domain to Consul. This script has been tested with Ubuntu 18.04."
echo
echo "Options:"
echo
echo -e " --consul-domain\tThe domain name to point to Consul. Optional. Default: $DEFAULT_CONSUL_DOMAIN."
echo -e " --consul-ip\t\tThe IP address to use for Consul. Optional. Default: $DEFAULT_CONSUL_IP."
echo -e " --consul-dns-port\tThe port Consul uses for DNS. Optional. Default: $DEFAULT_CONSUL_DNS_PORT."
echo
echo "Example:"
echo
echo " setup-systemd-resolved"
}
function log {
local -r level="$1"
local -r message="$2"
local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S")
>&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}"
}
function log_info {
local -r message="$1"
log "INFO" "$message"
}
function log_warn {
local -r message="$1"
log "WARN" "$message"
}
function log_error {
local -r message="$1"
log "ERROR" "$message"
}
function assert_not_empty {
local -r arg_name="$1"
local -r arg_value="$2"
if [[ -z "$arg_value" ]]; then
log_error "The value for '$arg_name' cannot be empty"
print_usage
exit 1
fi
}
function install_dependencies {
local -r consul_ip="$1"
log_info "Installing dependencies"
sudo apt-get update -y
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
sudo apt-get install -y iptables-persistent
}
function configure_systemd_resolved {
local -r consul_domain="$1"
local -r consul_ip="$2"
local -r consul_port="$3"
UBUNTU_VERSION=`lsb_release -s -r`
if [ "$UBUNTU_VERSION" == "18.04" ]; then
log_info "Configuring systemd-resolved to forward lookups of the '$consul_domain' domain to $consul_ip:$consul_port in $CONSUL_DNS_MASQ_CONFIG_FILE"
sudo iptables -t nat -A OUTPUT -d localhost -p udp -m udp --dport 53 -j REDIRECT --to-ports $consul_port
sudo iptables -t nat -A OUTPUT -d localhost -p tcp -m tcp --dport 53 -j REDIRECT --to-ports $consul_port
sudo iptables-save | sudo tee /etc/iptables/rules.v4
sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
sudo sed -i "s/#DNS=/DNS=${consul_ip}/g" "$SYSTEMD_RESVOLDED_CONFIG_FILE"
sudo sed -i "s/#Domains=/Domains=~${consul_domain}/g" "$SYSTEMD_RESVOLDED_CONFIG_FILE"
else
log_error "Cannot install on this OS."
exit 1
fi
}
function install {
local consul_domain="$DEFAULT_CONSUL_DOMAIN"
local consul_ip="$DEFAULT_CONSUL_IP"
local consul_dns_port="$DEFAULT_CONSUL_DNS_PORT"
while [[ $# -gt 0 ]]; do
local key="$1"
case "$key" in
--consul-domain)
assert_not_empty "$key" "$2"
consul_domain="$2"
shift
;;
--consul-ip)
assert_not_empty "$key" "$2"
consul_ip="$2"
shift
;;
--consul-dns-port)
assert_not_empty "$key" "$2"
consul_dns_port="$2"
shift
;;
--help)
print_usage
exit
;;
*)
log_error "Unrecognized argument: $key"
print_usage
exit 1
;;
esac
shift
done
log_info "Configuring systemd-resolved"
install_dependencies
configure_systemd_resolved "$consul_domain" "$consul_ip" "$consul_dns_port"
log_info "systemd-resolved configured!"
}
install "$@"

View File

@@ -0,0 +1,7 @@
{
"datacenter": "us-east-2",
"telemetry": {
"dogstatsd_addr": "127.0.0.1:8125"
}
}

View File

@@ -0,0 +1,6 @@
# Load Test AMI
This AMI is used for all load test servers. Currently it copies the `scripts/` directory and installs [k6](https://k6.io), so if any additional files are desired, place them in that directory.
## How to use
1) Set the AWS region in the `loadtest.json` file
2) Run the command `packer build loadtest.json`
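As with the Consul AMI, the region can be supplied through the `AWS_DEFAULT_REGION` environment variable that the template reads; a sketch:

```
export AWS_DEFAULT_REGION="us-east-2"   # consumed by the aws_region variable in loadtest.json
packer build loadtest.json
```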

View File

@@ -0,0 +1,41 @@
{
"min_packer_version": "1.5.4",
"variables": {
"aws_region": "{{env `AWS_DEFAULT_REGION`}}"
},
"builders": [{
"name": "ubuntu18-ami",
"ami_name": "consul-ubuntu-{{isotime | clean_resource_name}}-{{uuid}}",
"ami_description": "An Ubuntu 18.04 AMI that has hey installed.",
"instance_type": "t2.micro",
"region": "{{user `aws_region`}}",
"associate_public_ip_address": true,
"type": "amazon-ebs",
"source_ami_filter": {
"filters": {
"virtualization-type": "hvm",
"architecture": "x86_64",
"name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*",
"block-device-mapping.volume-type": "gp2",
"root-device-type": "ebs"
},
"owners": ["099720109477"],
"most_recent": true
},
"ssh_username": "ubuntu"
}],
"provisioners": [{
"type": "shell",
"inline": ["mkdir -p /home/ubuntu/scripts/"]
},{
"type": "file",
"source": "{{template_dir}}/scripts",
"destination": "/home/ubuntu",
"pause_before": "30s"
},{
"type": "shell",
"execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'",
"script": "./scripts/install-k6.sh"
}]
}

View File

@@ -0,0 +1,17 @@
#!/bin/bash -e
# set new limit
echo "fs.file-max = 2097152" >> /etc/sysctl.conf
ulimit -Sn 100000
sysctl -p
# download k6
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 379CE192D401AB61
echo "deb https://dl.bintray.com/loadimpact/deb stable main" | sudo tee -a /etc/apt/sources.list
sudo apt-get update
sudo apt-get install k6
# move service file
mv /home/ubuntu/scripts/loadtest.service /etc/systemd/system/loadtest.service
chmod 755 /home/ubuntu/scripts/puts_script.js
chmod 755 /home/ubuntu/scripts/run-k6.sh

View File

@@ -0,0 +1,7 @@
[Unit]
Description=Execute run-k6.
[Service]
Type=simple
ExecStart=/bin/bash -c 'exec /home/ubuntu/scripts/run-k6.sh'
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,25 @@
import http from 'k6/http';
import { uuidv4 } from "https://jslib.k6.io/k6-utils/1.0.0/index.js";
export default function() {
const key = uuidv4();
const ipaddress = `http://${__ENV.LB_ENDPOINT}:8500`;
const uri = '/v1/kv/';
const value = { data: uuidv4() };
const address = `${ipaddress + uri + key}`
const res = http.put(address, JSON.stringify(value));
console.log(JSON.parse(res.body));
}
export let options = {
// 100 virtual users
vus: 100,
// 15 minutes
duration: "15m",
// 95% of requests must complete below 0.280s
thresholds: { http_req_duration: ["p(95)<280"] },
};
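This script is normally launched by the `loadtest` systemd unit via `run-k6.sh` (below), but it can also be run locally as a smoke test; a sketch, assuming k6 is installed and a Consul endpoint answers on port 8500:

```
# LB_ENDPOINT is read by the script through __ENV.LB_ENDPOINT.
LB_ENDPOINT=localhost k6 run puts_script.js
```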

View File

@@ -0,0 +1,3 @@
#!/bin/bash
k6 run /home/ubuntu/scripts/puts_script.js

test/load/terraform/.gitignore
View File

@@ -0,0 +1,37 @@
keys/
*.pem
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc

View File

@@ -0,0 +1,33 @@
# Terraform Consul Load Testing
## How to use
1. Build an image with the desired Consul version and a load test image using the Packer templates in the `packer/` folder.
2. Create your own `vars.tfvars` file in this directory.
3. Place the appropriate AMI IDs in the `consul_ami_id` and `test_server_ami` variables. Here is an example `vars.tfvars`:
```
vpc_name = "consul-test-vpc"
vpc_cidr = "11.0.0.0/16"
public_subnet_cidrs = ["11.0.1.0/24", "11.0.3.0/24"]
private_subnet_cidrs = ["11.0.2.0/24"]
vpc_az = ["us-east-2a", "us-east-2b"]
test_instance_type = "t2.micro"
## This is found from building the image in packer/loadtest-ami
test_server_ami = "ami-0ad7711e837ebe166"
cluster_name = "ctest"
test_public_ip = "true"
instance_type = "t2.micro"
ami_owners = ["******"]
## This is found from building the image in packer/consul-ami
consul_ami_id = "ami-016d80ff5472346f0"
```
4. AWS variables are set from environment variables. Make sure to export the necessary variables [shown here](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables); see the sketch after this list.
5. Run `terraform plan -var-file=vars.tfvars`, and then `terraform apply -var-file=vars.tfvars` when ready.
6. Upon completion, k6 should run and push metrics to the desired Datadog dashboard.
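A sketch of steps 4 and 5 together; the credential values are placeholders:

```
export AWS_ACCESS_KEY_ID="<access-key-id>"
export AWS_SECRET_ACCESS_KEY="<secret-access-key>"
export AWS_DEFAULT_REGION="us-east-2"

terraform init                        # first run only
terraform plan -var-file=vars.tfvars
terraform apply -var-file=vars.tfvars
```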
## Customization
All available infrastructure customization can be found by looking through the `variables.tf` file. If customization of the tests is desired, note that `start-k6.sh` leverages user data to have k6 run `puts_script.js` on the load test servers; that script can be customized.
## How to SSH
After `terraform apply` is run, Terraform should create a `keys/` directory which gives access to all instances created.
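A sketch of connecting to a test server; the key file name is generated per run, so the name below is a placeholder:

```
chmod 600 keys/*.pem
ssh -i keys/<generated-key-name>.pem ubuntu@<test-server-public-ip>
```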

View File

@@ -0,0 +1,100 @@
data "aws_ami" "consul" {
most_recent = true
owners = var.ami_owners
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "is-public"
values = ["false"]
}
filter {
name = "name"
values = ["consul-ubuntu-*"]
}
}
# ---------------------------------------------------------------------------------------------------------------------
# Deploy consul cluster
# ---------------------------------------------------------------------------------------------------------------------
module "consul" {
source = "hashicorp/consul/aws"
version = "0.7.9"
depends_on = [module.vpc.vpc_id]
ami_id = var.consul_ami_id
ssh_key_name = module.keys.key_name
vpc_id = module.vpc.vpc_id
cluster_name = var.cluster_name
num_clients = var.num_clients
num_servers = var.num_servers
}
# ---------------------------------------------------------------------------------------------------------------------
# This script will configure and start Consul agents
# ---------------------------------------------------------------------------------------------------------------------
data "template_file" "user_data_server" {
template = file("${path.module}/user-data-server.sh")
vars = {
cluster_tag_key = var.cluster_tag_key
cluster_tag_value = var.cluster_name
}
}
data "template_file" "user_data_client" {
template = file("${path.module}/user-data-client.sh")
vars = {
cluster_tag_key = var.cluster_tag_key
cluster_tag_value = var.cluster_name
}
}
#
# Set up ALB for test-servers to talk to consul clients
#
module "alb" {
source = "terraform-aws-modules/alb/aws"
version = "~> 5.0"
name = "${var.cluster_name}-${local.random_name}-alb"
load_balancer_type = "application"
vpc_id = module.vpc.vpc_id
subnets = module.vpc.public_subnets
security_groups = [module.consul.security_group_id_clients]
internal = true
target_groups = [
{
#name_prefix has a six char limit
name_prefix = "test-"
backend_protocol = "HTTP"
backend_port = 8500
target_type = "instance"
}
]
http_tcp_listeners = [
{
port = 8500
protocol = "HTTP"
target_group_index = 0
}
]
}
# Attach ALB to Consul clients
resource "aws_autoscaling_attachment" "asg_attachment_bar" {
autoscaling_group_name = module.consul.asg_name_clients
alb_target_group_arn = module.alb.target_group_arns[0]
}

View File

@@ -0,0 +1,37 @@
terraform {
required_version = ">= 0.13"
}
# ---------------------------------------------------------------------------------------------------------------------
# Create variables and ssh keys
# ---------------------------------------------------------------------------------------------------------------------
resource "random_pet" "test" {
}
locals {
random_name = "${var.cluster_name}-${random_pet.test.id}"
}
module "keys" {
name = local.random_name
path = "${path.root}/keys"
source = "mitchellh/dynamic-keys/aws"
version = "v2.0.0"
}
# ---------------------------------------------------------------------------------------------------------------------
# Create VPC with public and also private subnets
# ---------------------------------------------------------------------------------------------------------------------
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.21.0"
name = "${local.random_name}-${var.vpc_name}"
cidr = var.vpc_cidr
azs = var.vpc_az
public_subnets = var.public_subnet_cidrs
private_subnets = var.private_subnet_cidrs
enable_nat_gateway = true
}

View File

View File

@@ -0,0 +1,8 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.0"
}
}
}

View File

@@ -0,0 +1,5 @@
#!/bin/bash
echo "LB_ENDPOINT=${lb_endpoint}" >> /etc/environment
systemctl start loadtest

View File

@@ -0,0 +1,62 @@
# ---------------------------------------------------------------------------------------------------------------------
# Start up test servers to run tests from
# ---------------------------------------------------------------------------------------------------------------------
resource "aws_security_group" "test-servers" {
name = "${local.random_name}-test-server-sg"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 8500
to_port = 8500
security_groups = [module.consul.security_group_id_clients]
protocol = "6"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "6"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_launch_configuration" "test-servers" {
name_prefix = "${var.cluster_name}-${local.random_name}-test-"
image_id = var.test_server_ami
instance_type = var.test_instance_type
key_name = module.keys.key_name
security_groups = [aws_security_group.test-servers.id]
associate_public_ip_address = var.test_public_ip
lifecycle {
create_before_destroy = true
}
user_data = templatefile(
"./start-k6.sh",
{
lb_endpoint = module.alb.this_lb_dns_name
}
)
}
resource "aws_autoscaling_group" "test-servers" {
name = aws_launch_configuration.test-servers.name
launch_configuration = aws_launch_configuration.test-servers.id
min_size = 2
max_size = 5
desired_capacity = 2
wait_for_capacity_timeout = "480s"
health_check_grace_period = 15
health_check_type = "EC2"
vpc_zone_identifier = module.vpc.public_subnets
lifecycle {
create_before_destroy = true
}
}

View File

@@ -0,0 +1,16 @@
#!/bin/bash
# SOURCE: GRUNTWORKS
# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the
# run-consul script to configure and start Consul in client mode. Note that this script assumes it's running in an AMI
# built from the Packer template in examples/consul-ami/consul.json.
set -e
# Send the log output from this script to user-data.log, syslog, and the console
# From: https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
# These variables are passed in via Terraform template interpolation
/opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}"
# You could add commands to boot your other apps here

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# SOURCE: GRUNTWORKS
# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the
# run-consul script to configure and start Consul in server mode. Note that this script assumes it's running in an AMI
# built from the Packer template in examples/consul-ami/consul.json.
set -e
# Send the log output from this script to user-data.log, syslog, and the console
# From: https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
# These variables are passed in via Terraform template interpolation
/opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}"

View File

@@ -0,0 +1,113 @@
# ---------------------------------------------------------------------------------------------------------------------
# ENVIRONMENT VARIABLES
# Define these secrets as environment variables
# ---------------------------------------------------------------------------------------------------------------------
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# AWS_DEFAULT_REGION
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------
variable "consul_ami_id" {
description = "The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/consul-ami/consul.json. To keep this example simple, we run the same AMI on both server and client nodes, but in real-world usage, your client nodes would also run your apps. If the default value is used, Terraform will look up the latest AMI build automatically."
type = string
default = null
}
variable "cluster_name" {
description = "What to name the Consul cluster and all of its associated resources"
type = string
default = "consul-example"
}
variable "num_servers" {
description = "The number of Consul server nodes to deploy. We strongly recommend using 3 or 5."
type = number
default = 3
}
variable "num_clients" {
description = "The number of Consul client nodes to deploy. You typically run the Consul client alongside your apps, so set this value to however many Instances make sense for your app code."
type = number
default = 2
}
variable "cluster_tag_key" {
description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster."
type = string
default = "consul-servers"
}
variable "ssh_key_name" {
description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair."
type = string
default = null
}
variable "vpc_id" {
description = "The ID of the VPC in which the nodes will be deployed. Uses default VPC if not supplied."
type = string
default = null
}
variable "spot_price" {
description = "The maximum hourly price to pay for EC2 Spot Instances."
type = number
default = null
}
variable "vpc_az" {
type = list(string)
description = "VPC Availability Zone"
validation {
condition = length(var.vpc_az) == 2
error_message = "VPC needs at least two Availability Zones for ALB to work."
}
}
variable "vpc_name" {
description = "Name of the VPC"
}
variable "vpc_cidr" {
description = "List of CIDR blocks for the VPC module"
}
variable "public_subnet_cidrs" {
type = list(string)
description = "CIDR Block for the Public Subnet, must be within VPC CIDR range"
}
variable "private_subnet_cidrs" {
type = list(string)
description = "CIDR Block for the Private Subnet, must be within VPC CIDR range"
}
variable "test_server_ami" {
type = string
description = "The AMI ID from the Packer generated image"
}
variable "test_instance_type" {
type = string
description = "AWS Instance type for all test servers"
}
variable "test_public_ip" {
type = bool
description = "Should the test servers have a public IP?"
}
variable "instance_type" {
type = string
description = "Instance Type for all instances in the Consul Cluster"
}
variable "ami_owners" {
type = list(string)
description = "The account owner number which the desired AMI is in"
}