Merge pull request #5012 from hashicorp/f-e2e-provisioning

Terraform configs for e2e tests
Preetha 2018-12-18 13:45:58 -06:00 committed by GitHub
commit 6c51232f55
30 changed files with 966 additions and 0 deletions

3
.gitignore vendored

@@ -88,3 +88,6 @@ command/agent/bindata_assetfs.go
# auto-generated cert file for Terraform/Azure
azure-hashistack.pem
# generated keys for e2e tests
e2e/terraform/keys/

13
e2e/terraform/README.md Normal file

@@ -0,0 +1,13 @@
Terraform provisioner for end-to-end tests
==========================================
This folder contains Terraform resources for provisioning a Nomad cluster on AWS for end-to-end tests.
It uses a Nomad binary identified by its commit SHA, stored in a shared S3 bucket that Nomad team
developers can access.
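The provisioner pulls the build from `s3://nomad-team-test-binary/builds-oss/<nomad_sha>.tar.gz`
(see compute.tf). A sketch of the assumed upload step; the tarball layout is an assumption, but it
must contain the compiled `nomad` binary at its root, since the provisioner extracts it straight
into `/usr/local/bin/`:
```
$ tar -zcf "$NOMAD_SHA.tar.gz" nomad
$ aws s3 cp "$NOMAD_SHA.tar.gz" "s3://nomad-team-test-binary/builds-oss/$NOMAD_SHA.tar.gz"
```
With the build uploaded, provision the cluster: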
```
$ cd e2e/terraform/
$ TF_VAR_nomad_sha=<nomad_sha> terraform apply
```
After this step, you should have a Nomad client address to point the end-to-end tests in the `e2e` folder at.
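For example, assuming the tests honor the standard `NOMAD_ADDR` environment variable (a hypothetical
invocation; adjust to however the `e2e` suite is actually driven):
```
$ NOMAD_ADDR="http://<client_public_ip>:4646" go test -v ./e2e/...
```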

124
e2e/terraform/compute.tf Normal file

@@ -0,0 +1,124 @@
data "template_file" "user_data_server" {
template = "${file("${path.root}/user-data-server.sh")}"
vars {
server_count = "${var.server_count}"
region = "${var.region}"
retry_join = "${var.retry_join}"
}
}
data "template_file" "user_data_client" {
template = "${file("${path.root}/user-data-client.sh")}"
count = "${var.client_count}"
vars {
region = "${var.region}"
retry_join = "${var.retry_join}"
}
}
data "template_file" "nomad_client_config" {
template = "${file("${path.root}/configs/client.hcl")}"
}
data "template_file" "nomad_server_config" {
template = "}"
}
resource "aws_instance" "server" {
ami = "${data.aws_ami.main.image_id}"
instance_type = "${var.instance_type}"
key_name = "${module.keys.key_name}"
vpc_security_group_ids = ["${aws_security_group.primary.id}"]
count = "${var.server_count}"
# Instance tags
tags {
Name = "${local.random_name}-server-${count.index}"
ConsulAutoJoin = "auto-join"
}
user_data = "${data.template_file.user_data_server.rendered}"
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
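# Upload either the shared server.hcl or a per-instance indexed config,
# depending on var.indexed (see main.tf)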
provisioner "file" {
content = "${file("${path.root}/configs/${var.indexed == false ? "server.hcl" : "indexed/server-${count.index}.hcl"}")}"
destination = "/tmp/server.hcl"
connection {
user = "ubuntu"
private_key = "${module.keys.private_key_pem}"
}
}
provisioner "remote-exec" {
inline = [
"aws s3 cp s3://nomad-team-test-binary/builds-oss/${var.nomad_sha}.tar.gz nomad.tar.gz",
"sudo cp /ops/shared/config/nomad.service /etc/systemd/system/nomad.service",
"sudo tar -zxvf nomad.tar.gz -C /usr/local/bin/",
"sudo cp /tmp/server.hcl /etc/nomad.d/nomad.hcl",
"sudo chmod 0755 /usr/local/bin/nomad",
"sudo chown root:root /usr/local/bin/nomad",
"sudo systemctl start nomad.service"
]
connection {
user = "ubuntu"
private_key = "${module.keys.private_key_pem}"
}
}
}
resource "aws_instance" "client" {
ami = "${data.aws_ami.main.image_id}"
instance_type = "${var.instance_type}"
key_name = "${module.keys.key_name}"
vpc_security_group_ids = ["${aws_security_group.primary.id}"]
count = "${var.client_count}"
depends_on = ["aws_instance.server"]
# Instance tags
tags {
Name = "${local.random_name}-client-${count.index}"
ConsulAutoJoin = "auto-join"
}
ebs_block_device = {
device_name = "/dev/xvdd"
volume_type = "gp2"
volume_size = "50"
delete_on_termination = "true"
}
user_data = "${element(data.template_file.user_data_client.*.rendered, count.index)}"
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
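# As with servers, pick the shared or the per-instance indexed client config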
provisioner "file" {
content = "${file("${path.root}/configs/${var.indexed == false ? "client.hcl" : "indexed/client-${count.index}.hcl"}")}"
destination = "/tmp/client.hcl"
connection {
user = "ubuntu"
private_key = "${module.keys.private_key_pem}"
}
}
provisioner "remote-exec" {
inline = [
"aws s3 cp s3://nomad-team-test-binary/builds-oss/${var.nomad_sha}.tar.gz nomad.tar.gz",
"sudo tar -zxvf nomad.tar.gz -C /usr/local/bin/",
"sudo cp /ops/shared/config/nomad.service /etc/systemd/system/nomad.service",
"sudo cp /tmp/client.hcl /etc/nomad.d/nomad.hcl",
"sudo chmod 0755 /usr/local/bin/nomad",
"sudo chown root:root /usr/local/bin/nomad",
"sudo systemctl start nomad.service"
]
connection {
user = "ubuntu"
private_key = "${module.keys.private_key_pem}"
}
}
}

20
e2e/terraform/configs/client.hcl Normal file

@@ -0,0 +1,20 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the client
client {
enabled = true
options {
"driver.raw_exec.enable" = "1"
"docker.privileged.enabled" = "true"
}
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = true
address = "http://active.vault.service.consul:8200"
}

23
e2e/terraform/configs/indexed/client-0.hcl Normal file

@@ -0,0 +1,23 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the client
client {
enabled = true
options {
"driver.raw_exec.enable" = "1"
"docker.privileged.enabled" = "true"
}
meta {
"rack" = "r1"
}
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = true
address = "http://active.vault.service.consul:8200"
}

23
e2e/terraform/configs/indexed/client-1.hcl Normal file

@@ -0,0 +1,23 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the client
client {
enabled = true
options {
"driver.raw_exec.enable" = "1"
"docker.privileged.enabled" = "true"
}
meta {
"rack" = "r2"
}
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = true
address = "http://active.vault.service.consul:8200"
}

23
e2e/terraform/configs/indexed/client-2.hcl Normal file

@@ -0,0 +1,23 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
datacenter = "dc2"
# Enable the client
client {
enabled = true
options {
"driver.raw_exec.enable" = "1"
"docker.privileged.enabled" = "true"
}
meta {
"rack" = "r1"
}
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = true
address = "http://active.vault.service.consul:8200"
}

23
e2e/terraform/configs/indexed/client-3.hcl Normal file

@@ -0,0 +1,23 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
datacenter = "dc2"
# Enable the client
client {
enabled = true
options {
"driver.raw_exec.enable" = "1"
"docker.privileged.enabled" = "true"
}
meta {
"rack" = "r2"
}
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = true
address = "http://active.vault.service.consul:8200"
}

21
e2e/terraform/configs/indexed/server-0.hcl Normal file

@@ -0,0 +1,21 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the server
server {
enabled = true
bootstrap_expect = 3
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
}

21
e2e/terraform/configs/indexed/server-1.hcl Normal file

@@ -0,0 +1,21 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the server
server {
enabled = true
bootstrap_expect = 3
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
}

21
e2e/terraform/configs/indexed/server-2.hcl Normal file

@@ -0,0 +1,21 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the server
server {
enabled = true
bootstrap_expect = 3
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
}

21
e2e/terraform/configs/server.hcl Normal file

@@ -0,0 +1,21 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the server
server {
enabled = true
bootstrap_expect = 3
}
consul {
address = "127.0.0.1:8500"
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
}

64
e2e/terraform/iam.tf Normal file

@@ -0,0 +1,64 @@
resource "aws_iam_instance_profile" "instance_profile" {
name_prefix = "${local.random_name}"
role = "${aws_iam_role.instance_role.name}"
}
resource "aws_iam_role" "instance_role" {
name_prefix = "${local.random_name}"
assume_role_policy = "${data.aws_iam_policy_document.instance_role.json}"
}
data "aws_iam_policy_document" "instance_role" {
statement {
effect = "Allow"
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy" "auto_discover_cluster" {
name = "auto-discover-cluster"
role = "${aws_iam_role.instance_role.id}"
policy = "${data.aws_iam_policy_document.auto_discover_cluster.json}"
}
# Note: this instance profile is overloaded to also grant access to
# test binaries in S3; it should be renamed.
data "aws_iam_policy_document" "auto_discover_cluster" {
statement {
effect = "Allow"
actions = [
"ec2:DescribeInstances",
"ec2:DescribeTags",
"autoscaling:DescribeAutoScalingGroups",
]
resources = ["*"]
}
statement {
effect = "Allow"
actions = [
"s3:PutObject",
"s3:GetObject",
"s3:DeleteObject"
]
resources = ["arn:aws:s3:::nomad-team-test-binary/*"]
}
}

74
e2e/terraform/main.tf Normal file

@@ -0,0 +1,74 @@
variable "name" {
description = "Used to name various infrastructure components"
default = "nomad-e2e"
}
variable "region" {
description = "The AWS region to deploy to."
default = "us-east-1"
}
variable "indexed" {
description = "Different configurations per client/server"
default = true
}
variable "instance_type" {
description = "The AWS instance type to use for both clients and servers."
default = "t2.medium"
}
variable "server_count" {
description = "The number of servers to provision."
default = "3"
}
variable "client_count" {
description = "The number of clients to provision."
default = "4"
}
variable "retry_join" {
description = "Used by Consul to automatically form a cluster."
default = "provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"
}
variable "nomad_sha" {
description = "The sha of Nomad to run"
}
provider "aws" {
region = "${var.region}"
}
resource "random_pet" "e2e" {}
locals {
random_name = "${var.name}-${random_pet.e2e.id}"
}
# Generates keys to use for provisioning and access
module "keys" {
name = "nomad-e2e-${local.random_name}"
path = "${path.root}/keys"
source = "mitchellh/dynamic-keys/aws"
}
data "aws_ami" "main" {
most_recent = true
owners = ["self"]
filter {
name = "name"
values = ["nomad-e2e-*"]
}
}
output "servers" {
value = "${aws_instance.server.*.public_ip}"
}
output "clients" {
value = "${aws_instance.client.*.public_ip}"
}

78
e2e/terraform/network.tf Normal file

@@ -0,0 +1,78 @@
data "aws_vpc" "default" {
default = true
}
resource "aws_security_group" "primary" {
name = "${local.random_name}"
vpc_id = "${data.aws_vpc.default.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Nomad
ingress {
from_port = 4646
to_port = 4646
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Fabio
ingress {
from_port = 9998
to_port = 9999
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Consul
ingress {
from_port = 8500
to_port = 8500
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# HDFS NameNode UI
ingress {
from_port = 50070
to_port = 50070
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# HDFS DataNode UI
ingress {
from_port = 50075
to_port = 50075
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Spark history server UI
ingress {
from_port = 18080
to_port = 18080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
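# Allow all traffic between instances in this security group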
ingress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

28
e2e/terraform/packer.json Normal file

@@ -0,0 +1,28 @@
{
"builders": [{
"type": "amazon-ebs",
"region": "us-east-1",
"source_ami": "ami-80861296",
"instance_type": "t2.medium",
"ssh_username": "ubuntu",
"ami_name": "nomad-e2e-{{timestamp}}",
"ami_groups": ["all"]
}],
"provisioners": [
{
"type": "shell",
"inline": [
"sudo mkdir /ops",
"sudo chmod 777 /ops"
]
},
{
"type": "file",
"source": "shared",
"destination": "/ops"
},
{
"type": "shell",
"script": "shared/scripts/setup.sh"
}]
}

14
e2e/terraform/shared/config/consul.json Normal file

@@ -0,0 +1,14 @@
{
"log_level": "INFO",
"server": true,
"ui": true,
"data_dir": "/opt/consul/data",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"advertise_addr": "IP_ADDRESS",
"bootstrap_expect": SERVER_COUNT,
"service": {
"name": "consul"
},
"retry_join": ["RETRY_JOIN"]
}

16
e2e/terraform/shared/config/consul_aws.service Normal file

@@ -0,0 +1,16 @@
[Unit]
Description=Consul Agent
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d" -dns-port="53" -recursor="172.31.0.2"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=root
Group=root
[Install]
WantedBy=multi-user.target

16
e2e/terraform/shared/config/consul_azure.service Normal file

@@ -0,0 +1,16 @@
[Unit]
Description=Consul Agent
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d" -dns-port="53" -recursor="168.63.129.16"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=root
Group=root
[Install]
WantedBy=multi-user.target

9
e2e/terraform/shared/config/consul_client.json Normal file

@@ -0,0 +1,9 @@
{
"ui": true,
"log_level": "INFO",
"data_dir": "/opt/consul/data",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"advertise_addr": "IP_ADDRESS",
"retry_join": ["RETRY_JOIN"]
}

8
e2e/terraform/shared/config/core-site.xml Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hdfs.service.consul/</value>
</property>
</configuration>

15
e2e/terraform/shared/config/nomad.service Normal file

@@ -0,0 +1,15 @@
[Unit]
Description=Nomad Agent
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
ExecStart=/usr/local/bin/nomad agent -config="/etc/nomad.d/nomad.hcl"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=root
Group=root
[Install]
WantedBy=multi-user.target

12
e2e/terraform/shared/config/vault.hcl Normal file

@@ -0,0 +1,12 @@
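# The IP_ADDRESS placeholders below are replaced via sed by
# shared/scripts/server.sh at boot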
backend "consul" {
path = "vault/"
address = "IP_ADDRESS:8500"
cluster_addr = "https://IP_ADDRESS:8201"
redirect_addr = "http://IP_ADDRESS:8200"
}
listener "tcp" {
address = "IP_ADDRESS:8200"
cluster_address = "IP_ADDRESS:8201"
tls_disable = 1
}

16
e2e/terraform/shared/config/vault.service Normal file

@@ -0,0 +1,16 @@
[Unit]
Description=Vault Agent
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
Environment=GOMAXPROCS=nproc
ExecStart=/usr/local/bin/vault server -config="/etc/vault.d/vault.hcl"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=root
Group=root
[Install]
WantedBy=multi-user.target

57
e2e/terraform/shared/scripts/client.sh Normal file

@@ -0,0 +1,57 @@
#!/bin/bash
set -e
CONFIGDIR=/ops/shared/config
CONSULCONFIGDIR=/etc/consul.d
NOMADCONFIGDIR=/etc/nomad.d
HADOOP_VERSION=hadoop-2.7.6
HADOOPCONFIGDIR=/usr/local/$HADOOP_VERSION/etc/hadoop
HOME_DIR=ubuntu
# Wait for network
sleep 15
# IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
IP_ADDRESS="$(/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')"
DOCKER_BRIDGE_IP_ADDRESS=(`ifconfig docker0 2>/dev/null|awk '/inet addr:/ {print $2}'|sed 's/addr://'`)
CLOUD=$1
RETRY_JOIN=$2
# Consul
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul_client.json
sed -i "s/RETRY_JOIN/$RETRY_JOIN/g" $CONFIGDIR/consul_client.json
sudo cp $CONFIGDIR/consul_client.json $CONSULCONFIGDIR/consul.json
sudo cp $CONFIGDIR/consul_$CLOUD.service /etc/systemd/system/consul.service
sudo systemctl start consul.service
sleep 10
export NOMAD_ADDR=http://$IP_ADDRESS:4646
# Add hostname to /etc/hosts
echo "127.0.0.1 $(hostname)" | sudo tee --append /etc/hosts
# Add Docker bridge network IP to /etc/resolv.conf (at the top)
echo "nameserver $DOCKER_BRIDGE_IP_ADDRESS" | sudo tee /etc/resolv.conf.new
cat /etc/resolv.conf | sudo tee --append /etc/resolv.conf.new
sudo mv /etc/resolv.conf.new /etc/resolv.conf
# Hadoop config file to enable HDFS CLI
sudo cp $CONFIGDIR/core-site.xml $HADOOPCONFIGDIR
# Move examples directory to $HOME
sudo mv /ops/examples /home/$HOME_DIR
sudo chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/examples
sudo chmod -R 775 /home/$HOME_DIR/examples
# Set env vars for tool CLIs
echo "export VAULT_ADDR=http://$IP_ADDRESS:8200" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre" | sudo tee --append /home/$HOME_DIR/.bashrc
# Update PATH
echo "export PATH=$PATH:/usr/local/bin/spark/bin:/usr/local/$HADOOP_VERSION/bin" | sudo tee --append /home/$HOME_DIR/.bashrc

70
e2e/terraform/shared/scripts/server.sh Normal file

@@ -0,0 +1,70 @@
#!/bin/bash
set -e
CONFIGDIR=/ops/shared/config
CONSULCONFIGDIR=/etc/consul.d
VAULTCONFIGDIR=/etc/vault.d
NOMADCONFIGDIR=/etc/nomad.d
HADOOP_VERSION=hadoop-2.7.6
HADOOPCONFIGDIR=/usr/local/$HADOOP_VERSION/etc/hadoop
HOME_DIR=ubuntu
# Wait for network
sleep 15
# IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
IP_ADDRESS="$(/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')"
DOCKER_BRIDGE_IP_ADDRESS=(`ifconfig docker0 2>/dev/null|awk '/inet addr:/ {print $2}'|sed 's/addr://'`)
CLOUD=$1
SERVER_COUNT=$2
RETRY_JOIN=$3
# Consul
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul.json
sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/consul.json
sed -i "s/RETRY_JOIN/$RETRY_JOIN/g" $CONFIGDIR/consul.json
sudo cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
sudo cp $CONFIGDIR/consul_$CLOUD.service /etc/systemd/system/consul.service
sudo systemctl start consul.service
sleep 10
export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
export CONSUL_RPC_ADDR=$IP_ADDRESS:8400
# Vault
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/vault.hcl
sudo cp $CONFIGDIR/vault.hcl $VAULTCONFIGDIR
sudo cp $CONFIGDIR/vault.service /etc/systemd/system/vault.service
sudo systemctl start vault.service
export NOMAD_ADDR=http://$IP_ADDRESS:4646
# Add hostname to /etc/hosts
echo "127.0.0.1 $(hostname)" | sudo tee --append /etc/hosts
# Add Docker bridge network IP to /etc/resolv.conf (at the top)
echo "nameserver $DOCKER_BRIDGE_IP_ADDRESS" | sudo tee /etc/resolv.conf.new
cat /etc/resolv.conf | sudo tee --append /etc/resolv.conf.new
sudo mv /etc/resolv.conf.new /etc/resolv.conf
# Hadoop
sudo cp $CONFIGDIR/core-site.xml $HADOOPCONFIGDIR
# Move examples directory to $HOME
sudo mv /ops/examples /home/$HOME_DIR
sudo chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/examples
sudo chmod -R 775 /home/$HOME_DIR/examples
# Set env vars for tool CLIs
echo "export CONSUL_RPC_ADDR=$IP_ADDRESS:8400" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export VAULT_ADDR=http://$IP_ADDRESS:8200" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre" | sudo tee --append /home/$HOME_DIR/.bashrc
# Update PATH
echo "export PATH=$PATH:/usr/local/bin/spark/bin:/usr/local/$HADOOP_VERSION/bin" | sudo tee --append /home/$HOME_DIR/.bashrc

137
e2e/terraform/shared/scripts/setup.sh Normal file

@@ -0,0 +1,137 @@
#!/bin/bash
set -e
# Disable interactive apt prompts
export DEBIAN_FRONTEND=noninteractive
cd /ops
CONFIGDIR=/ops/shared/config
CONSULVERSION=1.3.1
CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
CONSULCONFIGDIR=/etc/consul.d
CONSULDIR=/opt/consul
VAULTVERSION=0.11.4
VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip
VAULTCONFIGDIR=/etc/vault.d
VAULTDIR=/opt/vault
NOMADCONFIGDIR=/etc/nomad.d
NOMADDIR=/opt/nomad
HADOOP_VERSION=2.7.6
# Dependencies
sudo apt-get install -y software-properties-common
sudo apt-get update
sudo apt-get install -y unzip tree redis-tools jq curl tmux awscli
# Numpy (for Spark)
sudo apt-get install -y python-setuptools
sudo easy_install pip
sudo pip install numpy
# Disable the firewall
sudo ufw disable || echo "ufw not installed"
# Consul
curl -L $CONSULDOWNLOAD > consul.zip
## Install
sudo unzip consul.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/consul
sudo chown root:root /usr/local/bin/consul
## Configure
sudo mkdir -p $CONSULCONFIGDIR
sudo chmod 755 $CONSULCONFIGDIR
sudo mkdir -p $CONSULDIR
sudo chmod 755 $CONSULDIR
# Vault
curl -L $VAULTDOWNLOAD > vault.zip
## Install
sudo unzip vault.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/vault
sudo chown root:root /usr/local/bin/vault
## Configure
sudo mkdir -p $VAULTCONFIGDIR
sudo chmod 755 $VAULTCONFIGDIR
sudo mkdir -p $VAULTDIR
sudo chmod 755 $VAULTDIR
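# Nomad (note: the Nomad build under test is fetched from S3 by the
# remote-exec provisioners in compute.tf, not baked into the AMI here)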
## Install
sudo unzip nomad.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/nomad
sudo chown root:root /usr/local/bin/nomad
## Configure
sudo mkdir -p $NOMADCONFIGDIR
sudo chmod 755 $NOMADCONFIGDIR
sudo mkdir -p $NOMADDIR
sudo chmod 755 $NOMADDIR
# Docker
distro=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
sudo apt-get install -y apt-transport-https ca-certificates gnupg2
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/${distro} $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get install -y docker-ce
# rkt
VERSION=1.29.0
DOWNLOAD=https://github.com/rkt/rkt/releases/download/v${VERSION}/rkt-v${VERSION}.tar.gz
function install_rkt() {
wget -q -O /tmp/rkt.tar.gz "${DOWNLOAD}"
tar -C /tmp -xvf /tmp/rkt.tar.gz
sudo mv /tmp/rkt-v${VERSION}/rkt /usr/local/bin
sudo mv /tmp/rkt-v${VERSION}/*.aci /usr/local/bin
}
function configure_rkt_networking() {
sudo mkdir -p /etc/rkt/net.d
sudo bash -c 'cat << EOT > /etc/rkt/net.d/99-network.conf
{
"name": "default",
"type": "ptp",
"ipMasq": false,
"ipam": {
"type": "host-local",
"subnet": "172.16.28.0/24",
"routes": [
{
"dst": "0.0.0.0/0"
}
]
}
}
EOT'
}
install_rkt
configure_rkt_networking
# Java
sudo add-apt-repository -y ppa:openjdk-r/ppa
sudo apt-get update
sudo apt-get install -y openjdk-8-jdk
JAVA_HOME=$(readlink -f /usr/bin/java | sed "s:bin/java::")
# Spark
sudo wget -P /ops/examples/spark https://s3.amazonaws.com/nomad-spark/spark-2.2.0-bin-nomad-0.7.0.tgz
sudo tar -xvf /ops/examples/spark/spark-2.2.0-bin-nomad-0.7.0.tgz --directory /ops/examples/spark
sudo mv /ops/examples/spark/spark-2.2.0-bin-nomad-0.7.0 /usr/local/bin/spark
sudo chown -R root:root /usr/local/bin/spark
# Hadoop (to enable the HDFS CLI)
wget -O - http://apache.mirror.iphh.net/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz | sudo tar xz -C /usr/local/

4
e2e/terraform/terraform.tfvars Normal file

@@ -0,0 +1,4 @@
region = "us-east-1"
instance_type = "t2.medium"
server_count = "3"
client_count = "4"

6
e2e/terraform/user-data-client.sh Normal file

@@ -0,0 +1,6 @@
#!/bin/bash
set -e
exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
sudo bash /ops/shared/scripts/client.sh "aws" "${retry_join}"

6
e2e/terraform/user-data-server.sh Normal file

@@ -0,0 +1,6 @@
#!/bin/bash
set -e
exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
sudo bash /ops/shared/scripts/server.sh "aws" "${server_count}" "${retry_join}"