Clean up Vagrantfile, Terraform configs, and Nomad job files

This commit is contained in:
Rob Genova 2017-06-25 11:10:14 -07:00
parent dbbaea9e09
commit 7a938fde20
6 changed files with 141 additions and 110 deletions

View File

@@ -1,14 +1,9 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
config.vm.box = "ubuntu/trusty64"
# config.vm.network :forwarded_port, guest: 22, host: 1234
config.vm.provision "shell", inline: <<-SHELL
cd /tmp

View File

@@ -1,25 +1,25 @@
variable "region" {
description = "The AWS region to deploy to."
default = "us-east-1"
default = "us-east-1"
}
variable "ami" { }
variable "ami" {}
variable "instance_type" {
description = "The AWS instance type to use for both clients and servers."
default = "t2.medium"
default = "t2.medium"
}
variable "key_name" { }
variable "key_name" {}
variable "server_count" {
description = "The number of servers to provision."
default = "3"
default = "3"
}
variable "client_count" {
description = "The number of clients to provision."
default = "4"
default = "4"
}
variable "cluster_tag_value" {
@@ -32,19 +32,29 @@ provider "aws" {
}
module "hashistack" {
source = "../../modules/hashistack"
region = "${var.region}"
ami = "${var.ami}"
instance_type = "${var.instance_type}"
key_name = "${var.key_name}"
server_count = "${var.server_count}"
client_count = "${var.client_count}"
cluster_tag_value = "${var.cluster_tag_value}"
region = "${var.region}"
ami = "${var.ami}"
instance_type = "${var.instance_type}"
key_name = "${var.key_name}"
server_count = "${var.server_count}"
client_count = "${var.client_count}"
cluster_tag_value = "${var.cluster_tag_value}"
}
output "primary_server_private_ips" { value = "${module.hashistack.primary_server_private_ips}" }
output "primary_server_public_ips" { value = "${module.hashistack.primary_server_public_ips}" }
output "client_private_ips" { value = "${module.hashistack.client_private_ips}" }
output "client_public_ips" { value = "${module.hashistack.client_public_ips}" }
output "primary_server_private_ips" {
value = "${module.hashistack.primary_server_private_ips}"
}
output "primary_server_public_ips" {
value = "${module.hashistack.primary_server_public_ips}"
}
output "client_private_ips" {
value = "${module.hashistack.client_private_ips}"
}
output "client_public_ips" {
value = "${module.hashistack.client_public_ips}"
}

View File

@@ -1,7 +1,7 @@
region = "us-east-1"
ami = "ami-feac99e8"
instance_type = "t2.medium"
key_name = "KEY_NAME"
server_count = "1"
client_count = "4"
cluster_tag_value = "auto-join"
region = "us-east-1"
ami = "ami-feac99e8"
instance_type = "t2.medium"
key_name = "KEY_NAME"
server_count = "1"
client_count = "4"
cluster_tag_value = "auto-join"

View File

@@ -1,19 +1,19 @@
variable "region" { }
variable "ami" { }
variable "instance_type" { }
variable "key_name" { }
variable "server_count" { }
variable "client_count" { }
variable "cluster_tag_value" { }
variable "region" {}
variable "ami" {}
variable "instance_type" {}
variable "key_name" {}
variable "server_count" {}
variable "client_count" {}
variable "cluster_tag_value" {}
data "aws_vpc" "default" {
default = true
}
resource "aws_security_group" "primary" {
name = "hashistack"
vpc_id = "${data.aws_vpc.default.id}"
name = "hashistack"
vpc_id = "${data.aws_vpc.default.id}"
ingress {
from_port = 22
to_port = 22
@@ -21,6 +21,7 @@ resource "aws_security_group" "primary" {
cidr_blocks = ["0.0.0.0/0"]
}
# HDFS NameNode UI
ingress {
from_port = 50070
to_port = 50070
@@ -28,6 +29,7 @@ resource "aws_security_group" "primary" {
cidr_blocks = ["0.0.0.0/0"]
}
# HDFS DataNode UI
ingress {
from_port = 50075
to_port = 50075
@@ -35,6 +37,7 @@ resource "aws_security_group" "primary" {
cidr_blocks = ["0.0.0.0/0"]
}
# Spark history server UI
ingress {
from_port = 18080
to_port = 18080
@@ -43,17 +46,17 @@ resource "aws_security_group" "primary" {
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
from_port = 0
to_port = 0
protocol = "-1"
self = true
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
@@ -61,9 +64,9 @@ data "template_file" "user_data_server_primary" {
template = "${file("${path.root}/user-data-server.sh")}"
vars {
server_count = "${var.server_count}"
region = "${var.region}"
cluster_tag_value = "${var.cluster_tag_value}"
server_count = "${var.server_count}"
region = "${var.region}"
cluster_tag_value = "${var.cluster_tag_value}"
}
}
@@ -71,51 +74,49 @@ data "template_file" "user_data_client" {
template = "${file("${path.root}/user-data-client.sh")}"
vars {
region = "${var.region}"
cluster_tag_value = "${var.cluster_tag_value}"
region = "${var.region}"
cluster_tag_value = "${var.cluster_tag_value}"
}
}
resource "aws_instance" "primary" {
ami = "${var.ami}"
instance_type = "${var.instance_type}"
key_name = "${var.key_name}"
vpc_security_group_ids = ["${aws_security_group.primary.id}"]
count = "${var.server_count}"
ami = "${var.ami}"
instance_type = "${var.instance_type}"
key_name = "${var.key_name}"
vpc_security_group_ids = ["${aws_security_group.primary.id}"]
count = "${var.server_count}"
#Instance tags
tags {
Name = "hashistack-server-${count.index}"
ConsulAutoJoin = "${var.cluster_tag_value}"
}
user_data = "${data.template_file.user_data_server_primary.rendered}"
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
#Instance tags
tags {
Name = "hashistack-server-${count.index}"
ConsulAutoJoin = "${var.cluster_tag_value}"
}
user_data = "${data.template_file.user_data_server_primary.rendered}"
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
}
resource "aws_instance" "client" {
ami = "${var.ami}"
instance_type = "${var.instance_type}"
key_name = "${var.key_name}"
vpc_security_group_ids = ["${aws_security_group.primary.id}"]
count = "${var.client_count}"
depends_on = ["aws_instance.primary"]
ami = "${var.ami}"
instance_type = "${var.instance_type}"
key_name = "${var.key_name}"
vpc_security_group_ids = ["${aws_security_group.primary.id}"]
count = "${var.client_count}"
depends_on = ["aws_instance.primary"]
#Instance tags
tags {
Name = "hashistack-client-${count.index}"
ConsulAutoJoin = "${var.cluster_tag_value}"
}
user_data = "${data.template_file.user_data_client.rendered}"
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
#Instance tags
tags {
Name = "hashistack-client-${count.index}"
ConsulAutoJoin = "${var.cluster_tag_value}"
}
user_data = "${data.template_file.user_data_client.rendered}"
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
}
resource "aws_iam_instance_profile" "instance_profile" {
name_prefix = "hashistack"
role = "${aws_iam_role.instance_role.name}"
role = "${aws_iam_role.instance_role.name}"
}
resource "aws_iam_role" "instance_role" {
@@ -155,7 +156,18 @@ data "aws_iam_policy_document" "auto_discover_cluster" {
}
}
output "primary_server_private_ips" { value = ["${aws_instance.primary.*.private_ip}"] }
output "primary_server_public_ips" { value = ["${aws_instance.primary.*.public_ip}"] }
output "client_private_ips" { value = ["${aws_instance.client.*.private_ip}"] }
output "client_public_ips" { value = ["${aws_instance.client.*.public_ip}"] }
output "primary_server_private_ips" {
value = ["${aws_instance.primary.*.private_ip}"]
}
output "primary_server_public_ips" {
value = ["${aws_instance.primary.*.public_ip}"]
}
output "client_private_ips" {
value = ["${aws_instance.client.*.private_ip}"]
}
output "client_public_ips" {
value = ["${aws_instance.client.*.public_ip}"]
}

View File

@@ -3,11 +3,27 @@ job "hdfs" {
datacenters = [ "dc1" ]
group "NameNode" {
constraint {
operator = "distinct_hosts"
value = "true"
}
task "NameNode" {
driver = "docker"
config {
image = "rcgenova/hadoop-2.7.3"
command = "bash"
args = [ "-c", "hdfs namenode -format && exec hdfs namenode -D fs.defaultFS=hdfs://${NOMAD_ADDR_ipc}/ -D dfs.permissions.enabled=false" ]
network_mode = "host"
port_map {
ipc = 8020
ui = 50070
}
}
resources {
memory = 500
network {
@@ -19,17 +35,7 @@ job "hdfs" {
}
}
}
driver = "docker"
config {
image = "rcgenova/hadoop-2.7.3"
command = "bash"
args = [ "-c", "hdfs namenode -format && exec hdfs namenode -D fs.defaultFS=hdfs://${NOMAD_ADDR_ipc}/ -D dfs.permissions.enabled=false" ]
network_mode = "host"
port_map {
ipc = 8020
ui = 50070
}
}
service {
name = "hdfs"
port = "ipc"
@@ -38,12 +44,32 @@ job "hdfs" {
}
group "DataNode" {
count = 3
constraint {
operator = "distinct_hosts"
value = "true"
}
task "DataNode" {
driver = "docker"
config {
network_mode = "host"
image = "rcgenova/hadoop-2.7.3"
args = [ "hdfs", "datanode"
, "-D", "fs.defaultFS=hdfs://hdfs.service.consul/"
, "-D", "dfs.permissions.enabled=false"
]
port_map {
data = 50010
ipc = 50020
ui = 50075
}
}
resources {
memory = 500
network {
@@ -58,20 +84,7 @@ job "hdfs" {
}
}
}
driver = "docker"
config {
network_mode = "host"
image = "rcgenova/hadoop-2.7.3"
args = [ "hdfs", "datanode"
, "-D", "fs.defaultFS=hdfs://hdfs.service.consul/"
, "-D", "dfs.permissions.enabled=false"
]
port_map {
data = 50010
ipc = 50020
ui = 50075
}
}
}
}

View File

@@ -7,6 +7,7 @@ job "spark-history-server" {
task "history-server" {
driver = "docker"
config {
image = "barnardb/spark"
command = "/spark/spark-2.1.0-bin-nomad/bin/spark-class"