cleanup Vagrantfile, Terraform configs and Nomad job files

Rob Genova 2017-06-25 11:10:14 -07:00
parent dbbaea9e09
commit 7a938fde20
6 changed files with 141 additions and 110 deletions

View File

@@ -1,14 +1,9 @@
 # -*- mode: ruby -*-
 # vi: set ft=ruby :

-# All Vagrant configuration is done below. The "2" in Vagrant.configure
-# configures the configuration version (we support older styles for
-# backwards compatibility). Please don't change it unless you know what
-# you're doing.
 Vagrant.configure(2) do |config|
   config.vm.box = "ubuntu/trusty64"

-  # config.vm.network :forwarded_port, guest: 22, host: 1234
   config.vm.provision "shell", inline: <<-SHELL
     cd /tmp

View File

@@ -1,25 +1,25 @@
 variable "region" {
   description = "The AWS region to deploy to."
   default     = "us-east-1"
 }

-variable "ami" { }
+variable "ami" {}

 variable "instance_type" {
   description = "The AWS instance type to use for both clients and servers."
   default     = "t2.medium"
 }

-variable "key_name" { }
+variable "key_name" {}

 variable "server_count" {
   description = "The number of servers to provision."
   default     = "3"
 }

 variable "client_count" {
   description = "The number of clients to provision."
   default     = "4"
 }

 variable "cluster_tag_value" {
@@ -32,19 +32,29 @@ provider "aws" {
 }

 module "hashistack" {
   source            = "../../modules/hashistack"
   region            = "${var.region}"
   ami               = "${var.ami}"
   instance_type     = "${var.instance_type}"
   key_name          = "${var.key_name}"
   server_count      = "${var.server_count}"
   client_count      = "${var.client_count}"
   cluster_tag_value = "${var.cluster_tag_value}"
 }

-output "primary_server_private_ips" { value = "${module.hashistack.primary_server_private_ips}" }
-output "primary_server_public_ips" { value = "${module.hashistack.primary_server_public_ips}" }
-output "client_private_ips" { value = "${module.hashistack.client_private_ips}" }
-output "client_public_ips" { value = "${module.hashistack.client_public_ips}" }
+output "primary_server_private_ips" {
+  value = "${module.hashistack.primary_server_private_ips}"
+}
+
+output "primary_server_public_ips" {
+  value = "${module.hashistack.primary_server_public_ips}"
+}
+
+output "client_private_ips" {
+  value = "${module.hashistack.client_private_ips}"
+}
+
+output "client_public_ips" {
+  value = "${module.hashistack.client_public_ips}"
+}

View File

@@ -1,7 +1,7 @@
 region            = "us-east-1"
 ami               = "ami-feac99e8"
 instance_type     = "t2.medium"
 key_name          = "KEY_NAME"
 server_count      = "1"
 client_count      = "4"
 cluster_tag_value = "auto-join"

View File

@@ -1,19 +1,19 @@
-variable "region" { }
-variable "ami" { }
-variable "instance_type" { }
-variable "key_name" { }
-variable "server_count" { }
-variable "client_count" { }
-variable "cluster_tag_value" { }
+variable "region" {}
+variable "ami" {}
+variable "instance_type" {}
+variable "key_name" {}
+variable "server_count" {}
+variable "client_count" {}
+variable "cluster_tag_value" {}

 data "aws_vpc" "default" {
   default = true
 }

 resource "aws_security_group" "primary" {
   name   = "hashistack"
   vpc_id = "${data.aws_vpc.default.id}"

   ingress {
     from_port = 22
     to_port   = 22
@@ -21,6 +21,7 @@ resource "aws_security_group" "primary" {
     cidr_blocks = ["0.0.0.0/0"]
   }

+  # HDFS NameNode UI
   ingress {
     from_port = 50070
     to_port   = 50070
@@ -28,6 +29,7 @@ resource "aws_security_group" "primary" {
     cidr_blocks = ["0.0.0.0/0"]
   }

+  # HDFS DataNode UI
   ingress {
     from_port = 50075
     to_port   = 50075
@@ -35,6 +37,7 @@ resource "aws_security_group" "primary" {
     cidr_blocks = ["0.0.0.0/0"]
   }

+  # Spark history server UI
   ingress {
     from_port = 18080
     to_port   = 18080
@@ -43,17 +46,17 @@ resource "aws_security_group" "primary" {
   }

   ingress {
     from_port = 0
     to_port   = 0
     protocol  = "-1"
     self      = true
   }

   egress {
     from_port   = 0
     to_port     = 0
     protocol    = "-1"
     cidr_blocks = ["0.0.0.0/0"]
   }
 }
@@ -61,9 +64,9 @@ data "template_file" "user_data_server_primary" {
   template = "${file("${path.root}/user-data-server.sh")}"

   vars {
     server_count      = "${var.server_count}"
     region            = "${var.region}"
     cluster_tag_value = "${var.cluster_tag_value}"
   }
 }
@@ -71,51 +74,49 @@ data "template_file" "user_data_client" {
   template = "${file("${path.root}/user-data-client.sh")}"

   vars {
     region            = "${var.region}"
     cluster_tag_value = "${var.cluster_tag_value}"
   }
 }

 resource "aws_instance" "primary" {
   ami                    = "${var.ami}"
   instance_type          = "${var.instance_type}"
   key_name               = "${var.key_name}"
   vpc_security_group_ids = ["${aws_security_group.primary.id}"]
   count                  = "${var.server_count}"

   #Instance tags
   tags {
     Name           = "hashistack-server-${count.index}"
     ConsulAutoJoin = "${var.cluster_tag_value}"
   }

-  user_data = "${data.template_file.user_data_server_primary.rendered}"
-  iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
+  user_data            = "${data.template_file.user_data_server_primary.rendered}"
+  iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
 }

 resource "aws_instance" "client" {
   ami                    = "${var.ami}"
   instance_type          = "${var.instance_type}"
   key_name               = "${var.key_name}"
   vpc_security_group_ids = ["${aws_security_group.primary.id}"]
   count                  = "${var.client_count}"
   depends_on             = ["aws_instance.primary"]

   #Instance tags
   tags {
     Name           = "hashistack-client-${count.index}"
     ConsulAutoJoin = "${var.cluster_tag_value}"
   }

-  user_data = "${data.template_file.user_data_client.rendered}"
-  iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
+  user_data            = "${data.template_file.user_data_client.rendered}"
+  iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
 }

 resource "aws_iam_instance_profile" "instance_profile" {
   name_prefix = "hashistack"
   role        = "${aws_iam_role.instance_role.name}"
 }

 resource "aws_iam_role" "instance_role" {
@@ -155,7 +156,18 @@ data "aws_iam_policy_document" "auto_discover_cluster" {
   }
 }

-output "primary_server_private_ips" { value = ["${aws_instance.primary.*.private_ip}"] }
-output "primary_server_public_ips" { value = ["${aws_instance.primary.*.public_ip}"] }
-output "client_private_ips" { value = ["${aws_instance.client.*.private_ip}"] }
-output "client_public_ips" { value = ["${aws_instance.client.*.public_ip}"] }
+output "primary_server_private_ips" {
+  value = ["${aws_instance.primary.*.private_ip}"]
+}
+
+output "primary_server_public_ips" {
+  value = ["${aws_instance.primary.*.public_ip}"]
+}
+
+output "client_private_ips" {
+  value = ["${aws_instance.client.*.private_ip}"]
+}
+
+output "client_public_ips" {
+  value = ["${aws_instance.client.*.public_ip}"]
+}

View File

@@ -3,11 +3,27 @@ job "hdfs" {
   datacenters = [ "dc1" ]

   group "NameNode" {
     constraint {
       operator = "distinct_hosts"
       value    = "true"
     }

     task "NameNode" {
+      driver = "docker"
+
+      config {
+        image        = "rcgenova/hadoop-2.7.3"
+        command      = "bash"
+        args         = [ "-c", "hdfs namenode -format && exec hdfs namenode -D fs.defaultFS=hdfs://${NOMAD_ADDR_ipc}/ -D dfs.permissions.enabled=false" ]
+        network_mode = "host"
+
+        port_map {
+          ipc = 8020
+          ui  = 50070
+        }
+      }

       resources {
         memory = 500

         network {
@@ -19,17 +35,7 @@ job "hdfs" {
         }
       }

-      driver = "docker"
-      config {
-        image = "rcgenova/hadoop-2.7.3"
-        command = "bash"
-        args = [ "-c", "hdfs namenode -format && exec hdfs namenode -D fs.defaultFS=hdfs://${NOMAD_ADDR_ipc}/ -D dfs.permissions.enabled=false" ]
-        network_mode = "host"
-        port_map {
-          ipc = 8020
-          ui = 50070
-        }
-      }

       service {
         name = "hdfs"
         port = "ipc"
@@ -38,12 +44,32 @@ job "hdfs" {
   }

   group "DataNode" {
     count = 3

     constraint {
       operator = "distinct_hosts"
       value    = "true"
     }

     task "DataNode" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        image        = "rcgenova/hadoop-2.7.3"
+        args         = [ "hdfs", "datanode"
+          , "-D", "fs.defaultFS=hdfs://hdfs.service.consul/"
+          , "-D", "dfs.permissions.enabled=false"
+        ]
+
+        port_map {
+          data = 50010
+          ipc  = 50020
+          ui   = 50075
+        }
+      }

       resources {
         memory = 500

         network {
@@ -58,20 +84,7 @@ job "hdfs" {
         }
       }

-      driver = "docker"
-      config {
-        network_mode = "host"
-        image = "rcgenova/hadoop-2.7.3"
-        args = [ "hdfs", "datanode"
-          , "-D", "fs.defaultFS=hdfs://hdfs.service.consul/"
-          , "-D", "dfs.permissions.enabled=false"
-        ]
-        port_map {
-          data = 50010
-          ipc = 50020
-          ui = 50075
-        }
-      }
     }
   }

View File

@@ -7,6 +7,7 @@ job "spark-history-server" {
     task "history-server" {
       driver = "docker"

      config {
        image   = "barnardb/spark"
        command = "/spark/spark-2.1.0-bin-nomad/bin/spark-class"