Format Terraform files (#11099)

Also format Terraform files with hclfmt, which is equivalent to running terraform fmt.

I opted not to use terraform fmt because I didn't want to introduce a dev dependency on the Terraform CLI.

Also, I've optimized the find command to skip directories (e.g. .git, node_modules) that are full of irrelevant files: make hclfmt now takes 0.3s on my Mac, down from 7 seconds.
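
For reference, a rough sketch of running the same formatting by hand outside the Makefile (assuming hclfmt comes from the hashicorp/hcl module; the install path below is an assumption, and the file path is only an example):

    # install hclfmt (assumed module path; go install <pkg>@<version> needs Go 1.16+)
    go install github.com/hashicorp/hcl/v2/cmd/hclfmt@latest

    # rewrite a single file in place, using the same -w flag as the Makefile target
    hclfmt -w path/to/example.tf
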
Mahmood Ali 2021-09-01 15:15:06 -04:00 committed by GitHub
parent eb0ed980a5
commit 906528c197
11 changed files with 102 additions and 95 deletions

View File

@@ -154,7 +154,7 @@ check: ## Lint the source code
@echo "==> Check format of jobspecs and HCL files..."
@$(MAKE) hclfmt
@if (git status -s | grep -q -e '\.hcl$$' -e '\.nomad$$'); then echo the following HCL files are out of sync; git status -s | grep -e '\.hcl$$' -e '\.nomad$$'; exit 1; fi
@if (git status -s | grep -q -e '\.hcl$$' -e '\.nomad$$' -e '\.tf$$'); then echo the following HCL files are out of sync; git status -s | grep -e '\.hcl$$' -e '\.nomad$$' -e '\.tf$$'; exit 1; fi
@echo "==> Check API package is isolated from rest"
@cd ./api && if go list --test -f '{{ join .Deps "\n" }}' . | grep github.com/hashicorp/nomad/ | grep -v -e /nomad/api/ -e nomad/api.test; then echo " /api package depends the ^^ above internal nomad packages. Remove such dependency"; exit 1; fi
@@ -213,8 +213,15 @@ changelog:
.PHONY: hclfmt
hclfmt:
@echo "--> Formatting HCL"
@find . -path ./terraform -prune -o -name 'upstart.nomad' -prune -o \( -name '*.nomad' -o -name '*.hcl' \) -exec \
sh -c 'hclfmt -w {} || echo in path {}' ';'
@find . -name '.terraform' -prune \
-o -name 'upstart.nomad' -prune \
-o -name '.git' -prune \
-o -name 'node_modules' -prune \
-o -name '.next' -prune \
-o -path './ui/dist' -prune \
-o -path './website/out' -prune \
-o \( -name '*.nomad' -o -name '*.hcl' -o -name '*.tf' \) \
-print0 | xargs -0 hclfmt -w
.PHONY: tidy
tidy:

View File

@@ -313,11 +313,11 @@ resource "aws_elb" "server_lb" {
}
output "server_public_ips" {
value = aws_instance.server[*].public_ip
value = aws_instance.server[*].public_ip
}
output "client_public_ips" {
value = aws_instance.client[*].public_ip
value = aws_instance.client[*].public_ip
}
output "server_lb_ip" {

View File

@@ -111,7 +111,7 @@ resource "azurerm_network_interface" "hashistack-server-ni" {
name = "hashistack-ipc"
subnet_id = "${azurerm_subnet.hashistack-sn.id}"
private_ip_address_allocation = "dynamic"
public_ip_address_id = "${element(azurerm_public_ip.hashistack-server-public-ip.*.id,count.index)}"
public_ip_address_id = "${element(azurerm_public_ip.hashistack-server-public-ip.*.id, count.index)}"
}
tags {
@@ -123,7 +123,7 @@ resource "azurerm_virtual_machine" "server" {
name = "hashistack-server-${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.hashistack.name}"
network_interface_ids = ["${element(azurerm_network_interface.hashistack-server-ni.*.id,count.index)}"]
network_interface_ids = ["${element(azurerm_network_interface.hashistack-server-ni.*.id, count.index)}"]
vm_size = "${var.vm_size}"
count = "${var.server_count}"
@@ -189,7 +189,7 @@ resource "azurerm_network_interface" "hashistack-client-ni" {
name = "hashistack-ipc"
subnet_id = "${azurerm_subnet.hashistack-sn.id}"
private_ip_address_allocation = "dynamic"
public_ip_address_id = "${element(azurerm_public_ip.hashistack-client-public-ip.*.id,count.index)}"
public_ip_address_id = "${element(azurerm_public_ip.hashistack-client-public-ip.*.id, count.index)}"
}
tags {
@@ -201,7 +201,7 @@ resource "azurerm_virtual_machine" "client" {
name = "hashistack-client-${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.hashistack.name}"
network_interface_ids = ["${element(azurerm_network_interface.hashistack-client-ni.*.id,count.index)}"]
network_interface_ids = ["${element(azurerm_network_interface.hashistack-client-ni.*.id, count.index)}"]
vm_size = "${var.vm_size}"
count = "${var.client_count}"
depends_on = ["azurerm_virtual_machine.server"]

View File

@@ -1,12 +1,12 @@
job "hdfs" {
datacenters = [ "dc1" ]
datacenters = ["dc1"]
group "NameNode" {
constraint {
operator = "distinct_hosts"
value = "true"
operator = "distinct_hosts"
value = "true"
}
task "NameNode" {
@@ -14,13 +14,13 @@ job "hdfs" {
driver = "docker"
config {
image = "rcgenova/hadoop-2.7.3"
command = "bash"
args = [ "-c", "hdfs namenode -format && exec hdfs namenode -D fs.defaultFS=hdfs://${NOMAD_ADDR_ipc}/ -D dfs.permissions.enabled=false" ]
image = "rcgenova/hadoop-2.7.3"
command = "bash"
args = ["-c", "hdfs namenode -format && exec hdfs namenode -D fs.defaultFS=hdfs://${NOMAD_ADDR_ipc}/ -D dfs.permissions.enabled=false"]
network_mode = "host"
port_map {
ipc = 8020
ui = 50070
ui = 50070
}
}
@@ -48,25 +48,25 @@ job "hdfs" {
count = 3
constraint {
operator = "distinct_hosts"
value = "true"
operator = "distinct_hosts"
value = "true"
}
task "DataNode" {
driver = "docker"
config {
network_mode = "host"
image = "rcgenova/hadoop-2.7.3"
args = [ "hdfs", "datanode"
image = "rcgenova/hadoop-2.7.3"
args = ["hdfs", "datanode"
, "-D", "fs.defaultFS=hdfs://hdfs.service.consul/"
, "-D", "dfs.permissions.enabled=false"
]
port_map {
data = 50010
ipc = 50020
ui = 50075
ipc = 50020
ui = 50075
}
}

View File

@@ -1,17 +1,17 @@
job "spark-history-server" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "server" {
count = 1
task "history-server" {
driver = "docker"
config {
image = "barnardb/spark"
image = "barnardb/spark"
command = "/spark/spark-2.1.0-bin-nomad/bin/spark-class"
args = [ "org.apache.spark.deploy.history.HistoryServer" ]
args = ["org.apache.spark.deploy.history.HistoryServer"]
port_map {
ui = 18080
}

View File

@@ -5,23 +5,23 @@ job "tensorrt" {
task "rtserver" {
driver = "docker"
config {
image = "nvcr.io/nvidia/tensorrtserver:19.02-py3"
image = "nvcr.io/nvidia/tensorrtserver:19.02-py3"
command = "trtserver"
args = [
"--model-store=${NOMAD_TASK_DIR}/models"
]
shm_size=1024
port_map {
http = 8000
grpc = 8001
metrics = 8002
shm_size = 1024
port_map {
http = 8000
grpc = 8001
metrics = 8002
}
ulimit {
memlock = "-1"
stack = "67108864"
stack = "67108864"
}
}
service {
port = "http"
tags = ["http"]
@@ -36,35 +36,35 @@ job "tensorrt" {
grace = "30s"
}
}
# load the example model into ${NOMAD_TASK_DIR}/models
artifact {
source = "http://download.caffe2.ai.s3.amazonaws.com/models/resnet50/predict_net.pb"
destination = "local/models/resnet50_netdef/1/model.netdef"
mode = "file"
mode = "file"
}
artifact {
source = "http://download.caffe2.ai.s3.amazonaws.com/models/resnet50/init_net.pb"
destination = "local/models/resnet50_netdef/1/init_model.netdef"
mode = "file"
mode = "file"
}
artifact {
source = "https://raw.githubusercontent.com/NVIDIA/tensorrt-inference-server/v1.0.0/docs/examples/model_repository/resnet50_netdef/config.pbtxt"
destination = "local/models/resnet50_netdef/config.pbtxt"
mode = "file"
mode = "file"
}
artifact {
source = "https://raw.githubusercontent.com/NVIDIA/tensorrt-inference-server/v1.0.0/docs/examples/model_repository/resnet50_netdef/resnet50_labels.txt"
destination = "local/models/resnet50_netdef/resnet50_labels.txt"
mode = "file"
mode = "file"
}
resources {
cpu = 8192
cpu = 8192
memory = 8192
network {
mbits = 10
port "http" {}
port "http" {}
}
# an Nvidia GPU with >= 4GiB memory, preferably a Tesla
@@ -77,7 +77,7 @@ job "tensorrt" {
}
affinity {
attribute = "${device.model}"
operator = "regexp"
operator = "regexp"
value = "Tesla"
}
}
@@ -95,13 +95,13 @@ job "tensorrt" {
args = [
"main.py", "${RTSERVER}"
]
port_map {
port_map {
http = 5000
}
}
resources {
cpu = 1024
cpu = 1024
memory = 1024
network {
mbits = 10
@@ -110,11 +110,11 @@ job "tensorrt" {
}
template {
data = <<EOH
data = <<EOH
RTSERVER = {{ with service "tensorrt-back-rtserver" }}{{ with index . 0 }} http://{{.Address }}:{{.Port }} {{ end }}{{ end }}
EOH
destination = "local/rtserver.env"
env = true
env = true
}
}

View File

@@ -15,9 +15,9 @@ variable "credentials" {
}
variable "region" {
type = string
default = "us-east1"
description = "The GCP region to deploy resources in."
type = string
default = "us-east1"
description = "The GCP region to deploy resources in."
}
variable "vm_disk_size_gb" {
@@ -36,8 +36,8 @@ variable "client_count" {
}
provider "google" {
project = var.project
credentials = file(var.credentials)
project = var.project
credentials = file(var.credentials)
}
module "hashistack" {

View File

@@ -9,15 +9,15 @@ variable "credentials" {
}
variable "name" {
type = string
default = "hashistack"
description = "The default name to use for resources."
type = string
default = "hashistack"
description = "The default name to use for resources."
}
variable "region" {
type = string
default = "us-east1"
description = "The GCP region to deploy resources in."
type = string
default = "us-east1"
description = "The GCP region to deploy resources in."
}
variable "zone" {
@@ -38,27 +38,27 @@ variable "router_asn" {
}
variable "image" {
type = string
default = "hashistack"
description = "The GCP image name (built with Packer)."
type = string
default = "hashistack"
description = "The GCP image name (built with Packer)."
}
variable "enable_preemptible" {
type = bool
default = false
description = "Use preemptible VM instances, which will be cheaper to run."
type = bool
default = false
description = "Use preemptible VM instances, which will be cheaper to run."
}
variable "server_count" {
type = number
default = 3
description = "The number of server instances to deploy (always use odd number)."
type = number
default = 3
description = "The number of server instances to deploy (always use odd number)."
}
variable "client_count" {
type = number
default = 5
description = "The number of client instances to deploy."
type = number
default = 5
description = "The number of client instances to deploy."
}
variable "server_machine_type" {
@@ -86,7 +86,7 @@ variable "client_disk_size_gb" {
}
resource "google_compute_network" "hashistack" {
name = var.name
name = var.name
auto_create_subnetworks = false
}
@@ -120,8 +120,8 @@ resource "google_compute_router_nat" "hashistack" {
}
resource "google_compute_firewall" "allow-ssh" {
name = "${var.name}-allow-ssh"
network = google_compute_network.hashistack.name
name = "${var.name}-allow-ssh"
network = google_compute_network.hashistack.name
allow {
protocol = "tcp"
@@ -130,9 +130,9 @@ resource "google_compute_firewall" "allow-ssh" {
}
resource "google_compute_firewall" "allow-http-external" {
name = "${var.name}-allow-http-external"
network = google_compute_network.hashistack.name
target_tags = ["server"]
name = "${var.name}-allow-http-external"
network = google_compute_network.hashistack.name
target_tags = ["server"]
allow {
protocol = "tcp"
@@ -153,7 +153,7 @@ resource "google_compute_firewall" "allow-all-internal" {
protocol = "tcp"
ports = ["0-65535"]
}
allow {
protocol = "udp"
ports = ["0-65535"]
@@ -165,11 +165,11 @@ locals {
}
locals {
server_metadata_startup_script = <<EOF
server_metadata_startup_script = <<EOF
sudo bash /ops/shared/scripts/server.sh "gce" "${var.server_count}" "${local.retry_join}"
EOF
client_metadata_startup_script = <<EOF
client_metadata_startup_script = <<EOF
sudo bash /ops/shared/scripts/client.sh "gce" "${local.retry_join}"
EOF
}
@@ -199,7 +199,7 @@ resource "google_compute_instance" "server" {
}
scheduling {
preemptible = var.enable_preemptible
preemptible = var.enable_preemptible
# scheduling must have automatic_restart be false when preemptible is true.
automatic_restart = ! var.enable_preemptible
}
@@ -243,7 +243,7 @@ resource "google_compute_instance" "client" {
}
scheduling {
preemptible = var.enable_preemptible
preemptible = var.enable_preemptible
# scheduling must have automatic_restart be false when preemptible is true.
automatic_restart = ! var.enable_preemptible
}

View File

@@ -1,12 +1,12 @@
vault {
address = "http://active.vault.service.consul:8200"
token=""
grace = "1s"
address = "http://active.vault.service.consul:8200"
token = ""
grace = "1s"
unwrap_token = false
renew_token = true
renew_token = true
}
syslog {
enabled = true
enabled = true
facility = "LOCAL5"
}

View File

@@ -1,9 +1,9 @@
data_dir = "/opt/nomad/data"
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the server
server {
enabled = true
enabled = true
bootstrap_expect = SERVER_COUNT
}
@@ -12,10 +12,10 @@ consul {
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
token = ""
}

View File

@@ -1,11 +1,11 @@
data_dir = "/opt/nomad/data"
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
# Enable the client
client {
enabled = true
options {
"driver.raw_exec.enable" = "1"
"driver.raw_exec.enable" = "1"
"docker.privileged.enabled" = "true"
}
}