e2e: purge bionic packer image scripts (#17559)

Bionic is dead, long live the Jammy!
Seth Hoenig 2023-06-15 15:15:01 -05:00 committed by GitHub
parent df366df1cd
commit c7b44a57a2
9 changed files with 0 additions and 378 deletions


@@ -1,59 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
variable "build_sha" {
type = string
description = "the revision of the packer scripts building this image"
}
locals {
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
distro = "ubuntu-bionic-18.04-amd64-server-*"
version = "v3"
}
source "amazon-ebs" "latest_ubuntu_bionic" {
ami_name = "nomad-e2e-${local.version}-ubuntu-bionic-amd64-${local.timestamp}"
iam_instance_profile = "packer_build" // defined in nomad-e2e repo
instance_type = "t2.medium"
region = "us-east-1"
ssh_username = "ubuntu"
ssh_interface = "public_ip"
source_ami_filter {
filters = {
architecture = "x86_64"
"block-device-mapping.volume-type" = "gp2"
name = "ubuntu/images/hvm-ssd/${local.distro}"
root-device-type = "ebs"
virtualization-type = "hvm"
}
most_recent = true
owners = ["099720109477"] // Canonical
}
tags = {
OS = "Ubuntu"
Version = "Bionic"
BuilderSha = var.build_sha
}
}
build {
sources = ["source.amazon-ebs.latest_ubuntu_bionic"]
provisioner "file" {
destination = "/tmp/linux"
source = "./ubuntu-bionic-amd64"
}
// cloud-init modifies the apt sources, so we need to wait
// before running our setup
provisioner "shell-local" {
inline = ["sleep 30"]
}
provisioner "shell" {
script = "./ubuntu-bionic-amd64/setup.sh"
}
}


@@ -1,16 +0,0 @@
[Unit]
Description=Consul Agent
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=root
Group=root
[Install]
WantedBy=multi-user.target


@@ -1,55 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
set -e
# These tasks can't be executed during AMI builds because they rely on
# instance-specific data.
mkdir -p /var/run/dnsmasq
mkdir -p /etc/dnsmasq.d
# Add hostname to /etc/hosts
echo "127.0.0.1 $(hostname)" | tee --append /etc/hosts
# this script should run after docker.service but we can't guarantee
# it's created docker0 yet, so wait to make sure
while ! (ip link | grep -q docker0)
do
    sleep 1
done
# Use dnsmasq first and then docker bridge network for DNS resolution
DOCKER_BRIDGE_IP_ADDRESS=$(/usr/local/bin/sockaddr eval 'GetInterfaceIP "docker0"')
cat <<EOF > /tmp/resolv.conf
nameserver 127.0.0.1
nameserver $DOCKER_BRIDGE_IP_ADDRESS
EOF
cp /tmp/resolv.conf /etc/resolv.conf
# need to get the interface for dnsmasq config so that we can
# accommodate both "predictable" and old-style interface names
IFACE=$(/usr/local/bin/sockaddr eval 'GetDefaultInterfaces | attr "Name"')
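# generate a dnsmasq config that binds to lo, docker0, and the default
# interface, and forwards *.consul queries to Consul's DNS on 127.0.0.1:8600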
cat <<EOF > /tmp/dnsmasq
port=53
resolv-file=/var/run/dnsmasq/resolv.conf
bind-interfaces
interface=docker0
interface=lo
interface=$IFACE
listen-address=127.0.0.1
server=/consul/127.0.0.1#8600
EOF
cp /tmp/dnsmasq /etc/dnsmasq.d/default
# need to get the AWS DNS address from the VPC...
# this is pretty hacky but will work for any typical case
MAC=$(curl -s --fail http://169.254.169.254/latest/meta-data/mac)
CIDR_BLOCK=$(curl -s --fail "http://169.254.169.254/latest/meta-data/network/interfaces/macs/$MAC/vpc-ipv4-cidr-block")
VPC_DNS_ROOT=$(echo "$CIDR_BLOCK" | cut -d'.' -f1-3)
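# AWS places the VPC DNS server at the base of the VPC CIDR block plus two (e.g. 10.0.0.2)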
echo "nameserver ${VPC_DNS_ROOT}.2" > /tmp/dnsmasq-resolv.conf
cp /tmp/dnsmasq-resolv.conf /var/run/dnsmasq/resolv.conf
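# validate the generated config; a failure here aborts the dnsmasq unit's ExecStartPre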
/usr/sbin/dnsmasq --test


@@ -1,8 +0,0 @@
port=53
resolv-file=/var/run/dnsmasq/resolv.conf
bind-interfaces
interface=docker0
interface=lo
interface=eth0
listen-address=127.0.0.1
server=/consul/127.0.0.1#8600


@@ -1,37 +0,0 @@
[Unit]
Description=dnsmasq - A lightweight DHCP and caching DNS server
Requires=network.target
Wants=nss-lookup.target
Before=nss-lookup.target
After=network.target
After=docker.service
[Service]
Type=forking
PIDFile=/run/dnsmasq/dnsmasq.pid
# Configure our hosts and resolver file with info from the host,
# then test the resulting config file before starting
ExecStartPre=/usr/local/bin/dnsconfig.sh
# (from upstream)
# We run dnsmasq via the /etc/init.d/dnsmasq script which acts as a
# wrapper picking up extra configuration files and then execs dnsmasq
# itself, when called with the "systemd-exec" function.
ExecStart=/etc/init.d/dnsmasq systemd-exec
# (from upstream)
# The systemd-*-resolvconf functions configure (and deconfigure)
# resolvconf to work with the dnsmasq DNS server. They're called like
# this to get correct error handling (i.e. don't start-resolvconf if the
# dnsmasq daemon fails to start).
ExecStartPost=/etc/init.d/dnsmasq systemd-start-resolvconf
# We need to tell docker to pick up the changes
ExecStartPost=/bin/systemctl restart docker
ExecStop=/etc/init.d/dnsmasq systemd-stop-resolvconf
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target


@@ -1,15 +0,0 @@
[Unit]
Description=Podman Remote API Service
Requires=io.podman.socket
After=io.podman.socket
Documentation=man:podman-varlink(1)
[Service]
Type=simple
ExecStart=/usr/bin/podman varlink unix:%t/podman/io.podman --timeout=60000
TimeoutStopSec=30
KillMode=process
[Install]
WantedBy=multi-user.target
Also=io.podman.socket


@@ -1,10 +0,0 @@
[Unit]
Description=Podman Remote API Socket
Documentation=man:podman-varlink(1) https://podman.io/blogs/2019/01/16/podman-varlink.html
[Socket]
ListenStream=%t/podman/io.podman
SocketMode=0600
[Install]
WantedBy=sockets.target


@@ -1,21 +0,0 @@
[Unit]
Description=Nomad Agent
Requires=network-online.target
After=network-online.target
StartLimitIntervalSec=0
StartLimitBurst=3
[Service]
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
EnvironmentFile=-/etc/nomad.d/.environment
KillMode=process
KillSignal=SIGINT
LimitNOFILE=65536
LimitNPROC=infinity
TasksMax=infinity
Restart=on-failure
RestartSec=2
[Install]
WantedBy=multi-user.target


@@ -1,157 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
# setup script for Ubuntu Linux 18.04. Assumes that Packer has placed
# build-time config files at /tmp/linux
set -e
NOMAD_PLUGIN_DIR=/opt/nomad/plugins/
mkdir_for_root() {
sudo mkdir -p "$1"
sudo chmod 755 "$1"
}
# Disable interactive apt prompts
export DEBIAN_FRONTEND=noninteractive
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
mkdir_for_root /opt
mkdir_for_root /srv/data # for host volumes
# Dependencies
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y \
    software-properties-common \
    dnsmasq unzip tree redis-tools jq curl tmux awscli nfs-common \
    apt-transport-https ca-certificates gnupg2
# Install sockaddr
aws s3 cp "s3://nomad-team-dev-test-binaries/tools/sockaddr_linux_amd64" /tmp/sockaddr
sudo mv /tmp/sockaddr /usr/local/bin
sudo chmod +x /usr/local/bin/sockaddr
sudo chown root:root /usr/local/bin/sockaddr
# Disable the firewall
sudo ufw disable || echo "ufw not installed"
echo "Install HashiCorp apt repositories"
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
sudo apt-get update
echo "Install Consul and Nomad"
sudo apt-get install -y \
    consul-enterprise \
    nomad
# Note: neither service will start on boot because we haven't enabled
# the systemd unit files and we haven't uploaded any configuration
# files for Consul and Nomad
echo "Configure Consul"
mkdir_for_root /etc/consul.d
mkdir_for_root /opt/consul
sudo mv /tmp/linux/consul.service /etc/systemd/system/consul.service
echo "Configure Nomad"
mkdir_for_root /etc/nomad.d
mkdir_for_root /opt/nomad
mkdir_for_root $NOMAD_PLUGIN_DIR
sudo mv /tmp/linux/nomad.service /etc/systemd/system/nomad.service
echo "Installing third-party apt repositories"
# Docker
distro=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/${distro} $(lsb_release -cs) stable"
# Java
sudo add-apt-repository -y ppa:openjdk-r/ppa
# Podman
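# /etc/os-release provides $VERSION_ID, used to pick the matching kubic repository below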
. /etc/os-release
curl -fsSL "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key" | sudo apt-key add -
sudo add-apt-repository "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /"
sudo apt-get update
echo "Installing Docker"
sudo apt-get install -y docker-ce
echo "Installing Java"
sudo apt-get install -y openjdk-14-jdk-headless
echo "Installing CNI plugins"
sudo mkdir -p /opt/cni/bin
wget -q -O - \
    https://github.com/containernetworking/plugins/releases/download/v1.0.0/cni-plugins-linux-amd64-v1.0.0.tgz \
    | sudo tar -C /opt/cni/bin -xz
echo "Installing Podman"
sudo apt-get -y install podman
# get catatonit (to check podman --init switch)
wget -q -P /tmp https://github.com/openSUSE/catatonit/releases/download/v0.1.4/catatonit.x86_64
mkdir -p /usr/libexec/podman
sudo mv /tmp/catatonit* /usr/libexec/podman/catatonit
sudo chmod +x /usr/libexec/podman/catatonit
echo "Installing latest podman task driver"
# install nomad-podman-driver and move to plugin dir
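# query the releases index and pick the newest plain x.y.z version (skipping pre-releases)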
latest_podman=$(curl -s https://releases.hashicorp.com/nomad-driver-podman/index.json | jq --raw-output '.versions |= with_entries(select(.key|match("^\\d+\\.\\d+\\.\\d+$"))) | .versions | keys[]' | sort -rV | head -n1)
wget -q -P /tmp "https://releases.hashicorp.com/nomad-driver-podman/${latest_podman}/nomad-driver-podman_${latest_podman}_linux_amd64.zip"
sudo unzip -q "/tmp/nomad-driver-podman_${latest_podman}_linux_amd64.zip" -d "$NOMAD_PLUGIN_DIR"
sudo chmod +x "${NOMAD_PLUGIN_DIR}/nomad-driver-podman"
# enable varlink socket (not included in ubuntu package)
sudo mv /tmp/linux/io.podman.service /etc/systemd/system/io.podman.service
sudo mv /tmp/linux/io.podman.socket /etc/systemd/system/io.podman.socket
if [ -a "/tmp/linux/nomad-driver-ecs" ]; then
echo "Installing nomad-driver-ecs"
sudo install --mode=0755 --owner=ubuntu /tmp/linux/nomad-driver-ecs "$NOMAD_PLUGIN_DIR"
else
echo "nomad-driver-ecs not found: skipping install"
fi
echo "Configuring dnsmasq"
# disable systemd-resolved and configure dnsmasq to forward local requests to
# Consul. The resolver files need dynamic configuration based on the VPC
# address and docker bridge IP, so they are rewritten at boot time.
sudo systemctl disable systemd-resolved.service
sudo mv /tmp/linux/dnsmasq /etc/dnsmasq.d/default
sudo chown root:root /etc/dnsmasq.d/default
# this is going to be overwritten at provisioning time, but we need something
# here or we can't fetch binaries to do the provisioning
echo 'nameserver 8.8.8.8' > /tmp/resolv.conf
sudo mv /tmp/resolv.conf /etc/resolv.conf
sudo mv /tmp/linux/dnsmasq.service /etc/systemd/system/dnsmasq.service
sudo mv /tmp/linux/dnsconfig.sh /usr/local/bin/dnsconfig.sh
sudo chmod +x /usr/local/bin/dnsconfig.sh
sudo systemctl daemon-reload
echo "Updating boot parameters"
# enable cgroup_memory and swap
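# (cgroup_enable=memory turns on the memory cgroup controller; swapaccount=1 enables swap accounting)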
sudo sed -i 's/GRUB_CMDLINE_LINUX="[^"]*/& cgroup_enable=memory swapaccount=1/' /etc/default/grub
sudo update-grub
echo "Configuring user shell"
sudo tee -a /home/ubuntu/.bashrc << 'EOF'
IP_ADDRESS=$(/usr/local/bin/sockaddr eval 'GetPrivateIP')
export CONSUL_RPC_ADDR=$IP_ADDRESS:8400
export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
export VAULT_ADDR=http://$IP_ADDRESS:8200
export NOMAD_ADDR=http://$IP_ADDRESS:4646
export JAVA_HOME=/usr/lib/jvm/java-14-openjdk-amd64/bin
EOF