From e6ad072542dd9caefb91eca3bbf5522529bc120c Mon Sep 17 00:00:00 2001
From: Alexander Scheel
Date: Fri, 13 May 2022 11:21:15 -0400
Subject: [PATCH] UBI Containerfile - CRT Enablement (#15272)

* Copy UBI Dockerfile into Vault

This Dockerfile was modeled off of the existing Alpine Dockerfile (in this
repo) and the external Dockerfile from the docker-vault repo:

> https://github.com/hashicorp/docker-vault/blob/master/ubi/Dockerfile

We also import the UBI-specific docker-entrypoint.sh, as certain RHEL/Alpine
changes (like interpreter) require a separate entry script.

Signed-off-by: Alexander Scheel

* Add UBI build to CRT pipeline

Also adds workflow_dispatch to the CRT pipeline, to allow manually triggering
CRT from PRs, when desired.

Signed-off-by: Alexander Scheel

* Update Dockerfile

Co-authored-by: Sam Salisbury

* Update Dockerfile

Co-authored-by: Sam Salisbury

* Update Dockerfile

Co-authored-by: Sam Salisbury

* Update Dockerfile

* Update Dockerfile

* Update build.yml

Allow for both push to arbitrary branch plus workflow dispatch, per Newsletter
article.

Co-authored-by: Sam Salisbury
---
 .github/workflows/build.yml              |  30 ++++--
 .release/docker/ubi-docker-entrypoint.sh | 113 +++++++++++++++++++++++
 Dockerfile                               | 100 +++++++++++++++++++-
 3 files changed, 235 insertions(+), 8 deletions(-)
 create mode 100755 .release/docker/ubi-docker-entrypoint.sh

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6914f6022..5f3c1a3bd 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,11 +1,6 @@
 name: build
 
-on:
-  push:
-    # Sequence of patterns matched against refs/heads
-    branches:
-      # Push events on main branch
-      - main
+on: [ workflow_dispatch, push ]
 
 env:
   PKG_NAME: "vault"
@@ -257,3 +252,26 @@ jobs:
           tags: |
             docker.io/hashicorp/${{env.repo}}:${{env.version}}
             public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}
+
+  build-ubi:
+    name: Red Hat UBI ${{ matrix.arch }} build
+    needs:
+      - get-product-version
+      - build-linux
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        arch: ["amd64"]
+    env:
+      repo: ${{github.event.repository.name}}
+      version: ${{needs.get-product-version.outputs.product-version}}
+    steps:
+      - uses: actions/checkout@v2
+      - name: Docker Build (Action)
+        uses: hashicorp/actions-docker-build@v1
+        with:
+          version: ${{env.version}}
+          target: ubi
+          arch: ${{matrix.arch}}
+          zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
+          redhat_tag: scan.connect.redhat.com/ospid-f0a92725-d8c6-4023-9a87-ba785b94c3fd/${{env.repo}}:${{env.version}}-ubi
diff --git a/.release/docker/ubi-docker-entrypoint.sh b/.release/docker/ubi-docker-entrypoint.sh
new file mode 100755
index 000000000..52d2dac46
--- /dev/null
+++ b/.release/docker/ubi-docker-entrypoint.sh
@@ -0,0 +1,113 @@
+#!/bin/sh
+set -e
+
+# Prevent core dumps
+ulimit -c 0
+
+# Allow setting VAULT_REDIRECT_ADDR and VAULT_CLUSTER_ADDR using an interface
+# name instead of an IP address. The interface name is specified using
+# VAULT_REDIRECT_INTERFACE and VAULT_CLUSTER_INTERFACE environment variables. If
+# VAULT_*_ADDR is also set, the resulting URI will combine the protocol and port
+# number with the IP of the named interface.
+get_addr () {
+    local if_name=$1
+    local uri_template=$2
+    ip addr show dev $if_name | awk -v uri=$uri_template '/\s*inet\s/ { \
+      ip=gensub(/(.+)\/.+/, "\\1", "g", $2); \
+      print gensub(/^(.+:\/\/).+(:.+)$/, "\\1" ip "\\2", "g", uri); \
+      exit}'
+}
+
+if [ -n "$VAULT_REDIRECT_INTERFACE" ]; then
+    export VAULT_REDIRECT_ADDR=$(get_addr $VAULT_REDIRECT_INTERFACE ${VAULT_REDIRECT_ADDR:-"http://0.0.0.0:8200"})
+    echo "Using $VAULT_REDIRECT_INTERFACE for VAULT_REDIRECT_ADDR: $VAULT_REDIRECT_ADDR"
+fi
+if [ -n "$VAULT_CLUSTER_INTERFACE" ]; then
+    export VAULT_CLUSTER_ADDR=$(get_addr $VAULT_CLUSTER_INTERFACE ${VAULT_CLUSTER_ADDR:-"https://0.0.0.0:8201"})
+    echo "Using $VAULT_CLUSTER_INTERFACE for VAULT_CLUSTER_ADDR: $VAULT_CLUSTER_ADDR"
+fi
+
+# VAULT_CONFIG_DIR isn't exposed as a volume but you can compose additional
+# config files in there if you use this image as a base, or use
+# VAULT_LOCAL_CONFIG below.
+VAULT_CONFIG_DIR=/vault/config
+
+# You can also set the VAULT_LOCAL_CONFIG environment variable to pass some
+# Vault configuration JSON without having to bind any volumes.
+if [ -n "$VAULT_LOCAL_CONFIG" ]; then
+    echo "$VAULT_LOCAL_CONFIG" > "$VAULT_CONFIG_DIR/local.json"
+fi
+
+# Due to OpenShift environment compatibility, we have to allow group write
+# access to the Vault configuration. This requires us to disable the stricter
+# file permissions checks introduced in Vault v1.11.0.
+export VAULT_DISABLE_FILE_PERMISSIONS_CHECK=1
+
+# If the user is trying to run Vault directly with some arguments, then
+# pass them to Vault.
+if [ "${1:0:1}" = '-' ]; then
+    set -- vault "$@"
+fi
+
+# Look for Vault subcommands.
+if [ "$1" = 'server' ]; then
+    shift
+    set -- vault server \
+        -config="$VAULT_CONFIG_DIR" \
+        -dev-root-token-id="$VAULT_DEV_ROOT_TOKEN_ID" \
+        -dev-listen-address="${VAULT_DEV_LISTEN_ADDRESS:-"0.0.0.0:8200"}" \
+        "$@"
+elif [ "$1" = 'version' ]; then
+    # This needs a special case because there's no help output.
+    set -- vault "$@"
+elif vault --help "$1" 2>&1 | grep -q "vault $1"; then
+    # We can't use the return code to check for the existence of a subcommand, so
+    # we have to use grep to look for a pattern in the help output.
+    set -- vault "$@"
+fi
+
+# If we are running Vault, make sure it executes as the proper user.
+if [ "$1" = 'vault' ]; then
+    if [ -z "$SKIP_CHOWN" ]; then
+        # If the config dir is bind mounted then chown it
+        if [ "$(stat -c %u /vault/config)" != "$(id -u vault)" ]; then
+            chown -R vault:vault /vault/config || echo "Could not chown /vault/config (may not have appropriate permissions)"
+        fi
+
+        # If the logs dir is bind mounted then chown it
+        if [ "$(stat -c %u /vault/logs)" != "$(id -u vault)" ]; then
+            chown -R vault:vault /vault/logs
+        fi
+
+        # If the file dir is bind mounted then chown it
+        if [ "$(stat -c %u /vault/file)" != "$(id -u vault)" ]; then
+            chown -R vault:vault /vault/file
+        fi
+    fi
+
+    if [ -z "$SKIP_SETCAP" ]; then
+        # Allow mlock to avoid swapping Vault memory to disk
+        setcap cap_ipc_lock=+ep $(readlink -f /bin/vault)
+
+        # In the case vault has been started in a container without IPC_LOCK privileges
+        if ! vault -version 1>/dev/null 2>/dev/null; then
+            >&2 echo "Couldn't start vault with IPC_LOCK. Disabling IPC_LOCK, please use --cap-add IPC_LOCK"
+            setcap cap_ipc_lock=-ep $(readlink -f /bin/vault)
+        fi
+    fi
+fi
+
+# In case of Docker, where swap may be enabled, we
+# still require mlocking to be available. So this script
+# was executed as root to make this happen, however,
+# we're now rerunning the entrypoint script as the Vault
+# user but no longer need to run setup code for setcap
+# or chowning directories (previously done on the first run).
+if [[ "$(id -u)" == '0' ]]
+then
+    export SKIP_CHOWN="true"
+    export SKIP_SETCAP="true"
+    exec su vault -p "$0" -- "$@"
+else
+    exec "$@"
+fi
diff --git a/Dockerfile b/Dockerfile
index 7748bb3f2..04ed1bb41 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,3 +1,4 @@
+## DOCKERHUB DOCKERFILE ##
 FROM alpine:3.15 as default
 
 ARG BIN_NAME
@@ -5,11 +6,22 @@ ARG BIN_NAME
 # and the version to download. Example: NAME=vault PRODUCT_VERSION=1.2.3.
 ARG NAME=vault
 ARG PRODUCT_VERSION
+ARG PRODUCT_REVISION
 # TARGETARCH and TARGETOS are set automatically when --platform is provided.
 ARG TARGETOS TARGETARCH
 
-LABEL maintainer="Vault Team <vault@hashicorp.com>"
-LABEL version=${PRODUCT_VERSION}
+# Additional metadata labels used by container registries, platforms
+# and certification scanners.
+LABEL name="Vault" \
+      maintainer="Vault Team <vault@hashicorp.com>" \
+      vendor="HashiCorp" \
+      version=${PRODUCT_VERSION} \
+      release=${PRODUCT_REVISION} \
+      revision=${PRODUCT_REVISION} \
+      summary="Vault is a tool for securely accessing secrets." \
+      description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
+
+COPY LICENSE /licenses/mozilla.txt
 
 # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
 ENV NAME=$NAME
@@ -56,3 +68,87 @@ ENTRYPOINT ["docker-entrypoint.sh"]
 # # By default you'll get a single-node development server that stores everything
 # # in RAM and bootstraps itself. Don't use this configuration for production.
 CMD ["server", "-dev"]
+
+
+## UBI DOCKERFILE ##
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5 as ubi
+
+ARG BIN_NAME
+# PRODUCT_VERSION is the version built dist/$TARGETOS/$TARGETARCH/$BIN_NAME,
+# which we COPY in later. Example: PRODUCT_VERSION=1.2.3.
+ARG PRODUCT_VERSION
+ARG PRODUCT_REVISION
+# TARGETARCH and TARGETOS are set automatically when --platform is provided.
+ARG TARGETOS TARGETARCH
+
+# Additional metadata labels used by container registries, platforms
+# and certification scanners.
+LABEL name="Vault" \
+      maintainer="Vault Team <vault@hashicorp.com>" \
+      vendor="HashiCorp" \
+      version=${PRODUCT_VERSION} \
+      release=${PRODUCT_REVISION} \
+      revision=${PRODUCT_REVISION} \
+      summary="Vault is a tool for securely accessing secrets." \
+      description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
+
+COPY LICENSE /licenses/mozilla.txt
+
+# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
+ENV NAME=$NAME
+ENV VERSION=$VERSION
+
+# Set up certificates, our base tools, and Vault. Unlike the other version of
+# this (https://github.com/hashicorp/docker-vault/blob/master/ubi/Dockerfile),
+# we copy in the Vault binary from CRT.
+RUN set -eux; \
+    microdnf install -y ca-certificates gnupg openssl libcap tzdata procps shadow-utils util-linux
+
+# Create a non-root user to run the software.
+RUN groupadd --gid 1000 vault && \
+    adduser --uid 100 --system -g vault vault && \
+    usermod -a -G root vault
+
+# Copy in the new Vault from CRT pipeline, rather than fetching it from our
+# public releases.
+COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/
+
+# /vault/logs is made available to use as a location to store audit logs, if
+# desired; /vault/file is made available to use as a location with the file
+# storage backend, if desired; the server will be started with /vault/config as
+# the configuration directory so you can add additional config files in that
+# location.
+RUN mkdir -p /vault/logs && \
+    mkdir -p /vault/file && \
+    mkdir -p /vault/config && \
+    mkdir -p $HOME && \
+    chown -R vault /vault && chown -R vault $HOME && \
+    chgrp -R 0 $HOME && chmod -R g+rwX $HOME && \
+    chgrp -R 0 /vault && chmod -R g+rwX /vault
+
+# Expose the logs directory as a volume since there's potentially long-running
+# state in there
+VOLUME /vault/logs
+
+# Expose the file directory as a volume since there's potentially long-running
+# state in there
+VOLUME /vault/file
+
+# 8200/tcp is the primary interface that applications use to interact with
+# Vault.
+EXPOSE 8200
+
+# The entry point script uses dumb-init as the top-level process to reap any
+# zombie processes created by Vault sub-processes.
+#
+# For production derivatives of this container, you should add the IPC_LOCK
+# capability so that Vault can mlock memory.
+COPY .release/docker/ubi-docker-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["ubi-docker-entrypoint.sh"]
+
+# Use the Vault user as the default user for starting this container.
+USER vault
+
+# # By default you'll get a single-node development server that stores everything
+# # in RAM and bootstraps itself. Don't use this configuration for production.
+CMD ["server", "-dev"]
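
Reviewer note (not part of the patch): the ubi stage can be exercised locally without
the CRT pipeline. The sketch below makes assumptions -- the version number, image tag,
and source of the vault binary are placeholders -- but the build arguments mirror what
the build-ubi job passes through hashicorp/actions-docker-build, and the Dockerfile
only requires that a Linux vault binary sit at dist/$TARGETOS/$TARGETARCH/$BIN_NAME.
BuildKit is needed so that TARGETOS/TARGETARCH are populated from --platform.

    # Stage a vault binary where the ubi stage's COPY expects it
    # (any locally built linux/amd64 binary works; this copies the host's).
    mkdir -p dist/linux/amd64
    cp "$(command -v vault)" dist/linux/amd64/vault

    # Build only the ubi stage of the multi-stage Dockerfile.
    DOCKER_BUILDKIT=1 docker build \
      --platform linux/amd64 \
      --target ubi \
      --build-arg BIN_NAME=vault \
      --build-arg PRODUCT_VERSION=1.11.0 \
      --build-arg PRODUCT_REVISION=local \
      -t vault:1.11.0-ubi .

    # Run it; granting IPC_LOCK lets the entrypoint keep mlock enabled instead
    # of falling back to setcap cap_ipc_lock=-ep.
    docker run --cap-add IPC_LOCK -p 8200:8200 vault:1.11.0-ubi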