Heather Simon 2021-12-06 10:09:49 -08:00
commit 04d634d9d2
29 changed files with 803 additions and 6378 deletions

1623
.circleci/config.yml generated

File diff suppressed because it is too large

242
.github/workflows/build.yml vendored Normal file

@@ -0,0 +1,242 @@
name: build
on:
push:
# Sequence of patterns matched against refs/heads
branches:
# Push events on main branch
- main
env:
PKG_NAME: "vault"
GO_TAGS: "ui"
jobs:
get-product-version:
runs-on: ubuntu-latest
outputs:
product-version: ${{ steps.get-product-version.outputs.product-version }}
steps:
- uses: actions/checkout@v2
- name: get product version
id: get-product-version
run: |
make version
echo "::set-output name=product-version::$(make version)"
generate-metadata-file:
needs: get-product-version
runs-on: ubuntu-latest
outputs:
filepath: ${{ steps.generate-metadata-file.outputs.filepath }}
steps:
- name: 'Checkout directory'
uses: actions/checkout@v2
- name: Generate metadata file
id: generate-metadata-file
uses: hashicorp/actions-generate-metadata@main
with:
version: ${{ needs.get-product-version.outputs.product-version }}
product: ${{ env.PKG_NAME }}
- uses: actions/upload-artifact@v2
with:
name: metadata.json
path: ${{ steps.generate-metadata-file.outputs.filepath }}
build-other:
needs: get-product-version
runs-on: ubuntu-latest
strategy:
matrix:
goos: [ freebsd, windows, netbsd, openbsd, solaris ]
goarch: [ "386", "amd64", "arm" ]
go: [ "1.17.2" ]
exclude:
- goos: solaris
goarch: "386"
- goos: solaris
goarch: arm
- goos: windows
goarch: arm
fail-fast: true
name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
steps:
- uses: actions/checkout@v2
- name: Setup go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Setup node and yarn
uses: actions/setup-node@v2
with:
node-version: '14'
cache: 'yarn'
cache-dependency-path: 'ui/yarn.lock'
- name: UI Build
run: |
cd ui
yarn install --ignore-optional
npm rebuild node-sass
yarn --verbose run build
cd ..
- name: Build
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 0
run: |
mkdir dist out
VAULT_VERSION=${{ needs.get-product-version.outputs.product-version }} VAULT_COMMIT=${GITHUB_SHA} GO_TAGS="${{ env.GO_TAGS }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
- uses: actions/upload-artifact@v2
with:
name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
build-linux:
needs: get-product-version
runs-on: ubuntu-latest
strategy:
matrix:
goos: [linux]
goarch: ["arm", "arm64", "386", "amd64"]
go: ["1.17.2"]
fail-fast: true
name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
steps:
- uses: actions/checkout@v2
- name: Setup go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Setup node and yarn
uses: actions/setup-node@v2
with:
node-version: '14'
cache: 'yarn'
cache-dependency-path: 'ui/yarn.lock'
- name: UI Build
run: |
cd ui
yarn install --ignore-optional
npm rebuild node-sass
yarn --verbose run build
cd ..
- name: Build
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 0
run: |
mkdir dist out
VAULT_VERSION=${{ needs.get-product-version.outputs.product-version }} VAULT_COMMIT=${GITHUB_SHA} GO_TAGS="${{ env.GO_TAGS }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
- uses: actions/upload-artifact@v2
with:
name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
- name: Package
uses: hashicorp/actions-packaging-linux@v1.2
with:
name: ${{ github.event.repository.name }}
description: "Vault is a tool for secrets management, encryption as a service, and privileged access management."
arch: ${{ matrix.goarch }}
version: ${{ needs.get-product-version.outputs.product-version }}
maintainer: "HashiCorp"
homepage: "https://github.com/hashicorp/vault"
license: "MPL-2.0"
binary: "dist/${{ env.PKG_NAME }}"
deb_depends: "openssl"
rpm_depends: "openssl"
config_dir: ".release/linux/package/"
preinstall: ".release/linux/preinst"
postinstall: ".release/linux/postinst"
postremove: ".release/linux/postrm"
- name: Add Package names to env
run: |
echo "RPM_PACKAGE=$(basename out/*.rpm)" >> $GITHUB_ENV
echo "DEB_PACKAGE=$(basename out/*.deb)" >> $GITHUB_ENV
- uses: actions/upload-artifact@v2
with:
name: ${{ env.RPM_PACKAGE }}
path: out/${{ env.RPM_PACKAGE }}
- uses: actions/upload-artifact@v2
with:
name: ${{ env.DEB_PACKAGE }}
path: out/${{ env.DEB_PACKAGE }}
build-darwin:
needs: get-product-version
runs-on: macos-latest
strategy:
matrix:
goos: [ darwin ]
goarch: [ "amd64", "arm64" ]
go: [ "1.17.2" ]
fail-fast: true
name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
steps:
- uses: actions/checkout@v2
- name: Setup go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Setup node and yarn
uses: actions/setup-node@v2
with:
node-version: '14'
cache: 'yarn'
cache-dependency-path: 'ui/yarn.lock'
- name: UI Build
run: |
cd ui
yarn install --ignore-optional
npm rebuild node-sass
yarn --verbose run build
cd ..
- name: Build
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 0
run: |
mkdir dist out
VAULT_VERSION=${{ needs.get-product-version.outputs.product-version }} VAULT_COMMIT=${GITHUB_SHA} GO_TAGS="${{ env.GO_TAGS }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
- uses: actions/upload-artifact@v2
with:
name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
build-docker:
name: Docker ${{ matrix.arch }} build
needs:
- get-product-version
- build-linux
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["arm", "arm64", "386", "amd64"]
env:
repo: ${{github.event.repository.name}}
version: ${{needs.get-product-version.outputs.product-version}}
steps:
- uses: actions/checkout@v2
- name: Docker Build (Action)
uses: hashicorp/actions-docker-build@v1
with:
version: ${{env.version}}
target: default
arch: ${{matrix.arch}}
zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
tags: |
docker.io/hashicorp/${{env.repo}}:${{env.version}}
ecr.public.aws/hashicorp/${{env.repo}}:${{env.version}}
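Note: the Build step is identical across the three platform jobs above. A rough local approximation of what it runs, as a sketch only — the GO_TAGS value mirrors the workflow, and the commit SHA comes from git rather than GITHUB_SHA:
VERSION="$(make version)"
mkdir -p dist out
VAULT_VERSION="$VERSION" VAULT_COMMIT="$(git rev-parse HEAD)" GO_TAGS="ui" make build
zip -r -j "out/vault_${VERSION}_$(go env GOOS)_$(go env GOARCH).zip" dist/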

2
.gitignore vendored

@@ -50,6 +50,8 @@ Vagrantfile
# Configs
*.hcl
!.release/ci.hcl
!.release/linux/package/etc/vault.d/vault.hcl
!command/agent/config/test-fixtures/*.hcl
!command/server/test-fixtures/**/*.hcl

216
.release/ci.hcl Normal file

@@ -0,0 +1,216 @@
schema = "1"
project "vault" {
team = "vault"
slack {
notification_channel = "CRF6FFKEW" // #vault-releases
}
github {
organization = "hashicorp"
repository = "vault"
release_branches = [
"main",
"release/1.6.x",
"release/1.7.x",
"release/1.8.x",
"release/1.9.x"
]
}
}
event "merge" {
// "entrypoint" to use if build is not run automatically
// i.e. send "merge" complete signal to orchestrator to trigger build
}
event "build" {
depends = ["merge"]
action "build" {
organization = "hashicorp"
repository = "vault"
workflow = "build"
}
}
event "upload-dev" {
depends = ["build"]
action "upload-dev" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "upload-dev"
depends = ["build"]
}
notification {
on = "fail"
}
}
event "quality-tests" {
depends = ["upload-dev"]
action "quality-tests" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "quality-tests"
}
notification {
on = "fail"
}
}
event "security-scan" {
depends = ["quality-tests"]
action "security-scan" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "security-scan"
}
notification {
on = "fail"
}
}
event "notarize-darwin-amd64" {
depends = ["security-scan"]
action "notarize-darwin-amd64" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "notarize-darwin-amd64"
}
notification {
on = "fail"
}
}
event "notarize-darwin-arm64" {
depends = ["notarize-darwin-amd64"]
action "notarize-darwin-arm64" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "notarize-darwin-arm64"
}
notification {
on = "fail"
}
}
event "notarize-windows-386" {
depends = ["notarize-darwin-arm64"]
action "notarize-windows-386" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "notarize-windows-386"
}
notification {
on = "fail"
}
}
event "notarize-windows-amd64" {
depends = ["notarize-windows-386"]
action "notarize-windows-amd64" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "notarize-windows-amd64"
}
notification {
on = "fail"
}
}
event "sign" {
depends = ["notarize-windows-amd64"]
action "sign" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "sign"
}
notification {
on = "fail"
}
}
event "sign-linux-rpms" {
depends = ["sign"]
action "sign-linux-rpms" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "sign-linux-rpms"
}
notification {
on = "fail"
}
}
event "verify" {
depends = ["sign-linux-rpms"]
action "verify" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "verify"
}
notification {
on = "fail"
}
}
event "promote-staging" {
action "promote-staging" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "promote-staging"
}
notification {
on = "fail"
}
notification {
on = "success"
}
}
event "promote-production" {
action "promote-production" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "promote-production"
}
notification {
on = "fail"
}
notification {
on = "success"
}
}
event "post-publish" {
depends = ["promote-production"]
action "post-publish" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "post-publish"
}
notification {
on = "fail"
}
notification {
on = "success"
}
}
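Note: read in order of their depends fields, the events above form a single linear pipeline:
merge → build → upload-dev → quality-tests → security-scan → notarize-darwin-amd64 → notarize-darwin-arm64 → notarize-windows-386 → notarize-windows-amd64 → sign → sign-linux-rpms → verify
promote-staging and promote-production declare no depends and are invoked separately later in the release process; post-publish depends on promote-production.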

104
.release/docker/docker-entrypoint.sh Normal file

@@ -0,0 +1,104 @@
#!/usr/bin/dumb-init /bin/sh
set -e
# Note above that we run dumb-init as PID 1 in order to reap zombie processes
# as well as forward signals to all processes in its session. Normally, sh
# wouldn't do either of these functions so we'd leak zombies as well as do
# unclean termination of all our sub-processes.
# Prevent core dumps
ulimit -c 0
# Allow setting VAULT_REDIRECT_ADDR and VAULT_CLUSTER_ADDR using an interface
# name instead of an IP address. The interface name is specified using
# VAULT_REDIRECT_INTERFACE and VAULT_CLUSTER_INTERFACE environment variables. If
# VAULT_*_ADDR is also set, the resulting URI will combine the protocol and port
# number with the IP of the named interface.
get_addr () {
local if_name=$1
local uri_template=$2
ip addr show dev $if_name | awk -v uri=$uri_template '/\s*inet\s/ { \
ip=gensub(/(.+)\/.+/, "\\1", "g", $2); \
print gensub(/^(.+:\/\/).+(:.+)$/, "\\1" ip "\\2", "g", uri); \
exit}'
}
if [ -n "$VAULT_REDIRECT_INTERFACE" ]; then
export VAULT_REDIRECT_ADDR=$(get_addr $VAULT_REDIRECT_INTERFACE ${VAULT_REDIRECT_ADDR:-"http://0.0.0.0:8200"})
echo "Using $VAULT_REDIRECT_INTERFACE for VAULT_REDIRECT_ADDR: $VAULT_REDIRECT_ADDR"
fi
if [ -n "$VAULT_CLUSTER_INTERFACE" ]; then
export VAULT_CLUSTER_ADDR=$(get_addr $VAULT_CLUSTER_INTERFACE ${VAULT_CLUSTER_ADDR:-"https://0.0.0.0:8201"})
echo "Using $VAULT_CLUSTER_INTERFACE for VAULT_CLUSTER_ADDR: $VAULT_CLUSTER_ADDR"
fi
# VAULT_CONFIG_DIR isn't exposed as a volume but you can compose additional
# config files in there if you use this image as a base, or use
# VAULT_LOCAL_CONFIG below.
VAULT_CONFIG_DIR=/vault/config
# You can also set the VAULT_LOCAL_CONFIG environment variable to pass some
# Vault configuration JSON without having to bind any volumes.
if [ -n "$VAULT_LOCAL_CONFIG" ]; then
echo "$VAULT_LOCAL_CONFIG" > "$VAULT_CONFIG_DIR/local.json"
fi
# If the user is trying to run Vault directly with some arguments, then
# pass them to Vault.
if [ "${1:0:1}" = '-' ]; then
set -- vault "$@"
fi
# Look for Vault subcommands.
if [ "$1" = 'server' ]; then
shift
set -- vault server \
-config="$VAULT_CONFIG_DIR" \
-dev-root-token-id="$VAULT_DEV_ROOT_TOKEN_ID" \
-dev-listen-address="${VAULT_DEV_LISTEN_ADDRESS:-"0.0.0.0:8200"}" \
"$@"
elif [ "$1" = 'version' ]; then
# This needs a special case because there's no help output.
set -- vault "$@"
elif vault --help "$1" 2>&1 | grep -q "vault $1"; then
# We can't use the return code to check for the existence of a subcommand, so
# we have to use grep to look for a pattern in the help output.
set -- vault "$@"
fi
# If we are running Vault, make sure it executes as the proper user.
if [ "$1" = 'vault' ]; then
if [ -z "$SKIP_CHOWN" ]; then
# If the config dir is bind mounted then chown it
if [ "$(stat -c %u /vault/config)" != "$(id -u vault)" ]; then
chown -R vault:vault /vault/config || echo "Could not chown /vault/config (may not have appropriate permissions)"
fi
# If the logs dir is bind mounted then chown it
if [ "$(stat -c %u /vault/logs)" != "$(id -u vault)" ]; then
chown -R vault:vault /vault/logs
fi
# If the file dir is bind mounted then chown it
if [ "$(stat -c %u /vault/file)" != "$(id -u vault)" ]; then
chown -R vault:vault /vault/file
fi
fi
if [ -z "$SKIP_SETCAP" ]; then
# Allow mlock to avoid swapping Vault memory to disk
setcap cap_ipc_lock=+ep $(readlink -f $(which vault))
# In the case vault has been started in a container without IPC_LOCK privileges
if ! vault -version 1>/dev/null 2>/dev/null; then
>&2 echo "Couldn't start vault with IPC_LOCK. Disabling IPC_LOCK, please use --privileged or --cap-add IPC_LOCK"
setcap cap_ipc_lock=-ep $(readlink -f $(which vault))
fi
fi
if [ "$(id -u)" = '0' ]; then
set -- su-exec vault "$@"
fi
fi
exec "$@"

47
.release/linux/package/etc/vault.d/vault.hcl Normal file

@@ -0,0 +1,47 @@
# Full configuration options can be found at https://www.vaultproject.io/docs/configuration
ui = true
#mlock = true
#disable_mlock = true
storage "file" {
path = "/opt/vault/data"
}
#storage "consul" {
# address = "127.0.0.1:8500"
# path = "vault"
#}
# HTTP listener
#listener "tcp" {
# address = "127.0.0.1:8200"
# tls_disable = 1
#}
# HTTPS listener
listener "tcp" {
address = "0.0.0.0:8200"
tls_cert_file = "/opt/vault/tls/tls.crt"
tls_key_file = "/opt/vault/tls/tls.key"
}
# Enterprise license_path
# This will be required for enterprise as of v1.8
#license_path = "/etc/vault.d/vault.hclic"
# Example AWS KMS auto unseal
#seal "awskms" {
# region = "us-east-1"
# kms_key_id = "REPLACE-ME"
#}
# Example HSM auto unseal
#seal "pkcs11" {
# lib = "/usr/vault/lib/libCryptoki2_64.so"
# slot = "0"
# pin = "AAAA-BBBB-CCCC-DDDD"
# key_label = "vault-hsm-key"
# hmac_key_label = "vault-hsm-hmac-key"
#}
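Note: one way to sanity-check this file without fully starting the server is vault operator diagnose (available since Vault 1.8), assuming the file is installed at the path the systemd unit below expects:
vault operator diagnose -config=/etc/vault.d/vault.hcl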


@@ -0,0 +1,33 @@
[Unit]
Description="HashiCorp Vault - A tool for managing secrets"
Documentation=https://www.vaultproject.io/docs/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/vault.d/vault.hcl
StartLimitIntervalSec=60
StartLimitBurst=3
[Service]
EnvironmentFile=/etc/vault.d/vault.env
User=vault
Group=vault
ProtectSystem=full
ProtectHome=read-only
PrivateTmp=yes
PrivateDevices=yes
SecureBits=keep-caps
AmbientCapabilities=CAP_IPC_LOCK
CapabilityBoundingSet=CAP_SYSLOG CAP_IPC_LOCK
NoNewPrivileges=yes
ExecStart=/usr/bin/vault server -config=/etc/vault.d/vault.hcl
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGINT
Restart=on-failure
RestartSec=5
TimeoutStopSec=30
LimitNOFILE=65536
LimitMEMLOCK=infinity
[Install]
WantedBy=multi-user.target
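Note: the standard systemd workflow for this unit, assuming the package installs it under the name vault.service:
sudo systemctl daemon-reload
sudo systemctl enable --now vault
systemctl status vault
journalctl -u vault -f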

47
.release/linux/postinst Normal file

@@ -0,0 +1,47 @@
#!/bin/bash
if [[ -f /opt/vault/tls/tls.crt ]] && [[ -f /opt/vault/tls/tls.key ]]; then
echo "Vault TLS key and certificate already exist. Exiting."
exit 0
fi
echo "Generating Vault TLS key and self-signed certificate..."
# Create TLS and Data directory
mkdir --parents /opt/vault/tls
mkdir --parents /opt/vault/data
# Generate TLS key and certificate
cd /opt/vault/tls
openssl req \
-out tls.crt \
-new \
-keyout tls.key \
-newkey rsa:4096 \
-nodes \
-sha256 \
-x509 \
-subj "/O=HashiCorp/CN=Vault" \
-days 1095 # 3 years
# Update file permissions
chown --recursive vault:vault /etc/vault.d
chown --recursive vault:vault /opt/vault
chmod 600 /opt/vault/tls/tls.crt /opt/vault/tls/tls.key
chmod 700 /opt/vault/tls
echo "Vault TLS key and self-signed certificate have been generated in '/opt/vault/tls'."
# Set IPC_LOCK capabilities on vault
setcap cap_ipc_lock=+ep /usr/bin/vault
if [ -d /run/systemd/system ]; then
systemctl --system daemon-reload >/dev/null || true
fi
if [[ $(vault version) == *+ent* ]]; then
echo "
The following shall apply unless your organization has a separately signed Enterprise License Agreement or Evaluation Agreement governing your use of the software:
Software in this repository is subject to the license terms located in the software, copies of which are also available at https://eula.hashicorp.com/ClickThruELA-Global.pdf or https://www.hashicorp.com/terms-of-evaluation as applicable. Please read the license terms prior to using the software. Your installation and use of the software constitutes your acceptance of these terms. If you do not accept the terms, do not use the software.
"
fi
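Note: quick post-install checks on what this script produced (a sketch; getcap output format varies by libcap version):
openssl x509 -in /opt/vault/tls/tls.crt -noout -subject -enddate
getcap /usr/bin/vault   # e.g. /usr/bin/vault cap_ipc_lock=ep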

8
.release/linux/postrm Normal file

@@ -0,0 +1,8 @@
#!/bin/bash
if [ "$1" = "purge" ]
then
userdel vault
fi
exit 0

13
.release/linux/preinst Normal file

@@ -0,0 +1,13 @@
#!/bin/bash
set -eu
USER="vault"
if ! id -u $USER > /dev/null 2>&1; then
useradd \
--system \
--user-group \
--shell /bin/false \
$USER
fi
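Note: a sanity check after install — the system user should exist with no login shell (example output is illustrative):
getent passwd vault   # e.g. vault:x:998:998::/home/vault:/bin/false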

58
Dockerfile Normal file

@@ -0,0 +1,58 @@
FROM alpine:3.14 as default
ARG BIN_NAME
# NAME and VERSION are the name of the software in releases.hashicorp.com
# and the version to download. Example: NAME=vault VERSION=1.2.3.
ARG NAME=vault
ARG VERSION
# TARGETARCH and TARGETOS are set automatically when --platform is provided.
ARG TARGETOS TARGETARCH
LABEL maintainer="Vault Team <vault@hashicorp.com>"
LABEL version=$VERSION
# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
ENV NAME=$NAME
ENV VERSION=$VERSION
# Create a non-root user to run the software.
RUN addgroup ${NAME} && adduser -S -G ${NAME} ${NAME}
RUN apk add --no-cache libcap su-exec dumb-init tzdata
COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/
# /vault/logs is made available to use as a location to store audit logs, if
# desired; /vault/file is made available to use as a location with the file
# storage backend, if desired; the server will be started with /vault/config as
# the configuration directory so you can add additional config files in that
# location.
RUN mkdir -p /vault/logs && \
mkdir -p /vault/file && \
mkdir -p /vault/config && \
chown -R ${NAME}:${NAME} /vault
# Expose the logs directory as a volume since there's potentially long-running
# state in there
VOLUME /vault/logs
# Expose the file directory as a volume since there's potentially long-running
# state in there
VOLUME /vault/file
# 8200/tcp is the primary interface that applications use to interact with
# Vault.
EXPOSE 8200
# The entry point script uses dumb-init as the top-level process to reap any
# zombie processes created by Vault sub-processes.
#
# For production derivatives of this container, you should add the IPC_LOCK
# capability so that Vault can mlock memory.
COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
# By default you'll get a single-node development server that stores everything
# in RAM and bootstraps itself. Don't use this configuration for production.
CMD ["server", "-dev"]

Makefile

@@ -244,12 +244,6 @@ hana-database-plugin:
mongodb-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin
# Tell packagespec where to write its CircleCI config.
PACKAGESPEC_CIRCLECI_CONFIG := .circleci/config/@build-release.yml
# Tell packagespec to re-run 'make ci-config' whenever updating its own CI config.
PACKAGESPEC_HOOK_POST_CI_CONFIG := $(MAKE) ci-config
.PHONY: ci-config
ci-config:
@$(MAKE) -C .circleci ci-config
@@ -261,4 +255,13 @@ ci-verify:
.NOTPARALLEL: ember-dist ember-dist-dev
-include packagespec.mk
.PHONY: build
# This is used for release builds by .github/workflows/build.yml
build:
@echo "--> Building Vault $(VAULT_VERSION)"
@go build -v -tags "$(GO_TAGS)" -ldflags " -X $(VERSION_PKG_PATH).Version=$(VAULT_VERSION) -X $(VERSION_PKG_PATH).GitCommit=$(VAULT_COMMIT)" -o dist/
.PHONY: version
# This is used for release builds by .github/workflows/build.yml
version:
@$(CURDIR)/scripts/version.sh sdk/version/version_base.go
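Note: how the two new targets compose, and a way to verify the ldflags stamping took effect (a sketch run from the repo root):
make version                                   # prints the base version from sdk/version/version_base.go
VAULT_VERSION=$(make version) VAULT_COMMIT=$(git rev-parse HEAD) GO_TAGS=ui make build
./dist/vault version                           # should report the stamped version and commit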

3
changelog/13236.txt Normal file

@@ -0,0 +1,3 @@
```release-note:bug
core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint
```


@@ -1,4 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
* linguist-generated

312
packagespec.mk

@@ -1,312 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
include $(shell git rev-parse --show-toplevel)/packages*.lock/config.mk
.PHONY: packages commands build package write-builder-cache-keys \
write-all-package-cache-keys build-all
GOOS ?= $(shell go env GOOS 2>/dev/null || echo linux)
GOARCH ?= $(shell go env GOARCH 2>/dev/null || echo amd64)
DEFAULT_PACKAGE_YQ := [ .packages[] | select(.inputs.GOOS=="$(GOOS)" and .inputs.GOARCH=="$(GOARCH)") ][0]
QUERY_DEFAULT_PACKAGESPEC = $(call QUERY_LOCK,$(DEFAULT_PACKAGE_YQ) | $(1))
# MK is shorthand for changing to repo root and selecting a make target
# from a file in this directory. All *.mk files in this directory assume
# the current working directory is the repo root. This Makefile exists
# to invoke those mk files correctly.
MK := $(MAKE) -C $(REPO_ROOT) -f $(LOCKDIR)/
# configure load-builder-cache target to load the most specific builder layer cache
# available as an archive in the build cache.
ifneq ($(PACKAGE_SPEC_ID),)
PACKAGE_CACHE_KEY_FILE := $(shell $(call QUERY_PACKAGESPEC,.meta.builtin.PACKAGE_CACHE_KEY_FILE))
# Loading the best available archive for a specific package build.
BUILD_LAYER_ARCHIVES := $(shell $(call QUERY_PACKAGESPEC,.meta.builtin.BUILD_LAYERS[].archive))
BEST_BUILD_LAYER_ARCHIVE := $(shell cd $(REPO_ROOT) && for F in $(BUILD_LAYER_ARCHIVES); do \
if [ -f $$F ]; then echo $$F; exit 0; fi; done)
ifeq ($(BEST_BUILD_LAYER_ARCHIVE),)
load-builder-cache:
@echo "No build layer archives found in build cache. Looked for: $(BUILD_LAYER_ARCHIVES)"
else
BEST_BUILD_LAYER_NAME := $(shell $(call QUERY_PACKAGESPEC,.meta.builtin.BUILD_LAYERS[] \
| select(.archive=="$(BEST_BUILD_LAYER_ARCHIVE)") | .name))
BEST_BUILD_LAYER_LOAD_TARGET := $(BEST_BUILD_LAYER_NAME)-load
load-builder-cache:
@$(MK)layer.mk $(BEST_BUILD_LAYER_LOAD_TARGET)
endif
else ifneq ($(LAYER_SPEC_ID),)
# Loading the best available archive for a specific layer build.
BUILD_LAYER_ARCHIVES := $(shell $(call QUERY_LOCK,.layers[] | select(.name=="$(LAYER_SPEC_ID)") \
| .meta.builtin.LAYER_LIST[].archive))
BEST_BUILD_LAYER_ARCHIVE := $(shell cd $(REPO_ROOT) && for F in $(BUILD_LAYER_ARCHIVES); do \
if [ -f $$F ]; then echo $$F; exit 0; fi; done)
ifeq ($(BEST_BUILD_LAYER_ARCHIVE),)
load-builder-cache:
@echo "No build layer archives found in build cache. Looked for: $(BUILD_LAYER_ARCHIVES)"
else
BEST_BUILD_LAYER_NAME := $(shell $(call QUERY_LOCK,.layers[] | select(.name=="$(LAYER_SPEC_ID)") \
| .meta.builtin.LAYER_LIST[] | select(.archive=="$(BEST_BUILD_LAYER_ARCHIVE)") | .name))
BEST_BUILD_LAYER_LOAD_TARGET := $(BEST_BUILD_LAYER_NAME)-load
load-builder-cache:
@$(MK)layer.mk $(BEST_BUILD_LAYER_LOAD_TARGET)
endif
else
load-builder-cache:
@echo "You must set PACKAGE_SPEC_ID or LAYER_SPEC_ID so we know which caches to look for."
endif
commands:
@$(MAKE) -f packages.mk commands
ifeq ($(DIRTY_FILES),)
DIRTY_SOURCE_WARNING :=
else
DIRTY_SOURCE_WARNING = echo "==> SOURCE TREE IS DIRTY; $(1)"
endif
# build is a convenience target for local builds, do not use in CI.
# Instead, use `make package` specifying PACKAGE_SPEC_ID.
build:
@$(call DIRTY_SOURCE_WARNING,PERFORMING DIRTY BUILD)
@echo "==> Building default package for GOOS=$(GOOS) GOARCH=$(GOARCH)"
@ALIASES=$$($(call QUERY_DEFAULT_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t); \
echo "$$ALIASES"
@PACKAGE_SPEC_ID="$$($(call QUERY_DEFAULT_PACKAGESPEC,.packagespecid) | head -n1)"; \
COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk package"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"
# package-contents is a convenience target for local builds, do not use in CI.
package-contents:
@$(call DIRTY_SOURCE_WARNING,GETTING CONTENTS OF DIRTY BUILD)
@echo "==> Getting contents of default package for GOOS=$(GOOS) GOARCH=$(GOARCH)"
@ALIASES=$$($(call QUERY_DEFAULT_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t); \
echo "$$ALIASES"
@PACKAGE_SPEC_ID="$$($(call QUERY_DEFAULT_PACKAGESPEC,.packagespecid) | head -n1)"; \
COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk package-contents"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"
# copy-package-contents is a convenience target for local builds, do not use in CI.
copy-package-contents:
@$(call DIRTY_SOURCE_WARNING,COPYING CONTENTS OF DIRTY BUILD)
@echo "==> Getting contents of default package for GOOS=$(GOOS) GOARCH=$(GOARCH)"
@ALIASES=$$($(call QUERY_DEFAULT_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t); \
echo "$$ALIASES"
@PACKAGE_SPEC_ID="$$($(call QUERY_DEFAULT_PACKAGESPEC,.packagespecid) | head -n1)"; \
COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk copy-package-contents"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"
# meta is a convenience target for local builds, do not use in CI.
# Instead, use `make package-meta` specifying PACKAGE_SPEC_ID.
meta:
@$(call DIRTY_SOURCE_WARNING,WRITING METADATA FOR DIRTY BUILD)
@echo "==> Writing metacdata for default package (GOOS=$(GOOS) GOARCH=$(GOARCH))"
@ALIASES=$$($(call QUERY_DEFAULT_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t); \
echo "$$ALIASES"
@PACKAGE_SPEC_ID="$$($(call QUERY_DEFAULT_PACKAGESPEC,.packagespecid) | head -n1)"; \
COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk package-meta"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"
# build-all is a convenience target to sequentially build each package.
# It is mostly useful in the tutorial, do not use this in CI as it is much slower
# than building packages in parallel.
build-all:
@PACKAGE_SPEC_IDS="$$($(call QUERY_LOCK,.packages[] | .packagespecid))"; \
COUNT=$$(echo $$PACKAGE_SPEC_IDS | wc -w | xargs); \
echo "==> Building all $$COUNT packages sequentially."; \
for PACKAGE_SPEC_ID in $$PACKAGE_SPEC_IDS; do \
COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk package"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"; \
done
# package expects PACKAGE_SPEC_ID to already be set, use this in CI.
package:
@$(call DIRTY_SOURCE_WARNING,BUILDING DIRTY PACKAGE)
@echo "==> Building package spec $(PACKAGE_SPEC_ID)"
@ALIASES=$$($(call QUERY_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t); \
echo "$$ALIASES"
@COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk package"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"
# package-meta expects PACKAGE_SPEC_ID to already be set, use this in CI.
package-meta:
@$(call DIRTY_SOURCE_WARNING,WRITING DIRTY METADATA FOR DIRTY PACKAGE)
@echo "==> Writing metadata for package $(PACKAGE_SPEC_ID)"
@ALIASES=$$($(call QUERY_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t); \
echo "$$ALIASES"
@COMMAND="PACKAGE_SOURCE_ID=$$PACKAGE_SOURCE_ID PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID $(MK)build.mk package-meta"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"
# package-meta-all writes metadata for every package in the lock file; use this in CI.
package-meta-all:
@$(call DIRTY_SOURCE_WARNING,WRITING DIRTY METADATA FOR DIRTY PACKAGES)
@PACKAGE_SPEC_IDS="$$($(call QUERY_LOCK,.packages[] | .packagespecid))"; \
COUNT=$$(echo $$PACKAGE_SPEC_IDS | wc -w | xargs); \
echo "==> Writing $$COUNT packages' metadata..."; \
for PACKAGE_SPEC_ID in $$PACKAGE_SPEC_IDS; do \
export PACKAGE_SPEC_ID; \
FILE="$(PACKAGE_SOURCE_ID)-$${PACKAGE_SPEC_ID}.zip.meta.json"; \
OUT="$(PACKAGE_STORE)/$$FILE"; \
COMMAND="$(call QUERY_PACKAGESPEC_BY_ID,env.PACKAGE_SPEC_ID,.) > $$OUT"; \
echo "$$COMMAND"; \
$(SHELL) "$$COMMAND"; \
done
# aliases writes all alias symlinks for packages in the package store that
# match the current LOCKFILE and PRODUCT_REVISION. It does not cause a new build.
# If the package store contains no matching binaries, then this does nothing.
aliases:
@echo "==> Writing alias symlinks for existing packages in the store."; \
cd $(REPO_ROOT); \
PACKAGE_SPEC_IDS="$$($(call QUERY_LOCK,.packages[] | .packagespecid))"; \
for PACKAGE_SPEC_ID in $$PACKAGE_SPEC_IDS; do \
PACKAGE_FILE="$$PACKAGE_SOURCE_ID-$$PACKAGE_SPEC_ID.zip"; \
PACKAGE="$(CACHE_ROOT)/packages/store/$$PACKAGE_FILE"; \
[ -f $$PACKAGE ] || continue; \
ALIASES=$$($(call QUERY_PACKAGESPEC_BY_ID,'$$PACKAGE_SPEC_ID',.aliases[] | "$(CACHE_ROOT)/packages/by-alias/\(.type)/\(.path)")); \
for A in $$ALIASES; do \
mkdir -p $$(dirname $$A); \
$(LN) -rfs $$PACKAGE $$A; \
echo "==> Alias written: $$A -> $$PACKAGE"; \
done; \
done
write-builder-cache-keys:
@echo "==> Writing build layer cache keys"
@$(MK)layer.mk write-cache-keys
write-package-cache-key:
@if [ -z "$(PACKAGE_CACHE_KEY_FILE)" ]; then echo "Must set PACKAGE_SPEC_ID"; exit 1; fi
@$(WRITE_PACKAGE_CACHE_KEY)
@echo "==> Package cache key written: $(PACKAGE_CACHE_KEY_FILE)"
# WRITE_PACKAGE_CACHE_KEY writes the package cache key for PACKAGE_SPEC_ID.
# We reference this as an environment variable, so you can override it in a
# recipe rather than relying on the global setting.
define WRITE_PACKAGE_CACHE_KEY
( \
cd $(REPO_ROOT); \
KEY="PACKAGE_SPEC_ID=$$PACKAGE_SPEC_ID"$$'\n'"PACKAGE_SOURCE_ID=$(PACKAGE_SOURCE_ID)"; \
FILE=$$(yq -r ".packages[] | select(.packagespecid==\"$$PACKAGE_SPEC_ID\") \
| .meta.builtin.PACKAGE_CACHE_KEY_FILE" < $(LOCK)); \
echo "$$FILE"; \
echo "$$KEY"; \
mkdir -p $$(dirname $$FILE); \
echo "$$KEY" > "$$FILE";\
)
endef
write-all-package-cache-keys:
@IDS="$$($(call QUERY_LOCK,.packages[].packagespecid))"; \
for PACKAGE_SPEC_ID in $$IDS; do \
$(WRITE_PACKAGE_CACHE_KEY); \
done; \
echo "==> All package cache keys written"
clean-builder-images:
@IMAGES=$$(docker images --format '{{.Repository}}:{{.Tag}}' | grep '^$(BUILDER_IMAGE_PREFIX)' || true); \
if [ -z "$$IMAGES" ]; then exit 0; fi; \
docker rmi -f $$IMAGES
clean:
@cd $(REPO_ROOT); rm -rf $(CACHE_ROOT)
clean-all: clean clean-builder-images
clean-all-prune: clean-all
docker container prune
docker image prune
RELEASER_DIR := $(REPO_ROOT)/.packagespec/release
# REQUIRE_EXPORT requires a set of make variables to be nonempty,
# exits 1 if any are not, and exports each one otherwise.
# To be used in recipe bodies.
define REQUIRE_EXPORT
$(foreach VAR,$(1),[ -n "$($(VAR))" ] || { echo "Must set $(VAR)"; exit 1; }; export $(VAR)='$($(VAR))';)
endef
# EXPORT exports each named variable, if it exists.
define EXPORT
$(foreach VAR,$(1),export $(VAR)='$($(VAR))';)
endef
# INVOKE_RELEASER_TARGET invokes the named target (first arg) in the releaser
# repository, first calling REQUIRE_EXPORT on all the named variables (second arg).
define INVOKE_RELEASER_TARGET
$(call REQUIRE_EXPORT,\
PRODUCT_REPO_LOCAL PRODUCT_REPO PRODUCT_PATH \
PRODUCT_CIRCLECI_SLUG PRODUCT_CIRCLECI_HOST RELEASE_SYSTEM_BRANCH \
PRODUCT_RELEASE_REPO SPEC LOCKDIR \
) \
( cd $(REPO_ROOT) && packagespec load -asset=PREP_TIME -lockdir "$(LOCKDIR)"; ); \
( cd $(REPO_ROOT) && packagespec load -asset=WORK_DIR -lockdir "$(LOCKDIR)"; ); \
$(MAKE) -C $(RELEASER_DIR) $(1)
endef
# RELEASE_TARGETS are targets in the release repo we pass control to
# to perform release actions.
# Note: The release repo is only available to HashiCorp employees.
RELEASE_TARGETS := build-ci stage-config stage custom-build custom-build-config orchestrator stop-orchestrator bundle
# We always rev-parse the PRODUCT_REVISION to obtain the full SHA. This is required
# for downstream processes which use it to determine part of the package name.
$(RELEASE_TARGETS): PRODUCT_REVISION := $(shell git rev-parse $${PRODUCT_REVISION:-HEAD})
$(RELEASE_TARGETS): PRODUCT_VERSION ?= 0.0.0-$(USER)-snapshot
$(RELEASE_TARGETS): RELEASE_SYSTEM_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
custom-build: PRODUCT_VERSION := $(PRODUCT_VERSION)-$(PRODUCT_REVISION)
bundle: PRODUCT_VERSION := $(shell $(call QUERY_LOCK,.packages[0].inputs.PRODUCT_VERSION))
orchestrator: PRODUCT_VERSION := $(shell $(call QUERY_LOCK,.packages[0].inputs.PRODUCT_VERSION))
stop-orchestrator: PRODUCT_VERSION := $(shell $(call QUERY_LOCK,.packages[0].inputs.PRODUCT_VERSION))
$(RELEASE_TARGETS):
@\
echo $(PRODUCT_VERSION) \
$(call REQUIRE_EXPORT,PRODUCT_REVISION PRODUCT_VERSION) \
$(call INVOKE_RELEASER_TARGET,$@)
# QUERY_TARGETS are targets in the release repo that perform queries, and are therefore
# not necessarily bound to a specific PRODUCT_VERSION or PRODUCT_REVISION.
# We still export PRODUCT_VERSION and PRODUCT_REVISION because they can be used as query
# parameters.
QUERY_TARGETS := list-staged-builds list-promoted-builds list-custom-builds watch-ci
$(QUERY_TARGETS): RELEASE_SYSTEM_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
$(QUERY_TARGETS):
@\
$(call EXPORT,PRODUCT_REVISION PRODUCT_VERSION) \
$(call INVOKE_RELEASER_TARGET,$@)
# BUNDLE_TARGETS are targets acting on specific staged bundles, identified by
# their BUNDLE_ID.
BUNDLE_TARGETS := publish-config publish inspect-staged-build workflow
$(BUNDLE_TARGETS): RELEASE_SYSTEM_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
$(BUNDLE_TARGETS):
@\
$(call REQUIRE_EXPORT,BUNDLE_ID) \
$(call INVOKE_RELEASER_TARGET,$@)
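Note: the typical local flow with the convenience targets this file defined (a sketch; packagespec itself must be available, and GOOS/GOARCH default to the host per the top of the file):
make build                          # default package for this host's GOOS/GOARCH
GOOS=linux GOARCH=arm64 make build  # cross-build the default package for another platform
make build-all                      # build every package sequentially (tutorial use only)
make aliases                        # refresh by-alias symlinks for packages already in the store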

138
packages.lock/build.mk

@@ -1,138 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
# build.mk builds the packages defined in packages.lock, first building all necessary
# builder images.
#
# NOTE: This file should always run as though it were in the repo root, so all paths
# are relative to the repo root.
# Include config.mk relative to repo root.
include $(shell git rev-parse --show-toplevel)/packages*.lock/config.mk
ifeq ($(PACKAGE_SPEC_ID),)
$(error You must set PACKAGE_SPEC_ID; 'make build' does this for you.)
endif
ifneq ($(PRODUCT_VERSION),)
$(error You cannot set PRODUCT_VERSION for local builds, did you mean PRODUCT_REVISION?)
endif
# Include the layers driver.
include $(LOCKDIR)/layer.mk
# GET_IMAGE_MARKER_FILE gets the name of the Docker image marker file
# for the named build layer.
GET_IMAGE_MARKER_FILE = $($(1)_IMAGE)
# GET_IMAGE_NAME gets the Docker image name of the build layer.
GET_IMAGE_NAME = $($(1)_IMAGE_NAME)
# Determine the top-level build layer.
BUILD_LAYER_NAME := $(shell $(call QUERY_PACKAGESPEC,.meta.builtin.BUILD_LAYERS[0].name))
BUILD_LAYER_IMAGE = $(call GET_IMAGE_MARKER_FILE,$(BUILD_LAYER_NAME))
BUILD_LAYER_IMAGE_NAME = $(call GET_IMAGE_NAME,$(BUILD_LAYER_NAME))
BUILD_COMMAND := $(shell $(call QUERY_PACKAGESPEC,.["build-command"]))
BUILD_ENV := $(shell $(call QUERY_PACKAGESPEC,.inputs | to_entries[] | "\(.key)=\(.value)"))
ALIASES := $(shell $(call QUERY_PACKAGESPEC,.aliases[] | "\(.type)/\(.path)"))
ALIASES := $(addprefix $(BY_ALIAS)/,$(ALIASES))
ifeq ($(BUILD_COMMAND),)
$(error Unable to find build command for package spec ID $(PACKAGE_SPEC_ID))
endif
ifeq ($(BUILD_ENV),)
$(error Unable to find build inputs for package spec ID $(PACKAGE_SPEC_ID))
endif
# Configure paths and filenames.
OUTPUT_DIR := $(PACKAGE_STORE)
_ := $(shell mkdir -p $(OUTPUT_DIR))
# PACKAGE_NAME is the input-addressed name of the package.
PACKAGE_NAME := $(PACKAGE_SOURCE_ID)-$(PACKAGE_SPEC_ID)
PACKAGE_ZIP_NAME := $(PACKAGE_NAME).zip
PACKAGE := $(OUTPUT_DIR)/$(PACKAGE_ZIP_NAME)
# PACKAGE_CONTENTS is used when a built package needs to be unzipped to examine
# its contents. It is a path to a directory where these contents will be unzipped
# to. This is not needed to produce builds, but is useful for post-build tasks
# when the package contents need to be checked.
PACKAGE_CONTENTS := $(PACKAGE)_contents
META_JSON_NAME := $(PACKAGE_ZIP_NAME).meta.json
META := $(OUTPUT_DIR)/$(META_JSON_NAME)
# In the container, place the output dir at root. This makes 'docker cp' easier.
CONTAINER_OUTPUT_DIR := /$(OUTPUT_DIR)
FULL_BUILD_COMMAND := export $(BUILD_ENV) && mkdir -p $(CONTAINER_OUTPUT_DIR) && $(BUILD_COMMAND)
### Docker run command configuration.
DOCKER_SHELL := /bin/bash -euo pipefail -c
DOCKER_RUN_ENV_FLAGS := \
-e PACKAGE_SOURCE_ID=$(PACKAGE_SOURCE_ID) \
-e OUTPUT_DIR=$(CONTAINER_OUTPUT_DIR) \
-e PACKAGE_ZIP_NAME=$(PACKAGE_ZIP_NAME)
BUILD_CONTAINER_NAME := build-$(PACKAGE_SPEC_ID)-$(PACKAGE_SOURCE_ID)
DOCKER_RUN_FLAGS := $(DOCKER_RUN_ENV_FLAGS) --name $(BUILD_CONTAINER_NAME)
# DOCKER_RUN_COMMAND ties everything together to build the final package as a
# single docker run invocation.
DOCKER_RUN_COMMAND = docker run $(DOCKER_RUN_FLAGS) $(BUILD_LAYER_IMAGE_NAME) $(DOCKER_SHELL) '$(FULL_BUILD_COMMAND)'
# DOCKER_CP_COMMAND copies the built artefact from the build container.
DOCKER_CP_COMMAND = docker cp $(BUILD_CONTAINER_NAME):$(CONTAINER_OUTPUT_DIR)/$(PACKAGE_ZIP_NAME) $(PACKAGE)
# package builds the package according to the set PACKAGE_SPEC_ID and PRODUCT_REVISION.
.PHONY: package
package: $(ALIASES)
@echo $(PACKAGE)
# package-contents builds the package according to PACKAGE_SPEC_ID and PRODUCT_REVISION,
# and then extracts the zip file into an adjacent directory.
.PHONY: package-contents
package-contents: $(PACKAGE_CONTENTS)
@echo "$(PACKAGE_CONTENTS)/"
# copy-package-contents allows copying the contents of a package to a specific
# directory. You must set PACKAGE_CONTENTS_DEST_DIR to this directory.
# This is useful for implementing a top-level make target that places your
# build artifacts in a well-known location.
.PHONY: copy-package-contents
copy-package-contents: $(PACKAGE_CONTENTS)
@[ -n "$(PACKAGE_CONTENTS_DEST_DIR)" ] || { \
echo "==> ERROR: Must set PACKAGE_CONTENTS_DEST_DIR"; \
exit 1; \
}; \
mkdir -p "$(PACKAGE_CONTENTS_DEST_DIR)"; \
cp -r "$(PACKAGE_CONTENTS)"/* "$(PACKAGE_CONTENTS_DEST_DIR)"
.PHONY: package-meta
package-meta: $(META)
@echo $(META)
$(META): $(LOCK)
@$(call QUERY_PACKAGESPEC,.) > $@
# PACKAGE builds the package.
$(PACKAGE): $(BUILD_LAYER_IMAGE)
@mkdir -p $$(dirname $@)
@echo "==> Building package: $@"
@echo "PACKAGE_SOURCE_ID: $(PACKAGE_SOURCE_ID)"
@echo "PACKAGE_SPEC_ID: $(PACKAGE_SPEC_ID)"
@# Print alias info.
@$(call QUERY_PACKAGESPEC,.aliases[] | "alias type:\(.type) path:\(.path)") | column -t
@docker rm -f $(BUILD_CONTAINER_NAME) > /dev/null 2>&1 || true # Speculative cleanup.
$(DOCKER_RUN_COMMAND)
$(DOCKER_CP_COMMAND)
@docker rm -f $(BUILD_CONTAINER_NAME)
$(PACKAGE_CONTENTS): $(PACKAGE)
@mkdir -p "$@" && unzip "$<" -d "$@"
# ALIASES writes the package alias links.
# ALIASES must be phony to ensure they are updated to point to the
# latest builds.
.PHONY: $(ALIASES)
$(ALIASES): $(PACKAGE)
@mkdir -p $(dir $@)
@$(LN) -rfs $(PACKAGE) $@
@echo "==> Package alias written: $@"

253
packages.lock/config.mk

@@ -1,253 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
# config.mk contains constants and derived configuration that applies to
# building both layers and final packages.
# Only include the config once. This means we can include it in the header
# of each makefile, to allow calling them individually and when they call
# each other.
ifneq ($(CONFIG_INCLUDED),YES)
CONFIG_INCLUDED := YES
# Set SHELL to strict mode, in a way compatible with both old and new GNU make.
SHELL := /usr/bin/env bash -euo pipefail -c
REPO_ROOT := $(shell git rev-parse --show-toplevel)
# Set AUTO_INSTALL_TOOLS to YES in CI to have any missing required tools installed
# automatically.
AUTO_INSTALL_TOOLS ?= NO
define ENSURE_GITIGNORE_ALL
_ := $(shell cd "$(REPO_ROOT)" && [ -f "$(1)/.gitignore" ] || { mkdir -p "$(1)"; echo '*' > "$(1)/.gitignore"; })
endef
# CACHE_ROOT is the build cache directory.
CACHE_ROOT ?= .buildcache
_ := $(call ENSURE_GITIGNORE_ALL,$(CACHE_ROOT))
# PACKAGES_ROOT holds the package store, as well as other package aliases.
PACKAGES_ROOT := $(CACHE_ROOT)/packages
_ := $(call ENSURE_GITIGNORE_ALL,$(PACKAGES_ROOT))
# PACKAGE_STORE is where we store all the package files themselves
# addressed by their input hashes.
PACKAGE_STORE := $(PACKAGES_ROOT)/store
_ := $(call ENSURE_GITIGNORE_ALL,$(PACKAGE_STORE))
# BY_ALIAS is where we store alias symlinks to the store.
BY_ALIAS := $(PACKAGES_ROOT)/by-alias
_ := $(call ENSURE_GITIGNORE_ALL,$(BY_ALIAS))
# SPEC is the human-managed description of which packages we are able to build.
SPEC_FILE_PATTERN := packages*.yml
SPEC := $(shell cd $(REPO_ROOT); find . -mindepth 1 -maxdepth 1 -name '$(SPEC_FILE_PATTERN)')
ifneq ($(words $(SPEC)),1)
$(error Found $(words $(SPEC)) $(SPEC_FILE_PATTERN) files, need exactly 1: $(SPEC))
endif
SPEC_FILENAME := $(notdir $(SPEC))
SPEC_MODIFIER := $(SPEC_FILENAME:packages%.yml=%)
# LOCKDIR contains the lockfile and layer files.
LOCKDIR := packages$(SPEC_MODIFIER).lock
# BUILDER_IMAGE_PREFIX is used in generating layers' docker image names.
BUILDER_IMAGE_PREFIX := build-layer
# LOCK is the generated fully-expanded rendition of SPEC, for use in generating CI
# pipelines and other things.
LOCK := $(LOCKDIR)/pkgs.yml
### Utilities and constants
GIT_EXCLUDE_PREFIX := :(exclude)
# SUM generates the sha1sum of its input.
SUM := sha1sum | cut -d' ' -f1
# QUOTE_LIST wraps a list of space-separated strings in quotes.
QUOTE := $(shell echo "'")
QUOTE_LIST = $(addprefix $(QUOTE),$(addsuffix $(QUOTE),$(1)))
GIT_EXCLUDE_LIST = $(call QUOTE_LIST,$(addprefix $(GIT_EXCLUDE_PREFIX),$(1)))
### End utilities and constants.
# ALWAYS_EXCLUDE_SOURCE prevents source from these directories from taking
# part in the SOURCE_ID, or from being sent to the builder image layers.
# This is important for allowing the head of master to build other commits
# where this build system has not been vendored.
#
# Source in LOCKDIR is encoded as PACKAGE_SPEC_ID and included in paths
# and cache keys. Source in .circleci/ should not do much more than call
# code in the release/ directory; SPEC is the source of LOCKDIR.
ALWAYS_EXCLUDE_SOURCE := $(SPEC) $(LOCKDIR)/ ./packagespec.mk ./.circleci/
# ALWAYS_EXCLUDE_SOURCE_GIT is git path filter parlance for the above.
ALWAYS_EXCLUDE_SOURCE_GIT := $(call GIT_EXCLUDE_LIST,$(ALWAYS_EXCLUDE_SOURCE))
YQ_PACKAGE_BY_ID = .packages[] | select(.packagespecid == "$(1)")
# YQ_PACKAGE_PATH is a yq query fragment to select the package PACKAGE_SPEC_ID.
# This may be invalid, check that PACKAGE_SPEC_ID is not empty before use.
YQ_PACKAGE_PATH := $(call YQ_PACKAGE_BY_ID,$(PACKAGE_SPEC_ID))
YQ_PACKAGE_PATH_BY_ID = $(call YQ_PACKAGE_BY_ID,$(1))
# QUERY_LOCK is a macro to query the lock file.
QUERY_LOCK = cd $(REPO_ROOT); yq -r '$(1)' < $(LOCK)
QUERY_SPEC = cd $(REPO_ROOT); yq -r '$(1)' < $(SPEC)
# QUERY_PACKAGESPEC queries the package according to the current PACKAGE_SPEC_ID.
QUERY_PACKAGESPEC = $(call QUERY_LOCK,$(YQ_PACKAGE_PATH) | $(1))
QUERY_PACKAGESPEC_BY_ID = $(call QUERY_LOCK,$(call YQ_PACKAGE_PATH_BY_ID,$(1)) | $(2))
# GIT_COMMIT_OR_TAG_REF returns the git commit or tag ref SHA that the passed
# commit-ish points to (that can be a commit, tag or branch ref).
#
# Note we used to suffix the passed commit-ish with '^{}' in order to traverse tags down
# to individual commits, in case the commit-ish is an annotated tag. However this
# makes build output confusing in case a tag ref is used rather than a commit ref.
# Therefore we now allow building tag refs, even though this means sometimes we might
# be building the same source with two different source IDs, and potentially wasting
# some potential cache hits. The tradeoff in terms of ease of use seems worth it for
# now, but this could be revisited later.
# The original of the line below was: git rev-parse --verify '$(1)^{}'
define GIT_COMMIT_OR_TAG_REF
git rev-parse --verify '$(1)'
endef
ifeq ($(PACKAGE_SOURCE_ID),)
# Even though layers may have different Git revisions, based on the latest
# revision of their source, we always want to
# honour either HEAD or the specified PRODUCT_REVISION for compiling the
# final binaries, as this revision is the one picked by a human to form
# the release, and may be baked into the binaries produced.
ifeq ($(PRODUCT_REVISION),)
# If PRODUCT_REVISION is empty (the default) we are concerned with building the
# current work tree, regardless of whether it is dirty or not. For local builds
# this is more convenient and more likely expected behaviour than having to commit
# just to perform a new build.
#
# Determine the PACKAGE_SOURCE_ID.
#
# Dirty package builds should never be cached because their PACKAGE_SOURCE_ID
# is not unique to the code, it just reflects the last commit ID in the git log
# prefixed with dirty_<dirty_files_sha>.
GIT_REF := HEAD
ALLOW_DIRTY ?= YES
PRODUCT_REVISION_NICE_NAME := <current-workdir>
DIRTY_FILES := $(shell cd $(REPO_ROOT) && git ls-files -o -m --exclude-standard -- $(ALWAYS_EXCLUDE_SOURCE_GIT) | xargs)
ifneq ($(DIRTY_FILES),)
DIRTY := dirty_$(shell cd $(REPO_ROOT) && cat $(DIRTY_FILES) | $(SUM) || echo FAIL)_
ifeq ($(findstring FAIL_,$(DIRTY)),FAIL_)
$(error Failed to determine dirty files sha1sum)
endif
endif
PACKAGE_SOURCE_ID := $(DIRTY)$(shell $(call GIT_COMMIT_OR_TAG_REF,$(GIT_REF)))
else
# PRODUCT_REVISION is non-empty so treat it as a git commit ref and pull files
# directly from git rather than the work tree.
GIT_REF := $(PRODUCT_REVISION)
ALLOW_DIRTY := NO
PRODUCT_REVISION_NICE_NAME := $(PRODUCT_REVISION)
PACKAGE_SOURCE_ID := $(shell if COMMIT=$$($(call GIT_COMMIT_OR_TAG_REF,$(PRODUCT_REVISION))); then echo $$COMMIT; else echo FAILED; fi)
ifeq ($(PACKAGE_SOURCE_ID),FAILED)
$(error Unable to find git ref "$(PRODUCT_REVISION)", do you need to 'git fetch' it?)
endif
endif
endif
export PRODUCT_REVISION GIT_REF ALLOW_DIRTY PACKAGE_SOURCE_ID
# REQ_TOOLS detects availability of a set of tools, and optionally auto-installs them.
define REQ_TOOLS
GROUP_NAME := $(1)
INSTALL_TOOL := $(2)
INSTALL_COMMAND := $(3)
TOOLS := $(4)
TOOL_INSTALL_LOG := $(REPO_ROOT)/$(CACHE_ROOT)/tool-install-$$(GROUP_NAME).log
_ := $$(shell mkdir -p $$(dir $$(TOOL_INSTALL_LOG)))
INSTALL_TOOL_AVAILABLE := $$(shell command -v $$(INSTALL_TOOL) > /dev/null 2>&1 && echo YES)
ATTEMPT_AUTO_INSTALL := NO
ifeq ($$(INSTALL_TOOL_AVAILABLE),YES)
ifeq ($$(AUTO_INSTALL_TOOLS),YES)
ATTEMPT_AUTO_INSTALL := YES
endif
endif
MISSING_PACKAGES := $$(shell \
for T in $$(TOOLS); do \
BIN=$$$$(echo $$$$T | cut -d':' -f1); \
if ! command -v $$$$BIN > /dev/null 2>&1; then \
echo $$$$T | cut -d':' -f2; \
fi; \
done | sort | uniq)
ifneq ($$(MISSING_PACKAGES),)
ifneq ($$(ATTEMPT_AUTO_INSTALL),YES)
$$(error You are missing required tools, please run '$$(INSTALL_COMMAND) $$(MISSING_PACKAGES)'.)
else
RESULT := $$(shell $$(INSTALL_COMMAND) $$(MISSING_PACKAGES) && echo OK > $$(TOOL_INSTALL_LOG))
ifneq ($$(shell cat $$(TOOL_INSTALL_LOG)),OK)
$$(info Failed to auto-install packages with command $$(INSTALL_COMMAND) $$(MISSING_PACKAGES))
$$(error $$(shell cat $$(TOOL_INSTALL_LOG)))
else
$$(info $$(TOOL_INSTALL_LOG))
$$(info Installed $$(GROUP_NAME) tools successfully.)
endif
endif
endif
endef
ifeq ($(shell uname),Darwin)
# On Mac, try to install things with homebrew.
BREW_TOOLS := gln:coreutils gtouch:coreutils gstat:coreutils \
gtar:gnu-tar gfind:findutils jq:jq yq:python-yq
$(eval $(call REQ_TOOLS,brew,brew,brew install,$(BREW_TOOLS)))
else
# If not mac, try to install using apt.
SUDO := $(shell which sudo 2>/dev/null || true)
APT_TOOLS := pip3:python3-pip jq:jq column:bsdmainutils
$(eval $(call REQ_TOOLS,apt,apt-get,$(SUDO) apt-get update && $(SUDO) apt-get install -y,$(APT_TOOLS)))
PIP_TOOLS := yq:yq
$(eval $(call REQ_TOOLS,pip,pip3,pip3 install,$(PIP_TOOLS)))
endif
# We rely on GNU touch, tar and ln.
# On macOS, we assume they are installed as gtouch, gtar, gln by homebrew.
ifeq ($(shell uname),Darwin)
TOUCH := gtouch
TAR := gtar
LN := gln
STAT := gstat
FIND := gfind
else
TOUCH := touch
TAR := tar
LN := ln
STAT := stat
FIND := find
endif
# Read config from the spec.
# PRODUCT_REPO is the official Git repo for this project.
PRODUCT_REPO := $(shell $(call QUERY_SPEC,.config["product-repo"]))
# PRODUCT_REPO_LOCAL is the local clone of this git repo.
PRODUCT_REPO_LOCAL := $(REPO_ROOT)
# RELEASE_REPO is the release repository for this project.
PRODUCT_RELEASE_REPO := $(shell $(call QUERY_SPEC,.config["release-repo"]))
# PRODUCT_PATH must be unique for every repo.
# A golang-style package path is ideal.
PRODUCT_PATH := $(shell $(call QUERY_SPEC,.config["product-id"]))
# PRODUCT_CIRCLECI_SLUG is the slug of this repo's CircleCI project.
PRODUCT_CIRCLECI_SLUG := $(shell $(call QUERY_SPEC,.config["circleci-project-slug"]))
# PRODUCT_CIRCLECI_HOST is the host configured to build this repo.
PRODUCT_CIRCLECI_HOST := $(shell $(call QUERY_SPEC,.config["circleci-host"]))
export ON_PUBLISH := $(shell $(call QUERY_SPEC,.config["on-publish"]))
# End including config once only.
endif
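Note: hand-computing the dirty PACKAGE_SOURCE_ID described above (a sketch — the real recipe additionally filters out the ALWAYS_EXCLUDE_SOURCE paths, omitted here for brevity):
DIRTY_FILES=$(git ls-files -o -m --exclude-standard | xargs)
if [ -n "$DIRTY_FILES" ]; then
  DIRTY_SUM=$(cat $DIRTY_FILES | sha1sum | cut -d' ' -f1)
  echo "dirty_${DIRTY_SUM}_$(git rev-parse --verify HEAD)"
else
  git rev-parse --verify HEAD
fi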

361
packages.lock/layer.mk

@@ -1,361 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
# layer.mk contains the machinery to incrementally build the builder image
# as separate layers, so each can be cached both locally and in CI. This serves
# both to speed up builds by avoiding unnecessary repetition of work already done,
# as well as to enhance the reliability of builds by downloading external
# dependencies only once per build when necessary.
#
# The build layers themselves can be individually exported as tarballs (by calling
# make <layer-name>-save) for later inspection, for sharing, or for implementing
# on-host caching without recourse to external docker registries.
#
# To use this file, include it in another makefile, and from there you must eval
# calls to the LAYER macro with this syntax:
#
# $(eval $(call LAYER,<name>,<type>,<parent-name>,<source-include>,<source-exclude>))
#
# Each layer assumes the existence of a Dockerfile named <name>.Dockerfile in
# packages.lock/layers.
# It uses the <parent-name> to set a Docker build arg called BASE_IMAGE to the
# resultant docker image ref of the named parent layer. You should use this BASE_IMAGE
# in the FROM line in your image.
#
# There must also be a base image which has no parent, and that Dockerfile should
# use a FROM line from an explicit docker image, e.g. debian:buster.
#
# Each image is provided only the source code identified by <source-include>, minus
# any source code matched by <source-exclude>. Source code is any files which are
# present and not ignored by Git. This includes cached files, modified files and new,
# untracked files. The Dockerfile belonging to this layer is ALWAYS included in the
# source, so you don't need to manually specify that.
#
# The set of source code identified by a single image layer is used to produce its
# SOURCE_ID. The SOURCE_ID, when all the files are tracked by Git and are not modified
# equals the latest Git commit SHA that affected any of those files or directories.
# When there are any new or modified files, we take a SHA 256 sum of the latest Git
# commit affecting those files concatenated with the output of git diff and the contents
# of any untracked files, and prefix this with "dirty_". The SOURCE_ID is used as part of
# the cache key for that layer.
#
# Because different combinations of source-include and source-exclude may have been
# modified by the same commit, they may share the same source ID. Therefore, we also
# calculate the LAYER_ID which takes into account not only the current layer's source
# ID, but also its source include/exclude and the ID of its base layer. Thus any change
# in any of the inputs of any base layer invalidates the cache of all subsequent layers.
include $(shell git rev-parse --show-toplevel)/packages*.lock/config.mk
.SECONDARY:
_ := $(shell mkdir -p $(CACHE_ROOT)/source-archives)
### END BUILDER IMAGE LAYERS
## LAYER
# The LAYER macro defines all the targets for each image defined above.
#
# The phony targets are the ones we typically run ourselves or in CI, they are:
#
# <name>-debug : dump debug info for this image layer
# <name>-image : build the image for this image layer
# <name>-save : save the docker image for this layer as a tar.gz
# <name>-load : load this image from the saved tar.gz
define LAYER
LAYERS += $(1)
$(1)_NAME := $(1)
$(1)_TYPE := $(2)
$(1)_BASE := $(3)
$(1)_SOURCE_INCLUDE := $(4)
$(1)_SOURCE_EXCLUDE := $(sort $(5) $(ALWAYS_EXCLUDE_SOURCE))
$(1)_CACHE_KEY_FILE := $(REPO_ROOT)/$(6)
$(1)_IMAGE_ARCHIVE := $(REPO_ROOT)/$(7)
$(1)_CACHE_ROOT := $(CACHE_ROOT)/layers/$$($(1)_NAME)
ifneq ($$($(1)_BASE),)
$(1)_BASE_CACHE_ROOT := $(CACHE_ROOT)/layers/$$($(1)_BASE)
$(1)_BASE_ID_FILE := $$($(1)_BASE_CACHE_ROOT)/current-layer-id
$(1)_BASE_LAYER_ID := $$(shell cat $$($(1)_BASE_ID_FILE))
$(1)_BASE_CACHE := $$($(1)_BASE_CACHE_ROOT)/$$($(1)_BASE_LAYER_ID)
$(1)_BASE_IMAGE := $$($(1)_BASE_CACHE)/image.marker
$(1)_BASE_IMAGE_NAME = $$(shell cat $$($(1)_BASE_IMAGE))
endif
# If no source is included, set source ID to none.
# Note that we include the checksum of the generated Dockerfile as part of cache IDs
# so we still invalidate the cache appropriately.
ifeq ($$($(1)_SOURCE_INCLUDE),)
$(1)_SOURCE_CMD := echo ""
$(1)_SOURCE_ID := packagespec-only-$$($(1)_NAME)
$(1)_SOURCE_ID_NICE_NAME := <packagespec-only>
else
$(1)_SOURCE_GIT = $$(call QUOTE_LIST,$$($(1)_SOURCE_INCLUDE)) $$(call GIT_EXCLUDE_LIST,$$($(1)_SOURCE_EXCLUDE))
$(1)_SOURCE_COMMIT := $$(shell git rev-list -n1 $(GIT_REF) -- $$($(1)_SOURCE_GIT))
# If we allow dirty builds, generate the source ID as a function of the
# source in the current work tree. Where the source all happens to match a Git commit,
# that commit's SHA will be the source ID.
ifeq ($(ALLOW_DIRTY),YES)
$(1)_SOURCE_CMD := { { \
git ls-files -- $$($(1)_SOURCE_GIT); \
git ls-files -m --exclude-standard -- $$($(1)_SOURCE_GIT); \
} | sort | uniq; }
$(1)_SOURCE_MODIFIED := $$(shell git ls-files -m -- $$($(1)_SOURCE_GIT) | xargs)
$(1)_SOURCE_NEW := $$(shell git ls-files -o --exclude-standard -- $$($(1)_SOURCE_GIT) | xargs)
$(1)_SOURCE_DIRTY_LIST := $$(shell echo "$$($(1)_SOURCE_MODIFIED) $$($(1)_SOURCE_NEW)" | xargs)
$(1)_SOURCE_DIRTY_SUM := $$(shell [ -z "$$($(1)_SOURCE_DIRTY_LIST)" ] || cat $$($(1)_SOURCE_DIRTY_LIST) | $(SUM))
$(1)_SOURCE_ID := $$(shell if [ -z "$$($(1)_SOURCE_DIRTY_LIST)" ]; then \
echo "$$($(1)_SOURCE_COMMIT)"; \
else \
echo -n "dirty_$$($(1)_SOURCE_DIRTY_SUM)"; \
fi)
$(1)_ID_PREFIX := $$(shell [ -z "$$($(1)_SOURCE_DIRTY_LIST)" ] || echo "dirty_")
$(1)_SOURCE_ID_NICE_NAME := $$($(1)_SOURCE_ID)
# No dirty builds allowed, so the SOURCE_ID is the git commit SHA,
# and we list files using git ls-tree.
else
$(1)_SOURCE_ID := $$($(1)_SOURCE_COMMIT)
$(1)_SOURCE_ID_NICE_NAME := $$($(1)_SOURCE_ID)
$(1)_SOURCE_CMD := git ls-tree -r --name-only $(GIT_REF) -- $$($(1)_SOURCE_GIT)
endif
endif
# LAYER_ID_CONTENTS dictates all the fields that can cause cache invalidation
# to propagate from the current layer to all dependent layers.
define $(1)_LAYER_ID_CONTENTS
BASE_LAYER_ID=$$($(1)_BASE_LAYER_ID);
LAYER_NAME=$$($(1)_NAME);
SOURCE_ID=$$($(1)_SOURCE_ID);
SOURCE_INCLUDE=$$($(1)_SOURCE_INCLUDE);
SOURCE_EXCLUDE=$$($(1)_SOURCE_EXCLUDE);
endef
$(1)_LAYER_ID_CONTENTS_FILE := $$($(1)_CACHE_ROOT)/current-layer-id-contents
$(1)_LAYER_ID_FILE := $$($(1)_CACHE_ROOT)/current-layer-id
$(1)_DOCKERFILE := $$($(1)_CACHE_ROOT)/Dockerfile
# Create cache root dir and write LAYER_ID_FILE_CONTENTS file.
_ := $$(shell \
mkdir -p $$($(1)_CACHE_ROOT); \
echo "$$($(1)_LAYER_ID_CONTENTS)" > $$($(1)_LAYER_ID_CONTENTS_FILE); \
)
$(1)_LAYER_ID := $$($(1)_ID_PREFIX)$$(shell cat $$($(1)_LAYER_ID_CONTENTS_FILE) | $(SUM))
$(1)_SOURCE_ARCHIVE := $(CACHE_ROOT)/source-archives/$$($(1)_TYPE)-$$($(1)_LAYER_ID).tar
$(1)_IMAGE_NAME := $(BUILDER_IMAGE_PREFIX)-$$($(1)_NAME):$$($(1)_LAYER_ID)
$(1)_CACHE := $(CACHE_ROOT)/layers/$$($(1)_NAME)/$$($(1)_LAYER_ID)
ifeq ($(DEBUG),YES)
$$(info ===== LAYER DEBUG INFO ($(1)) )
$$(info SOURCE_GIT=$$($(1)_SOURCE_GIT))
$$(info SOURCE_COMMIT=$$($(1)_SOURCE_COMMIT))
$$(info SOURCE_MODIFIED=$$($(1)_SOURCE_MODIFIED))
$$(info SOURCE_NEW=$$($(1)_SOURCE_NEW))
$$(info SOURCE_DIRTY_LIST=$$($(1)_SOURCE_DIRTY_LIST))
$$(info SOURCE_DIRTY_SUM=$$($(1)_SOURCE_DIRTY_SUM))
$$(info SOURCE_ID=$$($(1)_SOURCE_ID))
$$(info LAYER_ID=$$($(1)_LAYER_ID))
$$(info SOURCE_LIST=$$(shell $$($(1)_SOURCE_CMD)))
$$(info =====)
endif
# Create the cache dir and write the Layer ID file.
_ := $$(shell \
	mkdir -p $$($(1)_CACHE); \
	echo $$($(1)_LAYER_ID) > $$($(1)_LAYER_ID_FILE); \
)
$(1)_PHONY_TARGET_NAMES := debug id image save load
$(1)_PHONY_TARGETS := $$(addprefix $$($(1)_NAME)-,$$($(1)_PHONY_TARGET_NAMES))
.PHONY: $$($(1)_PHONY_TARGETS)
# File targets.
$(1)_IMAGE := $$($(1)_CACHE)/image.marker
$(1)_LAYER_REFS := $$($(1)_CACHE)/image.layer_refs
$(1)_IMAGE_TIMESTAMP := $$($(1)_CACHE)/image.created_time
$(1)_TARGETS = $$($(1)_PHONY_TARGETS)
# UPDATE_MARKER_FILE ensures the image marker file has the same timestamp as the
# creation date of the docker image it represents. This enables make to rebuild
# the image only when it has really changed, especially after loading it from an
# archive. It also writes a list of all the layers in this docker image's history,
# used when saving layers out to archives for pre-populating Docker build caches.
define $(1)_UPDATE_MARKER_FILE
	export MARKER=$$($(1)_IMAGE); \
	export LAYER_REFS=$$($(1)_LAYER_REFS); \
	export IMAGE=$$($(1)_IMAGE_NAME); \
	export IMAGE_CREATED; \
	if ! { IMAGE_CREATED="$$$$(docker inspect -f '{{.Created}}' $$$$IMAGE 2>/dev/null)"; }; then \
		if [ -f "$$$$MARKER" ]; then \
			echo "==> Removing stale marker file for $$$$IMAGE" 1>&2; \
			rm -f $$$$MARKER; \
		fi; \
		exit 0; \
	fi; \
	if [ ! -f "$$$$MARKER" ]; then \
		echo "==> Writing marker file for $$$$IMAGE (created $$$$IMAGE_CREATED)" 1>&2; \
	fi; \
	echo $$$$IMAGE > $$$$MARKER; \
	$(TOUCH) -m -d $$$$IMAGE_CREATED $$$$MARKER; \
	echo "$$$$IMAGE" > $$$$LAYER_REFS; \
	docker history --no-trunc -q $$$$IMAGE | grep -Fv '<missing>' >> $$$$LAYER_REFS;
endef
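# A minimal sketch of the marker logic above, assuming a hypothetical image
# named example:latest:
#
#   docker inspect -f '{{.Created}}' example:latest   # => creation timestamp
#   echo example:latest > image.marker
#   touch -m -d <that timestamp> image.marker
#
# Because the marker's mtime mirrors the image creation time, make's
# timestamp comparisons treat a reloaded-but-unchanged image as up to date.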
## PHONY targets
$(1)-debug:
	@echo "==> Debug info: $$($(1)_NAME) depends on $$($(1)_BASE)"
	@echo "$(1)_TARGETS         = $$($(1)_TARGETS)"
	@echo "$(1)_SOURCE_CMD      = $$($(1)_SOURCE_CMD)"
	@echo "$(1)_CACHE           = $$($(1)_CACHE)"
	@echo "$(1)_DOCKERFILE      = $$($(1)_DOCKERFILE)"
	@echo "$(1)_SOURCE_COMMIT   = $$($(1)_SOURCE_COMMIT)"
	@echo "$(1)_SOURCE_ID       = $$($(1)_SOURCE_ID)"
	@echo "$(1)_SOURCE_MODIFIED = $$($(1)_SOURCE_MODIFIED)"
	@echo "$(1)_SOURCE_NEW      = $$($(1)_SOURCE_NEW)"
	@echo "$(1)_IMAGE           = $$($(1)_IMAGE)"
	@echo "$(1)_IMAGE_TIMESTAMP = $$($(1)_IMAGE_TIMESTAMP)"
	@echo "$(1)_IMAGE_ARCHIVE   = $$($(1)_IMAGE_ARCHIVE)"
	@echo "$(1)_BASE_IMAGE      = $$($(1)_BASE_IMAGE)"
	@echo
$(1)-id:
	@echo $(1)-$$($(1)_SOURCE_ID)
$(1)-write-cache-key:
	@mkdir -p $$(dir $$($(1)_CACHE_KEY_FILE)); \
	cp $$($(1)_LAYER_ID_CONTENTS_FILE) $$($(1)_CACHE_KEY_FILE); \
	echo "==> Cache key for $(1) written to $$($(1)_CACHE_KEY_FILE)"; \
	cat $$($(1)_CACHE_KEY_FILE)
$(1)-image: $$($(1)_IMAGE)
	@cat $$<
$(1)-layer-refs: $$($(1)_LAYER_REFS)
	@echo $$<
$(1)-save: $$($(1)_IMAGE_ARCHIVE)
	@echo $$<
$(1)-load:
	@\
	ARCHIVE=$$($(1)_IMAGE_ARCHIVE); \
	IMAGE=$$($(1)_IMAGE_NAME); \
	MARKER=$$($(1)_IMAGE); \
	rm -f $$$$MARKER; \
	echo "==> Loading $$$$IMAGE image from $$$$ARCHIVE"; \
	docker load < $$$$ARCHIVE
	@$$(call $(1)_UPDATE_MARKER_FILE)
## END PHONY targets
# Set the BASE_IMAGE build arg to reference the appropriate base image,
# unless there is no referenced base image.
$(1)_DOCKER_BUILD_ARGS = $$(shell [ -z "$$($(1)_BASE)" ] || echo --build-arg BASE_IMAGE=$$$$(cat $$($(1)_BASE_IMAGE)))
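# For example (hypothetical names): for a layer whose base is
# 05-make-ui-folder-<id>, this expands to roughly
#   --build-arg BASE_IMAGE=<builder-image-prefix>-05-make-ui-folder-<id>:<layer id>
# where the image name is read from the base layer's image marker file.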
$(1)_SOURCE_ARCHIVE_WITH_DOCKERFILE := $$($(1)_CACHE)/source-archive.tar
$$($(1)_DOCKERFILE):
	@mkdir -p "$$(dir $$(@))"
	@$$(call QUERY_LOCK,.layers[] | select(.name=="$$($(1)_NAME)").dockerfile) > "$$@"
# Build the docker image.
#
# For dirty builds, tar up a source archive from the local filesystem.
# We pass --ignore-failed-read so that uncommitted file deletions do not
# cause problems. This should be OK for dirty builds.
#
# For non-dirty builds, ask Git directly for a source archive.
#
# We explicitly set the TAR format to ustar because this seems more compatible
# with Docker than any other format. In future we should change this to POSIX
# once Docker supports that properly, because ustar only supports filenames
# shorter than 256 characters, which could eventually become an issue.
TAR_FORMAT := --format=ustar
export DOCKER_BUILDKIT=1
$(1)_FULL_DOCKER_BUILD_COMMAND = docker build --ssh=default -t $$($(1)_IMAGE_NAME) $$($(1)_DOCKER_BUILD_ARGS) \
-f $$($(1)_DOCKERFILE) - < $$($(1)_SOURCE_ARCHIVE_WITH_DOCKERFILE)
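# Expanded, the build command looks roughly like this (names hypothetical):
#
#   docker build --ssh=default -t <prefix>-07-build-ui:<layer id>
#     --build-arg BASE_IMAGE=<prefix>-06-ui-dependencies:<base layer id>
#     -f <cache root>/07-build-ui/Dockerfile - < source-archive.tar
#
# Streaming the source archive on stdin as the build context means Docker
# sees only the files selected by SOURCE_CMD, plus the appended Dockerfile.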
$$($(1)_IMAGE): $$($(1)_BASE_IMAGE) $$($(1)_DOCKERFILE)
	@$$(call $(1)_UPDATE_MARKER_FILE)
	@if [ -f "$$@" ]; then exit 0; fi; \
	echo "==> Building Docker image $$($(1)_IMAGE_NAME)"; \
	echo "    Layer name            : $$($(1)_NAME)"; \
	echo "    Layer source ID       : $$($(1)_SOURCE_ID_NICE_NAME)"; \
	echo "    For product revision  : $(PRODUCT_REVISION_NICE_NAME)"; \
	echo "    For package source ID : $(PACKAGE_SOURCE_ID)"; \
	if [ ! -f "$$($(1)_SOURCE_ARCHIVE)" ]; then \
		if [ "$(ALLOW_DIRTY)" = "YES" ]; then \
			echo "==> Building source archive from working directory: $$($(1)_SOURCE_ARCHIVE)" 1>&2; \
			$$($(1)_SOURCE_CMD) | $(TAR) --create $(TAR_FORMAT) --file $$($(1)_SOURCE_ARCHIVE) --ignore-failed-read -T -; \
		else \
			echo "==> Building source archive from git: $$($(1)_SOURCE_ARCHIVE)" 1>&2; \
			git archive --format=tar $(GIT_REF) $$($(1)_SOURCE_GIT) > $$($(1)_SOURCE_ARCHIVE); \
		fi; \
	fi; \
	if [ ! -f "$$($(1)_SOURCE_ARCHIVE_WITH_DOCKERFILE)" ]; then \
		echo "==> Appending Dockerfile to source archive: $$($(1)_SOURCE_ARCHIVE_WITH_DOCKERFILE)" 1>&2; \
		cp $$($(1)_SOURCE_ARCHIVE) $$($(1)_SOURCE_ARCHIVE_WITH_DOCKERFILE); \
		$(TAR) --append $(TAR_FORMAT) $$($(1)_DOCKERFILE) --file $$($(1)_SOURCE_ARCHIVE_WITH_DOCKERFILE); \
	fi; \
	echo $$($(1)_FULL_DOCKER_BUILD_COMMAND); \
	$$($(1)_FULL_DOCKER_BUILD_COMMAND); \
	$$(call $(1)_UPDATE_MARKER_FILE)
# Save the docker image as a tar.gz.
$$($(1)_IMAGE_ARCHIVE): | $$($(1)_IMAGE)
	@mkdir -p $$(dir $$@); \
	IMAGE=$$$$(cat $$($(1)_IMAGE)); \
	echo "==> Saving $(1) image to $$@"; \
	docker save $$$$IMAGE \
		$$$$(docker history -q --no-trunc $$$$IMAGE | grep -v missing) \
		| gzip > $$@
$$($(1)_LAYER_REFS):
	@echo "$$($(1)_IMAGE_NAME)" > $$@
	@docker history --no-trunc -q $$($(1)_IMAGE_NAME) | grep -Fv '<missing>' >> $$@
endef
### END LAYER
# Include the generated instructions to build each layer.
include $(LOCKDIR)/layers/layers.mk
# Eagerly update the docker image marker files.
_ := $(foreach L,$(LAYERS),$(shell $(call $(L)_UPDATE_MARKER_FILE)))
# DOCKER_LAYER_LIST is used to dump the name of every docker ref in use
# by all of the current builder images. By running 'docker save' against
# this list, we end up with a tarball that can pre-populate the docker
# cache to avoid unnecessary rebuilds.
DOCKER_LAYER_LIST := $(CACHE_ROOT)/docker-layer-list
write-cache-keys: $(addsuffix -write-cache-key,$(LAYERS))
	@echo "==> All cache keys written."

build-all-layers: $(addsuffix -image,$(LAYERS))
	@echo "==> All builder layers built."

.PHONY: debug
debug: $(addsuffix -debug,$(LAYERS))
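# Example usage (illustrative; real layer names come from
# $(LOCKDIR)/layers/layers.mk and include a content hash):
#
#   make <layer-name>-image    # build one builder layer (and its ancestors)
#   make build-all-layers      # build every builder layer
#   make write-cache-keys      # write cache key files for CI cache priming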


@ -1,93 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_ID := 00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_TYPE := base
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_BASE_LAYER :=
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_SOURCE_INCLUDE :=
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_SOURCE_EXCLUDE :=
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_CACHE_KEY_FILE := .buildcache/cache-keys/base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f
LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_ARCHIVE_FILE := .buildcache/archives/00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f.tar.gz
$(eval $(call LAYER,$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_ID),$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_TYPE),$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_BASE_LAYER),$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_SOURCE_INCLUDE),$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_SOURCE_EXCLUDE),$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_CACHE_KEY_FILE),$(LAYER_00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f_ARCHIVE_FILE)))
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_ID := 01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_TYPE := install-go
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_BASE_LAYER := 00-base-fc1fa0c0d0643b393dfc77cbd78580b60fc4366f
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_SOURCE_INCLUDE :=
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_SOURCE_EXCLUDE :=
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_CACHE_KEY_FILE := .buildcache/cache-keys/install-go-877bac5144c973403bc3944c0a4d0e870fbbb079
LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_ARCHIVE_FILE := .buildcache/archives/01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079.tar.gz
$(eval $(call LAYER,$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_ID),$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_TYPE),$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_BASE_LAYER),$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_SOURCE_INCLUDE),$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_SOURCE_EXCLUDE),$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_CACHE_KEY_FILE),$(LAYER_01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079_ARCHIVE_FILE)))
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_ID := 02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_TYPE := install-go-tools
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_BASE_LAYER := 01-install-go-877bac5144c973403bc3944c0a4d0e870fbbb079
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_SOURCE_INCLUDE :=
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_SOURCE_EXCLUDE :=
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_CACHE_KEY_FILE := .buildcache/cache-keys/install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2
LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_ARCHIVE_FILE := .buildcache/archives/02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2.tar.gz
$(eval $(call LAYER,$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_ID),$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_TYPE),$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_BASE_LAYER),$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_SOURCE_INCLUDE),$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_SOURCE_EXCLUDE),$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_CACHE_KEY_FILE),$(LAYER_02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2_ARCHIVE_FILE)))
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_ID := 03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_TYPE := set-workdir
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_BASE_LAYER := 02-install-go-tools-f4c1518e9671005726e8bf75513528512b36f5a2
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_SOURCE_INCLUDE :=
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_SOURCE_EXCLUDE :=
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_CACHE_KEY_FILE := .buildcache/cache-keys/set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53
LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_ARCHIVE_FILE := .buildcache/archives/03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53.tar.gz
$(eval $(call LAYER,$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_ID),$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_TYPE),$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_BASE_LAYER),$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_SOURCE_INCLUDE),$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_SOURCE_EXCLUDE),$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_CACHE_KEY_FILE),$(LAYER_03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53_ARCHIVE_FILE)))
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_ID := 04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_TYPE := install-yarn
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_BASE_LAYER := 03-set-workdir-56f07806a01db8130a01fdbad631a2cba3ff7c53
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_SOURCE_INCLUDE :=
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_SOURCE_EXCLUDE :=
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_CACHE_KEY_FILE := .buildcache/cache-keys/install-yarn-f8c6e065673e6047441a051da1a992019ca43a86
LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_ARCHIVE_FILE := .buildcache/archives/04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86.tar.gz
$(eval $(call LAYER,$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_ID),$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_TYPE),$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_BASE_LAYER),$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_SOURCE_INCLUDE),$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_SOURCE_EXCLUDE),$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_CACHE_KEY_FILE),$(LAYER_04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86_ARCHIVE_FILE)))
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_ID := 05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_TYPE := make-ui-folder
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_BASE_LAYER := 04-install-yarn-f8c6e065673e6047441a051da1a992019ca43a86
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_SOURCE_INCLUDE :=
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_SOURCE_EXCLUDE :=
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_CACHE_KEY_FILE := .buildcache/cache-keys/make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43
LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_ARCHIVE_FILE := .buildcache/archives/05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43.tar.gz
$(eval $(call LAYER,$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_ID),$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_TYPE),$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_BASE_LAYER),$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_SOURCE_INCLUDE),$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_SOURCE_EXCLUDE),$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_CACHE_KEY_FILE),$(LAYER_05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43_ARCHIVE_FILE)))
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_ID := 06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_TYPE := ui-dependencies
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_BASE_LAYER := 05-make-ui-folder-c9af5b6ad4a1e4dffbb201bf29c492e6a838ff43
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_SOURCE_INCLUDE := ui/package.json ui/yarn.lock
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_SOURCE_EXCLUDE :=
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_CACHE_KEY_FILE := .buildcache/cache-keys/ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de
LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_ARCHIVE_FILE := .buildcache/archives/06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de.tar.gz
$(eval $(call LAYER,$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_ID),$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_TYPE),$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_BASE_LAYER),$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_SOURCE_INCLUDE),$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_SOURCE_EXCLUDE),$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_CACHE_KEY_FILE),$(LAYER_06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de_ARCHIVE_FILE)))
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_ID := 07-build-ui-9a4c8178808477469c37460df34f28cade634794
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_TYPE := build-ui
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_BASE_LAYER := 06-ui-dependencies-ee1bc43cc876b6ac95b30d77c726ccd15ad8e4de
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_SOURCE_INCLUDE := ui/
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_SOURCE_EXCLUDE :=
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_CACHE_KEY_FILE := .buildcache/cache-keys/build-ui-9a4c8178808477469c37460df34f28cade634794
LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_ARCHIVE_FILE := .buildcache/archives/07-build-ui-9a4c8178808477469c37460df34f28cade634794.tar.gz
$(eval $(call LAYER,$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_ID),$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_TYPE),$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_BASE_LAYER),$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_SOURCE_INCLUDE),$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_SOURCE_EXCLUDE),$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_CACHE_KEY_FILE),$(LAYER_07-build-ui-9a4c8178808477469c37460df34f28cade634794_ARCHIVE_FILE)))
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_ID := 08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_TYPE := go-modules
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_BASE_LAYER := 07-build-ui-9a4c8178808477469c37460df34f28cade634794
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_SOURCE_INCLUDE := go.mod go.sum */go.mod */go.sum
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_SOURCE_EXCLUDE :=
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_CACHE_KEY_FILE := .buildcache/cache-keys/go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b
LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_ARCHIVE_FILE := .buildcache/archives/08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b.tar.gz
$(eval $(call LAYER,$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_ID),$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_TYPE),$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_BASE_LAYER),$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_SOURCE_INCLUDE),$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_SOURCE_EXCLUDE),$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_CACHE_KEY_FILE),$(LAYER_08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b_ARCHIVE_FILE)))
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_ID := 09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_TYPE := copy-source
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_BASE_LAYER := 08-go-modules-a1e9cc6c22e8691bbc9a3bb3e91eb05d5e97730b
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_SOURCE_INCLUDE := *.go
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_SOURCE_EXCLUDE :=
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_CACHE_KEY_FILE := .buildcache/cache-keys/copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5
LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_ARCHIVE_FILE := .buildcache/archives/09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5.tar.gz
$(eval $(call LAYER,$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_ID),$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_TYPE),$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_BASE_LAYER),$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_SOURCE_INCLUDE),$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_SOURCE_EXCLUDE),$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_CACHE_KEY_FILE),$(LAYER_09-copy-source-1ab15aacac6a43f8c71defb28e9bcdefafcecfc5_ARCHIVE_FILE)))

File diff suppressed because it is too large


@ -1,249 +0,0 @@
# packages.yml
#
# packages.yml defines all the packages we are able to build for a single commit
# in this repo. A package means a single zip file containing the executable binary,
# and optionally other files if needed.
#
# packages.yml is a convenience file for the human management of large numbers of
# alternate packages, allowing default and templated values. We generate another
# artifact from this one, called packages.lock, which contains the fully expanded set
# of package and layer specs. This fully expanded file is in turn used to drive the
# build process, and as data for templating CI config.
# config contains packagespec config for this repo.
config:
  # product-repo is important to CI providers.
  product-repo: https://github.com/hashicorp/vault.git
  release-repo: https://github.com/hashicorp/vault-release.git
  # product-id is used by external systems to identify this product.
  # It can be any unique name, but a golang import path is ideal.
  product-id: github.com/hashicorp/vault
  circleci-project-slug: gh/hashicorp/vault
  circleci-host: circleci.com
  on-publish: create-github-release

# inputs are a set of environment variables that may affect the set of bytes produced
# for a given package. Note that a package is a zip file containing the binary, so
# the name of the binary does affect the package's bytes.
inputs:
  # defaults contains default input values for each package.
  # These values may be overridden on a per-package basis in the packages section.
  defaults:
    # PRODUCT_VERSION is the version of this product. Usually, this should be left
    # as 0.0.0-snapshot. When we build a release candidate, this is overridden in
    # a one-off fashion to produce that build.
    # This should be used in the PACKAGE_NAME template.
    PRODUCT_VERSION: 0.0.0-snapshot
    # GO_VERSION is the version of the Go toolchain to use to compile this package.
    GO_VERSION: 1.17.2
    # YARN_VERSION is the version of Yarn to install for the UI layer.
    YARN_VERSION: 1.19.1-1
    # Standard golang environment variables, passed to the 'go build' command.
    # You can use any standard environment variables here; any that you omit
    # will be omitted from the go build command too, meaning the build
    # container's defaults will be used.
    CGO_ENABLED: 0
    GO111MODULE: "off"
  # templates contain golang template strings. Each of these is rendered per package
  # using that package's values (including any default values), and then added to that
  # package.
  # Note that templates MAY NOT refer to each other, but may refer to any default or
  # package-specific inputs.
  templates:
    # BINARY_NAME is the name of the executable binary we compile and package.
    # It is the name users will use on the CLI to invoke the product.
    BINARY_NAME: 'vault{{if eq .GOOS "windows"}}.exe{{end}}'
    # PRODUCT_VERSION_MMP is just the major.minor.patch fields of the PRODUCT_VERSION.
    # Think semantic versioning (semver), although we do not version our binaries
    # using semver.
    PRODUCT_VERSION_MMP: >-
      {{with .PRODUCT_VERSION | strings.SplitN "-" 2}}{{index . 0}}{{end}}
    # PRODUCT_VERSION_PRE is just the prerelease field of the product version
    # (i.e. the bit after any '-', if there is one).
    PRODUCT_VERSION_PRE: >-
      {{with .PRODUCT_VERSION | strings.SplitN "-" 2}}{{if gt (len .) 1}}{{index . 1}}{{else}}"''"{{end}}{{end}}
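    # Worked example (illustrative): with PRODUCT_VERSION=1.9.0-rc1, the two
    # templates above render PRODUCT_VERSION_MMP=1.9.0 and
    # PRODUCT_VERSION_PRE=rc1; with the default 0.0.0-snapshot they render
    # 0.0.0 and snapshot respectively.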
# build-command is a templated bash script to be run in the final builder container
# to produce the package. It may refer to any of the inputs, including rendered
# templates, but not metadata.
#
# The build command is passed 3 environment variables, in addition to all those
# specified as inputs:
#
#   - PACKAGE_SOURCE_ID  The source ID (usually the git commit SHA, unless the build is dirty)
#   - OUTPUT_DIR         Directory to write the executable and zip file to (will exist already)
#   - PACKAGE_ZIP_NAME   The name of the package zip file to create (relative to OUTPUT_DIR)
#
# NOTE: You MUST NOT use single quotes in the build command, because at present we do no escaping.
build-command: VERSION_PKG_PATH=github.com/hashicorp/vault/sdk/version;
  GO111MODULE=on
  go build -v
  -tags ui
  -ldflags "
  -X $VERSION_PKG_PATH.GitCommit=$PACKAGE_SOURCE_ID
  -X $VERSION_PKG_PATH.Version={{.PRODUCT_VERSION_MMP}}
  -X $VERSION_PKG_PATH.VersionPrerelease={{.PRODUCT_VERSION_PRE}}"
  -o $OUTPUT_DIR/{{.BINARY_NAME}}
  && cd $OUTPUT_DIR && zip $PACKAGE_ZIP_NAME {{.BINARY_NAME}}
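# As a rough illustration (not the literal rendered output): for the
# linux/amd64 package with the default inputs above, the build-command
# renders to approximately
#
#   go build -v -tags ui
#     -ldflags "-X ...GitCommit=$PACKAGE_SOURCE_ID -X ...Version=0.0.0 -X ...VersionPrerelease=snapshot"
#     -o $OUTPUT_DIR/vault && cd $OUTPUT_DIR && zip $PACKAGE_ZIP_NAME vault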
# packages is the full set of packages we are able to build based on a single commit
# in this repo. Each package is a map where the keys are the names of environment
# variables provided to each build (think 'go build' invocation). Each package is
# expanded by first filling in any unspecified variables with those from defaults,
# and then rendering each template and adding the result to the map.
# Each package must result in a unique PACKAGE_NAME.
#
# The fully expanded set of packages is written to packages.lock. That file
# is a useful data source for building CI/CD pipelines.
packages:
  - inputs: { GOOS: darwin, GOARCH: amd64 }
  - inputs: { GOOS: darwin, GOARCH: arm64 }
  - inputs: { GOOS: freebsd, GOARCH: 386 }
  - inputs: { GOOS: freebsd, GOARCH: amd64 }
  - inputs: { GOOS: freebsd, GOARCH: arm }
  - inputs: { GOOS: linux, GOARCH: 386 }
  - inputs: { GOOS: linux, GOARCH: amd64 }
  - inputs: { GOOS: linux, GOARCH: arm }
  - inputs: { GOOS: linux, GOARCH: arm64 }
  - inputs: { GOOS: netbsd, GOARCH: 386 }
  - inputs: { GOOS: netbsd, GOARCH: amd64 }
  - inputs: { GOOS: openbsd, GOARCH: 386 }
  - inputs: { GOOS: openbsd, GOARCH: amd64 }
  - inputs: { GOOS: solaris, GOARCH: amd64 }
  - inputs: { GOOS: windows, GOARCH: 386 }
  - inputs: { GOOS: windows, GOARCH: amd64 }

# meta defines additional custom metadata about packages. This metadata does not
# participate in the PACKAGE_SPEC_ID, and so changing it does not directly change
# cache keys for layers or packages. In addition, metadata may not be overridden
# per-package and is not available to input or layer dockerfile templates.
meta:
  defaults:
    # No default metadata.
  templates:
    # BUILD_JOB_NAME is the name of a job to build this package in CI. Care must be
    # taken that it is both unique within this set of packages, as well as compatible
    # with the CI system's naming conventions.
    BUILD_JOB_NAME: >-
      {{.GOOS}}_{{.GOARCH}}_package
    # BUNDLE_NAME is used in archive filenames, as well as by downstream processes.
    BUNDLE_NAME: "vault_{{.PRODUCT_VERSION}}"

# package-aliases are a set of paths by which each package may be known; they are
# templates which may refer to any input or meta field defined in this file.
# Package aliases must be unique across all packages defined in this file.
# If any package-alias renders to empty, it is ignored. You can use this
# to produce aliases selectively depending on the package.
#
# Package aliases count as metadata because they do not affect the bytes produced
# per package.
#
# We use package aliases to give human-readable names to packages, and to arrange
# them in a directory hierarchy ready for further processing and distribution.
# Each alias is written as a relative symbolic link in .buildcache/packages/by-alias.
#
# At least one alias must render to a nonempty string.
package-aliases:
  - type: local
    template: >-
      {{.BUNDLE_NAME}}_{{.GOOS}}_{{.GOARCH}}.zip
  # public-hc-releases is the path to use for upload to releases.hashicorp.com;
  # it is empty if this package is not public (empty aliases are ignored).
  - type: public-hc-releases
    template: >-
      vault/{{.BUNDLE_NAME}}/{{.BUNDLE_NAME}}_{{.GOOS}}_{{.GOARCH}}.zip
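# Rendered examples (illustrative): for the linux/amd64 package with the
# default PRODUCT_VERSION, the aliases above become:
#   local:              vault_0.0.0-snapshot_linux_amd64.zip
#   public-hc-releases: vault/vault_0.0.0-snapshot/vault_0.0.0-snapshot_linux_amd64.zip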
# Layers determines the build layers, which are individually cacheable layers
# in a linear build. Each layer contains a Dockerfile. All the layers
# together produce the final builder image used to compile binaries.
#
# The partial Dockerfiles may contain references to any of the inputs
# including rendered input templates, but may not reference metadata.
# These Dockerfiles, once rendered, count as inputs and affect the
# package spec ID of each package.
#
# The order of layers is significant. The first layer must have a FROM line, and
# forms the base image. Each subsequent layer begins from the previous one.
#
# You can control cacheability by careful use of variables and ordering.
# Try to group things which change infrequently towards the top, and
# things which change more frequently towards the bottom.
#
# If there are things you want to cache that vary between packages defined in
# this file, put them last so that the greater bulk of work can be shared.
#
# NOTE: At present, changing the names and/or adding/removing layers may
# require updating the CI template file at .circleci/config/@build-release.yml.tpl
# which references some of these layers by name.
base-image: "debian:bullseye-20210927@sha256:eaa533c53fa886f87e48b9420579e1a4698026159cc9190a02f1e9fe88f590ba"
layers:
  - name: base
    dockerfile: |-
      RUN apt-get update -y && apt-get install --no-install-recommends -y -q \
          curl \
          zip \
          build-essential \
          gcc-multilib \
          g++-multilib \
          ca-certificates \
          git mercurial bzr \
          gnupg \
          libltdl-dev \
          libltdl7 \
          bash \
          && rm -rf /var/lib/apt/lists/*
  - name: install-go
    dockerfile: |-
      ENV GOPATH /gopath
      ENV GOROOT /goroot
      RUN mkdir $GOROOT && mkdir $GOPATH
      RUN curl https://storage.googleapis.com/golang/go{{.GO_VERSION}}.linux-amd64.tar.gz \
          | tar xzf - -C $GOROOT --strip-components=1
      ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH
  - name: install-go-tools
    dockerfile: |
      ENV GO111MODULE=off
      RUN go get golang.org/x/tools/cmd/goimports
  - name: set-workdir
    dockerfile: |
      ENV REPO=github.com/hashicorp/vault
      ENV DIR=$GOPATH/src/$REPO
      RUN mkdir -p $DIR
      WORKDIR $DIR
  - name: install-yarn
    dockerfile: |-
      RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
      RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
      RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
      RUN apt-get update -y && apt-get install -y -q nodejs yarn={{.YARN_VERSION}} \
          && rm -rf /var/lib/apt/lists/*
  - name: make-ui-folder
    dockerfile: |-
      RUN mkdir -p http/web_ui
  - name: ui-dependencies
    source-include: ui/package.json ui/yarn.lock
    dockerfile: |-
      RUN cd ui && yarn install
      RUN cd ui && npm rebuild node-sass
  - name: build-ui
    source-include: ui/
    dockerfile: |-
      RUN { while true; do sleep 30; echo keepalive; done; } & cd ui && yarn --verbose run build
  - name: go-modules
    source-include: "go.mod go.sum */go.mod */go.sum"
    dockerfile: |
      ENV GO111MODULE=on
      RUN go mod download
  # The final layer must contain all the source code we've not yet included.
  - name: copy-source
    source-include: "*.go"

78
packagespec.mk generated

@ -1,78 +0,0 @@
# ***
# WARNING: Do not EDIT or MERGE this file, it is generated by packagespec.
# ***
# packagespec.mk should be included at the end of your main Makefile,
# it provides hooks into packagespec targets, so you can run them
# from the root of your product repository.
#
# All packagespec-generated make targets assume they are invoked by
# targets in this file, which provides the necessary context for those
# other targets. Therefore, this file is not just a convenience but is
# currently necessary for the correct functioning of Packagespec.

# Since this file is included in other Makefiles, which may or may not want
# to use bash with these options, we explicitly set the shell for specific
# targets in this file rather than setting the global SHELL variable.
PACKAGESPEC_SHELL := /usr/bin/env bash -euo pipefail -c
# The RUN macro is used in place of the shell builtin in this file, so that
# we can use the PACKAGESPEC_SHELL rather than the default from the Makefile
# that includes this one.
RUN = $(shell $(PACKAGESPEC_SHELL) '$1')
# This can be overridden by the calling Makefile to write config to a different path.
PACKAGESPEC_CIRCLECI_CONFIG ?= .circleci/config.yml
PACKAGESPEC_HOOK_POST_CI_CONFIG ?= echo > /dev/null
SPEC_FILE_PATTERN := packages*.yml
# SPEC is the human-managed description of which packages we are able to build.
SPEC := $(call RUN,find . -mindepth 1 -maxdepth 1 -name '$(SPEC_FILE_PATTERN)')
ifneq ($(words $(SPEC)),1)
$(error Found $(words $(SPEC)) $(SPEC_FILE_PATTERN) files, need exactly 1: $(SPEC))
endif
SPEC_FILENAME := $(notdir $(SPEC))
SPEC_MODIFIER := $(SPEC_FILENAME:packages%.yml=%)
# LOCKDIR contains the lockfile and layer files.
LOCKDIR := packages$(SPEC_MODIFIER).lock
LOCKFILE := $(LOCKDIR)/pkgs.yml
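# Example (illustrative): a spec file named packages.yml yields an empty
# SPEC_MODIFIER and LOCKDIR=packages.lock; a file named packages-oss.yml
# would yield SPEC_MODIFIER=-oss and LOCKDIR=packages-oss.lock.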
export PACKAGE_SPEC_ID LAYER_SPEC_ID PRODUCT_REVISION PRODUCT_VERSION
# PASSTHROUGH_TARGETS are convenience aliases for targets defined in $(LOCKDIR)/Makefile
PASSTHROUGH_TARGETS := \
	build package-contents copy-package-contents build-all \
	aliases meta package package-meta package-meta-all \
	build-ci watch-ci \
	stage-config stage custom-build custom-build-config \
	bundle \
	orchestrator stop-orchestrator \
	list-custom-builds \
	list-staged-builds \
	list-promoted-builds \
	publish-config publish \
	workflow
.PHONY: $(PASSTHROUGH_TARGETS)
LOCAL_TARGETS := packages packagespec-circleci-config $(PACKAGESPEC_CIRCLECI_CONFIG)
# Set the shell for all packagespec targets.
$(PASSTHROUGH_TARGETS) $(LOCAL_TARGETS): SHELL := $(PACKAGESPEC_SHELL)
$(PASSTHROUGH_TARGETS):
	@PRODUCT_REPO_ROOT="$(call RUN,git rev-parse --show-toplevel)" $(MAKE) -C $(LOCKDIR) $@
# packages regenerates build and CI config using packagespec. This is only for
# internal HashiCorp use, as it has dependencies not available externally.
.PHONY: packages
packages:
	@command -v packagespec > /dev/null 2>&1 || { \
		echo "Please install packagespec."; \
		echo "Note: packagespec is only available to HashiCorp employees at present."; \
		exit 1; \
	}
	@packagespec lock -circleciconfig="$(PACKAGESPEC_CIRCLECI_CONFIG)"
	@$(MAKE) packagespec-circleci-config
packagespec-circleci-config:
	@$(PACKAGESPEC_HOOK_POST_CI_CONFIG)


@ -1,130 +0,0 @@
#!/usr/bin/env bash
# This script takes 2 positional args:
#
# 1: The version of packagespec to upgrade a branch to (e.g. 1.2.3)
# 2: The target branch to upgrade.
#
# It works in a temp directory, so does not interfere with your work tree
# or git index or config.
#
# It does this:
#
# 1. Inspects your remote config for URL remotes.
# 2. Clones this directory into a temp dir.
# 3. Sets the remotes in the clone to match the remotes you have configured.
# 4. Fetches everything from all remotes.
# 5. Determines which remote your target branch is on. If it's on more than
#    one remote, exits with an error, as there would be no way to choose.
# 6. Checks out a new branch named packagespec<version>/<target branch name>
# 7. Runs packagespec upgrade -version <version>
# 8. Commits the relevant paths with the message 'packagespec upgrade -version <version>'
# 9. Pushes the new branch to the same remote as the original branch.
# 10. Tells you to open a PR.
# VERSION is the packagespec version to upgrade to.
VERSION="$1"
BRANCH="$2"
FLAG="$3"
REPO_NAME="$4"
BINNAME="$0"
usage() { echo "usage: $BINNAME <packagespec version> <branch name> [-pr PRODUCT_NAME]"; }
if [ -z "$VERSION" ]; then
  usage; exit 1
fi
if [ -z "$BRANCH" ]; then
  usage; exit 1
fi
PR=false
if [ -n "$FLAG" ]; then
  if [ "$FLAG" = "-pr" ]; then
    if [ -z "$REPO_NAME" ]; then
      usage; exit 1
    fi
    PR=true
  else
    usage; exit 1
  fi
fi
set -euo pipefail
declare -A REMOTES
# url_remotes lists remotes along with their push URLs, where they are
# not filesystem paths (i.e. do not start with / or . ).
url_remotes() { git remote -v | grep -F "(push)" | sed -E 's/[[:space:]]+\(push\)//g' | grep -Ev "\t(/|\.)"; }
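# Example (hypothetical): given 'git remote -v' output like
#
#   origin  git@github.com:hashicorp/vault.git (fetch)
#   origin  git@github.com:hashicorp/vault.git (push)
#   local   /tmp/vault (push)
#
# url_remotes keeps only the 'origin' push line (with the '(push)' suffix
# stripped), dropping fetch entries and filesystem-path remotes.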
for R in $(url_remotes | cut -f1); do
  REMOTES[$R]="$(url_remotes | grep -E "^$R\t" | cut -f2)"
done
for R in "${!REMOTES[@]}"; do
  echo "Remote: $R = ${REMOTES[$R]}"
done
TEMP=".upgrade-packagespec"
mkdir -p "$TEMP"
echo "*" > "$TEMP/.gitignore"
CLONEDIR="$TEMP/product-repo"
if ! [ -d "$CLONEDIR/.git" ]; then
  git clone . "$CLONEDIR"
fi
cd "$CLONEDIR"
echo "==> WORKING IN TEMP DIR: $CLONEDIR"
git reset --hard
# Remove existing remotes
for R in $(git remote); do git remote rm "$R"; done
# Add remotes from original checkout dir
for R in "${!REMOTES[@]}"; do
  git remote add "$R" "${REMOTES[$R]}"
done
# Fetch everything from these remotes, ignore errors.
git remote update || true
BRANCH_ESCAPED="$(sed -E -e 's/\./\\./g' -e 's/\+/\\+/g' -e 's/\//\\\//g' <<< "$BRANCH")"
# Determine which remotes the branch is on.
declare -A BRANCH_REMOTES
for R in "${!REMOTES[@]}"; do
  if git branch -a | grep -E "^[[:space:]]*remotes/$R/$BRANCH_ESCAPED\$"; then
    TARGET_REMOTE="$R"
    BRANCH_REMOTES[$R]=1
  fi
done
COUNT="${#BRANCH_REMOTES[@]}"
if [ "$COUNT" -ne "1" ]; then
  echo "==> ERROR: Branch $BRANCH found on $COUNT remotes; want exactly 1"
  exit 1
fi
# Checkout the target update branch.
git checkout "$BRANCH"
git reset --hard "$TARGET_REMOTE/$BRANCH"
NEW_BRANCH="packagespec$VERSION/$BRANCH"
git checkout -B "$NEW_BRANCH" "$BRANCH"
COMMAND="packagespec upgrade -version $VERSION"
echo "==> Running $COMMAND"
$COMMAND
git add .circleci/ packages*.lock packagespec*
git commit -m "$COMMAND"
git push -u "$TARGET_REMOTE" "$NEW_BRANCH"
echo "==> All done: upgrade pushed to branch $NEW_BRANCH on ${REMOTES[$TARGET_REMOTE]}"
echo "==> ACTIONS FOR YOU: Open a PR with base: $BRANCH compare: $NEW_BRANCH"
if ! $PR; then
  exit 0
fi
# Open browser with PR ready:
open https://github.com/hashicorp/$REPO_NAME/compare/$BRANCH...$NEW_BRANCH?expand=1

12
scripts/version.sh Executable file

@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -euo pipefail
version_file=$1
version=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "${version_file}")
prerelease=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "${version_file}")
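# Example (hypothetical version_file contents):
#
#   Version           = "1.9.0"
#   VersionPrerelease = "rc1"
#
# The awk lines above would extract version=1.9.0 and prerelease=rc1, so the
# script prints "1.9.0-rc1"; with an empty VersionPrerelease it prints "1.9.0".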
if [ -n "$prerelease" ]; then
  echo "${version}-${prerelease}"
else
  echo "${version}"
fi


@ -543,6 +543,9 @@ func (c *Core) handleCancelableRequest(ctx context.Context, req *logical.Request
		}
		break
	}
	if token == nil {
		return logical.ErrorResponse("bad token"), logical.ErrPermissionDenied
	}
	_, nsID := namespace.SplitIDFromString(token.(string))
	if nsID != "" {
		ns, err := NamespaceByID(ctx, nsID, c)


@ -3348,6 +3348,9 @@ func (ts *TokenStore) tokenStoreRoleCreateUpdate(ctx context.Context, req *logic
	oldEntryTokenType := entry.TokenType
	if tokenTypeRaw, ok := data.Raw["token_type"]; ok {
		tokenTypeStr = new(string)
		if tokenTypeRaw == nil {
			return logical.ErrorResponse("Invalid 'token_type' value: null"), nil
		}
		*tokenTypeStr = tokenTypeRaw.(string)
		delete(data.Raw, "token_type")
		entry.TokenType = logical.TokenTypeDefault


@ -162,8 +162,8 @@ default value in the `"/sys/config/ui"` [API endpoint](/api/system/config-ui).
- `x_forwarded_for_hop_skips` `(string: "0")` The number of addresses that will be
skipped from the _rear_ of the set of hops. For instance, for a header value
of `1.2.3.4, 2.3.4.5, 3.4.5.6`, if this value is set to `"1"`, the address that
will be used as the originating client IP is `2.3.4.5`.
of `1.2.3.4, 2.3.4.5, 3.4.5.6, 4.5.6.7`, if this value is set to `"1"`, the address that
will be used as the originating client IP is `3.4.5.6`.
- `x_forwarded_for_reject_not_authorized` `(string: "true")` If set to false,
if there is an X-Forwarded-For header in a connection from an unauthorized